@inproceedings{bessani2015biobankcloud,
  title     = {{BiobankCloud}: A Platform for the Secure Storage, Sharing, and Processing of Large Biomedical Data Sets},
  author    = {Bessani, Alysson and Brandt, J{\"o}rgen and Bux, Marc and Cogo, Vinicius and Dimitrova, Lora and Dowling, Jim and Gholami, Ali and Hakimzadeh, Kamal and Hummel, Michael and Ismail, Mahmoud and Laure, Erwin and Leser, Ulf and Litton, Jan-Eric and Martinez, Roxanna and Niazi, Salman and Reichel, Jane and Zimmermann, Karin},
  booktitle = {The First International Workshop on Data Management and Analytics for Medicine and Healthcare (DMAH 2015)},
  year      = {2015},
  month     = sep,
  abstract  = {Biobanks store and catalog human biological material that is increasingly being digitized using next-generation sequencing (NGS). There is, however, a computational bottleneck, as existing software systems are not scalable and secure enough to store and process the incoming wave of genomic data from NGS machines. In the BiobankCloud project, we are building a Hadoop-based platform for the secure storage, sharing, and parallel processing of genomic data. We extended Hadoop to include support for multi-tenant studies, reduced storage requirements with erasure coding, and added support for extensible and consistent metadata. On top of Hadoop, we built a scalable scientific workflow engine featuring a proper workflow definition language focusing on simple integration and chaining of existing tools, adaptive scheduling on Apache Yarn, and support for iterative dataflows. Our platform also supports the secure sharing of data across different, distributed Hadoop clusters. The software is easily installed and comes with a user-friendly web interface for running, managing, and accessing data sets behind a secure 2-factor authentication. Initial tests have shown that the engine scales well to dozens of nodes. The entire system is open-source and includes pre-defined workflows for popular tasks in biomedical data analysis, such as variant identification, differential transcriptome analysis using RNA-Seq, and analysis of miRNA-Seq and ChIP-Seq data.},
}
  title = {Cuneiform: A Functional Language for Large Scale Scientific Data Analysis},
  author = {Brandt, J{\"o}rgen and Bux, Marc and Leser, Ulf},
  booktitle = {Proceedings of the Workshops of the EDBT/ICDT},
  year = {2015},
  address = {Brussels, Belgium},
  month = {March},
  pages = {17--26},
  volume = {1330},
  abstract = {The need to analyze massive scientific data sets on the one hand and the availability of distributed compute resources with an increasing number of CPU cores on the other hand have promoted the development of a variety of languages and systems for parallel, distributed data analysis. Among them are data-parallel query languages such as Pig Latin or Spark as well as scientific workflow languages such as Swift or Pegasus DAX. While data-parallel query languages focus on the exploitation of data parallelism, scientific workflow languages focus on the integration of external tools and libraries. However, a language that combines easy integration of arbitrary tools, treated as black boxes, with the ability to fully exploit data parallelism does not exist yet. Here, we present Cuneiform, a novel language for large-scale scientific data analysis. We highlight its functionality with respect to a set of desirable features for such languages, introduce its syntax and semantics by example, and show its flexibility and conciseness with use cases, including a complex real-life workflow from the area of genome research. Cuneiform scripts are executed dynamically on the workflow execution platform Hi-WAY which is based on Hadoop YARN. The language Cuneiform, including tool support for programming, workflow visualization, debugging, logging, and provenance-tracing, and the parallel execution engine Hi-WAY are fully implemented.},
  url = {}
  author = {Bux, Marc and Brandt, J\"{o}rgen and Lipka, Carsten and Hakimzadeh, Kamal and Dowling, Jim and Leser, Ulf},
  title = {SAASFEE: Scalable Scientific Workflow Execution Engine},
  journal = {Proc. VLDB Endow.},
  issue_date = {August 2015},
  volume = {8},
  number = {12},
  month = aug,
  year = {2015},
  issn = {2150-8097},
  pages = {1892--1895},
  numpages = {4},
  url = {},
  doi = {10.14778/2824032.2824094},
  acmid = {2824094},
  publisher = {VLDB Endowment},
  abstract = {Across many fields of science, primary data sets like sensor read-outs, time series, and genomic sequences are analyzed by complex chains of specialized tools and scripts exchanging intermediate results in domain-specific file formats. Scientific workflow management systems (SWfMSs) support the development and execution of these tool chains by providing workflow specification languages, graphical editors, fault-tolerant execution engines, etc. However, many SWfMSs are not prepared to handle large data sets because of inadequate support for distributed computing. On the other hand, most SWfMSs that do support distributed computing only allow static task execution orders. We present SAASFEE, a SWfMS which runs arbitrarily complex workflows on Hadoop YARN. Workflows are specified in Cuneiform, a functional workflow language focusing on parallelization and easy integration of existing software. Cuneiform workflows are executed on Hi-WAY, a higher-level scheduler for running workflows on YARN. Distinct features of SAASFEE are the ability to execute iterative workflows, an adaptive task scheduler, re-executable provenance traces, and compatibility to selected other workflow systems. In the demonstration, we present all components of SAASFEE using real-life workflows from the field of genomics.}
  title = {Hi-WAY: Execution of Scientific Workflows on Hadoop YARN},
  author = {Bux, Marc and Brandt, J{\"{o}}rgen and Witt, Carl and Dowling, Jim and Leser, Ulf},
  booktitle = {Proceedings of the 20th International Conference on Extending Database Technology (EDBT).},
  year = {2017},
  address = {Venice, Italy},
  abstract = {Scientific workflows provide a means to model, execute, and exchange the increasingly complex analysis pipelines necessary for today’s data-driven science. However, existing scientific workflow management systems (SWfMSs) are often limited to a single workflow language and lack adequate support for large-scale data analysis. On the other hand, current distributed dataflow systems are based on a semistructured data model, which makes integration of arbitrary tools cumbersome or forces re-implementation. We present the scientific workflow execution engine Hi-WAY, which implements a strict black-box view on tools to be integrated and data to be processed. Its generic yet powerful execution model allows Hi-WAY to execute workflows specified in a multitude of different languages. Hi-WAY compiles workflows into schedules for Hadoop YARN, harnessing its proven scalability. It allows for iterative and recursive workflow structures and optimizes performance through adaptive and data-aware scheduling. Reproducibility of workflow executions is achieved through automated setup of infrastructures and re-executable provenance traces. In this application paper we discuss limitations of current SWfMSs regarding scalable data analysis, describe the architecture of Hi-WAY, highlight its most important features, and report on several large-scale experiments from different scientific domains.}
  title = {Computation semantics of the functional scientific workflow language Cuneiform},
  volume = {27},
  doi = {10.1017/S0956796817000119},
  journal = {Journal of Functional Programming},
  publisher = {Cambridge University Press},
  author = {Brandt, Jörgen and Reisig, Wolfgang and Leser, Ulf},
  year = {2017},
  pages = {e22},
  abstract = {Cuneiform is a minimal functional programming language for large-scale scientific data analysis. Implementing a strict black-box view on external operators and data, it allows the direct embedding of code in a variety of external languages like Python or R, provides data-parallel higher order operators for processing large partitioned data sets, allows conditionals and general recursion, and has a naturally parallelizable evaluation strategy suitable for multi-core servers and distributed execution environments like Hadoop, HTCondor, or distributed Erlang. Cuneiform has been applied in several data-intensive research areas including remote sensing, machine learning, and bioinformatics, all of which critically depend on the flexible assembly of pre-existing tools and libraries written in different languages into complex pipelines. This paper introduces the computation semantics for Cuneiform. It presents Cuneiform's abstract syntax, a simple type system, and the semantics of evaluation. Providing an unambiguous specification of the behavior of Cuneiform eases the implementation of interpreters which we showcase by providing a concise reference implementation in Erlang. The similarity of Cuneiform's syntax to the simply typed lambda calculus puts Cuneiform in perspective and allows a straightforward discussion of its design in the context of functional programming. Moreover, the simple type system allows the deduction of the language's safety up to black-box operators. Last, the formulation of the semantics also permits the verification of compilers to and from other workflow languages.}

This file was generated by bibtex2html 1.98.