summaryrefslogtreecommitdiffstats
path: root/crates/rebel
diff options
context:
space:
mode:
Diffstat (limited to 'crates/rebel')
-rw-r--r--crates/rebel/Cargo.toml23
-rw-r--r--crates/rebel/src/driver.rs481
-rw-r--r--crates/rebel/src/main.rs87
-rw-r--r--crates/rebel/src/recipe.rs167
-rw-r--r--crates/rebel/src/template.rs41
5 files changed, 799 insertions, 0 deletions
diff --git a/crates/rebel/Cargo.toml b/crates/rebel/Cargo.toml
new file mode 100644
index 0000000..9eba0fa
--- /dev/null
+++ b/crates/rebel/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "rebel"
+version = "0.1.0"
+authors = ["Matthias Schiffer <mschiffer@universe-factory.net>"]
+license = "MIT"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+rebel-common = { path = "../rebel-common" }
+rebel-parse = { path = "../rebel-parse" }
+rebel-resolve = { path = "../rebel-resolve" }
+rebel-runner = { path = "../rebel-runner" }
+
+clap = { version = "4.0.0", features = ["derive"] }
+handlebars = "5.1.2"
+indoc = "2.0.4"
+lazy_static = "1.4.0"
+nix = { version = "0.28.0", features = ["poll", "signal"] }
+serde = { version = "1", features = ["derive"] }
+serde_yaml = "0.9"
+walkdir = "2"
diff --git a/crates/rebel/src/driver.rs b/crates/rebel/src/driver.rs
new file mode 100644
index 0000000..e4de2a7
--- /dev/null
+++ b/crates/rebel/src/driver.rs
@@ -0,0 +1,481 @@
+use std::{
+ collections::{HashMap, HashSet},
+ iter,
+ os::unix::{net::UnixStream, prelude::*},
+};
+
+use indoc::indoc;
+use nix::{
+ poll,
+ sys::{
+ signal,
+ signalfd::{SfdFlags, SignalFd},
+ },
+};
+
+use rebel_common::{error::*, string_hash::*, types::*};
+use rebel_resolve::{
+ self as resolve,
+ context::{Context, OutputRef, TaskRef},
+ paths,
+ task::*,
+};
+use rebel_runner::Runner;
+
+use crate::template;
+
/// Tracks which tasks have finished and what they produced.
#[derive(Debug)]
pub struct CompletionState<'ctx> {
    ctx: &'ctx Context,
    // Output of every finished task, keyed by its task reference.
    tasks_done: HashMap<TaskRef<'ctx>, TaskOutput>,
}
+
impl<'ctx> CompletionState<'ctx> {
    /// Creates an empty completion state over the resolved context.
    pub fn new(ctx: &'ctx Context) -> Self {
        CompletionState {
            ctx,
            tasks_done: Default::default(),
        }
    }

    // Treats both "depends" and "parent" as dependencies
    /// Returns true when every task `task_ref` depends on is in `tasks_done`.
    /// Panics (via `unwrap`) if dependency resolution fails.
    fn deps_satisfied(&self, task_ref: &TaskRef) -> bool {
        resolve::get_dependent_tasks(self.ctx, task_ref)
            .map_err(|_| Error::new(format!("invalid dependency for {}", task_ref)))
            .unwrap()
            .into_iter()
            .all(|dep| self.tasks_done.contains_key(&dep))
    }

    /// Builds the download (`Fetch`) dependencies of a task, evaluating each
    /// fetch filename as a template against the task's arguments.
    fn fetch_deps(&self, task: &TaskRef<'ctx>) -> Result<Vec<Dependency>> {
        let task_def = &self.ctx[task];
        task_def
            .fetch
            .iter()
            .map(|Fetch { name, sha256 }| {
                Ok(Dependency::Fetch {
                    name: template::ENGINE.eval(name, &task.args).with_context(|| {
                        format!("Failed to evaluate fetch filename for task {}", task)
                    })?,
                    target_dir: paths::TASK_DLDIR.to_string(),
                    sha256: *sha256,
                })
            })
            .collect()
    }

    /// Expands `deps` to their runtime-dependency closure and turns every
    /// recorded output of an already-finished task into a `Dependency::Task`
    /// installed at `path`. Outputs missing from `tasks_done[..].outputs`
    /// are silently skipped; an unfinished task in the closure panics
    /// (indexing `tasks_done`), which the scheduler's ordering should prevent.
    fn dep_closure<I>(&self, deps: I, path: &'ctx str) -> impl Iterator<Item = Dependency> + '_
    where
        I: IntoIterator<Item = OutputRef<'ctx>>,
    {
        resolve::runtime_depends(self.ctx, deps)
            .expect("invalid runtime depends")
            .into_iter()
            .filter_map(|dep| self.tasks_done[&dep.task].outputs.get(dep.output))
            .map(|&output| Dependency::Task {
                output,
                path: path.to_string(),
            })
    }

    /// Build dependencies are installed at the sandbox root (empty path).
    fn build_deps(&self, task: &TaskRef<'ctx>) -> Result<impl Iterator<Item = Dependency> + '_> {
        Ok(self.dep_closure(
            self.ctx
                .get_build_depends(task)
                .with_context(|| format!("invalid build depends for {}", task))?,
            "",
        ))
    }

    /// Host dependencies are installed into the task's sysroot directory.
    fn host_deps(&self, task: &TaskRef<'ctx>) -> Result<impl Iterator<Item = Dependency> + '_> {
        Ok(self.dep_closure(
            self.ctx
                .get_host_depends(task)
                .with_context(|| format!("invalid depends for {}", task))?,
            paths::TASK_SYSROOT,
        ))
    }

    /// Collects fetch, build and host dependencies into one deduplicated set.
    fn task_deps(&self, task: &TaskRef<'ctx>) -> Result<HashSet<Dependency>> {
        let fetch_deps = self.fetch_deps(task)?.into_iter();
        let build_deps = self.build_deps(task)?;
        let host_deps = self.host_deps(task)?;

        Ok(fetch_deps.chain(build_deps).chain(host_deps).collect())
    }

    /// Returns the layer hashes of the task's parent chain, oldest ancestor
    /// first; parents that produced no layer are skipped.
    fn task_ancestors(&self, task_ref: &TaskRef<'ctx>) -> Vec<LayerHash> {
        let Some(parent) = self
            .ctx
            .get_parent_depend(task_ref)
            .expect("invalid parent depends")
        else {
            return vec![];
        };

        // Recurse first so the oldest ancestor ends up at the front.
        let mut chain = self.task_ancestors(&parent);
        if let Some(layer) = self.tasks_done[&parent].layer {
            chain.push(layer);
        }
        chain
    }

    /// Prints input hash, layer hash and output hashes of every completed
    /// task, sorted by the task's display name.
    fn print_summary(&self) {
        println!();
        println!("Summary:");

        let mut tasks: Box<[_]> = self.tasks_done.iter().collect();
        // sort_by_cached_key: the formatted name is computed once per task.
        tasks.sort_by_cached_key(|(task, _)| format!("{:#}", task));
        for (task_ref, task) in tasks.iter() {
            println!();
            println!("{:#}", task_ref);
            if let Some(hash) = task.input_hash {
                println!("  input: {}", hash);
            }
            if let Some(hash) = task.layer {
                println!("  layer: {}", hash);
            }
            if !task.outputs.is_empty() {
                println!("  outputs:");

                let mut outputs: Box<[_]> = task.outputs.iter().collect();
                outputs.sort_by_key(|(output, _)| *output);
                for (output, hash) in outputs.iter() {
                    println!("    {}: {}", output, hash);
                }
            }
        }
    }
}
+
/// Outcome of attempting to start a task.
#[derive(Debug)]
enum SpawnResult {
    /// The task was handed to the runner; its result arrives on this socket.
    Spawned(UnixStream),
    /// The task had an empty action and completed without running.
    Skipped(TaskOutput),
}
+
/// Reason `wait_for_task` returned early.
#[derive(Debug, PartialEq, Eq, Hash)]
enum TaskWaitResult {
    /// A running task reported an error.
    Failed,
    /// A signal (SIGINT) was received on the signal file descriptor.
    Interrupted,
}
+
/// Schedules the resolved task set and tracks its execution.
#[derive(Debug)]
pub struct Driver<'ctx> {
    // Reverse dependency edges: task -> tasks waiting on it.
    rdeps: HashMap<TaskRef<'ctx>, Vec<TaskRef<'ctx>>>,
    // Tasks passed to the runner with `Task::force_run` set.
    force_run: HashSet<TaskRef<'ctx>>,
    // Tasks with at least one unfinished dependency.
    tasks_blocked: HashSet<TaskRef<'ctx>>,
    // Tasks whose dependencies are all satisfied, ready to be spawned.
    tasks_runnable: Vec<TaskRef<'ctx>>,
    // Currently running tasks, keyed by the raw fd of the runner socket.
    tasks_running: HashMap<RawFd, (UnixStream, TaskRef<'ctx>)>,
    // Completion bookkeeping shared with the summary printer.
    state: CompletionState<'ctx>,
}
+
impl<'ctx> Driver<'ctx> {
    /// Builds a driver for `taskset`: records reverse-dependency edges and
    /// partitions the tasks into initially blocked and immediately runnable.
    pub fn new(
        ctx: &'ctx Context,
        taskset: HashSet<TaskRef<'ctx>>,
        force_run: HashSet<TaskRef<'ctx>>,
    ) -> Result<Self> {
        let mut driver = Driver {
            rdeps: Default::default(),
            force_run,
            tasks_blocked: Default::default(),
            tasks_runnable: Default::default(),
            tasks_running: Default::default(),
            state: CompletionState::new(ctx),
        };

        for task in taskset {
            let mut has_depends = false;
            for dep in resolve::get_dependent_tasks(ctx, &task)
                .map_err(|_| Error::new(format!("invalid dependency for {}", task)))?
            {
                // Reverse edge: when `dep` finishes, `task` may unblock.
                let rdep = driver.rdeps.entry(dep.clone()).or_default();
                rdep.push(task.clone());
                has_depends = true;
            }

            if has_depends {
                driver.tasks_blocked.insert(task);
            } else {
                driver.tasks_runnable.push(task);
            }
        }

        Ok(driver)
    }

    // Shell preamble prepended to every task script: PATH/workdir setup,
    // reproducibility (SOURCE_DATE_EPOCH) and build-machine toolchain vars.
    const PREAMBLE: &'static str = indoc! {"
        export PATH={{build.prefix}}/sbin:{{build.prefix}}/bin:$PATH
        cd {{workdir}}

        export SOURCE_DATE_EPOCH=1

        export AR_FOR_BUILD=ar
        export AS_FOR_BUILD=as
        export DLLTOOL_FOR_BUILD=dlltool
        export CC_FOR_BUILD=gcc
        export CXX_FOR_BUILD=g++
        export GCC_FOR_BUILD=gcc
        export GFORTRAN_FOR_BUILD=gfortran
        export GOC_FOR_BUILD=goc
        export LD_FOR_BUILD=ld
        export LIPO_FOR_BUILD=lipo
        export NM_FOR_BUILD=nm
        export OBJCOPY_FOR_BUILD=objcopy
        export OBJDUMP_FOR_BUILD=objdump
        export RANLIB_FOR_BUILD=ranlib
        export STRIP_FOR_BUILD=strip
        export WINDRES_FOR_BUILD=windres
        export WINDMC_FOR_BUILD=windmc
    "};
    // Extra preamble when the task has a `build_to_host` argument set:
    // toolchain vars prefixed with the build->host cross-compile prefix.
    const PREAMBLE_HOST: &'static str = indoc! {"
        export AR={{build_to_host.cross_compile}}ar
        export AS={{build_to_host.cross_compile}}as
        export DLLTOOL={{build_to_host.cross_compile}}dlltool
        export CC={{build_to_host.cross_compile}}gcc
        export CXX={{build_to_host.cross_compile}}g++
        export GCC={{build_to_host.cross_compile}}gcc
        export GFORTRAN={{build_to_host.cross_compile}}gfortran
        export GOC={{build_to_host.cross_compile}}goc
        export LD={{build_to_host.cross_compile}}ld
        export LIPO={{build_to_host.cross_compile}}lipo
        export NM={{build_to_host.cross_compile}}nm
        export OBJCOPY={{build_to_host.cross_compile}}objcopy
        export OBJDUMP={{build_to_host.cross_compile}}objdump
        export RANLIB={{build_to_host.cross_compile}}ranlib
        export STRIP={{build_to_host.cross_compile}}strip
        export WINDRES={{build_to_host.cross_compile}}windres
        export WINDMC={{build_to_host.cross_compile}}windmc
    "};
    // Extra preamble when the task has a `build_to_target` argument set:
    // *_FOR_TARGET toolchain vars with the build->target prefix.
    const PREAMBLE_TARGET: &'static str = indoc! {"
        export AR_FOR_TARGET={{build_to_target.cross_compile}}ar
        export AS_FOR_TARGET={{build_to_target.cross_compile}}as
        export DLLTOOL_FOR_TARGET={{build_to_target.cross_compile}}dlltool
        export CC_FOR_TARGET={{build_to_target.cross_compile}}gcc
        export CXX_FOR_TARGET={{build_to_target.cross_compile}}g++
        export GCC_FOR_TARGET={{build_to_target.cross_compile}}gcc
        export GFORTRAN_FOR_TARGET={{build_to_target.cross_compile}}gfortran
        export GOC_FOR_TARGET={{build_to_target.cross_compile}}goc
        export LD_FOR_TARGET={{build_to_target.cross_compile}}ld
        export LIPO_FOR_TARGET={{build_to_target.cross_compile}}lipo
        export NM_FOR_TARGET={{build_to_target.cross_compile}}nm
        export OBJCOPY_FOR_TARGET={{build_to_target.cross_compile}}objcopy
        export OBJDUMP_FOR_TARGET={{build_to_target.cross_compile}}objdump
        export RANLIB_FOR_TARGET={{build_to_target.cross_compile}}ranlib
        export STRIP_FOR_TARGET={{build_to_target.cross_compile}}strip
        export WINDRES_FOR_TARGET={{build_to_target.cross_compile}}windres
        export WINDMC_FOR_TARGET={{build_to_target.cross_compile}}windmc
    "};

    /// Selects the preamble snippets for a task based on which cross-compile
    /// argument sets its args contain.
    fn task_preamble(task_ref: &TaskRef<'ctx>) -> Vec<&'static str> {
        let mut ret = vec![Self::PREAMBLE];

        if task_ref.args.contains_key("build_to_host") {
            ret.push(Self::PREAMBLE_HOST);
        }
        if task_ref.args.contains_key("build_to_target") {
            ret.push(Self::PREAMBLE_TARGET);
        }
        ret
    }

    /// Records a finished task and moves any reverse dependency whose
    /// dependencies are now all satisfied from blocked to runnable.
    fn update_runnable(&mut self, task_ref: TaskRef<'ctx>, task_output: TaskOutput) {
        // Look up rdeps before `task_ref` is moved into `tasks_done`.
        let rdeps = self.rdeps.get(&task_ref);

        self.state.tasks_done.insert(task_ref, task_output);

        for rdep in rdeps.unwrap_or(&Vec::new()) {
            if !self.tasks_blocked.contains(rdep) {
                continue;
            }
            if self.state.deps_satisfied(rdep) {
                self.tasks_blocked.remove(rdep);
                self.tasks_runnable.push(rdep.clone());
            }
        }
    }

    /// Prepares a task (dependencies, outputs, command template) and hands it
    /// to the runner. Tasks with an empty action are skipped outright.
    fn spawn_task(&self, task_ref: &TaskRef<'ctx>, runner: &Runner) -> Result<SpawnResult> {
        let task_def = &self.state.ctx[task_ref];
        if task_def.action.is_empty() {
            println!("Skipping empty task {:#}", task_ref);
            return Ok(SpawnResult::Skipped(TaskOutput::default()));
        }

        let task_deps = self.state.task_deps(task_ref)?;
        // Map each declared output name to its path below the task destdir.
        let task_output = task_def
            .output
            .iter()
            .map(|(name, Output { path, .. })| {
                let output_path = if let Some(path) = path {
                    format!("{}/{}", paths::TASK_DESTDIR, path)
                } else {
                    paths::TASK_DESTDIR.to_string()
                };
                (name.clone(), output_path)
            })
            .collect();

        let ancestors = self.state.task_ancestors(task_ref);

        // Full script = preamble snippets + the task's action, rendered as a
        // shell-escaped template.
        let mut run = Self::task_preamble(task_ref);
        run.push(&task_def.action.run);

        let command = template::ENGINE
            .eval_sh(&run.concat(), &task_ref.args)
            .with_context(|| {
                format!("Failed to evaluate command template for task {}", task_ref)
            })?;

        let rootfs = self.state.ctx.get_rootfs();
        let task = Task {
            label: format!("{:#}", task_ref),
            command,
            workdir: paths::TASK_WORKDIR.to_string(),
            rootfs: rootfs.0,
            ancestors,
            depends: task_deps,
            outputs: task_output,
            // Pin the rootfs so it is not garbage-collected while in use.
            pins: HashMap::from([rootfs.clone()]),
            force_run: self.force_run.contains(task_ref),
        };

        Ok(SpawnResult::Spawned(runner.spawn(&task)))
    }

    /// Spawns one task; running tasks are tracked by their socket fd,
    /// skipped tasks are completed immediately.
    fn run_task(&mut self, task_ref: TaskRef<'ctx>, runner: &Runner) -> Result<()> {
        match self.spawn_task(&task_ref, runner)? {
            SpawnResult::Spawned(socket) => {
                // An fd collision would mean bookkeeping is corrupt.
                assert!(self
                    .tasks_running
                    .insert(socket.as_raw_fd(), (socket, task_ref))
                    .is_none());
            }
            SpawnResult::Skipped(result) => {
                self.update_runnable(task_ref, result);
            }
        }
        Ok(())
    }

    /// Drains the runnable queue, spawning every ready task.
    fn run_tasks(&mut self, runner: &Runner) -> Result<()> {
        while let Some(task_ref) = self.tasks_runnable.pop() {
            self.run_task(task_ref, runner)?;
        }
        Ok(())
    }

    /// Polls all running-task sockets plus the signal fd and processes every
    /// readable one. Returns `Some(..)` on failure or interrupt, `None` when
    /// only successful completions were seen.
    fn wait_for_task(&mut self, signal_fd: &mut SignalFd) -> Result<Option<TaskWaitResult>> {
        let mut pollfds: Vec<_> = self
            .tasks_running
            .values()
            .map(|(socket, _)| socket.as_fd())
            .chain(iter::once(signal_fd.as_fd()))
            .map(|fd| poll::PollFd::new(fd, poll::PollFlags::POLLIN))
            .collect();

        // Block until at least one fd is ready (retry on zero-ready returns).
        while poll::poll(&mut pollfds, poll::PollTimeout::NONE).context("poll()")? == 0 {}

        // Copy out (fd, revents) pairs so the borrow of `pollfds` ends before
        // `tasks_running` is mutated below.
        let pollevents: Vec<_> = pollfds
            .into_iter()
            .map(|pollfd| {
                (
                    pollfd.as_fd().as_raw_fd(),
                    pollfd.revents().expect("Unknown events in poll() return"),
                )
            })
            .collect();

        for (fd, events) in pollevents {
            if !events.contains(poll::PollFlags::POLLIN) {
                // Any non-POLLIN event (HUP/ERR/...) is unexpected here.
                if events.intersects(!poll::PollFlags::POLLIN) {
                    return Err(Error::new(
                        "Unexpected error status for socket file descriptor",
                    ));
                }
                continue;
            }

            if fd == signal_fd.as_raw_fd() {
                // Consume the pending signal and report the interrupt.
                let _signal = signal_fd.read_signal().expect("read_signal()").unwrap();
                return Ok(Some(TaskWaitResult::Interrupted));
            }

            let (socket, task_ref) = self.tasks_running.remove(&fd).unwrap();

            match Runner::result(&socket) {
                Ok(task_output) => {
                    self.update_runnable(task_ref, task_output);
                }
                Err(error) => {
                    eprintln!("{}", error);
                    return Ok(Some(TaskWaitResult::Failed));
                }
            }
        }

        Ok(None)
    }

    /// True when no task is blocked, queued or running.
    fn is_done(&self) -> bool {
        self.tasks_blocked.is_empty()
            && self.tasks_runnable.is_empty()
            && self.tasks_running.is_empty()
    }

    /// Blocks SIGINT for this thread and returns a signalfd that receives it,
    /// so interrupts are handled synchronously in the poll loop.
    fn setup_signalfd() -> Result<SignalFd> {
        let mut signals = signal::SigSet::empty();
        signals.add(signal::Signal::SIGINT);
        signal::pthread_sigmask(signal::SigmaskHow::SIG_BLOCK, Some(&signals), None)
            .expect("pthread_sigmask()");
        SignalFd::with_flags(&signals, SfdFlags::SFD_CLOEXEC)
            .context("Failed to create signal file descriptor")
    }

    /// Unblocks SIGINT and re-raises it so the process dies with the default
    /// SIGINT disposition (used on the second interrupt).
    fn raise_sigint() {
        let mut signals = signal::SigSet::empty();
        signals.add(signal::Signal::SIGINT);
        signal::pthread_sigmask(signal::SigmaskHow::SIG_UNBLOCK, Some(&signals), None)
            .expect("pthread_sigmask()");
        signal::raise(signal::Signal::SIGINT).expect("raise()");
        unreachable!();
    }

    /// Runs all tasks to completion. Returns `Ok(true)` on full success,
    /// `Ok(false)` if any task failed or an interrupt stopped scheduling.
    /// With `keep_going`, unrelated tasks continue after a failure.
    pub fn run(&mut self, runner: &Runner, keep_going: bool) -> Result<bool> {
        let mut success = true;
        let mut interrupted = false;

        let mut signal_fd = Self::setup_signalfd()?;

        self.run_tasks(runner)?;

        while !self.tasks_running.is_empty() {
            match self.wait_for_task(&mut signal_fd)? {
                Some(TaskWaitResult::Failed) => {
                    success = false;
                }
                Some(TaskWaitResult::Interrupted) => {
                    // Second interrupt: stop immediately via default SIGINT.
                    if interrupted {
                        Self::raise_sigint();
                    }
                    eprintln!("Interrupt received, not spawning new tasks. Interrupt again to stop immediately.");
                    interrupted = true;
                }
                None => {}
            }
            // Keep scheduling unless interrupted or failed without -k.
            if !interrupted && (success || keep_going) {
                self.run_tasks(runner)?;
            }
        }

        if interrupted || !success {
            return Ok(false);
        }

        assert!(self.is_done(), "No runnable tasks left");
        self.state.print_summary();

        Ok(true)
    }
}
diff --git a/crates/rebel/src/main.rs b/crates/rebel/src/main.rs
new file mode 100644
index 0000000..625b43d
--- /dev/null
+++ b/crates/rebel/src/main.rs
@@ -0,0 +1,87 @@
+mod driver;
+mod recipe;
+mod template;
+
+use std::{collections::HashSet, fs::File, path::Path};
+
+use clap::Parser;
+
+use rebel_common::error::*;
+use rebel_parse as parse;
+use rebel_resolve::{self as resolve, context, pin};
+use rebel_runner::{self as runner, Runner};
+
// Command-line options, parsed by clap's derive API. The `///` comments on
// fields are clap help text (runtime-visible output), so they are kept as-is.
#[derive(Parser)]
#[clap(version, about)]
struct Opts {
    /// Allow N jobs at once.
    /// Defaults to the number of available CPUs
    #[clap(short, long)]
    jobs: Option<usize>,
    /// Keep going after some tasks have failed
    #[clap(short, long)]
    keep_going: bool,
    /// The tasks to run
    #[clap(name = "task", required = true)]
    tasks: Vec<String>,
}
+
+fn read_pins<P: AsRef<Path>>(path: P) -> Result<pin::Pins> {
+ let f = File::open(path)?;
+ let pins: pin::Pins = serde_yaml::from_reader(f)
+ .map_err(Error::new)
+ .context("YAML error")?;
+ Ok(pins)
+}
+
fn main() {
    let opts: Opts = Opts::parse();

    // NOTE(review): Runner::new is an unsafe constructor — confirm its safety
    // contract in rebel_runner (it is called here before any other setup).
    let runner = unsafe { Runner::new(&runner::Options { jobs: opts.jobs }) }.unwrap();

    // NOTE(review): recipe and pin locations are hard-coded to the example
    // tree; consider making them configurable via CLI options.
    let ctx = context::Context::new(
        recipe::read_recipes("examples/recipes").unwrap(),
        read_pins("examples/pins.yml").unwrap(),
    )
    .unwrap();

    let mut rsv = resolve::Resolver::new(&ctx);
    let mut force_run = HashSet::new();

    for task in opts.tasks {
        // Parse the task reference plus its flags; exit on malformed input.
        let Ok((parsed, flags)) = parse::task_ref::task_ref_with_flags(&task) else {
            eprintln!("Invalid task syntax");
            std::process::exit(1);
        };
        // Resolve the parsed reference against the loaded context.
        let task_ref = match ctx.lookup(parsed.id, parsed.args.host, parsed.args.target) {
            Ok(task_ref) => task_ref,
            Err(err) => {
                eprintln!("{}", err);
                std::process::exit(1);
            }
        };
        // Adding a goal may report multiple resolution errors at once.
        let errors = rsv.add_goal(&task_ref);
        if !errors.is_empty() {
            for error in errors {
                eprintln!("{}", error);
            }
            std::process::exit(1);
        }
        if flags.force_run {
            force_run.insert(task_ref);
        }
    }
    let taskset = rsv.into_taskset();
    let mut driver = driver::Driver::new(&ctx, taskset, force_run).unwrap();
    // Exit nonzero on task failure or on a driver error.
    match driver.run(&runner, opts.keep_going) {
        Ok(success) => {
            if !success {
                std::process::exit(1);
            }
        }
        Err(error) => {
            eprintln!("{}", error);
            std::process::exit(1);
        }
    }
}
diff --git a/crates/rebel/src/recipe.rs b/crates/rebel/src/recipe.rs
new file mode 100644
index 0000000..28cc84c
--- /dev/null
+++ b/crates/rebel/src/recipe.rs
@@ -0,0 +1,167 @@
+use std::{collections::HashMap, ffi::OsStr, fs::File, path::Path};
+
+use serde::{de::DeserializeOwned, Deserialize};
+use walkdir::WalkDir;
+
+use rebel_common::error::*;
+use rebel_resolve::task::{TaskDef, TaskMeta};
+
/// Optional metadata from a recipe file's `meta` section.
#[derive(Clone, Debug, Deserialize, Default)]
pub struct RecipeMeta {
    // Display name; defaults to the recipe directory's basename when absent.
    pub name: Option<String>,
    pub version: Option<String>,
}
+
/// A main recipe file (`build.yml`): optional metadata plus named tasks.
#[derive(Debug, Deserialize)]
struct Recipe {
    #[serde(default)]
    pub meta: RecipeMeta,
    pub tasks: HashMap<String, TaskDef>,
}
+
/// A sub-recipe file (`build.<name>.yml`): tasks only, no own metadata.
#[derive(Debug, Deserialize)]
struct Subrecipe {
    pub tasks: HashMap<String, TaskDef>,
}
+
+fn read_yaml<T: DeserializeOwned>(path: &Path) -> Result<T> {
+ let f = File::open(path).context("IO error")?;
+
+ let value: T = serde_yaml::from_reader(f)
+ .map_err(Error::new)
+ .context("YAML error")?;
+
+ Ok(value)
+}
+
// Filename conventions for recipe files.
const RECIPE_NAME: &str = "build";
const RECIPE_PREFIX: &str = "build.";

/// Derives the sub-recipe name from a recipe file path.
///
/// Returns `None` for files that are not `*.yml`, `Some("")` for the main
/// recipe `build.yml`, and `Some(name)` for `build.<name>.yml`; any other
/// `.yml` filename yields `None`.
fn recipe_name(path: &Path) -> Option<&str> {
    // Guard clause: only .yml files are recipes at all.
    if path.extension().map_or(true, |ext| ext != "yml") {
        return None;
    }

    let stem = path.file_stem()?.to_str()?;
    (stem == RECIPE_NAME)
        .then_some("")
        .or_else(|| stem.strip_prefix(RECIPE_PREFIX))
}
+
+fn handle_recipe_tasks(
+ tasks: &mut HashMap<String, HashMap<String, Vec<TaskDef>>>,
+ recipe_tasks: HashMap<String, TaskDef>,
+ meta: &TaskMeta,
+) {
+ let task_map = match tasks.get_mut(&meta.recipe) {
+ Some(task_map) => task_map,
+ None => tasks.entry(meta.recipe.clone()).or_default(),
+ };
+
+ for (label, mut task) in recipe_tasks {
+ task.meta = meta.clone();
+ task_map.entry(label).or_default().push(task);
+ }
+}
+
/// Reads a recipe directory's main `build.yml`, adds its tasks to `tasks`,
/// and returns the recipe metadata so later sub-recipes can reuse it.
fn read_recipe_tasks(
    path: &Path,
    basename: &str,
    tasks: &mut HashMap<String, HashMap<String, Vec<TaskDef>>>,
) -> Result<RecipeMeta> {
    let recipe_def = read_yaml::<Recipe>(path)?;

    // Recipe display name defaults to the directory basename.
    let name = recipe_def
        .meta
        .name
        .as_deref()
        .unwrap_or(basename)
        .to_string();

    let meta = TaskMeta {
        basename: basename.to_string(),
        // Main recipe: empty sub-recipe name; recipe key is the basename.
        recipename: "".to_string(),
        recipe: basename.to_string(),
        name,
        version: recipe_def.meta.version.clone(),
    };

    handle_recipe_tasks(tasks, recipe_def.tasks, &meta);

    Ok(recipe_def.meta)
}
+
/// Reads a sub-recipe file (`build.<recipename>.yml`) and adds its tasks to
/// `tasks`, keyed under `"<basename>/<recipename>"` and carrying the parent
/// recipe's metadata.
fn read_subrecipe_tasks(
    path: &Path,
    basename: &str,
    recipename: &str,
    recipe_meta: &RecipeMeta,
    tasks: &mut HashMap<String, HashMap<String, Vec<TaskDef>>>,
) -> Result<()> {
    let recipe = format!("{basename}/{recipename}");
    let recipe_def = read_yaml::<Subrecipe>(path)?;

    // Name comes from the parent recipe's meta, falling back to the basename.
    let name = recipe_meta.name.as_deref().unwrap_or(basename).to_string();

    let meta = TaskMeta {
        basename: basename.to_string(),
        recipename: recipename.to_string(),
        recipe: recipe.clone(),
        name,
        version: recipe_meta.version.clone(),
    };

    handle_recipe_tasks(tasks, recipe_def.tasks, &meta);

    Ok(())
}
+
/// Walks the recipe tree under `path` and loads every recipe file.
///
/// Returns a map: recipe key -> task label -> task definitions.
/// Unreadable directory entries and files that do not match the recipe
/// naming convention are silently skipped; a sub-recipe whose main recipe
/// was never seen is also skipped.
pub fn read_recipes<P: AsRef<Path>>(
    path: P,
) -> Result<HashMap<String, HashMap<String, Vec<TaskDef>>>> {
    let mut tasks = HashMap::<String, HashMap<String, Vec<TaskDef>>>::new();
    // Metadata of each main recipe, keyed by directory basename; needed
    // before the directory's sub-recipes can be processed.
    let mut recipe_metas = HashMap::<String, RecipeMeta>::new();

    for entry in WalkDir::new(path)
        .sort_by(|a, b| {
            // Files are sorted first by stem, then by extension, so that
            // recipe.yml will always be read before recipe.NAME.yml
            let stem_cmp = a.path().file_stem().cmp(&b.path().file_stem());
            let ext_cmp = a.path().extension().cmp(&b.path().extension());
            stem_cmp.then(ext_cmp)
        })
        .into_iter()
        .filter_map(|e| e.ok())
    {
        let path = entry.path();
        if !path.is_file() {
            continue;
        }

        // "" for build.yml, "<name>" for build.<name>.yml, None otherwise.
        let Some(recipename) = recipe_name(path) else {
            continue;
        };
        // The recipe is identified by its parent directory's basename.
        let Some(basename) = path
            .parent()
            .and_then(Path::file_name)
            .and_then(OsStr::to_str)
        else {
            continue;
        };

        if recipename.is_empty() {
            recipe_metas.insert(
                basename.to_string(),
                read_recipe_tasks(path, basename, &mut tasks)?,
            );
        } else {
            // Sub-recipes need the main recipe's metadata, read above thanks
            // to the stem/extension sort order.
            let Some(recipe_meta) = recipe_metas.get(basename) else {
                continue;
            };
            read_subrecipe_tasks(path, basename, recipename, recipe_meta, &mut tasks)?;
        }
    }

    Ok(tasks)
}
diff --git a/crates/rebel/src/template.rs b/crates/rebel/src/template.rs
new file mode 100644
index 0000000..50fb334
--- /dev/null
+++ b/crates/rebel/src/template.rs
@@ -0,0 +1,41 @@
+use handlebars::Handlebars;
+use lazy_static::lazy_static;
+
+use rebel_common::error::*;
+use rebel_resolve::args::TaskArgs;
+
/// Quotes `s` for safe use in a POSIX shell command: the whole string is
/// wrapped in single quotes, and each embedded single quote becomes `'\''`
/// (close quote, escaped quote, reopen quote).
fn escape_sh(s: &str) -> String {
    let mut quoted = String::with_capacity(s.len() + 2);
    quoted.push('\'');
    for ch in s.chars() {
        if ch == '\'' {
            quoted.push_str("'\\''");
        } else {
            quoted.push(ch);
        }
    }
    quoted.push('\'');
    quoted
}
+
/// Pair of Handlebars renderers sharing the same settings but different
/// escaping: `tpl` substitutes verbatim, `tpl_sh` shell-quotes values.
#[derive(Debug)]
pub struct TemplateEngine {
    tpl: Handlebars<'static>,
    tpl_sh: Handlebars<'static>,
}
+
+impl TemplateEngine {
+ pub fn new() -> Self {
+ let mut tpl = Handlebars::new();
+ tpl.set_strict_mode(true);
+ tpl.register_escape_fn(handlebars::no_escape);
+
+ let mut tpl_sh = Handlebars::new();
+ tpl_sh.set_strict_mode(true);
+ tpl_sh.register_escape_fn(escape_sh);
+
+ TemplateEngine { tpl, tpl_sh }
+ }
+
+ pub fn eval(&self, input: &str, args: &TaskArgs) -> Result<String> {
+ self.tpl.render_template(input, args).map_err(Error::new)
+ }
+
+ pub fn eval_sh(&self, input: &str, args: &TaskArgs) -> Result<String> {
+ self.tpl_sh.render_template(input, args).map_err(Error::new)
+ }
+}
+
lazy_static! {
    // Process-wide shared template engine, built lazily on first use.
    // NOTE(review): std::sync::LazyLock could replace the lazy_static
    // dependency if the crate's minimum Rust version allows — confirm MSRV.
    pub static ref ENGINE: TemplateEngine = TemplateEngine::new();
}