tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

main.rs (7477B)


      1 // This Source Code Form is subject to the terms of the Mozilla Public
      2 // License, v. 2.0. If a copy of the MPL was not distributed with this
      3 // file, You can obtain one at http://mozilla.org/MPL/2.0/.
      4 
      5 #![forbid(unsafe_code)]
      6 
      7 use std::collections::HashMap;
      8 use std::os::unix::fs::chown;
      9 use std::path::Path;
     10 use std::process::Command;
     11 
     12 use anyhow::{ensure, Context, Result};
     13 use fs_extra::dir::{move_dir, CopyOptions};
     14 use serde::Deserialize;
     15 
     16 mod config;
     17 mod taskcluster;
     18 
     19 use config::Config;
     20 
     21 fn log_step(msg: &str) {
     22    println!("[build-image] {}", msg);
     23 }
     24 
     25 fn read_image_digest(path: &str) -> Result<String> {
     26    let output = Command::new("/kaniko/skopeo")
     27        .arg("inspect")
     28        .arg(format!("docker-archive:{}", path))
     29        .stdout(std::process::Stdio::piped())
     30        .spawn()?
     31        .wait_with_output()?;
     32    ensure!(
     33        output.status.success(),
     34        format!("Could not inspect parent image: {}", output.status)
     35    );
     36 
     37    #[derive(Deserialize, Debug)]
     38    #[serde(rename_all = "PascalCase")]
     39    struct ImageInfo {
     40        #[serde(skip_serializing_if = "Option::is_none")]
     41        name: Option<String>,
     42        #[serde(skip_serializing_if = "Option::is_none")]
     43        tag: Option<String>,
     44        digest: String,
     45        // ...
     46    }
     47 
     48    let image_info: ImageInfo = serde_json::from_slice(&output.stdout)
     49        .with_context(|| format!("Could parse image info from {:?}", path))?;
     50    Ok(image_info.digest)
     51 }
     52 
     53 fn download_parent_image(
     54    cluster: &taskcluster::TaskCluster,
     55    task_id: &str,
     56    dest: &str,
     57 ) -> Result<String> {
     58    zstd::stream::copy_decode(
     59        cluster.stream_artifact(&task_id, "public/image.tar.zst")?,
     60        std::fs::File::create(dest)?,
     61    )
     62    .context("Could not download parent image.")?;
     63 
     64    read_image_digest(dest)
     65 }
     66 
     67 fn build_image(
     68    context_path: &str,
     69    dest: &str,
     70    debug: bool,
     71    build_args: HashMap<String, String>,
     72 ) -> Result<()> {
     73    let mut command = Command::new("/kaniko/executor");
     74    command
     75        .stderr(std::process::Stdio::inherit())
     76        .args(&["--context", &format!("tar://{}", context_path)])
     77        .args(&["--destination", "image"])
     78        .args(&["--dockerfile", "Dockerfile"])
     79        .args(&["--no-push", "--no-push-cache"])
     80        .args(&[
     81            "--cache=true",
     82            "--cache-dir",
     83            "/workspace/cache",
     84            "--cache-repo",
     85            "oci:/workspace/repo",
     86        ])
     87        .arg("--single-snapshot")
     88        // Compressed caching causes OOM with large images
     89        .arg("--compressed-caching=false")
     90        // FIXME: Generating reproducible layers currently causes OOM.
     91        // .arg("--reproducible")
     92        .arg("--ignore-var-run=false")
     93        .args(&["--tarPath", dest]);
     94    if debug {
     95        command.args(&["-v", "debug"]);
     96    }
     97    for (key, value) in build_args {
     98        command.args(&["--build-arg", &format!("{}={}", key, value)]);
     99    }
    100    let status = command.status()?;
    101    ensure!(
    102        status.success(),
    103        format!("Could not build image: {}", status)
    104    );
    105    Ok(())
    106 }
    107 
    108 fn repack_image(source: &str, dest: &str, image_name: &str) -> Result<()> {
    109    let status = Command::new("/kaniko/skopeo")
    110        .arg("copy")
    111        .arg(format!("docker-archive:{}", source))
    112        .arg(format!("docker-archive:{}:{}", dest, image_name))
    113        .stderr(std::process::Stdio::inherit())
    114        .status()?;
    115    ensure!(
    116        status.success(),
    117        format!("Could not repack image: {}", status)
    118    );
    119    Ok(())
    120 }
    121 
/// Entry point: relocate the toolchain to `/kaniko`, fetch the build context
/// and optional parent image from TaskCluster, run the kaniko build, then
/// repack, compress, and optionally chown the outputs under /workspace/out.
fn main() -> Result<()> {
    // Kaniko expects everything to be in /kaniko, so if not running from there, move
    // everything there.
    if let Some(path) = std::env::current_exe()?.parent() {
        if path != Path::new("/kaniko") {
            let mut options = CopyOptions::new();
            options.copy_inside = true;
            move_dir(path, "/kaniko", &options)?;
        }
    }

    // All build configuration (context task, image name, build args, ...) is
    // supplied through environment variables.
    let config = Config::from_env().context("Could not parse environment variables.")?;

    let cluster = taskcluster::TaskCluster::from_env()?;

    let mut build_args = config.docker_build_args;

    // Make the cluster root URL available to the Dockerfile as a build arg.
    build_args.insert("TASKCLUSTER_ROOT_URL".into(), cluster.root_url());

    let output_dir = Path::new("/workspace/out");
    if !output_dir.is_dir() {
        std::fs::create_dir_all(output_dir)?;
    }

    // Reuse an already-downloaded context if one exists (e.g. on task retry).
    let context_path = Path::new("/workspace/context.tar.gz");
    if !context_path.is_file() {
        log_step("Downloading context.");

        std::io::copy(
            &mut cluster.stream_artifact(&config.context_task_id, &config.context_path)?,
            &mut std::fs::File::create(context_path)?,
        )
        .context("Could not download image context.")?;
    } else {
        log_step(&format!(
            "Using existing context from {}",
            context_path.display()
        ));
    }

    // A parent task is optional; when present its image seeds kaniko's cache
    // and is exposed to the Dockerfile via DOCKER_IMAGE_PARENT.
    if let Some(parent_task_id) = config.parent_task_id {
        let parent_path = Path::new("/workspace/parent.tar");
        let digest = if parent_path.is_file() {
            log_step(&format!(
                "Using existing parent image from {}",
                parent_path.display()
            ));
            read_image_digest(parent_path.to_str().unwrap())?
        } else {
            log_step("Downloading image.");
            download_parent_image(&cluster, &parent_task_id, parent_path.to_str().unwrap())?
        };

        log_step(&format!("Parent image digest {}", &digest));
        // The cache entry must be keyed by digest so kaniko can find it.
        std::fs::create_dir_all("/workspace/cache")?;
        std::fs::copy(parent_path, format!("/workspace/cache/{}", digest))?;

        build_args.insert(
            "DOCKER_IMAGE_PARENT".into(),
            format!("parent:latest@{}", digest),
        );
    }

    log_step("Building image.");
    // Kaniko writes an untagged tarball first; repacking below applies the name.
    build_image(
        context_path.to_str().unwrap(),
        output_dir.join("image-pre.tar").to_str().unwrap(),
        config.debug,
        build_args,
    )?;
    log_step("Repacking image.");
    repack_image(
        output_dir.join("image-pre.tar").to_str().unwrap(),
        output_dir.join("image.tar").to_str().unwrap(),
        &config.image_name,
    )?;

    log_step("Compressing image.");
    compress_file(
        output_dir.join("image.tar"),
        output_dir.join("image.tar.zst"),
        config.docker_image_zstd_level,
    )?;

    // Optionally hand the output files to another uid:gid (e.g. the task user).
    if let Some(owner) = config.chown_output {
        log_step(&format!("Changing ownership to {}", owner));
        chown_output_files(&owner, output_dir)?;
    }

    Ok(())
}
    213 
    214 fn compress_file(
    215    source: impl AsRef<std::path::Path>,
    216    dest: impl AsRef<std::path::Path>,
    217    zstd_level: i32,
    218 ) -> Result<()> {
    219    Ok(zstd::stream::copy_encode(
    220        std::fs::File::open(source)?,
    221        std::fs::File::create(dest)?,
    222        zstd_level,
    223    )?)
    224 }
    225 
    226 fn chown_output_files(owner: &str, output_dir: &Path) -> Result<()> {
    227    let parts: Vec<&str> = owner.split(':').collect();
    228    ensure!(
    229        parts.len() == 2,
    230        "Owner must be in format 'uid:gid', got: {}",
    231        owner
    232    );
    233 
    234    let uid = parts[0]
    235        .parse::<u32>()
    236        .with_context(|| format!("Failed to parse uid: {}", parts[0]))?;
    237    let gid = parts[1]
    238        .parse::<u32>()
    239        .with_context(|| format!("Failed to parse gid: {}", parts[1]))?;
    240 
    241    for entry in std::fs::read_dir(output_dir)? {
    242        let entry = entry?;
    243        let path = entry.path();
    244        chown(&path, Some(uid), Some(gid))
    245            .with_context(|| format!("Failed to chown {}", path.display()))?;
    246    }
    247 
    248    Ok(())
    249 }