Dataset columns:
  repo_name: stringclasses (1 value)
  path: stringlengths (27 to 72)
  content: stringlengths (19 to 51.7k)
  license: stringclasses (1 value)
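A minimal sketch of a record type matching the four string columns listed above, assuming the rows below are exported as JSON Lines; the file name `rows.jsonl` and the use of the serde, serde_json, and anyhow crates are illustrative assumptions, not part of the dump.

```rust
use std::fs::File;
use std::io::{BufRead, BufReader};

use serde::Deserialize; // requires serde with the "derive" feature

/// One row of the dump, mirroring the columns listed above.
#[derive(Debug, Deserialize)]
struct Row {
    repo_name: String,
    path: String,
    content: String,
    license: String,
}

fn main() -> anyhow::Result<()> {
    // "rows.jsonl" is a hypothetical export of the rows below, one JSON object per line.
    let reader = BufReader::new(File::open("rows.jsonl")?);
    for line in reader.lines() {
        let row: Row = serde_json::from_str(&line?)?;
        println!("{} ({} bytes, {})", row.path, row.content.len(), row.license);
    }
    Ok(())
}
```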
rust-bio-tools
./rust-bio-tools/tests/lib.rs
use bio::io::fastq;\nuse rust_htslib::bam;\nuse rust_htslib::bam::Read;\nuse std::fs;\nuse std::process::Command;\n\n/// Compare an output file to the expected output and delete the output file.\nfn test_output(result: &str, expected: &str) {\n assert!(Command::new('cmp')\n .arg(result)\n .arg(expected)\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n fs::remove_file(result).unwrap();\n}\n\n/// Compare two fastq files, ignoring the name lines\n/// Reads are sorted by their sequence, which is not 100% robust\n/// if mutations/ sequencing errors are considered.\nfn compare_fastq(result: &str, expected: &str, strand: bool) {\n let result_reader = fastq::Reader::from_file(result).unwrap();\n let mut result_recs: Vec<fastq::Record> =\n result_reader.records().filter_map(Result::ok).collect();\n result_recs.sort_by_key(|x| x.seq().to_owned());\n let expected_reader = fastq::Reader::from_file(expected).unwrap();\n let mut expected_recs: Vec<fastq::Record> =\n expected_reader.records().filter_map(Result::ok).collect();\n expected_recs.sort_by_key(|x| x.seq().to_owned());\n assert_eq!(result_recs.len(), expected_recs.len());\n for (result, expected) in result_recs.iter().zip(expected_recs.iter()) {\n assert_eq!(result.seq(), expected.seq());\n assert_eq!(result.qual(), expected.qual());\n if strand {\n assert_eq!(result.desc(), expected.desc())\n }\n }\n}\n\nfn compare_bam(result: &str, expected: &str) {\n let mut result_reader = bam::Reader::from_path(result).unwrap();\n let mut result_recs: Vec<bam::Record> =\n result_reader.records().filter_map(Result::ok).collect();\n result_recs.sort_by_key(|x| x.seq().as_bytes());\n let mut expected_reader = bam::Reader::from_path(expected).unwrap();\n let mut expected_recs: Vec<bam::Record> =\n expected_reader.records().filter_map(Result::ok).collect();\n expected_recs.sort_by_key(|x| x.seq().as_bytes());\n for (result, expected) in result_recs.iter().zip(expected_recs.iter()) {\n assert_eq!(result.seq().as_bytes(), expected.seq().as_bytes());\n assert_eq!(result.qual(), expected.qual());\n }\n}\n\n#[test]\nfn fastq_split() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt fastq-split tests/A.fastq tests/B.fastq < tests/test.fastq')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output('tests/A.fastq', 'tests/expected/A.fastq');\n test_output('tests/B.fastq', 'tests/expected/B.fastq');\n}\n\n#[test]\nfn fastq_filter() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg(\n 'target/debug/rbt fastq-filter tests/ids.txt < tests/test.fastq > tests/filtered.fastq'\n )\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output('tests/filtered.fastq', 'tests/expected/B.fastq');\n}\n\n#[test]\nfn bam_depth() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt bam-depth tests/test.bam < tests/pos.txt > tests/depth.txt')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output('tests/depth.txt', 'tests/expected/depth.txt');\n}\n\n#[test]\nfn vcf_to_txt() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-to-txt --genotypes --fmt S --info T X SOMATIC < tests/test.vcf > tests/variant-table.txt')\n .spawn().unwrap().wait().unwrap().success());\n test_output(\n 'tests/variant-table.txt',\n 'tests/expected/variant-table.txt',\n );\n}\n\n#[test]\nfn vcf_to_txt_with_filter() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-to-txt --genotypes --fmt S --info T X SOMATIC --with-filter < tests/test-with-filter.vcf > 
tests/variant-table-with-filter.txt')\n .spawn().unwrap().wait().unwrap().success());\n test_output(\n 'tests/variant-table-with-filter.txt',\n 'tests/expected/variant-table-with-filter.txt',\n );\n}\n\n// FIXME: can't work out how to use should_panic macro\n//#[should_panic]\nfn vcf_to_txt_input_info_as_format() {\n assert!(String::from_utf8_lossy(\n &Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-to-txt --fmt T < tests/test.vcf')\n .output()\n .unwrap()\n .stderr\n )\n .contains(''Unable to find FORMAT \'T\' in the input file! Is \'T\' an INFO tag?''));\n}\n\n#[test]\nfn vcf_match() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-match -d 50 -l 20 tests/test3.vcf < tests/test2.vcf > tests/matching.bcf')\n .spawn().unwrap().wait().unwrap().success());\n test_output('tests/matching.bcf', 'tests/expected/matching.bcf');\n}\n\n#[test]\nfn vcf_match_same() {\n assert!(Command::new('bash').arg('-c')\n .arg('target/debug/rbt vcf-match -d 50 -l 20 tests/test4.vcf < tests/test4.vcf > tests/matching-same.bcf')\n .spawn().unwrap().wait().unwrap().success());\n test_output(\n 'tests/matching-same.bcf',\n 'tests/expected/matching-same.bcf',\n );\n}\n\n#[test]\nfn vcf_fix_iupac_alleles() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg(\n 'target/debug/rbt vcf-fix-iupac-alleles < tests/test-iupac.vcf > tests/iupac-fixed.bcf'\n )\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output('tests/iupac-fixed.bcf', 'tests/expected/iupac-fixed.bcf');\n}\n\n#[test]\nfn vcf_baf() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-baf < tests/test-freebayes.vcf > tests/baf.bcf')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output('tests/baf.bcf', 'tests/expected/baf.bcf');\n}\n\n#[test]\nfn test_vcf_report() {\n assert!(\n Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-report tests/ref.fa -v a=tests/report-test.vcf -v b=tests/report-test.vcf -b a:tumor=tests/test-report.bam -b b:tumor=tests/test-report.bam -- tests/test-vcf-report')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success()\n );\n let files1 = vec![\n (\n 'tests/test-vcf-report/indexes/index1.html',\n 'tests/expected/report/indexes/index1.html',\n ),\n (\n 'tests/test-vcf-report/genes/KRAS1.html',\n 'tests/expected/report/genes/KRAS1.html',\n ),\n ];\n\n let files2 = vec![\n (\n 'tests/test-vcf-report/details/a/ENST00000557334_5_c_35G_A.html',\n 'tests/expected/report/details/a/ENST00000557334_5_c_35G_A.html',\n ),\n (\n 'tests/test-vcf-report/details/b/ENST00000557334_5_c_35G_A.html',\n 'tests/expected/report/details/b/ENST00000557334_5_c_35G_A.html',\n ),\n ];\n\n for (result, expected) in files1 {\n // delete line 22 with timestamp and 15 with version\n // this may fail on OS X due to the wrong sed being installed\n assert!(Command::new('bash')\n .arg('-c')\n .arg('sed -i '22d;15d' '.to_owned() + result)\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output(result, expected)\n }\n for (result, expected) in files2 {\n // Delete line 35 with timestamp and 28 with version\n // This may fail on OS X due to the wrong sed being installed\n assert!(Command::new('bash')\n .arg('-c')\n .arg('sed -i '36d;29d' '.to_owned() + result)\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output(result, expected)\n }\n fs::remove_dir_all('tests/test-vcf-report').unwrap();\n}\n\n#[test]\nfn test_csv_report() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt csv-report 
tests/test_report.csv -- tests/test-csv-report')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n\n let result = 'tests/test-csv-report/data/index1.js';\n let expected = 'tests/expected/csv-report/data/index1.js';\n test_output(result, expected);\n\n fs::remove_dir_all('tests/test-csv-report').unwrap();\n}\n\n#[test]\nfn test_collapse_reads_to_fragments_two_cluster() {\n assert!(\n Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt collapse-reads-to-fragments fastq --umi-len 3 -u --max-umi-dist 0 --max-seq-dist 2 tests/test-consensus.fastq tests/test-consensus.fastq /tmp/test-consensus.1.fastq /tmp/test-consensus.2.fastq')\n .spawn().unwrap().wait().unwrap().success());\n compare_fastq(\n '/tmp/test-consensus.1.fastq',\n 'tests/expected/test-consensus.1.fastq',\n false,\n );\n compare_fastq(\n '/tmp/test-consensus.2.fastq',\n 'tests/expected/test-consensus.2.fastq',\n false,\n );\n}\n\n#[test]\nfn test_collapse_reads_to_fragments_single_cluster() {\n assert!(\n Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt collapse-reads-to-fragments fastq --umi-len 3 -u --max-umi-dist 2 --max-seq-dist 2 tests/test-consensus.fastq tests/test-consensus.fastq /tmp/test-consensus_single.1.fastq /tmp/test-consensus_single.2.fastq')\n .spawn().unwrap().wait().unwrap().success());\n compare_fastq(\n '/tmp/test-consensus_single.1.fastq',\n 'tests/expected/test-consensus_single.1.fastq',\n false,\n );\n compare_fastq(\n '/tmp/test-consensus_single.2.fastq',\n 'tests/expected/test-consensus_single.2.fastq',\n false,\n );\n}\n\n#[test]\nfn test_collapse_reads_to_fragments_reads() {\n assert!(\n Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt collapse-reads-to-fragments fastq --umi-len 10 --max-umi-dist 0 --max-seq-dist 8 --insert-size 450 --std-dev 50 tests/overlapping-consensus.1.fastq tests/overlapping-consensus.2.fastq /tmp/test_overlapping-consensus.1.fastq /tmp/test_overlapping-consensus.2.fastq /tmp/test_overlapping-consensus.3.fastq')\n .spawn().unwrap().wait().unwrap().success());\n compare_fastq(\n '/tmp/test_overlapping-consensus.1.fastq',\n 'tests/expected/test_overlapping-consensus.1.fastq',\n false,\n );\n compare_fastq(\n '/tmp/test_overlapping-consensus.2.fastq',\n 'tests/expected/test_overlapping-consensus.2.fastq',\n false,\n );\n compare_fastq(\n '/tmp/test_overlapping-consensus.3.fastq',\n 'tests/expected/test_overlapping-consensus.3.fastq',\n false,\n );\n}\n\n#[test]\nfn test_collapse_reads_to_fragments_from_bam() {\n assert!(\n Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt collapse-reads-to-fragments bam tests/overlapping_consensus_marked.bam /tmp/bam_consensus_r1.fq /tmp/bam_consensus_r2.fq /tmp/bam_consensus_se.fq /tmp/overlapping_consensus_mapped.bam')\n .spawn().unwrap().wait().unwrap().success());\n compare_fastq(\n '/tmp/bam_consensus_r1.fq',\n 'tests/expected/bam_consensus_r1.fq',\n true,\n );\n compare_fastq(\n '/tmp/bam_consensus_r2.fq',\n 'tests/expected/bam_consensus_r2.fq',\n true,\n );\n compare_fastq(\n '/tmp/bam_consensus_se.fq',\n 'tests/expected/bam_consensus_se.fq',\n true,\n );\n compare_bam(\n '/tmp/overlapping_consensus_mapped.bam',\n 'tests/expected/overlapping_consensus_mapped.bam',\n );\n}\n\n#[test]\nfn test_vcf_annotate_dgidb() {\n let exec_test = Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-annotate-dgidb tests/annotate_dgidb_test.vcf | bcftools view - | wc -l').output()\n .expect('failed to execute process');\n assert!(exec_test.status.success());\n 
assert_eq!(String::from_utf8(exec_test.stdout).unwrap().trim(), '65');\n}\n\n#[test]\nfn test_stats_fasta_file() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt sequence-stats < tests/stats.fasta > /tmp/result.fasta.stats')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n\n test_output(\n '/tmp/result.fasta.stats',\n 'tests/expected/result.fasta.stats',\n );\n}\n\n#[test]\nfn test_stats_fastq_file() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt sequence-stats -q < tests/stats.fastq > /tmp/result.fastq.stats')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n\n test_output(\n '/tmp/result.fastq.stats',\n 'tests/expected/result.fastq.stats',\n );\n}\n\n#[test]\nfn test_vcf_split() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-split tests/test-vcf-split.vcf /tmp/vcf-split1.bcf /tmp/vcf-split2.bcf')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n}\n\n#[test]\nfn test_vcf_split_chain() {\n assert!(Command::new('bash')\n .arg('-c')\n .arg('target/debug/rbt vcf-split tests/test-vcf-split-chain.vcf /tmp/vcf-split-chain1.bcf /tmp/vcf-split-chain2.bcf')\n .spawn()\n .unwrap()\n .wait()\n .unwrap()\n .success());\n test_output(\n '/tmp/vcf-split-chain1.bcf',\n 'tests/expected/vcf-split-chain1.bcf',\n );\n test_output(\n '/tmp/vcf-split-chain2.bcf',\n 'tests/expected/vcf-split-chain2.bcf',\n );\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/bam/depth.rs
//! Compute the depth of coverage in a BAM file for a list of reference sequences and positions.\n//!\n//! ## Input:\n//! A BAM file and a positions file.\n//! The positions file contains the name of one reference sequence and one position per line (tab separated).\n//! Example:\n//! ```\n//! 16 1\n//! 17 1\n//! 17 2\n//! 17 38\n//! 17 39\n//! ```\n//!\n//! Positions are read from stdin, the BAM file is the first argument.\n//!\n//! ## Output:\n//! Depth are written to stdout as tab-separated lines, similar to the positions input.\n//! Example:\n//! ```\n//! 16 1 0\n//! 17 1 5\n//! 17 2 5\n//! 17 38 14\n//! 17 39 13\n//! ```\n//!\n//! ## Usage:\n//!\n//! ```bash\n//! $ rbt bam-depth tests/test.bam < tests/pos.txt > tests/depth.txt\n//! ```\n//! Where `pos.txt` is a positions file, as described above.\n//!\n//!\nuse anyhow::Result;\nuse log::info;\nuse std::cmp;\nuse std::io;\n\nuse serde::Deserialize;\n\nuse rust_htslib::bam;\nuse rust_htslib::bam::{FetchDefinition, Read};\nuse std::path::Path;\n\n#[derive(Deserialize, Debug)]\nstruct PosRecord {\n chrom: String,\n pos: u32,\n}\n\npub fn depth<P: AsRef<Path>>(\n bam_path: P,\n max_read_length: u32,\n include_flags: u16,\n exclude_flags: u16,\n min_mapq: u8,\n) -> Result<()> {\n let mut bam_reader = bam::IndexedReader::from_path(&bam_path)?;\n let bam_header = bam_reader.header().clone();\n let mut pos_reader = csv::ReaderBuilder::new()\n .has_headers(false)\n .delimiter(b'\t')\n .from_reader(io::stdin());\n let mut csv_writer = csv::WriterBuilder::new()\n .delimiter(b'\t')\n .from_writer(io::BufWriter::new(io::stdout()));\n\n for (i, record) in pos_reader.deserialize().enumerate() {\n let record: PosRecord = record?;\n\n // jump to correct position\n let tid = bam_header.tid(record.chrom.as_bytes()).unwrap() as i32;\n let start = cmp::max(record.pos as i64 - max_read_length as i64 - 1, 0);\n bam_reader.fetch(FetchDefinition::Region(\n tid,\n start as i64,\n start as i64 + (max_read_length * 2) as i64,\n ))?;\n\n // iterate over pileups\n let mut covered = false;\n for pileup in bam_reader.pileup() {\n let pileup = pileup?;\n covered = pileup.pos() == record.pos - 1;\n\n if covered {\n let depth = pileup\n .alignments()\n .filter(|alignment| {\n let record = alignment.record();\n let flags = record.flags();\n (!flags) & include_flags == 0\n && flags & exclude_flags == 0\n && record.mapq() >= min_mapq\n })\n .count();\n\n csv_writer.serialize((&record.chrom, record.pos, depth))?;\n break;\n } else if pileup.pos() > record.pos {\n break;\n }\n }\n if !covered {\n csv_writer.serialize((&record.chrom, record.pos, 0))?;\n }\n\n if (i + 1) % 100 == 0 {\n info!('{} records written.', i + 1);\n }\n }\n Ok(())\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/bam/plot/plot_bam.rs
use crate::bcf::report::table_report::create_report_table::create_report_data;\nuse crate::bcf::report::table_report::create_report_table::manipulate_json;\nuse crate::common::Region;\nuse anyhow::Result;\nuse chrono::{DateTime, Local};\nuse itertools::Itertools;\nuse std::io;\nuse std::io::Write;\nuse std::path::Path;\nuse tera::{Context, Tera};\n\npub(crate) fn plot_bam<P: AsRef<Path> + std::fmt::Debug>(\n bam_paths: &[P],\n fasta_path: P,\n region: &Region,\n max_read_depth: u32,\n) -> Result<()> {\n let mut plots = Vec::new();\n\n let Region { target, start, end } = region.clone();\n for bam_path in bam_paths {\n let content = create_report_data(&fasta_path, None, bam_path, region, max_read_depth)?;\n let visualization = manipulate_json(content, start, end)?;\n\n plots.push(visualization);\n }\n\n let bams = bam_paths\n .iter()\n .map(|b| b.as_ref().iter().last().unwrap().to_str().unwrap())\n .collect_vec();\n\n let mut templates = Tera::default();\n templates.add_raw_template('bam_plot.html.tera', include_str!('bam_plot.html.tera'))?;\n let mut context = Context::new();\n let local: DateTime<Local> = Local::now();\n context.insert('time', &local.format('%a %b %e %T %Y').to_string());\n context.insert('version', &env!('CARGO_PKG_VERSION'));\n context.insert('plots', &plots);\n context.insert('bams', &bams);\n context.insert('chrom', &target);\n context.insert('start', &start);\n context.insert('end', &end);\n\n let html = templates.render('bam_plot.html.tera', &context)?;\n io::stdout().write_all(html.as_bytes())?;\n\n Ok(())\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/bam/plot/mod.rs
pub mod plot_bam;\n
mit
rust-bio-tools
./rust-bio-tools/src/bam/anonymize_reads.rs
use anyhow::Result;\nuse bio::io::fasta;\nuse rand::prelude::{SliceRandom, ThreadRng};\nuse rand::seq::IteratorRandom;\nuse rust_htslib::bam;\nuse rust_htslib::bam::Read;\nuse std::collections::HashMap;\nuse std::ops::Range;\nuse std::path::Path;\nuse uuid::Uuid;\n\npub fn anonymize_reads<P: AsRef<Path> + std::fmt::Debug>(\n bam: P,\n input_ref: P,\n output_bam: P,\n output_ref: P,\n chr: String,\n interval: Range<u64>,\n keep_only_pairs: bool,\n) -> Result<()> {\n let start = interval.start;\n let end = interval.end;\n let mut fasta_reader = fasta::IndexedReader::from_file(&input_ref)?;\n fasta_reader.fetch(&chr, start, end)?;\n let mut reference = Vec::new();\n reference.resize((end - start) as usize, 0);\n fasta_reader.read(&mut reference)?;\n let mut rng = rand::thread_rng();\n let alphabet = [b'A', b'C', b'G', b'T'];\n\n //Build artificial reference\n let mut artificial_reference = Vec::new();\n add_random_bases(end - start, &mut artificial_reference, &mut rng, &alphabet)?;\n let mut altered_bases = init_altered_bases(&reference, &artificial_reference)?;\n let mut fa_writer = fasta::Writer::to_file(output_ref)?;\n let ref_id = Uuid::new_v4().to_hyphenated().to_string();\n fa_writer.write(&ref_id, None, &artificial_reference)?;\n\n let mut bam_reader = bam::IndexedReader::from_path(bam)?;\n bam_reader.fetch((chr.as_bytes(), start, end + 1))?;\n\n let mut header = bam::Header::new();\n header.push_record(\n bam::header::HeaderRecord::new(b'SQ')\n .push_tag(b'SN', &ref_id)\n .push_tag(b'LN', &(end - start)),\n );\n let mut bam_writer = bam::Writer::from_path(output_bam, &header, bam::Format::Bam)?;\n let mate_in_range = |record: &bam::Record| -> bool {\n (record.mtid() == record.tid())\n && (record.mpos() >= (start as i64))\n && (record.mpos() < (end as i64))\n };\n for result in bam_reader.records() {\n let mut record = result?;\n if (record.pos() >= start as i64)\n && (record.cigar().end_pos() < end as i64)\n && (!keep_only_pairs || mate_in_range(&record))\n {\n record.cache_cigar();\n //Check if mate record end within region\n let artificial_seq = if record.is_unmapped() || record.seq_len() == 0 {\n let mut seq = Vec::new();\n add_random_bases(record.seq_len() as u64, &mut seq, &mut rng, &alphabet)?;\n seq\n } else {\n build_sequence(\n &mut altered_bases,\n &record,\n start as usize,\n &mut rng,\n &alphabet,\n )?\n };\n let artificial_record = build_record(&record, &artificial_seq, start as i64)?;\n bam_writer.write(&artificial_record)?;\n }\n }\n Ok(())\n}\n\nfn init_altered_bases(\n original_ref: &[u8],\n artificial_reference: &[u8],\n) -> Result<HashMap<usize, HashMap<u8, u8>>> {\n let mut altered_bases = HashMap::new();\n for (i, (artifical_base, original_base)) in artificial_reference\n .iter()\n .zip(original_ref.iter())\n .enumerate()\n {\n altered_bases\n .entry(i)\n .or_insert_with(HashMap::new)\n .insert(*original_base, *artifical_base);\n }\n Ok(altered_bases)\n}\n\nfn build_record(record: &bam::Record, artificial_seq: &[u8], offset: i64) -> Result<bam::Record> {\n let mut artificial_record = bam::record::Record::new();\n artificial_record.set(\n record.qname(),\n Some(&record.cigar()),\n artificial_seq,\n record.qual(),\n );\n set_mandatory_fields(&mut artificial_record, record, offset)?;\n for aux_result in record.aux_iter() {\n let (tag, aux_field) = aux_result?;\n artificial_record.push_aux(tag, aux_field)?;\n }\n Ok(artificial_record)\n}\n\nfn build_sequence(\n altered_bases: &mut HashMap<usize, HashMap<u8, u8>>,\n record: &bam::Record,\n offset: usize,\n rng: &mut 
ThreadRng,\n alphabet: &[u8],\n) -> Result<Vec<u8>> {\n let mut artificial_seq = Vec::new();\n let record_seq = record.seq().as_bytes();\n let mut record_pos = 0;\n let mut ref_pos = record.pos() as usize - offset;\n //Create random seq for leading softclips\n for cigar in record.cigar_cached().unwrap().iter() {\n match cigar.char() {\n 'S' => {\n add_random_bases(cigar.len() as u64, &mut artificial_seq, rng, alphabet)?;\n record_pos += cigar.len() as usize;\n }\n 'M' | 'X' | '=' => {\n (0..cigar.len()).for_each(|_| {\n let base_mappings = altered_bases.get(&ref_pos).unwrap().clone();\n let altered_base = *altered_bases\n .get_mut(&ref_pos)\n .unwrap()\n .entry(*record_seq.get(record_pos).unwrap())\n .or_insert_with(|| {\n *alphabet\n .iter()\n .filter(|&x| !base_mappings.values().any(|y| x == y))\n .choose(rng)\n .unwrap()\n });\n artificial_seq.push(altered_base);\n ref_pos += 1;\n record_pos += 1;\n });\n // Add reference bases except for mismatches\n }\n 'I' => {\n add_random_bases(cigar.len() as u64, &mut artificial_seq, rng, alphabet)?;\n record_pos += cigar.len() as usize;\n }\n 'D' | 'N' => {\n ref_pos += cigar.len() as usize;\n }\n _ => {}\n }\n }\n\n Ok(artificial_seq)\n}\n\nfn set_mandatory_fields(\n target_rec: &mut bam::Record,\n source_rec: &bam::Record,\n offset: i64,\n) -> Result<()> {\n target_rec.set_pos(source_rec.pos() - offset);\n target_rec.set_tid(0);\n let (mtid, mpos) = if source_rec.mtid() == -1 {\n (-1, -1)\n } else if source_rec.mtid() == source_rec.tid() {\n (0, source_rec.mpos() - offset)\n } else {\n (1, source_rec.mpos())\n };\n target_rec.set_mtid(mtid);\n target_rec.set_mpos(mpos);\n target_rec.set_flags(source_rec.flags());\n target_rec.set_insert_size(source_rec.insert_size());\n target_rec.set_mapq(source_rec.mapq());\n Ok(())\n}\n\nfn add_random_bases(\n length: u64,\n seq: &mut Vec<u8>,\n rng: &mut ThreadRng,\n alphabet: &[u8],\n) -> Result<()> {\n (0..length).for_each(|_| seq.push(*alphabet.choose(rng).unwrap()));\n Ok(())\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/bam/collapse_reads_to_fragments/calc_consensus.rs
use crate::common::CalcConsensus;\nuse bio::io::fastq;\nuse bio::stats::probs::LogProb;\nuse bio_types::sequence::SequenceRead;\nuse bio_types::sequence::SequenceReadPairOrientation;\nuse derive_new::new;\nuse itertools::Itertools;\nuse rust_htslib::bam;\nuse rust_htslib::bam::record::Aux;\nuse std::collections::{HashMap, HashSet};\nuse std::ops::BitOrAssign;\n\nconst ALLELES: &[u8] = b'ACGT';\n\npub fn get_umi_string(rec: &bam::record::Record) -> String {\n let umi = match rec.aux(b'RX') {\n Ok(Aux::String(value)) => {\n format!(' RX:Z:{}', value)\n }\n _ => String::from(''),\n };\n umi\n}\n\n#[derive(Eq, PartialEq)]\nenum StrandObservation {\n None,\n Forward,\n Reverse,\n Both,\n}\n\nimpl BitOrAssign for StrandObservation {\n fn bitor_assign(&mut self, rhs: Self) {\n if let StrandObservation::None = self {\n *self = rhs;\n } else if *self != rhs {\n *self = StrandObservation::Both;\n }\n }\n}\n\n#[derive(new)]\npub struct CalcOverlappingConsensus<'a> {\n recs1: &'a [bam::Record],\n recs2: &'a [bam::Record],\n r1_vec: &'a [bool],\n r2_vec: &'a [bool],\n seqids: &'a [usize],\n uuid: &'a str,\n read_ids: &'a mut Option<HashMap<usize, Vec<u8>>>,\n}\n\nimpl<'a> CalcOverlappingConsensus<'a> {\n pub fn calc_consensus(&self) -> (fastq::Record, LogProb) {\n let seq_len = self.r1_vec().len();\n let mut consensus_seq: Vec<u8> = Vec::with_capacity(seq_len);\n let mut consensus_qual: Vec<u8> = Vec::with_capacity(seq_len);\n let mut consensus_strand = b'SI:Z:'.to_vec();\n let read_orientations_opt = self.build_read_orientation_string();\n let mut consensus_lh = LogProb::ln_one();\n for i in 0..seq_len {\n match (\n self.recs1().len() == 1,\n self.map_read_pos(i, self.r1_vec()),\n self.map_read_pos(i, self.r2_vec()),\n ) {\n (true, Some(base_pos), None) => {\n let base = self.recs1()[0].seq().as_bytes()[base_pos];\n consensus_seq.push(base);\n consensus_qual.push(self.recs1()[0].qual()[base_pos] + 33);\n consensus_lh += Self::overall_allele_likelihood(self, &base, i);\n }\n (true, None, Some(base_pos)) => {\n let base = self.recs2()[0].seq().as_bytes()[base_pos];\n consensus_seq.push(base);\n consensus_qual.push(self.recs2()[0].qual()[base_pos] + 33);\n consensus_lh += Self::overall_allele_likelihood(self, &base, i);\n }\n _ => {\n let likelihoods = ALLELES\n .iter()\n .map(|a| Self::overall_allele_likelihood(self, a, i))\n .collect_vec();\n Self::build_consensus_sequence(\n likelihoods,\n &mut consensus_lh,\n &mut consensus_seq,\n &mut consensus_qual,\n 33.0,\n );\n }\n };\n self.build_consensus_strand(&mut consensus_strand, consensus_seq[i], i);\n }\n let name = if self.read_ids.is_some() {\n Self::build_verbose_read_name(self.uuid(), self.seqids(), self.read_ids)\n } else {\n format!(\n '{}_consensus-read-from:{}_reads',\n self.uuid(),\n self.seqids().len(),\n )\n };\n if let Some(mut read_orientations) = read_orientations_opt {\n consensus_strand.append(&mut read_orientations)\n }\n let umi = get_umi_string(&self.recs1()[0]);\n let description = format!('{}{}', String::from_utf8(consensus_strand).unwrap(), umi);\n let consensus_rec =\n fastq::Record::with_attrs(&name, Some(&description), &consensus_seq, &consensus_qual);\n (consensus_rec, consensus_lh)\n }\n\n fn recs1(&self) -> &[bam::Record] {\n self.recs1\n }\n\n fn recs2(&self) -> &[bam::Record] {\n self.recs2\n }\n\n fn r1_vec(&self) -> &[bool] {\n self.r1_vec\n }\n\n fn r2_vec(&self) -> &[bool] {\n self.r2_vec\n }\n\n fn build_consensus_strand(&self, consensus_strand: &mut Vec<u8>, ref_base: u8, pos: usize) {\n let mut strand = 
StrandObservation::None;\n let rec1_pos = self.map_read_pos(pos, self.r1_vec());\n let rec2_pos = self.map_read_pos(pos, self.r2_vec());\n let mut strand_observation = |recs: &[bam::Record], rec_pos: Option<usize>| {\n if let Some(pos) = rec_pos {\n recs.iter().for_each(|rec| {\n if rec.base(pos) == ref_base {\n match rec.is_reverse() {\n true => strand |= StrandObservation::Reverse,\n false => strand |= StrandObservation::Forward,\n };\n }\n });\n }\n };\n strand_observation(self.recs1(), rec1_pos);\n strand_observation(self.recs2(), rec2_pos);\n match strand {\n StrandObservation::Forward => consensus_strand.push(b'+'),\n StrandObservation::Reverse => consensus_strand.push(b'-'),\n StrandObservation::Both => consensus_strand.push(b'*'),\n StrandObservation::None => consensus_strand.push(b'.'),\n }\n }\n fn build_read_orientation_string(&self) -> Option<Vec<u8>> {\n let mut read_orientations_set: HashSet<_> = self\n .recs1()\n .iter()\n .filter_map(|rec| match rec.read_pair_orientation() {\n SequenceReadPairOrientation::F2F1 => Some(b'F2F1,'),\n SequenceReadPairOrientation::F2R1 => Some(b'F2R1,'),\n SequenceReadPairOrientation::F1F2 => Some(b'F1F2,'),\n SequenceReadPairOrientation::R2F1 => Some(b'R2F1,'),\n SequenceReadPairOrientation::F1R2 => Some(b'F1R2,'),\n SequenceReadPairOrientation::R2R1 => Some(b'R2R1,'),\n SequenceReadPairOrientation::R1F2 => Some(b'R1F2,'),\n SequenceReadPairOrientation::R1R2 => Some(b'R1R2,'),\n SequenceReadPairOrientation::None => None,\n })\n .collect();\n let mut read_orientations_string = b' RO:Z:'.to_vec();\n read_orientations_set\n .drain()\n .for_each(|entry| read_orientations_string.extend_from_slice(entry));\n match read_orientations_string.pop() {\n Some(b',') => Some(read_orientations_string),\n Some(b':') => None,\n Some(_) => unreachable!(),\n None => unreachable!(),\n }\n }\n fn map_read_pos(&self, consensus_pos: usize, alignment_vec: &[bool]) -> Option<usize> {\n match alignment_vec[consensus_pos] {\n true => Some(\n alignment_vec[0..(consensus_pos + 1)]\n .iter()\n .filter(|&v| *v)\n .count()\n - 1,\n ),\n false => None,\n }\n }\n}\n\nimpl<'a> CalcConsensus<'a, bam::Record> for CalcOverlappingConsensus<'a> {\n fn overall_allele_likelihood(&self, allele: &u8, pos: usize) -> LogProb {\n let mut lh = LogProb::ln_one();\n let rec1_pos = self.map_read_pos(pos, self.r1_vec());\n let rec2_pos = self.map_read_pos(pos, self.r2_vec());\n for (rec1, rec2) in self.recs1().iter().zip(self.recs2()) {\n if let Some(pos) = rec1_pos {\n lh += Self::allele_likelihood_in_rec(\n allele,\n &rec1.seq().as_bytes(),\n rec1.qual(),\n pos,\n 0,\n );\n };\n if let Some(pos) = rec2_pos {\n lh += Self::allele_likelihood_in_rec(\n allele,\n &rec2.seq().as_bytes(),\n rec2.qual(),\n pos,\n 0,\n );\n };\n }\n lh\n }\n\n fn seqids(&self) -> &'a [usize] {\n self.seqids\n }\n\n fn uuid(&self) -> &'a str {\n self.uuid\n }\n}\n\n#[derive(new)]\npub struct CalcNonOverlappingConsensus<'a> {\n recs: &'a [bam::Record],\n seqids: &'a [usize],\n uuid: &'a str,\n read_ids: &'a mut Option<HashMap<usize, Vec<u8>>>,\n}\n\nimpl<'a> CalcNonOverlappingConsensus<'a> {\n pub fn calc_consensus(&self) -> (fastq::Record, LogProb) {\n let seq_len = self.recs()[0].seq().len();\n let mut consensus_seq: Vec<u8> = Vec::with_capacity(seq_len);\n let mut consensus_qual: Vec<u8> = Vec::with_capacity(seq_len);\n let mut consensus_strand = b'SI:Z:'.to_vec();\n let mut cigar_map = HashMap::new();\n for record in self.recs() {\n let cached_cigar = record.raw_cigar();\n if !cigar_map.contains_key(cached_cigar) 
{\n cigar_map.insert(cached_cigar, Vec::new());\n }\n cigar_map.get_mut(cached_cigar).unwrap().push(record);\n }\n\n // Potential workflow for different read lengths\n // compute consensus of all reads with max len\n // compute offset of all shorter reads\n // pad shorter reads\n // drop first consensus, compute consensus of full length reads and padded reads\n // ignore padded bases for consensus computation\n\n let mut consensus_lh = LogProb::ln_one();\n\n for i in 0..seq_len {\n // Maximum a-posteriori estimate for the consensus base.\n // Find the allele (theta \in ACGT) with the highest likelihood\n // given the bases at this position, weighted with their quality values\n let likelihoods = ALLELES\n .iter()\n .map(|a| Self::overall_allele_likelihood(self, a, i))\n .collect_vec(); //Check this. See below\n Self::build_consensus_sequence(\n likelihoods,\n &mut consensus_lh,\n &mut consensus_seq,\n &mut consensus_qual,\n 33.0,\n );\n self.build_consensus_strand(&mut consensus_strand, consensus_seq[i], i);\n }\n let name = if self.read_ids.is_some() {\n Self::build_verbose_read_name(self.uuid(), self.seqids(), self.read_ids)\n } else {\n format!(\n '{}_consensus-read-from:{}_reads',\n self.uuid(),\n self.seqids().len(),\n )\n };\n let umi = get_umi_string(&self.recs()[0]);\n let description = format!('{}{}', String::from_utf8(consensus_strand).unwrap(), umi);\n let consensus_rec =\n fastq::Record::with_attrs(&name, Some(&description), &consensus_seq, &consensus_qual);\n (consensus_rec, consensus_lh)\n }\n pub fn recs(&self) -> &[bam::Record] {\n self.recs\n }\n fn build_consensus_strand(\n &self,\n consensus_strand: &mut Vec<u8>,\n ref_base: u8,\n current_pos: usize,\n ) {\n let mut strand = StrandObservation::None;\n self.recs().iter().for_each(|rec| {\n if rec.base(current_pos) == ref_base {\n match rec.is_reverse() {\n true => strand |= StrandObservation::Reverse,\n false => strand |= StrandObservation::Forward,\n };\n }\n });\n match strand {\n StrandObservation::Forward => consensus_strand.push(b'+'),\n StrandObservation::Reverse => consensus_strand.push(b'-'),\n StrandObservation::Both => consensus_strand.push(b'*'),\n StrandObservation::None => consensus_strand.push(b'.'),\n }\n }\n}\n\nimpl<'a> CalcConsensus<'a, bam::Record> for CalcNonOverlappingConsensus<'a> {\n fn overall_allele_likelihood(&self, allele: &u8, i: usize) -> LogProb {\n let mut lh = LogProb::ln_one(); // posterior: log(P(theta)) = 1\n for rec in self.recs() {\n lh += Self::allele_likelihood_in_rec(allele, &rec.seq().as_bytes(), rec.qual(), i, 0);\n }\n lh\n }\n fn seqids(&self) -> &'a [usize] {\n self.seqids\n }\n fn uuid(&self) -> &'a str {\n self.uuid\n }\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/bam/collapse_reads_to_fragments/pipeline.rs
use super::calc_consensus::{CalcNonOverlappingConsensus, CalcOverlappingConsensus};\nuse super::unmark_record;\nuse anyhow::Result;\nuse bio::io::fastq;\nuse derive_new::new;\nuse rust_htslib::bam;\nuse rust_htslib::bam::record::Aux;\nuse rust_htslib::bam::Read;\nuse std::cmp::Ordering;\nuse std::collections::{BTreeMap, HashMap, HashSet};\nuse std::io;\nuse std::ops::Deref;\nuse std::ops::DerefMut;\nuse uuid::Uuid;\n\n#[derive(new)]\npub struct CallConsensusRead<W: io::Write> {\n bam_reader: bam::Reader,\n fq1_writer: fastq::Writer<W>,\n fq2_writer: fastq::Writer<W>,\n fq_se_writer: fastq::Writer<W>,\n bam_skipped_writer: bam::Writer,\n verbose_read_names: bool,\n}\n\ntype Position = i64;\ntype GroupIDs = HashSet<GroupId>;\ntype RecordIDs = Vec<RecordId>;\n\n#[derive(Hash, PartialEq, Eq, Debug)]\npub enum RecordId {\n Regular(Vec<u8>),\n Split(Vec<u8>),\n}\n\n#[derive(Hash, PartialEq, Eq, Clone, Debug)]\npub enum GroupId {\n Regular(u32),\n Split(u32),\n}\n\n#[derive(new, Debug)]\npub struct GroupEndIndex {\n #[new(default)]\n group_pos: HashMap<GroupId, Position>,\n #[new(default)]\n group_end_idx: BTreeMap<Position, GroupIDs>,\n}\n\nimpl GroupEndIndex {\n ///Inserts a new group id at given position\n ///If position is already saved for the group id the group-end-index will be updated\n pub fn insert(&mut self, group_id: GroupId, end_pos: i64) -> Result<()> {\n let update_end_pos = match self.group_pos.get(&group_id) {\n Some(&current_end_pos) => match current_end_pos < end_pos {\n true => {\n self.group_end_idx\n .get_mut(&current_end_pos)\n .map(|group_ids| group_ids.remove(&group_id));\n true\n }\n false => false,\n },\n None => true,\n };\n if update_end_pos {\n self.group_pos.insert(group_id.clone(), end_pos);\n self.group_end_idx\n .entry(end_pos)\n .or_insert_with(HashSet::new)\n .insert(group_id);\n }\n Ok(())\n }\n\n pub fn cut_lower_group_ids(&mut self, current_pos: Option<i64>) -> Result<Vec<GroupId>> {\n let group_ids: Vec<GroupId> = self\n .group_end_idx\n .range(\n ..current_pos.unwrap_or(\n self.group_end_idx\n .iter()\n .next_back()\n .map_or(0, |(entry, _)| *entry)\n + 1,\n ),\n )\n .flat_map(|(_, group_ids)| group_ids.clone())\n .collect();\n group_ids.iter().for_each(|group_id| {\n self.group_pos.remove(group_id);\n });\n match current_pos {\n Some(pos) => self.group_end_idx = self.group_end_idx.split_off(&pos),\n None => self.group_end_idx.clear(),\n }\n Ok(group_ids)\n }\n}\n\nimpl<W: io::Write> CallConsensusRead<W> {\n pub fn call_consensus_reads(&mut self) -> Result<()> {\n let mut group_end_idx = GroupEndIndex::new();\n let mut duplicate_groups: HashMap<GroupId, RecordIDs> = HashMap::new();\n let mut record_storage: HashMap<RecordId, RecordStorage> = HashMap::new();\n let mut current_chrom = None;\n let mut read_ids: Option<HashMap<usize, Vec<u8>>> = if self.verbose_read_names {\n Some(HashMap::new())\n } else {\n None\n };\n for (i, result) in self.bam_reader.records().enumerate() {\n let mut record = result?;\n if !record.is_unmapped() {\n let mut record_pos = None;\n match current_chrom == Some(record.tid()) {\n true => record_pos = Some(record.pos()),\n false => current_chrom = Some(record.tid()),\n }\n //Process completed duplicate groups\n calc_consensus_complete_groups(\n &mut group_end_idx,\n &mut duplicate_groups,\n record_pos,\n &mut record_storage,\n &mut self.fq1_writer,\n &mut self.fq2_writer,\n &mut self.fq_se_writer,\n &mut self.bam_skipped_writer,\n &mut read_ids,\n )?;\n }\n if record.is_unmapped() || record.is_mate_unmapped() {\n 
unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n continue;\n }\n if record.is_supplementary() {\n //TODO Supplementary Alignment\n continue;\n }\n record.cache_cigar();\n let duplicate_id_option = match record.aux(b'DI') {\n Ok(Aux::I8(duplicate_id)) => Some(duplicate_id as u32),\n Ok(Aux::I16(duplicate_id)) => Some(duplicate_id as u32),\n Ok(Aux::I32(duplicate_id)) => Some(duplicate_id as u32),\n Ok(Aux::U8(duplicate_id)) => Some(duplicate_id as u32),\n Ok(Aux::U16(duplicate_id)) => Some(duplicate_id as u32),\n Ok(Aux::U32(duplicate_id)) => Some(duplicate_id),\n Err(_) => None,\n _ => unreachable!('Invalid type for tag 'DI''),\n };\n let record_name = record.qname();\n read_ids.as_mut().map(|x| x.insert(i, record_name.to_vec()));\n //Check if record has duplicate ID\n match duplicate_id_option {\n //Case: duplicate ID exists\n Some(duplicate_id) => {\n let regular_id = RecordId::Regular(record_name.to_owned());\n let record_end_pos = record.cigar_cached().unwrap().end_pos() - 1;\n match record_storage.get_mut(&regular_id) {\n //Case: Right record\n Some(storage_entry) => {\n //For right record save end position and duplicate group ID\n let group_id_opt = match storage_entry {\n RecordStorage::PairedRecords {\n ref mut r1_rec,\n ref mut r2_rec,\n } => {\n let group_id = if cigar_has_softclips(r1_rec)\n || cigar_has_softclips(&record)\n {\n unmark_record(r1_rec)?;\n self.bam_skipped_writer.write(r1_rec)?;\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n None\n } else {\n duplicate_groups\n .entry(GroupId::Regular(duplicate_id))\n .or_insert_with(Vec::new)\n .push(RecordId::Regular(record_name.to_owned()));\n r2_rec.get_or_insert(IndexedRecord {\n rec: record,\n rec_id: i,\n });\n Some(GroupId::Regular(duplicate_id))\n };\n group_id\n }\n // This arm is reached if a mate is mapped to another chromosome.\n // In that case a new duplicate and record ID is required\n RecordStorage::SingleRecord { rec } => {\n let group_id = if cigar_has_softclips(rec)\n || cigar_has_softclips(&record)\n {\n unmark_record(rec)?;\n self.bam_skipped_writer.write(rec)?;\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n None\n } else {\n duplicate_groups\n .entry(GroupId::Split(duplicate_id))\n .or_insert_with(Vec::new)\n .push(RecordId::Split(record_name.to_owned()));\n record_storage.insert(\n RecordId::Split(record_name.to_owned()),\n RecordStorage::SingleRecord {\n rec: IndexedRecord {\n rec: record,\n rec_id: i,\n },\n },\n );\n Some(GroupId::Split(duplicate_id))\n };\n group_id\n }\n };\n if let Some(group_id) = group_id_opt {\n group_end_idx.insert(group_id, record_end_pos)?;\n } else {\n record_storage.remove(&regular_id);\n };\n }\n //Case: Left record or record w/o mate\n None => {\n if !record.is_paired() {\n //If right or single record save end position and duplicate group ID\n if cigar_has_softclips(&record) {\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n } else {\n duplicate_groups\n .entry(GroupId::Regular(duplicate_id))\n .or_insert_with(Vec::new)\n .push(RecordId::Regular(record_name.to_owned()));\n\n group_end_idx\n .insert(GroupId::Regular(duplicate_id), record_end_pos)?;\n record_storage.insert(\n RecordId::Regular(record_name.to_owned()),\n RecordStorage::SingleRecord {\n rec: IndexedRecord {\n rec: record,\n rec_id: i,\n },\n },\n );\n }\n } else {\n record_storage.insert(\n RecordId::Regular(record_name.to_owned()),\n RecordStorage::PairedRecords {\n r1_rec: IndexedRecord {\n rec: 
record,\n rec_id: i,\n },\n r2_rec: None,\n },\n );\n }\n }\n }\n }\n //Duplicate ID not existing\n //Record is written to bam file if it or its mate is unmapped\n //If record is right mate consensus is calculated\n //Else record is added to hashMap\n None => {\n match record_storage.get_mut(&RecordId::Regular(record_name.to_owned())) {\n //Case: Left record\n None => {\n if !record.is_paired() || record.tid() != record.mtid() {\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n } else {\n record_storage.insert(\n RecordId::Regular(record_name.to_owned()),\n RecordStorage::PairedRecords {\n r1_rec: IndexedRecord {\n rec: record,\n rec_id: i,\n },\n r2_rec: None,\n },\n );\n }\n }\n //Case: Left record already stored\n Some(_record_pair) => {\n let (rec_id, mut l_rec) = match record_storage\n .remove(&RecordId::Regular(record_name.to_owned()))\n .unwrap()\n {\n RecordStorage::PairedRecords { r1_rec, .. } => {\n (r1_rec.rec_id, r1_rec.into_rec())\n }\n RecordStorage::SingleRecord { .. } => unreachable!(),\n };\n if cigar_has_softclips(&l_rec) || cigar_has_softclips(&record) {\n unmark_record(&mut l_rec)?;\n self.bam_skipped_writer.write(&l_rec)?;\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n } else {\n let alignment_vectors = calc_read_alignments(&l_rec, &record);\n match alignment_vectors {\n Some((r1_alignment, r2_alignment)) => {\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n\n self.fq_se_writer.write_record(\n &CalcOverlappingConsensus::new(\n &[l_rec],\n &[record],\n &r1_alignment,\n &r2_alignment,\n &[rec_id, i],\n uuid,\n &mut read_ids,\n )\n .calc_consensus()\n .0,\n )?;\n }\n None => {\n unmark_record(&mut l_rec)?;\n self.bam_skipped_writer.write(&l_rec)?;\n unmark_record(&mut record)?;\n self.bam_skipped_writer.write(&record)?;\n }\n };\n }\n }\n }\n }\n }\n }\n //Process remaining groups\n calc_consensus_complete_groups(\n &mut group_end_idx,\n &mut duplicate_groups,\n None,\n &mut record_storage,\n &mut self.fq1_writer,\n &mut self.fq2_writer,\n &mut self.fq_se_writer,\n &mut self.bam_skipped_writer,\n &mut read_ids,\n )?;\n Ok(())\n }\n}\n\n#[allow(clippy::too_many_arguments)]\npub fn calc_consensus_complete_groups<'a, W: io::Write>(\n group_end_idx: &mut GroupEndIndex,\n duplicate_groups: &mut HashMap<GroupId, RecordIDs>,\n end_pos: Option<i64>,\n record_storage: &mut HashMap<RecordId, RecordStorage>,\n fq1_writer: &'a mut fastq::Writer<W>,\n fq2_writer: &'a mut fastq::Writer<W>,\n fq_se_writer: &'a mut fastq::Writer<W>,\n bam_skipped_writer: &'a mut bam::Writer,\n read_ids: &'a mut Option<HashMap<usize, Vec<u8>>>,\n) -> Result<()> {\n let group_ids = group_end_idx.cut_lower_group_ids(end_pos)?;\n for group_id in group_ids {\n let cigar_groups =\n group_reads_by_cigar(duplicate_groups.remove(&group_id).unwrap(), record_storage)?;\n for cigar_group in cigar_groups.values() {\n match cigar_group {\n CigarGroup::PairedRecords {\n r1_recs,\n r2_recs,\n r1_seqids,\n r2_seqids,\n } => {\n let alignment_vectors = calc_read_alignments(&r1_recs[0], &r2_recs[0]);\n match alignment_vectors {\n Some((r1_alignment, r2_alignment)) => {\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n let mut seqids = r1_seqids.clone();\n seqids.append(&mut r2_seqids.clone());\n fq_se_writer.write_record(\n &CalcOverlappingConsensus::new(\n r1_recs,\n r2_recs,\n &r1_alignment,\n &r2_alignment,\n &seqids,\n uuid,\n read_ids,\n )\n .calc_consensus()\n .0,\n )?;\n }\n None => {\n // If reads do not overlap or CIGAR in overlapping 
region differs R1 and R2 are handled sepperatly\n if r1_recs.len() > 1 {\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n fq1_writer.write_record(\n &CalcNonOverlappingConsensus::new(\n r1_recs, r1_seqids, uuid, read_ids,\n )\n .calc_consensus()\n .0,\n )?;\n fq2_writer.write_record(\n &CalcNonOverlappingConsensus::new(\n r2_recs, r2_seqids, uuid, read_ids,\n )\n .calc_consensus()\n .0,\n )?;\n } else {\n let mut r1_rec = r1_recs[0].clone();\n unmark_record(&mut r1_rec)?;\n bam_skipped_writer.write(&r1_rec)?;\n let mut r2_rec = r2_recs[0].clone();\n unmark_record(&mut r2_rec)?;\n bam_skipped_writer.write(&r2_rec)?;\n }\n }\n };\n }\n CigarGroup::SingleRecords { recs, seqids } => match recs.len().cmp(&1) {\n Ordering::Greater => {\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n fq_se_writer.write_record(\n &CalcNonOverlappingConsensus::new(recs, seqids, uuid, read_ids)\n .calc_consensus()\n .0,\n )?;\n }\n _ => {\n let mut rec = recs[0].clone();\n unmark_record(&mut rec)?;\n bam_skipped_writer.write(&rec)?;\n }\n },\n }\n }\n }\n Ok(())\n}\n\nfn group_reads_by_cigar(\n record_ids: Vec<RecordId>,\n record_storage: &mut HashMap<RecordId, RecordStorage>,\n) -> Result<HashMap<Cigar, CigarGroup>> {\n let mut cigar_groups: HashMap<Cigar, CigarGroup> = HashMap::new();\n for rec_id in record_ids {\n let storage_entry = record_storage.remove(&rec_id).unwrap();\n storage_entry.add_to_group(&mut cigar_groups)?;\n }\n Ok(cigar_groups)\n}\n\nfn calc_read_alignments(\n r1_rec: &bam::Record,\n r2_rec: &bam::Record,\n) -> Option<(Vec<bool>, Vec<bool>)> {\n let r1_start = r1_rec.pos();\n let r1_end = r1_rec.cigar_cached().unwrap().end_pos();\n let r2_start = r2_rec.pos();\n let r2_end = r1_rec.cigar_cached().unwrap().end_pos();\n if r1_rec.tid() != r2_rec.tid() {\n None\n } else if r1_start <= r2_start {\n //Check if reads overlap\n if r1_end >= r2_start {\n let offset = r2_start - r1_start;\n calc_alignment_vectors(offset, r1_rec, r2_rec)\n } else {\n //Reads do not overlap\n None\n }\n } else {\n //R2 starts before R1\n if r2_end >= r1_start {\n let offset = r1_start - r2_start;\n calc_alignment_vectors(offset, r2_rec, r1_rec)\n } else {\n None\n }\n }\n}\n\nfn calc_alignment_vectors(\n mut offset: i64,\n r1_rec: &bam::Record,\n r2_rec: &bam::Record,\n) -> Option<(Vec<bool>, Vec<bool>)> {\n let mut r1_vec = Vec::new();\n let mut r2_vec = Vec::new();\n let mut r1_cigarstring = r1_rec\n .cigar_cached()\n .unwrap()\n .iter()\n .flat_map(|cigar| vec![cigar.char(); cigar.len() as usize])\n .collect::<Vec<char>>()\n .into_iter();\n let mut r2_cigarstring = r2_rec\n .cigar_cached()\n .unwrap()\n .iter()\n .flat_map(|cigar| vec![cigar.char(); cigar.len() as usize])\n .collect::<Vec<char>>()\n .into_iter();\n let mut r1_cigar = r1_cigarstring.next();\n let mut r2_cigar = match offset == 0 {\n true => r2_cigarstring.next(),\n false => None,\n };\n let mut intersection_entry_passed = false;\n loop {\n if r2_cigar == None {\n match r1_cigar {\n None => break,\n Some('M') | Some('X') | Some('=') | Some('D') | Some('N') => {\n offset -= 1;\n }\n Some('S') => unreachable!(),\n Some(_) => {}\n }\n match_single_cigar(&r1_cigar, &mut r1_vec, &mut r2_vec);\n r1_cigar = r1_cigarstring.next();\n if offset == 0 {\n r2_cigar = r2_cigarstring.next();\n }\n } else if r1_cigar == None {\n match_single_cigar(&r2_cigar, &mut r2_vec, &mut r1_vec);\n r2_cigar = r2_cigarstring.next();\n } else if r1_cigar != r2_cigar {\n if !intersection_entry_passed && r1_cigar == Some('I') {\n r1_vec.push(true);\n 
r2_vec.push(false);\n r1_cigar = r1_cigarstring.next();\n } else {\n return None;\n }\n } else {\n intersection_entry_passed = true; // Can this me somehow only be called once?!\n match (r1_cigar, r2_cigar) {\n (Some('M'), Some('M'))\n | (Some('X'), Some('X'))\n | (Some('='), Some('='))\n | (Some('I'), Some('I')) => {\n r1_vec.push(true);\n r2_vec.push(true);\n r1_cigar = r1_cigarstring.next();\n r2_cigar = r2_cigarstring.next();\n }\n (Some('D'), Some('D')) | (Some('H'), Some('H')) => {\n r1_cigar = r1_cigarstring.next();\n r2_cigar = r2_cigarstring.next();\n }\n (None, None) | (None, Some(_)) | (Some(_), None) | (Some(_), Some(_)) => {\n unreachable!()\n }\n };\n }\n }\n Some((r1_vec, r2_vec))\n}\n\nfn cigar_has_softclips(rec: &bam::Record) -> bool {\n for cigar_operation in rec.cigar_cached().unwrap().iter() {\n if let bam::record::Cigar::SoftClip(_) = cigar_operation {\n return true;\n }\n }\n false\n}\n\nfn match_single_cigar(cigar: &Option<char>, first_vec: &mut Vec<bool>, second_vec: &mut Vec<bool>) {\n match cigar {\n Some('M') | Some('S') | Some('X') | Some('=') | Some('I') => {\n first_vec.push(true);\n second_vec.push(false);\n }\n Some(_) | None => {}\n };\n}\n\npub enum RecordStorage {\n PairedRecords {\n r1_rec: IndexedRecord,\n r2_rec: Option<IndexedRecord>,\n },\n SingleRecord {\n rec: IndexedRecord,\n },\n}\n\nimpl RecordStorage {\n fn add_to_group(self, cigar_groups: &mut HashMap<Cigar, CigarGroup>) -> Result<()> {\n let (r1_rec_entry, r1_rec_id, r2_rec_entry, r2_rec_id, cigar_tuple) = match self {\n RecordStorage::PairedRecords { r1_rec, r2_rec } => {\n let r1_rec_id = r1_rec.rec_id;\n let r1_rec_entry = r1_rec.into_rec();\n let r2_rec_unwrapped = r2_rec.unwrap();\n let r2_rec_id = r2_rec_unwrapped.rec_id;\n let r2_rec_entry = r2_rec_unwrapped.into_rec();\n let cigar_tuple = Cigar::Tuple {\n r1_cigar: r1_rec_entry.raw_cigar().to_vec(),\n r2_cigar: r2_rec_entry.raw_cigar().to_vec(),\n };\n if !cigar_groups.contains_key(&cigar_tuple) {\n cigar_groups.insert(\n cigar_tuple.clone(),\n CigarGroup::PairedRecords {\n r1_recs: Vec::new(),\n r2_recs: Vec::new(),\n r1_seqids: Vec::new(),\n r2_seqids: Vec::new(),\n },\n );\n }\n (\n r1_rec_entry,\n r1_rec_id,\n Some(r2_rec_entry),\n Some(r2_rec_id),\n cigar_tuple,\n )\n }\n RecordStorage::SingleRecord { rec } => {\n let rec_id = rec.rec_id;\n let rec_entry = rec.into_rec();\n let cigar_single = Cigar::Single {\n cigar: rec_entry.raw_cigar().to_vec(),\n };\n if !cigar_groups.contains_key(&cigar_single) {\n cigar_groups.insert(\n cigar_single.clone(),\n CigarGroup::SingleRecords {\n recs: Vec::new(),\n seqids: Vec::new(),\n },\n );\n }\n (rec_entry, rec_id, None, None, cigar_single)\n }\n };\n match cigar_groups.get_mut(&cigar_tuple) {\n Some(CigarGroup::PairedRecords {\n r1_recs,\n r2_recs,\n r1_seqids,\n r2_seqids,\n }) => {\n r1_recs.push(r1_rec_entry);\n r2_recs.push(r2_rec_entry.unwrap());\n r1_seqids.push(r1_rec_id);\n r2_seqids.push(r2_rec_id.unwrap());\n }\n Some(CigarGroup::SingleRecords { recs, seqids }) => {\n recs.push(r1_rec_entry);\n seqids.push(r1_rec_id);\n }\n None => unreachable!(),\n }\n\n Ok(())\n }\n}\n\npub struct IndexedRecord {\n rec: bam::Record,\n rec_id: usize,\n}\n\nimpl IndexedRecord {\n fn into_rec(self) -> bam::Record {\n self.rec\n }\n}\n\nimpl Deref for IndexedRecord {\n type Target = bam::Record;\n fn deref(&self) -> &bam::Record {\n &self.rec\n }\n}\n\nimpl DerefMut for IndexedRecord {\n fn deref_mut(&mut self) -> &mut Self::Target {\n &mut self.rec\n }\n}\n\npub enum CigarGroup {\n PairedRecords 
{\n r1_recs: Vec<bam::Record>,\n r2_recs: Vec<bam::Record>,\n r1_seqids: Vec<usize>,\n r2_seqids: Vec<usize>,\n },\n SingleRecords {\n recs: Vec<bam::Record>,\n seqids: Vec<usize>,\n },\n}\n\n#[derive(Hash, PartialEq, Eq, Clone)]\npub enum Cigar {\n Tuple {\n r1_cigar: Vec<u32>,\n r2_cigar: Vec<u32>,\n },\n Single {\n cigar: Vec<u32>,\n },\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/bam/collapse_reads_to_fragments/mod.rs
mod calc_consensus;\nmod pipeline;\n\nuse anyhow::Result;\nuse bio::io::fastq;\nuse log::info;\nuse pipeline::CallConsensusRead;\nuse rust_htslib::bam;\nuse rust_htslib::bam::{Format, Header, Read};\nuse std::path::Path;\n\npub fn call_consensus_reads_from_paths<P: AsRef<Path>>(\n bam_in: P,\n fq1: P,\n fq2: P,\n fq_se: P,\n bam_skipped_out: P,\n verbose_read_names: bool,\n) -> Result<()> {\n info!('Reading input files:\n {}', bam_in.as_ref().display());\n info!(\n 'Writing forward consensus reads to:\n {}',\n fq1.as_ref().display()\n );\n info!(\n 'Writing reverse consensus reads to:\n {}',\n fq2.as_ref().display()\n );\n info!(\n 'Writing single end consensus reads to:\n {}',\n fq_se.as_ref().display()\n );\n info!(\n 'Writing skipped reads to:\n {}',\n bam_skipped_out.as_ref().display()\n );\n let bam_reader = bam::Reader::from_path(bam_in)?;\n let fq1_writer = fastq::Writer::to_file(fq1)?;\n let fq2_writer = fastq::Writer::to_file(fq2)?;\n let fq_se_writer = fastq::Writer::to_file(fq_se)?;\n let bam_skipped_writer = bam::Writer::from_path(\n bam_skipped_out,\n &Header::from_template(bam_reader.header()),\n Format::Bam,\n )?;\n CallConsensusRead::new(\n bam_reader,\n fq1_writer,\n fq2_writer,\n fq_se_writer,\n bam_skipped_writer,\n verbose_read_names,\n )\n .call_consensus_reads()\n}\n\npub fn unmark_record(record: &mut bam::record::Record) -> Result<()> {\n record.unset_duplicate();\n let _ = record.remove_aux(b'PG');\n let _ = record.remove_aux(b'DI');\n let _ = record.remove_aux(b'DS');\n Ok(())\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/bam/mod.rs
//! Tools that work on BAM files\npub mod anonymize_reads;\npub mod collapse_reads_to_fragments;\npub mod depth;\npub mod plot;\n
mit
rust-bio-tools
./rust-bio-tools/src/common.rs
use anyhow::Context;\nuse approx::relative_eq;\nuse bio::stats::probs::{LogProb, PHREDProb};\nuse bio_types::sequence::SequenceRead;\nuse itertools::Itertools;\nuse ordered_float::NotNaN;\nuse std::cmp;\nuse std::collections::HashMap;\nuse std::str::FromStr;\n\nconst PROB_CONFUSION: LogProb = LogProb(-1.0986122886681098); // (1 / 3).ln()\nconst ALLELES: &[u8] = b'ACGT';\n\npub trait CalcConsensus<'a, R: SequenceRead> {\n fn validate_read_lengths(recs: &[R]) -> bool {\n let reference_length = recs[0].len();\n recs.iter()\n .map(|rec| rec.len())\n .all(|len| len == reference_length)\n }\n /// Compute the likelihood for the given allele and read position.\n /// The allele (A, C, G, or T) is an explicit parameter,\n /// the position i is captured by the closure.\n ///\n /// Likelihoods are managed in log space.\n /// A matching base is scored with (1 - PHRED score), a mismatch\n /// with PHRED score + confusion constant.\n fn allele_likelihood_in_rec(\n allele: &u8,\n seq: &[u8],\n qual: &[u8],\n i: usize,\n offset: u8,\n ) -> LogProb {\n let q = LogProb::from(PHREDProb::from((qual[i] - offset) as f64));\n if *allele == seq[i].to_ascii_uppercase() {\n q.ln_one_minus_exp()\n } else {\n q + PROB_CONFUSION\n }\n }\n fn build_consensus_sequence(\n likelihoods: Vec<LogProb>,\n consensus_lh: &mut LogProb,\n consensus_seq: &mut Vec<u8>,\n consensus_qual: &mut Vec<u8>,\n offset: f64,\n ) {\n if relative_eq!(*likelihoods[0], *likelihoods[1])\n && relative_eq!(*likelihoods[1], *likelihoods[2])\n && relative_eq!(*likelihoods[2], *likelihoods[3])\n {\n consensus_seq.push(b'N');\n consensus_qual.push(offset as u8);\n } else {\n let (max_posterior, allele_lh) = likelihoods\n .iter()\n .enumerate()\n .max_by_key(|&(_, &lh)| NotNaN::new(*lh).unwrap())\n .unwrap();\n *consensus_lh += *allele_lh;\n let marginal = LogProb::ln_sum_exp(&likelihoods);\n // new base: MAP\n consensus_seq.push(ALLELES[max_posterior]);\n // new qual: (1 - MAP)\n let qual = (likelihoods[max_posterior] - marginal).ln_one_minus_exp();\n // Assume the maximal quality, if the likelihood is infinite\n let truncated_quality: f64 = if (*PHREDProb::from(qual)).is_infinite() {\n 93.0\n } else {\n *PHREDProb::from(qual)\n };\n // Truncate quality values to PHRED+33 range\n consensus_qual\n .push(cmp::min(93 + offset as u64, (truncated_quality + offset) as u64) as u8);\n }\n }\n fn build_verbose_read_name(\n uuid: &str,\n seq_ids: &[usize],\n read_ids: &Option<HashMap<usize, Vec<u8>>>,\n ) -> String {\n format!(\n '{}_consensus-read-from:{}',\n uuid,\n seq_ids\n .iter()\n .map(|i| String::from_utf8(\n read_ids\n .as_ref()\n .map(|x| x.get(i).unwrap())\n .unwrap()\n .to_vec()\n )\n .unwrap())\n .join(',')\n )\n }\n\n fn overall_allele_likelihood(&self, allele: &u8, i: usize) -> LogProb;\n fn seqids(&self) -> &'a [usize];\n fn uuid(&self) -> &'a str;\n}\n\n#[derive(Debug, Clone)]\npub struct Region {\n pub(crate) target: String,\n pub(crate) start: u64,\n pub(crate) end: u64,\n}\n\nimpl FromStr for Region {\n type Err = anyhow::Error;\n\n fn from_str(s: &str) -> Result<Self, Self::Err> {\n let (target, range) = s.split_once(':').context('No ':' in region string')?;\n let (start, end) = range.split_once('-').context('No '-' in region string')?;\n let start = start.parse::<u64>()?;\n let end = end.parse::<u64>()?;\n Ok(Region {\n target: target.into(),\n start,\n end,\n })\n }\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/fastq/split.rs
//! Split reads from stdin up into the given files.\n//!\n//! ## Usage:\n//!\n//! Distribute reads from `test.fastq` into the files `A.fastq` and `B.fastq`.\n//! ```bash\n//! $ rbt fastq-split A.fastq B.fastq < test.fastq\n//! ```\n//!\nuse anyhow::Result;\nuse bio::io::fastq;\nuse bio::io::fastq::FastqRead;\nuse log::info;\nuse std::io;\nuse std::path::Path;\n\npub fn split<P: AsRef<Path>>(out_paths: &[P]) -> Result<()> {\n let mut reader = fastq::Reader::new(io::stdin());\n let mut writers = Vec::new();\n for path in out_paths {\n writers.push(fastq::Writer::to_file(path)?);\n }\n let mut record = fastq::Record::new();\n let mut i = 0;\n let mut j = 0;\n loop {\n reader.read(&mut record)?;\n if record.is_empty() {\n return Ok(());\n }\n writers[i].write_record(&record)?;\n i = (i + 1) % writers.len();\n j += 1;\n if j % 1000 == 0 {\n info!('{} records written.', j);\n }\n }\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/fastq/collapse_reads_to_fragments/calc_consensus.rs
use crate::common::CalcConsensus;\nuse bio::io::fastq;\nuse bio::stats::probs::LogProb;\nuse derive_new::new;\nuse itertools::Itertools;\n\nconst ALLELES: &[u8] = b'ACGT';\n\n/// Compute a maximum likelihood fragment sequence for a collection of FASTQ reads.\n///\n/// For each position, compute the likelihood of each allele and\n/// choose the most likely one. Write the most likely allele i.e. base\n/// as sequence into the consensus sequence. The quality value is the\n/// likelihood for this allele, encoded in PHRED+33.\n/// //TODO Generalize as this is identical to BAM except Offset and cigar/writing to record\n#[derive(new)]\npub struct CalcNonOverlappingConsensus<'a> {\n recs: &'a [fastq::Record],\n seqids: &'a [usize],\n uuid: &'a str,\n verbose_read_names: bool,\n}\nimpl<'a> CalcNonOverlappingConsensus<'a> {\n pub fn calc_consensus(&self) -> (fastq::Record, LogProb) {\n let seq_len = self.recs()[0].seq().len();\n let mut consensus_seq: Vec<u8> = Vec::with_capacity(seq_len);\n let mut consensus_qual: Vec<u8> = Vec::with_capacity(seq_len);\n\n // assert that all reads have the same length here\n assert!(\n Self::validate_read_lengths(self.recs()),\n 'Read length of FASTQ records {:?} differ. Cannot compute consensus sequence.',\n self.seqids()\n );\n\n // Potential workflow for different read lengths\n // compute consensus of all reads with max len\n // compute offset of all shorter reads\n // pad shorter reads\n // drop first consensus, compute consensus of full length reads and padded reads\n // ignore padded bases for consensus computation\n\n let mut consensus_lh = LogProb::ln_one();\n\n for i in 0..seq_len {\n // Maximum a-posteriori estimate for the consensus base.\n // Find the allele (theta \in ACGT) with the highest likelihood\n // given the bases at this position, weighted with their quality values\n let likelihoods = ALLELES\n .iter()\n .map(|a| Self::overall_allele_likelihood(self, a, i))\n .collect_vec(); //Check this. See below\n Self::build_consensus_sequence(\n likelihoods,\n &mut consensus_lh,\n &mut consensus_seq,\n &mut consensus_qual,\n 33.0,\n );\n }\n\n let name = match self.verbose_read_names {\n true => format!(\n '{}_consensus-read-from:{}',\n self.uuid(),\n self.seqids().iter().map(|i| format!('{}', i)).join(',')\n ),\n false => format!(\n '{}_consensus-read-from:{}_reads',\n self.uuid(),\n self.seqids().len(),\n ),\n };\n\n (\n fastq::Record::with_attrs(&name, None, &consensus_seq, &consensus_qual),\n consensus_lh,\n )\n }\n\n pub fn recs(&self) -> &[fastq::Record] {\n self.recs\n }\n}\n\n//TODO Generalized as it is identical to BAM except Offset\nimpl<'a> CalcConsensus<'a, fastq::Record> for CalcNonOverlappingConsensus<'a> {\n fn overall_allele_likelihood(&self, allele: &u8, i: usize) -> LogProb {\n let mut lh = LogProb::ln_one(); // posterior: log(P(theta)) = 1\n for rec in self.recs() {\n lh += Self::allele_likelihood_in_rec(allele, rec.seq(), rec.qual(), i, 33);\n }\n lh\n }\n\n fn seqids(&self) -> &'a [usize] {\n self.seqids\n }\n\n fn uuid(&self) -> &'a str {\n self.uuid\n }\n}\n\n/// Compute a consensus sequence for a collection of paired-end FASTQ\n/// reads taking overlap into account.\n///\n/// For each position, compute the likelihood of each allele and\n/// choose the most likely one. Write the most likely allele i.e. base\n/// as sequence into the consensus sequence. 
The quality value is the\n/// likelihood for this allele, encoded in PHRED+33.\n#[derive(new)]\npub struct CalcOverlappingConsensus<'a> {\n recs1: &'a [fastq::Record],\n recs2: &'a [fastq::Record],\n overlap: usize,\n seqids: &'a [usize],\n uuid: &'a str,\n verbose_read_names: bool,\n}\n\n//TODO Generalize as this is identical to BAM except Offset and cigar/writing to record\nimpl<'a> CalcOverlappingConsensus<'a> {\n pub fn calc_consensus(&self) -> (fastq::Record, LogProb) {\n let seq_len = self.recs1()[0].seq().len() + self.recs2()[0].seq().len() - self.overlap();\n let mut consensus_seq: Vec<u8> = Vec::with_capacity(seq_len);\n let mut consensus_qual: Vec<u8> = Vec::with_capacity(seq_len);\n\n // assert that all reads have the same length here\n assert!(\n Self::validate_read_lengths(self.recs1()),\n 'Read length of FASTQ forward records {:?} differ. Cannot compute consensus sequence.',\n self.seqids()\n );\n\n assert!(\n Self::validate_read_lengths(self.recs2()),\n 'Read length of FASTQ reverse records {:?} differ. Cannot compute consensus sequence.',\n self.seqids()\n );\n let mut consensus_lh = LogProb::ln_one();\n\n for i in 0..seq_len {\n let likelihoods = ALLELES\n .iter()\n .map(|a| Self::overall_allele_likelihood(self, a, i))\n .collect_vec(); //This will be calculated every iteration\n Self::build_consensus_sequence(\n likelihoods,\n &mut consensus_lh,\n &mut consensus_seq,\n &mut consensus_qual,\n 33.0,\n );\n }\n let name = match self.verbose_read_names {\n true => format!(\n '{}_consensus-read-from:{}',\n self.uuid(),\n self.seqids().iter().map(|i| format!('{}', i)).join(',')\n ),\n false => format!(\n '{}_consensus-read-from:{}_reads',\n self.uuid(),\n self.seqids().len(),\n ),\n };\n (\n fastq::Record::with_attrs(&name, None, &consensus_seq, &consensus_qual),\n consensus_lh,\n )\n }\n\n fn recs1(&self) -> &[fastq::Record] {\n self.recs1\n }\n\n fn recs2(&self) -> &[fastq::Record] {\n self.recs2\n }\n\n fn overlap(&self) -> usize {\n self.overlap\n }\n}\n\nimpl<'a> CalcConsensus<'a, fastq::Record> for CalcOverlappingConsensus<'a> {\n fn overall_allele_likelihood(&self, allele: &u8, i: usize) -> LogProb {\n let mut lh = LogProb::ln_one();\n for (rec1, rec2) in self.recs1().iter().zip(self.recs2()) {\n if i < rec1.seq().len() {\n lh += Self::allele_likelihood_in_rec(allele, rec1.seq(), rec1.qual(), i, 33);\n };\n if i >= rec1.seq().len() - self.overlap() {\n let rec2_i = i - (rec1.seq().len() - self.overlap());\n let rec2_seq = bio::alphabets::dna::revcomp(rec2.seq());\n let rec2_qual: Vec<u8> = rec2.qual().iter().rev().cloned().collect();\n lh += Self::allele_likelihood_in_rec(allele, &rec2_seq, &rec2_qual, rec2_i, 33);\n };\n }\n lh\n }\n\n fn seqids(&self) -> &'a [usize] {\n self.seqids\n }\n\n fn uuid(&self) -> &'a str {\n self.uuid\n }\n}\n
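Both consensus callers above weight every observed base by its PHRED+33 quality value when scoring the four alleles; the exact model lives in `CalcConsensus::allele_likelihood_in_rec`, which is not shown here. A rough, self-contained sketch of that weighting, with illustrative numbers only:

```rust
// Sketch only: a PHRED+33 quality byte encodes the probability that the
// sequenced base is wrong. For the quality character 'I' (ASCII 73):
// Q = 73 - 33 = 40, so P(error) = 10^(-Q/10) = 1e-4.
fn phred33_to_error_prob(qual: u8) -> f64 {
    10f64.powf(-f64::from(qual - 33) / 10.0)
}

fn main() {
    let p_err = phred33_to_error_prob(b'I');
    assert!((p_err - 1e-4).abs() < 1e-12);
    // Roughly: a base matching the candidate allele contributes about
    // ln(1 - p_err) to that allele's log-likelihood, a mismatching base
    // about ln(p_err / 3); the allele with the highest total likelihood
    // becomes the consensus base, and its probability is encoded back
    // into the consensus quality string.
    println!("P(error) at Q40 = {}", p_err);
}
```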
mit
rust-bio-tools
./rust-bio-tools/src/fastq/collapse_reads_to_fragments/pipeline.rs
use anyhow::Result;\nuse bio::io::fastq;\nuse bio::io::fastq::{FastqRead, Record};\nuse bio::stats::probs::LogProb;\nuse derive_new::new;\nuse ordered_float::NotNaN;\nuse rgsl::randist::gaussian::ugaussian_P;\nuse rocksdb::DB;\nuse std::io;\nuse std::io::Write;\nuse std::mem;\nuse std::process::{Command, Stdio};\nuse std::str;\nuse tempfile::tempdir;\nuse uuid::Uuid;\n\nuse super::calc_consensus::{CalcNonOverlappingConsensus, CalcOverlappingConsensus};\n\nconst HAMMING_THRESHOLD: f64 = 10.0;\n\n/// Interpret a cluster returned by starcode\nfn parse_cluster(record: csv::StringRecord) -> Result<Vec<usize>> {\n let seqids = &record[2];\n Ok(csv::ReaderBuilder::new()\n .delimiter(b',')\n .has_headers(false)\n .from_reader(seqids.as_bytes())\n .deserialize()\n .next()\n .unwrap()?)\n}\n\n/// Calculates the median hamming distance for all records by deriving the overlap from insert size\nfn median_hamming_distance(\n insert_size: usize,\n f_recs: &[fastq::Record],\n r_recs: &[fastq::Record],\n) -> Option<f64> {\n let distances = f_recs.iter().zip(r_recs).filter_map(|(f_rec, r_rec)| {\n // check if reads overlap within insert size\n if (insert_size < f_rec.seq().len()) | (insert_size < r_rec.seq().len()) {\n return None;\n }\n if insert_size >= (f_rec.seq().len() + r_rec.seq().len()) {\n return None;\n }\n let overlap = (f_rec.seq().len() + r_rec.seq().len()) - insert_size;\n let suffix_start_idx: usize = f_rec.seq().len() - overlap;\n Some(bio::alignment::distance::hamming(\n &f_rec.seq()[suffix_start_idx..],\n &bio::alphabets::dna::revcomp(r_rec.seq())[..overlap],\n ))\n });\n stats::median(distances)\n}\n\n/// as shown in http://www.milefoot.com/math/stat/pdfc-normaldisc.htm\nfn isize_pmf(value: f64, mean: f64, sd: f64) -> LogProb {\n LogProb((ugaussian_P((value + 0.5 - mean) / sd) - ugaussian_P((value - 0.5 - mean) / sd)).ln())\n}\n\n/// Used to store a mapping of read index to read sequence\n#[derive(Debug)]\nstruct FastqStorage {\n db: DB,\n}\n\nimpl FastqStorage {\n /// Create a new FASTQStorage using a Rocksdb database\n /// that maps read indices to read seqeunces.\n pub fn new() -> Result<Self> {\n // Save storage_dir to prevent it from leaving scope and\n // in turn deleting the tempdir\n let storage_dir = tempdir()?.path().join('db');\n Ok(FastqStorage {\n db: DB::open_default(storage_dir)?,\n })\n }\n\n #[allow(clippy::wrong_self_convention)]\n fn as_key(i: u64) -> [u8; 8] {\n unsafe { mem::transmute::<u64, [u8; 8]>(i) }\n }\n\n /// Enter a (read index, read sequence) pair into the database.\n pub fn put(&mut self, i: usize, f_rec: &fastq::Record, r_rec: &fastq::Record) -> Result<()> {\n Ok(self.db.put(\n &Self::as_key(i as u64),\n serde_json::to_string(&(f_rec, r_rec))?.as_bytes(),\n )?)\n }\n\n /// Retrieve the read sequence of the read with index `i`.\n pub fn get(&self, i: usize) -> Result<(fastq::Record, fastq::Record)> {\n Ok(serde_json::from_str(\n str::from_utf8(&self.db.get(&Self::as_key(i as u64))?.unwrap()).unwrap(),\n )?)\n }\n}\n\npub struct OverlappingConsensus {\n record: Record,\n likelihood: LogProb,\n}\n\npub struct NonOverlappingConsensus {\n f_record: Record,\n r_record: Record,\n likelihood: LogProb,\n}\n\npub trait CallConsensusReads<'a, R: io::Read + io::BufRead + 'a, W: io::Write + 'a> {\n /// Cluster reads from fastq readers according to their sequence\n /// and UMI, then compute a consensus sequence.\n ///\n /// Cluster the reads in the input file according to their sequence\n /// (concatenated p5 and p7 reads without UMI). 
Read the\n /// identified clusters, and cluster all reds in a cluster by UMI,\n /// creating groups of very likely PCR duplicates.\n /// Next, compute a consensus read for each unique read,\n /// i.e. a cluster with similar sequences and identical UMI,\n /// and write it into the output files.\n fn call_consensus_reads(&'a mut self) -> Result<()> {\n let spinner_style = indicatif::ProgressStyle::default_spinner()\n .tick_chars('⠁⠂⠄⡀⢀⠠⠐⠈ ')\n .template('{prefix:.bold.dim} {spinner} {wide_msg}');\n\n // cluster by umi\n // Note: If starcode is not installed, this throws a\n // hard to interpret error:\n // (No such file or directory (os error 2))\n // The expect added below should make this more clear.\n let mut umi_cluster = Command::new('starcode')\n .arg('--dist')\n .arg(format!('{}', self.umi_dist()))\n .arg('--seq-id')\n .arg('-s')\n .stdin(Stdio::piped())\n .stdout(Stdio::piped())\n .stderr(Stdio::piped())\n .spawn()\n .expect('Error in starcode call. Starcode might not be installed.');\n\n let mut f_rec = fastq::Record::new();\n let mut r_rec = fastq::Record::new();\n // init temp storage for reads\n let mut read_storage = FastqStorage::new()?;\n let mut i = 0;\n\n // prepare spinner for user feedback\n let pb = indicatif::ProgressBar::new_spinner();\n pb.set_style(spinner_style.clone());\n pb.set_prefix('[1/2] Clustering input reads by UMI using starcode.');\n\n loop {\n // update spinner\n pb.set_message(&format!(' Processed {:>10} reads', i));\n pb.inc(1);\n self.fq1_reader().read(&mut f_rec)?;\n self.fq2_reader().read(&mut r_rec)?;\n\n match (f_rec.is_empty(), r_rec.is_empty()) {\n (true, true) => break,\n (false, false) => (),\n (true, false) => {\n let error_message = format!('Given FASTQ files have unequal lengths. Forward file returned record {} as empty, reverse record is not: id:'{}' seq:'{:?}'.', i, r_rec.id(), str::from_utf8(r_rec.seq()));\n panic!('{}', error_message);\n }\n (false, true) => {\n let error_message = format!('Given FASTQ files have unequal lengths. Reverse file returned record {} as empty, forward record is not: id:'{}' seq:'{:?}'.', i, f_rec.id(), str::from_utf8(f_rec.seq()));\n panic!('{}', error_message);\n }\n }\n // extract umi for clustering\n let umi = if self.reverse_umi() {\n r_rec.seq()[..self.umi_len()].to_owned()\n } else {\n f_rec.seq()[..self.umi_len()].to_owned()\n };\n umi_cluster.stdin.as_mut().unwrap().write_all(&umi)?;\n umi_cluster.stdin.as_mut().unwrap().write_all(b'\n')?;\n // remove umi from read sequence for all further clustering steps\n if self.reverse_umi() {\n r_rec = self.strip_umi_from_record(&r_rec)\n } else {\n f_rec = self.strip_umi_from_record(&f_rec)\n }\n // store read sequences in an on-disk key value store for random access\n read_storage.put(i, &f_rec, &r_rec)?;\n i += 1;\n }\n umi_cluster.stdin.as_mut().unwrap().flush()?;\n drop(umi_cluster.stdin.take());\n pb.finish_with_message(&format!('Done. 
Analyzed {} reads.', i));\n\n // prepare user feedback\n let mut j = 0;\n let pb = indicatif::ProgressBar::new_spinner();\n pb.set_style(spinner_style);\n pb.set_prefix('[1/2] Clustering input reads by UMI using starcode.');\n // read clusters identified by the first starcode run\n // the first run clustered by UMI, hence all reads in\n // the clusters handled here had similar UMIs\n for record in csv::ReaderBuilder::new()\n .delimiter(b'\t')\n .has_headers(false)\n .from_reader(umi_cluster.stdout.as_mut().unwrap())\n .records()\n {\n // update spinner\n pb.inc(1);\n pb.set_message(&format!('Processed {:>10} cluster', j));\n let seqids = parse_cluster(record?)?;\n // cluster within in this cluster by read sequence\n let mut seq_cluster = Command::new('starcode')\n .arg('--dist')\n .arg(format!('{}', self.seq_dist()))\n .arg('--seq-id')\n .arg('-s')\n .stdin(Stdio::piped())\n .stdout(Stdio::piped())\n .stderr(Stdio::piped())\n .spawn()?;\n for &seqid in &seqids {\n // get sequences from rocksdb (key value store)\n let (f_rec, r_rec) = read_storage.get(seqid - 1).unwrap();\n // perform clustering using the concatenated read sequences\n // without the UMIs (remove in the first clustering step)\n seq_cluster\n .stdin\n .as_mut()\n .unwrap()\n .write_all(&[f_rec.seq(), r_rec.seq()].concat())?;\n seq_cluster.stdin.as_mut().unwrap().write_all(b'\n')?;\n }\n seq_cluster.stdin.as_mut().unwrap().flush()?;\n drop(seq_cluster.stdin.take());\n\n // handle each potential unique read, i.e. clusters with similar\n // UMI and similar sequence\n for record in csv::ReaderBuilder::new()\n .delimiter(b'\t')\n .has_headers(false)\n .from_reader(seq_cluster.stdout.as_mut().unwrap())\n .records()\n {\n let inner_seqids = parse_cluster(record?)?;\n // this is a proper cluster\n // calculate consensus reads and write to output FASTQs\n let mut f_recs = Vec::new();\n let mut r_recs = Vec::new();\n let mut outer_seqids = Vec::new();\n\n for inner_seqid in inner_seqids {\n let seqid = seqids[inner_seqid - 1];\n let (f_rec, r_rec) = read_storage.get(seqid - 1)?;\n f_recs.push(f_rec);\n r_recs.push(r_rec);\n outer_seqids.push(seqid);\n }\n self.write_records(f_recs, r_recs, outer_seqids)?;\n }\n\n match seq_cluster\n .wait()\n .expect('process did not even start')\n .code()\n {\n Some(0) => (),\n Some(s) => eprintln!('Starcode failed with error code {}', s),\n None => eprintln!('Starcode was terminated by signal'),\n }\n j += 1;\n }\n pb.finish_with_message(&format!('Done. 
Processed {} cluster.', j));\n Ok(())\n }\n fn strip_umi_from_record(&mut self, record: &Record) -> Record {\n let rec_seq = &record.seq()[self.umi_len()..];\n let rec_qual = &record.qual()[self.umi_len()..];\n Record::with_attrs(record.id(), record.desc(), rec_seq, rec_qual)\n }\n fn write_records(\n &mut self,\n f_recs: Vec<Record>,\n r_recs: Vec<Record>,\n outer_seqids: Vec<usize>,\n ) -> Result<()>;\n fn fq1_reader(&mut self) -> &mut fastq::Reader<R>;\n fn fq2_reader(&mut self) -> &mut fastq::Reader<R>;\n fn umi_len(&self) -> usize;\n fn seq_dist(&self) -> usize;\n fn umi_dist(&self) -> usize;\n fn reverse_umi(&self) -> bool;\n}\n\n/// Struct for calling non-overlapping consensus reads\n/// Implements Trait CallConsensusReads\n#[allow(clippy::too_many_arguments)]\n#[derive(new)]\npub struct CallNonOverlappingConsensusRead<'a, R: io::Read, W: io::Write> {\n fq1_reader: &'a mut fastq::Reader<R>,\n fq2_reader: &'a mut fastq::Reader<R>,\n fq1_writer: &'a mut fastq::Writer<W>,\n fq2_writer: &'a mut fastq::Writer<W>,\n umi_len: usize,\n seq_dist: usize,\n umi_dist: usize,\n reverse_umi: bool,\n verbose_read_names: bool,\n}\n\nimpl<'a, R: io::Read + io::BufRead, W: io::Write> CallConsensusReads<'a, R, W>\n for CallNonOverlappingConsensusRead<'a, R, W>\n{\n fn write_records(\n &mut self,\n f_recs: Vec<Record>,\n r_recs: Vec<Record>,\n outer_seqids: Vec<usize>,\n ) -> Result<()> {\n if f_recs.len() > 1 {\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n self.fq1_writer.write_record(\n &CalcNonOverlappingConsensus::new(\n &f_recs,\n &outer_seqids,\n uuid,\n self.verbose_read_names,\n )\n .calc_consensus()\n .0,\n )?;\n self.fq2_writer.write_record(\n &CalcNonOverlappingConsensus::new(\n &r_recs,\n &outer_seqids,\n uuid,\n self.verbose_read_names,\n )\n .calc_consensus()\n .0,\n )?;\n } else {\n self.fq1_writer.write_record(&f_recs[0])?;\n self.fq2_writer.write_record(&r_recs[0])?;\n }\n Ok(())\n }\n\n fn fq1_reader(&mut self) -> &mut fastq::Reader<R> {\n self.fq1_reader\n }\n\n fn fq2_reader(&mut self) -> &mut fastq::Reader<R> {\n self.fq2_reader\n }\n\n fn umi_len(&self) -> usize {\n self.umi_len\n }\n\n fn seq_dist(&self) -> usize {\n self.seq_dist\n }\n\n fn umi_dist(&self) -> usize {\n self.umi_dist\n }\n\n fn reverse_umi(&self) -> bool {\n self.reverse_umi\n }\n}\n\n///Clusters fastq reads by UMIs and calls consensus for overlapping reads\n#[allow(clippy::too_many_arguments)]\n#[derive(new)]\npub struct CallOverlappingConsensusRead<'a, R: io::Read, W: io::Write> {\n fq1_reader: &'a mut fastq::Reader<R>,\n fq2_reader: &'a mut fastq::Reader<R>,\n fq1_writer: &'a mut fastq::Writer<W>,\n fq2_writer: &'a mut fastq::Writer<W>,\n fq3_writer: &'a mut fastq::Writer<W>,\n umi_len: usize,\n seq_dist: usize,\n umi_dist: usize,\n insert_size: usize,\n std_dev: usize,\n reverse_umi: bool,\n verbose_read_names: bool,\n}\n\nimpl<'a, R: io::Read, W: io::Write> CallOverlappingConsensusRead<'a, R, W> {\n fn isize_highest_probability(&mut self, f_seq_len: usize, r_seq_len: usize) -> f64 {\n if f_seq_len + f_seq_len < self.insert_size {\n self.insert_size as f64\n } else if f_seq_len + r_seq_len > self.insert_size + 2 * self.std_dev {\n (self.insert_size + 2 * self.std_dev) as f64\n } else {\n (f_seq_len + r_seq_len) as f64\n }\n }\n\n fn maximum_likelihood_overlapping_consensus(\n &mut self,\n f_recs: &[Record],\n r_recs: &[Record],\n outer_seqids: &[usize],\n uuid: &str,\n ) -> OverlappingConsensus {\n //Returns consensus record by filtering overlaps with lowest hamming distance.\n //For these 
overlaps(insert sizes) the consensus reads and their likelihoods are calculated.\n //The read with maximum likelihood will be returned.\n let insert_sizes = ((self.insert_size - 2 * self.std_dev)\n ..(self.insert_size + 2 * self.std_dev))\n .filter_map(|insert_size| {\n median_hamming_distance(insert_size, f_recs, r_recs)\n .filter(|&median_distance| median_distance < HAMMING_THRESHOLD)\n .map(|_| insert_size)\n });\n insert_sizes\n .map(|insert_size| {\n let overlap = (f_recs[0].seq().len() + r_recs[0].seq().len()) - insert_size;\n let (consensus_record, lh_isize) = CalcOverlappingConsensus::new(\n f_recs,\n r_recs,\n overlap,\n outer_seqids,\n uuid,\n self.verbose_read_names,\n )\n .calc_consensus();\n let likelihood = lh_isize\n + isize_pmf(\n insert_size as f64,\n self.insert_size as f64,\n self.std_dev as f64,\n );\n OverlappingConsensus {\n record: consensus_record,\n likelihood,\n }\n })\n .max_by_key(|consensus| NotNaN::new(*consensus.likelihood).unwrap())\n .unwrap()\n }\n\n fn maximum_likelihood_nonoverlapping_consensus(\n &mut self,\n f_recs: &[Record],\n r_recs: &[Record],\n outer_seqids: &[usize],\n uuid: &str,\n ) -> NonOverlappingConsensus {\n //Calculate non-overlapping consensus records and shared lh\n let (f_consensus_rec, f_lh) =\n CalcNonOverlappingConsensus::new(f_recs, outer_seqids, uuid, self.verbose_read_names)\n .calc_consensus();\n let (r_consensus_rec, r_lh) =\n CalcNonOverlappingConsensus::new(r_recs, outer_seqids, uuid, self.verbose_read_names)\n .calc_consensus();\n let overall_lh_isize = f_lh + r_lh;\n //Determine insert size with highest probability for non-overlapping records based on expected insert size\n let likeliest_isize =\n self.isize_highest_probability(f_recs[0].seq().len(), r_recs[0].seq().len());\n let overall_lh = overall_lh_isize\n + isize_pmf(\n likeliest_isize,\n self.insert_size as f64,\n self.std_dev as f64,\n );\n NonOverlappingConsensus {\n f_record: f_consensus_rec,\n r_record: r_consensus_rec,\n likelihood: overall_lh,\n }\n }\n}\n\nimpl<'a, R: io::Read + io::BufRead, W: io::Write> CallConsensusReads<'a, R, W>\n for CallOverlappingConsensusRead<'a, R, W>\n{\n fn write_records(\n &mut self,\n f_recs: Vec<Record>,\n r_recs: Vec<Record>,\n outer_seqids: Vec<usize>,\n ) -> Result<()> {\n //TODO Add deterministic uuid considering read ids\n let uuid = &Uuid::new_v4().to_hyphenated().to_string();\n let ol_consensus =\n self.maximum_likelihood_overlapping_consensus(&f_recs, &r_recs, &outer_seqids, uuid);\n let non_ol_consensus =\n self.maximum_likelihood_nonoverlapping_consensus(&f_recs, &r_recs, &outer_seqids, uuid);\n match ol_consensus.likelihood > non_ol_consensus.likelihood {\n true => self.fq3_writer.write_record(&ol_consensus.record)?,\n false => {\n self.fq1_writer.write_record(&non_ol_consensus.f_record)?;\n self.fq2_writer.write_record(&non_ol_consensus.r_record)?;\n }\n }\n Ok(())\n }\n\n fn fq1_reader(&mut self) -> &mut fastq::Reader<R> {\n self.fq1_reader\n }\n\n fn fq2_reader(&mut self) -> &mut fastq::Reader<R> {\n self.fq2_reader\n }\n\n fn umi_len(&self) -> usize {\n self.umi_len\n }\n\n fn seq_dist(&self) -> usize {\n self.seq_dist\n }\n\n fn umi_dist(&self) -> usize {\n self.umi_dist\n }\n\n fn reverse_umi(&self) -> bool {\n self.reverse_umi\n }\n}\n
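In the pipeline above, `median_hamming_distance` derives the overlap of a read pair from a candidate insert size, and `isize_pmf` scores that insert size with a discretised normal distribution, P(I = s) ≈ Φ((s + 0.5 − μ)/σ) − Φ((s − 0.5 − μ)/σ) (see the link in the source). A small sketch of the overlap geometry, using hypothetical read lengths:

```rust
// Sketch only: overlap geometry as used by `median_hamming_distance`.
// With 100 bp mates and an insert size of 150, the mates overlap by
// 100 + 100 - 150 = 50 bases: the last 50 bases of the forward read are
// compared against the first 50 bases of the reverse-complemented mate.
fn main() {
    let (f_len, r_len, insert_size) = (100usize, 100usize, 150usize);
    // same bounds that `median_hamming_distance` checks before computing a distance
    assert!(insert_size >= f_len && insert_size >= r_len);
    assert!(insert_size < f_len + r_len);
    let overlap = f_len + r_len - insert_size;
    let suffix_start = f_len - overlap;
    println!("overlap = {} bases, forward suffix starts at index {}", overlap, suffix_start);
}
```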
mit
rust-bio-tools
./rust-bio-tools/src/fastq/collapse_reads_to_fragments/mod.rs
//! Tool to merge sets of reads in paired FASTQ files that share the UMI and have similar read sequence.\n//! The result is a maximum likelihood fragment sequence per set.\n//!\n//! This tool takes two FASTQ files (forward and reverse)\n//! and returns two FASTQ files in which all PCR duplicates\n//! have been merged into a consensus read.\n//! Duplicates are identified by a Unique Molecular Identifier (UMI).\n//!\n//! ## Requirements:\n//!\n//! - starcode\n//!\n//!\n//! ## Usage:\n//!\n//! ```bash\n//! $ rbt collapse-reads-to-fragments fastq \\n//! <Path to FASTQ file with forward reads> \\n//! <Path to FASTQ file with reverse reads> \\n//! <Path for output forward FASTQ file> \\n//! <Path for output reverse FASTQ file> \\n//! -l <Length of UMI sequence> \\n//! -D <Maximum distance between sequences in a cluster> \ # See step 3 below\n//! -d <Maximum distance between UMIs in a cluster> \ # See step 2 below\n//! --umi-on-reverse # if the UMIs are part of the reverse reads\n//! ```\n//!\n//! ## Assumptions:\n//!\n//! - Reads are of equal length\n//! - UMI is the prefix of the reads\n//!\n//! ## Workflow:\n//!\n//! The main steps are:\n//!\n//! 1. Preparation\n//! 1. Remove UMI sequence from read (and save it for later use).\n//! 2. Concatenate forward and reverse sequence.\n//! ```text\n//! Forward Read: [================]\n//! Reverse Read: [(UMI)-----------]\n//! Sequence for clustering in step 3: [================-----------]\n//! ```\n//!\n//! 2. Cluster all reads by their UMIs using starcode.\n//! Each cluster generated in this step contains reads with similar UMIs.\n//! However, all PCR duplicates of a read are within one cluster, since they\n//! share a UMI sequence.\n//! The size of these clusters highly depends on the length of the used UMI.\n//!\n//! 3. For each cluster from step 2:\n//! 1. Cluster reads by their concatenated sequences (without UMI) using starcode.\n//! 2. Each new cluster contains reads that have a similar UMI (from step 2)\n//! as well as similar sequences. Consequently, these sets of reads are\n//! likely to be PCR duplicates of each other.\n//!\n//! 4. For each cluster from step 3: Compute a consensus sequence.\n//!\n//! At each position in the read, all bases and quality values are used\n//! to compute the base with Maximum a-posteriori probability (MAP).\n//!\n//! 1. For one position, compute the likelihood for the four alleles\n//! A, C, G, and T, incorporating the number of bases as well as\n//! their quality values.\n//! 2. Choose the allele with the largest likelihood for the consensus read.\n//! 3. Compute the quality value of the consensus read from the maximum posterior\n//! probability used to select the allele.\n//!\n//! 5. 
Write consensus reads to output file.\n//!\n//!\n//!\n// Since this is a binary crate, documentation needs to be compiled with this 'ancient incantation':\n// https://github.com/rust-lang/cargo/issues/1865#issuecomment-394179125\nmod calc_consensus;\nmod pipeline;\n\nuse anyhow::Result;\nuse bio::io::fastq;\nuse flate2::bufread::MultiGzDecoder;\nuse flate2::write::GzEncoder;\nuse flate2::Compression;\nuse log::info;\nuse pipeline::{CallConsensusReads, CallNonOverlappingConsensusRead, CallOverlappingConsensusRead};\nuse std::fs;\nuse std::io::{BufReader, BufWriter, Read, Write};\nuse std::path::Path;\n\n// TODO: reduce arguments for clippy to <= 7\n/// Build readers for the given input and output FASTQ files and pass them to\n/// `call_consensus_reads`.\n///\n/// The type of the readers (writers) depends on the file ending.\n/// If the input file names end with '.gz' a gzipped reader (writer) is used.\n#[allow(clippy::too_many_arguments)]\npub fn call_consensus_reads_from_paths<P: AsRef<Path> + std::fmt::Debug>(\n fq1: P,\n fq2: P,\n fq1_out: P,\n fq2_out: P,\n fq3_out: Option<P>,\n umi_len: usize,\n seq_dist: usize,\n umi_dist: usize,\n reverse_umi: bool,\n verbose_read_names: bool,\n insert_size: Option<usize>,\n std_dev: Option<usize>,\n) -> Result<()> {\n match fq3_out {\n None => {\n info!(\n 'Reading input files:\n {}\n {}',\n fq1.as_ref().display(),\n fq2.as_ref().display()\n );\n info!(\n 'Writing output to:\n {}\n {}',\n fq1_out.as_ref().display(),\n fq2_out.as_ref().display()\n );\n\n fn reader<P: AsRef<Path>>(\n path: P,\n ) -> Result<fastq::Reader<BufReader<Box<dyn std::io::Read>>>> {\n let r: Box<dyn Read> = if path.as_ref().ends_with('.gz') {\n Box::new(\n fs::File::open(&path)\n .map(BufReader::new)\n .map(MultiGzDecoder::new)?,\n )\n } else {\n Box::new(fs::File::open(&path).map(BufReader::new)?)\n };\n Ok(fastq::Reader::new(r))\n }\n\n fn writer<P: AsRef<Path>>(path: P) -> Result<fastq::Writer<Box<dyn std::io::Write>>> {\n let w: Box<dyn Write> = if path.as_ref().ends_with('.gz') {\n Box::new(\n fs::File::create(&path)\n .map(BufWriter::new)\n .map(|w| GzEncoder::new(w, Compression::default()))?,\n )\n } else {\n Box::new(fs::File::create(&path).map(BufWriter::new)?)\n };\n Ok(fastq::Writer::new(w))\n }\n\n CallNonOverlappingConsensusRead::new(\n &mut reader(fq1)?,\n &mut reader(fq2)?,\n &mut writer(fq1_out)?,\n &mut writer(fq2_out)?,\n umi_len,\n seq_dist,\n umi_dist,\n reverse_umi,\n verbose_read_names,\n )\n .call_consensus_reads()\n }\n Some(fq3_path) => {\n eprintln!(\n 'Reading input files:\n {}\n {}',\n fq1.as_ref().display(),\n fq2.as_ref().display()\n );\n eprintln!(\n 'Writing output to:\n {}\n {}\n {}',\n fq1_out.as_ref().display(),\n fq2_out.as_ref().display(),\n fq3_path.as_ref().display()\n );\n match (fq1.as_ref().ends_with('.gz'), fq2.as_ref().ends_with('.gz'), fq1_out.as_ref().ends_with('.gz'), fq2_out.as_ref().ends_with('.gz'), fq3_path.as_ref().ends_with('.gz')) {\n (false, false, false, false, false) => CallOverlappingConsensusRead::new(\n &mut fastq::Reader::from_file(fq1)?,\n &mut fastq::Reader::from_file(fq2)?,\n &mut fastq::Writer::to_file(fq1_out)?,\n &mut fastq::Writer::to_file(fq2_out)?,\n &mut fastq::Writer::to_file(fq3_path)?,\n umi_len,\n seq_dist,\n umi_dist,\n insert_size.unwrap(),\n std_dev.unwrap(),\n reverse_umi,\n verbose_read_names,\n ).call_consensus_reads(),\n (true, true, false, false, false) => CallOverlappingConsensusRead::new(\n &mut fastq::Reader::new(fs::File::open(fq1).map(BufReader::new).map(MultiGzDecoder::new)?),\n &mut 
fastq::Reader::new(fs::File::open(fq2).map(BufReader::new).map(MultiGzDecoder::new)?),\n &mut fastq::Writer::to_file(fq1_out)?,\n &mut fastq::Writer::to_file(fq2_out)?,\n &mut fastq::Writer::to_file(fq3_path)?,\n umi_len,\n seq_dist,\n umi_dist,\n insert_size.unwrap(),\n std_dev.unwrap(),\n reverse_umi,\n verbose_read_names,\n ).call_consensus_reads(),\n (false, false, true, true, true) => CallOverlappingConsensusRead::new(\n &mut fastq::Reader::from_file(fq1)?,\n &mut fastq::Reader::from_file(fq2)?,\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq1_out)?, Compression::default())),\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq2_out)?, Compression::default())),\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq3_path)?, Compression::default())),\n umi_len,\n seq_dist,\n umi_dist,\n insert_size.unwrap(),\n std_dev.unwrap(),\n reverse_umi,\n verbose_read_names,\n ).call_consensus_reads(),\n (true, true, true, true, true) => CallOverlappingConsensusRead::new(\n &mut fastq::Reader::new(fs::File::open(fq1).map(BufReader::new).map(MultiGzDecoder::new)?),\n &mut fastq::Reader::new(fs::File::open(fq2).map(BufReader::new).map(MultiGzDecoder::new)?),\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq1_out)?, Compression::default())),\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq2_out)?, Compression::default())),\n &mut fastq::Writer::new(GzEncoder::new(fs::File::create(fq3_path)?, Compression::default())),\n umi_len,\n seq_dist,\n umi_dist,\n insert_size.unwrap(),\n std_dev.unwrap(),\n reverse_umi,\n verbose_read_names,\n ).call_consensus_reads(),\n _ => panic!('Invalid combination of files. Each pair of files (input and output) need to be both gzipped or both not zipped.')\n }\n }\n }\n}\n
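A small sketch of the clustering key described in step 1 of the workflow above: the UMI is stripped from the read that carries it, and the remaining forward and reverse sequences are concatenated before being handed to starcode. The sequences and UMI length below are made up:

```rust
// Sketch only: build the UMI key (first starcode run) and the concatenated
// clustering key (second starcode run) for one read pair, assuming the UMI
// sits at the start of the reverse read (--umi-on-reverse).
fn main() {
    let umi_len = 4;
    let f_seq = b"ACGTACGTACGT".to_vec(); // forward read
    let r_seq = b"TTTTGGGGCCCCAAAA".to_vec(); // reverse read, first 4 bases = UMI
    let umi = &r_seq[..umi_len];
    let r_wo_umi = &r_seq[umi_len..];
    let clustering_key = [f_seq.as_slice(), r_wo_umi].concat();
    println!(
        "UMI = {}, clustering key = {}",
        String::from_utf8_lossy(umi),
        String::from_utf8_lossy(&clustering_key)
    );
}
```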
mit
rust-bio-tools
./rust-bio-tools/src/fastq/mod.rs
//! Tools that work on FASTQ files\npub mod collapse_reads_to_fragments;\npub mod filter;\npub mod split;\n
mit
rust-bio-tools
./rust-bio-tools/src/fastq/filter.rs
//! Remove reads whose names are listed in a text file; all remaining reads are written to a new FASTQ file.\n//!\n//! ## Usage:\n//!\n//! Remove the read with identifier `A` from `test.fastq`; the remaining reads end up in `filtered.fastq`\n//! ```bash\n//! $ cat ids.txt\n//! A\n//!\n//! $ cat test.fastq\n//! @A\n//! ACTCTATCTA\n//! +\n//! !!!!!!!!!!\n//! @B\n//! CTCTATCTCTA\n//! +\n//! !!!!!!!!!!!\n//!\n//! $ rbt fastq-filter ids.txt < test.fastq > filtered.fastq\n//!\n//! $ cat filtered.fastq\n//! @B\n//! CTCTATCTCTA\n//! +\n//! !!!!!!!!!!!\n//! ```\n//!\nuse anyhow::Result;\nuse bio::io::fastq;\nuse bio::io::fastq::FastqRead;\nuse std::collections::HashSet;\nuse std::fs::File;\nuse std::io::{self, BufRead, BufReader};\nuse std::iter::FromIterator;\nuse std::path::Path;\n\npub fn filter<P: AsRef<Path>>(ids_path: P) -> Result<()> {\n let mut reader = fastq::Reader::new(io::stdin());\n let mut writer = fastq::Writer::new(io::stdout());\n let f = File::open(ids_path)?;\n let f = BufReader::new(f);\n let ids =\n HashSet::<String>::from_iter(f.lines().filter_map(Result::ok).collect::<Vec<String>>());\n\n let mut record = fastq::Record::new();\n\n loop {\n reader.read(&mut record)?;\n if record.is_empty() {\n return Ok(());\n }\n if !ids.contains(record.id()) {\n writer.write_record(&record)?;\n }\n }\n}\n
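To make the semantics explicit: `ids.txt` acts as a blacklist, and `filter` keeps a read only if its id is absent from that list. A minimal sketch of the membership test with hypothetical ids:

```rust
// Sketch only: reads listed in the id set are dropped, all others are kept.
use std::collections::HashSet;

fn main() {
    let ids: HashSet<&str> = ["A"].iter().copied().collect(); // contents of ids.txt
    for read_id in ["A", "B"] {
        if !ids.contains(read_id) {
            println!("kept read {}", read_id); // only B is written to stdout
        }
    }
}
```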
mit
rust-bio-tools
./rust-bio-tools/src/sequences_stats.rs
//! Compute statistics on sequences from stdin:\n//! - min: length of shortest sequence\n//! - max: length of longest sequence\n//! - average: average length of sequence\n//! - median: median length of sequence\n//! - nb_reads: number of reads\n//! - nb_bases: number of bases\n//! - n50: N50 of sequences\n//!\n//! Output is in YAML format\n//!\n//! ## Usage:\n//!\n//! ```bash\n//! $ rbt sequence-stats < A.fasta\n//! $ rbt sequence-stats -q < A.fastq\n//! ```\n\nuse anyhow::{bail, Result};\nuse bio::io::{fasta, fastq};\nuse std::io;\nuse thiserror::Error;\n\npub fn stats(fastq: bool) -> Result<()> {\n let mut lengths = if fastq {\n fastq_lengths()\n } else {\n fasta_lengths()\n };\n\n if lengths.is_empty() {\n bail!(InputError::NoSequence);\n }\n // Sort lengths one time\n lengths.sort_unstable();\n\n let nb_bases = lengths.iter().sum::<usize>();\n\n println!(\n 'min: {min}\nmax: {max}\naverage: {average}\nmedian: {median}\nnumber of reads: {nb_reads}\nnumber of bases: {nb_bases}\nn50: {n50}',\n min = lengths[0], // First element is the minimal element\n max = lengths[lengths.len() - 1], // last element is the maximal element\n average = average(&lengths),\n median = median(&lengths),\n nb_reads = lengths.len(),\n nb_bases = nb_bases,\n n50 = n50(&lengths, nb_bases),\n );\n\n Ok(())\n}\n\nfn fasta_lengths() -> Vec<usize> {\n let reader = fasta::Reader::new(io::stdin());\n\n let mut lengths = Vec::new();\n\n let mut records = reader.records();\n while let Some(Ok(record)) = records.next() {\n lengths.push(record.seq().len());\n }\n\n lengths\n}\n\npub fn fastq_lengths() -> Vec<usize> {\n let reader = fastq::Reader::new(io::stdin());\n\n let mut lengths = Vec::new();\n\n let mut records = reader.records();\n while let Some(Ok(record)) = records.next() {\n lengths.push(record.seq().len());\n }\n\n lengths\n}\n\nfn n50(numbers: &[usize], nb_bases_total: usize) -> usize {\n let mut acc = 0;\n for val in numbers.iter() {\n acc += *val;\n if acc > nb_bases_total / 2 {\n return *val;\n }\n }\n\n numbers[numbers.len() - 1]\n}\n\nfn average(numbers: &[usize]) -> f64 {\n numbers.iter().sum::<usize>() as f64 / numbers.len() as f64\n}\n\nfn median(data: &[usize]) -> f64 {\n match data.len() {\n 0 => 0.0,\n 1 => data[0] as f64,\n len if len % 2 == 0 => {\n let v1 = data[(len / 2) - 1];\n let v2 = data[len / 2];\n (v1 + v2) as f64 / 2.0\n }\n len => data[len / 2] as f64,\n }\n}\n\n#[derive(Error, Debug)]\npub enum InputError {\n #[error('stdin didn't contain any sequence')]\n NoSequence,\n}\n
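The `n50` helper above walks the ascending sorted lengths and returns the length at which the running sum first exceeds half of the total number of bases. A worked example with made-up read lengths:

```rust
// Sketch only: N50 of the hypothetical read lengths 2, 3, 4, 5 and 6.
// Total = 20 bases, half = 10; the running sum over the sorted lengths is
// 2, 5, 9, 14, so the first length that pushes the sum past 10 is 5 => N50 = 5.
fn n50(sorted_lengths: &[usize], nb_bases_total: usize) -> usize {
    let mut acc = 0;
    for &len in sorted_lengths {
        acc += len;
        if acc > nb_bases_total / 2 {
            return len;
        }
    }
    sorted_lengths[sorted_lengths.len() - 1]
}

fn main() {
    let lengths = vec![2, 3, 4, 5, 6];
    let total: usize = lengths.iter().sum();
    assert_eq!(n50(&lengths, total), 5);
}
```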
mit
rust-bio-tools
./rust-bio-tools/src/cli.rs
use crate::common::Region;\nuse std::path::PathBuf;\nuse structopt::StructOpt;\n\n#[derive(StructOpt)]\n#[structopt(\n about = 'A set of ultra-fast command line utilities for bioinformatics tasks based on Rust-Bio.',\n author = 'Johannes Köster <johannes.koester@tu-dortmund.de>',\n name = 'Rust-Bio-Tools'\n)]\npub(crate) struct Rbt {\n #[structopt(long, short, help = 'Verbose output.')]\n pub(crate) verbose: bool,\n\n #[structopt(subcommand)]\n pub(crate) cmd: Command,\n}\n\n#[derive(StructOpt)]\npub(crate) enum Command {\n /// Split FASTQ file from STDIN into N chunks.\n ///\n /// Example:\n /// rbt fastq-split A.fastq B.fastq < test.fastq\n #[structopt(author = 'Johannes Köster <johannes.koester@tu-dortmund.de>')]\n FastqSplit {\n #[structopt(parse(from_os_str), help = 'File name(s) for the chunks to create.')]\n chunks: Vec<PathBuf>,\n },\n /// Remove records from a FASTQ file (from STDIN), output to STDOUT.\n ///\n /// Example:\n /// rbt fastq-filter ids.txt < test.fastq > filtered.fastq\n #[structopt(author = 'Erik Clarke <ecl@pennmedicine.upenn.edu>')]\n FastqFilter {\n #[structopt(parse(from_os_str))]\n /// File with list of record IDs to remove, one per line.\n ids: PathBuf,\n },\n\n /// Print depth of BAM or CRAM file at given positions from STDIN (tab separated: chrom, pos).\n ///\n /// Usage:\n /// $ rbt bam-depth test.bam < pos.txt > depth.txt\n ///\n /// The positions file contains the name of one reference sequence and one position per line (tab separated).\n /// Example:\n ///\n /// 16 1\n /// 17 38\n /// 17 39\n ///\n /// Depths are written to stdout as tab-separated lines, similar to the positions input.\n /// Example:\n ///\n /// 16 1 0\n /// 17 38 14\n /// 17 39 13\n #[structopt(author = 'Johannes Köster <johannes.koester@tu-dortmund.de>')]\n BamDepth {\n /// Path to indexed BAM file.\n #[structopt(parse(from_os_str))]\n bam_path: PathBuf,\n\n /// Maximum read length to consider. This affects the speed of the involved pileup.\n /// Reads longer than this length can be missed when calculating the depth.\n #[structopt(long, short, default_value = '1000')]\n max_read_length: u32,\n\n /// Skip reads with mask bits unset [].\n #[structopt(long = 'incl-flags', short, default_value = '0')]\n include_flags: u16,\n\n /// Skip reads with mask bits set [UNMAP, SECONDARY, QCFAIL, DUP].\n #[structopt(long = 'excl-flags', short, default_value = '1796')]\n exclude_flags: u16,\n\n /// Minimum mapping quality.\n #[structopt(long, short = 'q', default_value = '0')]\n min_mapq: u8,\n },\n\n /// Convert any IUPAC codes in alleles into Ns (in order to comply with VCF 4 specs).\n /// Reads VCF/BCF from STDIN and writes BCF to STDOUT.\n ///\n /// Example:\n /// rbt vcf-fix-iupac-alleles < test.vcf > fixed.bcf\n #[structopt(author = 'Johannes Köster <johannes.koester@tu-dortmund.de>')]\n VcfFixIupacAlleles {},\n\n /// Convert VCF/BCF file from STDIN to tab-separated TXT file at STDOUT.\n /// INFO and FORMAT tags have to be selected explicitly.\n ///\n /// Example:\n /// rbt vcf-to-txt --genotypes --fmt S --info T X SOMATIC < test.vcf > variant-table.txt\n ///\n /// The resulting table can be e.g. 
parsed with PANDAS in Python:\n ///\n /// pd.read_table('variants.txt', header=[0, 1])\n #[structopt(author = 'Johannes Köster <johannes.koester@tu-dortmund.de>')]\n VcfToTxt {\n /// Select INFO tags\n #[structopt(long, short, value_name = 'NAME')]\n info: Vec<String>,\n\n /// Select FORMAT tags.\n #[structopt(long = 'fmt', short, value_name = 'NAME')]\n format: Vec<String>,\n\n /// Display genotypes.\n #[structopt(long, short)]\n genotypes: bool,\n\n /// Include FILTER field.\n #[structopt(long)]\n with_filter: bool,\n },\n\n /// Annotate for each variant in a VCF/BCF at STDIN whether it is contained in a\n /// given second VCF/BCF. The matching is fuzzy for indels and exact for SNVs.\n /// Results are printed as BCF to STDOUT, with an additional INFO tag MATCHING.\n /// The two vcfs do not have to be sorted.\n ///\n /// Example:\n /// rbt vcf-match dbsnp.vcf < calls.vcf | bcftools view\n #[structopt(author = 'Johannes Köster <johannes.koester@tu-dortmund.de>')]\n VcfMatch {\n /// VCF/BCF file to match against.\n #[structopt(parse(from_os_str))]\n vcf: PathBuf,\n\n /// Maximum distance between centres of two indels considered to match.\n #[structopt(long, short = 'd', value_name = 'INT', default_value = '20')]\n max_dist: u32,\n\n /// Maximum difference between lengths of two indels.\n #[structopt(long, short = 'l', value_name = 'INT', default_value = '10')]\n max_len_diff: u32,\n },\n\n /// Annotate b-allele frequency for each single nucleotide variant and sample.\n ///\n /// Example:\n /// rbt vcf-baf < calls.bcf > annotated.bcf\n #[structopt(\n author = 'Johannes Köster <johannes.koester@uni-due.de>, Jan Forster <j.forster@dkfz.de>'\n )]\n VcfBaf {},\n\n /// Looks for interacting drugs in DGIdb and annotates them for every gene in every record.\n ///\n /// Example:\n /// rbt vcf-annotate-dgidb input.vcf > output.vcf\n #[structopt(author = 'Felix Mölder <felix.moelder@uni-due.de>')]\n VcfAnnotateDgidb {\n /// VCF/BCF file to be extended by dgidb drug entries\n #[structopt()]\n vcf: String,\n\n /// Url prefix for requesting interaction drugs by gene names.\n #[structopt(\n long,\n short = 'p',\n default_value = 'http://dgidb.org/api/v2/interactions.json?genes='\n )]\n api_path: String,\n\n /// Info field name to be used for annotation.\n #[structopt(long, short = 'f', default_value = 'dgiDB_drugs')]\n field: String,\n\n /// A list of data sources included in query. If omitted all sources are considered.\n /// A list of all sources can be found at http://dgidb.org/api/v2/interaction_sources.json\n #[structopt(long, short = 's', value_name = 'STR')]\n datasources: Option<Vec<String>>,\n\n /// Number of genes to submit per api request. A lower value increases the number of api requests in return.\n /// Too many requests could be rejected by the DGIdb server.\n #[structopt(long, short = 'g', default_value = '500')]\n genes_per_request: usize,\n },\n\n /// Creates report from a given csv file containing a table with the given data\n /// Examples:\n /// With current directory as default ouput path:\n /// rbt csv-report path/to/table.csv --rows-per-page 100 --sort-column 'p-value' --sort-order ascending\n #[structopt(author = 'Felix Wiegand <felix.wiegand@tu-dortmund.de>')]\n CsvReport {\n /// CSV file including the data for the report.\n #[structopt()]\n csv_path: String,\n\n /// Sets the numbers of rows of each table per page. 
Default is 100.\n #[structopt(long, short = 'r', default_value = '100')]\n rows_per_page: u32,\n\n /// Column that the data should be sorted by.\n #[structopt(long, short = 'c')]\n sort_column: Option<String>,\n\n /// Order the data ascending or descending. Default is descending.\n #[structopt(long, short = 'o', default_value = 'descending', possible_values = &['ascending','descending'])]\n sort_order: String,\n\n /// Change the separator of the csv file to tab or anything else. Default is ','.\n #[structopt(long, short = 's', default_value = ',')]\n separator: char,\n\n /// Configure a custom formatter function for each column by providing a file containing a javascript object with csv column title as the key and a format function as the value.\n /// More information on the formatting functions and how to use them here: https://bootstrap-table.com/docs/api/column-options/#formatter.\n #[structopt(long, short = 'f')]\n formatter: Option<String>,\n\n /// Pins the table until the given column such that scrolling to the right does not hide the given column and those before.\n #[structopt(long, short = 'p')]\n pin_until: Option<String>,\n\n /// Relative output path for the report files. Default value is the current directory.\n #[structopt(default_value = '.')]\n output_path: String,\n },\n\n #[structopt(verbatim_doc_comment)]\n /// Creates a html file with a vega visualization of the given bam region that is then written to stdout.\n ///\n /// EXAMPLE:\n /// rbt plot-bam -b input.bam -g 2:132424-132924 -r input.fa > plot.html\n #[structopt(\n author = 'Felix Wiegand <felix.wiegand@tu-dortmund.de>',\n usage = 'rbt plot-bam [OPTIONS] --bam-path <bam-path>... --reference <reference> --region <region> > plot.html'\n )]\n PlotBam {\n /// BAM file to be visualized.\n #[structopt(long, short = 'b', required = true, parse(from_os_str))]\n bam_path: Vec<PathBuf>,\n\n /// Path to the reference fasta file.\n #[structopt(long, short = 'r', parse(from_os_str))]\n reference: PathBuf,\n\n /// Chromosome and region for the visualization. Example: 2:132424-132924\n #[structopt(long, short = 'g')]\n region: Region,\n\n /// Set the maximum rows that will be shown in the alignment plots.\n #[structopt(long, short = 'd', default_value = '500')]\n max_read_depth: u32,\n },\n\n /// Creates report from a given VCF file including a visual plot\n /// for every variant with the given BAM and FASTA file.\n /// The VCF file has to be annotated with VEP, using the options --hgvs and --hgvsg.\n ///\n /// Examples:\n /// With current directory as default ouput path:\n /// rbt vcf-report fasta.fa --vcfs a=a.vcf b=b.vcf --bams a:sample1=a.bam b:sample1=b.bam\n /// With custom directory as default ouput path:\n /// rbt vcf-report fasta.fa --vcfs a=a.vcf b=b.vcf --bams a:sample1=a.bam b:sample1=b.bam -- my/output/path/\n /// With custom info tags in table report:\n /// rbt vcf-report fasta.fa --vcfs a=a.vcf b=b.vcf --bams a:sample1=a.bam b:sample1=b.bam --info PROB_SOMATIC PROB_GERMLINE\n #[structopt(\n author = 'Johannes Köster <johannes.koester@uni-due.de>, Felix Wiegand <felix.wiegand@tu-dortmund.de>'\n )]\n VcfReport {\n /// FASTA file containing the reference genome for the visual plot\n #[structopt()]\n fasta: String,\n\n /// VCF files to include (multi-sample). Group is the name that will be used in the oncoprint. There needs to be one corresponding BAM file for each sample of a VCF/BCF file. 
Please only use VCF/BCF files annotated by VEP.\n #[structopt(long, short = 'v', value_name = 'GROUP=VCF_FILE')]\n vcfs: Vec<String>,\n\n /// VCF files to include (multi-sample). Group is the name that will be used in the oncoprint. There needs to be one corresponding BAM file for each sample of a VCF/BCF file. Please only use VCF/BCF files annotated by VEP.\n #[structopt(long, short = 'b', value_name = 'GROUP:SAMPLE=BAM_FILE')]\n bams: Vec<String>,\n\n /// Set the maximum number of cells in the oncoprint per page. Lowering max-cells should improve the performance of the plots in the browser. Default value is 1000.\n #[structopt(long, short = 'c', default_value = '1000')]\n cells: u32,\n\n /// Set the maximum lines of reads that will be shown in the alignment plots. Default value is 500.\n #[structopt(long, short = 'd', default_value = '500')]\n max_read_depth: u32,\n\n /// Add custom values from the info field to each variant as a data attribute to access them via the custom javascript. Multiple fields starting with the same prefix can be added by placing '*' at the end of a prefix.\n #[structopt(long, short = 'i', value_name = 'INFO_TAG')]\n infos: Option<Vec<String>>,\n\n /// Add custom values from the format field to each variant as a data attribute to access them via the custom javascript. All given format values will also be inserted into the main table.\n #[structopt(long, short = 'f', value_name = 'FORMAT_TAG')]\n formats: Option<Vec<String>>,\n\n /// Add multiple keys from the info field of your vcf to the plots of the first and second stage of the report.\n #[structopt(long, value_name = 'PLOT_INFO')]\n plot_info: Option<Vec<String>>,\n\n /// Change the default javascript file for the table-report to a custom one to add own plots or tables to the sidebar by appending these to an empty div in the HTML template.\n #[structopt(long, short = 'j', value_name = 'JS_FILE_PATH')]\n custom_js_template: Option<String>,\n\n /// Add one or multiple js file (e.g. libraries) for usage in the custom-js-file. The ordering of the arguments will be the same as they will be imported.\n #[structopt(long, short = 'l', value_name = 'JS_FILE_PATH')]\n custom_js_files: Option<Vec<String>>,\n\n /// Add a TSV file that contains one or multiple custom values for each sample for the oncoprint. First column has to be the sample name, followed by one or more columns with custom values. Make sure you include one row for each given sample.\n #[structopt(long, short = 't', value_name = 'TSV_FILE_PATH')]\n tsv: Option<String>,\n\n /// Sets the number of threads used to build the table reports.\n #[structopt(long, default_value = '0')]\n threads: usize,\n\n /// Set the name of the annotation field generated by VEP.\n #[structopt(long, short = 'a', default_value = 'ANN')]\n annotation_field: String,\n\n /// Relative output path for the report files. Default value is the current directory.\n #[structopt(default_value = '.')]\n output_path: String,\n },\n\n /// Split a given VCF/BCF file into N chunks of approximately the same size. Breakends are kept together.\n /// Output type is always BCF.\n ///\n /// Example:\n /// rbt vcf-split input.bcf output1.bcf output2.bcf output3.bcf ... outputN.bcf\n #[structopt(author = 'Johannes Köster <johannes.koester@uni-due.de>')]\n VcfSplit {\n #[structopt(parse(from_os_str), help = 'Input VCF/BCF that shall be splitted.')]\n input: PathBuf,\n\n #[structopt(\n parse(from_os_str),\n help = 'BCF files to split into. Breakends are kept together. 
Each file will contain approximately the same number of records.'\n )]\n output: Vec<PathBuf>,\n },\n\n /// Tool to predict maximum likelihood fragment sequence from FASTQ or BAM files.\n ///\n /// Requirements:\n /// - starcode\n #[structopt(\n author = 'Johannes Köster <johannes.koester@uni-due.de>, Henning Timm <henning.timm@tu-dortmund.de>, Felix Mölder <felix.moelder@uni-due.de>'\n )]\n CollapseReadsToFragments {\n #[structopt(subcommand)]\n cmd: CollapseReadsToFragmentsSubcommand,\n },\n\n /// Tool to build artificial reads from real BAM files with identical properties.\n #[structopt(author = 'Felix Mölder <felix.moelder@uni-due.de>')]\n BamAnonymize {\n #[structopt(parse(from_os_str), help = 'Input BAM file')]\n bam: PathBuf,\n #[structopt(parse(from_os_str), help = 'Input reference as fasta file')]\n input_ref: PathBuf,\n #[structopt(parse(from_os_str), help = 'Output BAM file with artificial reads')]\n output_bam: PathBuf,\n #[structopt(\n parse(from_os_str),\n help = 'Output fasta file with artificial reference'\n )]\n output_ref: PathBuf,\n #[structopt(help = 'chromosome name')]\n chr: String,\n #[structopt(help = '1-based start position')]\n start: u64,\n #[structopt(help = '1-based exclusive end position')]\n end: u64,\n #[structopt(\n long,\n short = 'p',\n help = 'Only simulates reads whose mates are both in the defined range.'\n )]\n keep_only_pairs: bool,\n },\n\n /// Tool to compute stats on sequence file (from STDIN), output is in YAML with fields:\n /// - min: length of shortest sequence\n /// - max: length of longest sequence\n /// - average: average length of sequence\n /// - median: median length of sequence\n /// - nb_reads: number of reads\n /// - nb_bases: number of bases\n /// - n50: N50 of sequences\n ///\n /// Example:\n /// rbt sequence-stats < test.fasta\n /// rbt sequence-stats -q < test.fastq\n #[structopt(author = 'Pierre Marijon <pmarijon@mpi-inf.mpg.de>')]\n SequenceStats {\n #[structopt(\n long,\n short = 'q',\n help = 'Flag to indicate the sequence in stdin is in fastq format.'\n )]\n fastq: bool,\n },\n}\n\n#[derive(StructOpt)]\npub enum CollapseReadsToFragmentsSubcommand {\n /// Tool to merge sets of reads from paired FASTQ files that share the UMI and have similar read sequence. 
The result is a maximum likelihood fragment sequence per set with base quality scores improved accordingly.\n ///\n /// Takes two FASTQ files (forward and reverse) and returns two FASTQ files in which all PCR duplicates have been merged into a consensus read.\n /// Duplicates are identified by a Unique Molecular Identifier (UMI).\n ///\n /// Assumptions:\n /// - Reads are of equal length\n /// - UMI is the prefix of the reads\n ///\n /// Example:\n /// rbt collapse-reads-to-fragments fastq \\n /// reads_1.fq reads_2.fq \ # input files\n /// merged_1.fq merged_2.fq \ # output files\n /// -l 13 \ # length of UMI\n /// -d 1 \ # max hamming distance of UMIs within a cluster\n /// -D 2 \ # max hamming distance of sequences within a cluster\n /// --umi-on-reverse # UMI is the prefix of the reverse read\n #[structopt(\n author = 'Johannes Köster <johannes.koester@uni-due.de>, Henning Timm <henning.timm@tu-dortmund.de>, Felix Mölder <felix.moelder@uni-due.de>'\n )]\n Fastq {\n #[structopt(parse(from_os_str), help = 'Input FASTQ file with forward reads.')]\n fq1: PathBuf,\n\n #[structopt(parse(from_os_str), help = 'Input FASTQ file with reverse reads.')]\n fq2: PathBuf,\n\n #[structopt(parse(from_os_str), help = 'Output FASTQ file with forward reads')]\n consensus_fq1: PathBuf,\n\n #[structopt(parse(from_os_str), help = 'Output FASTQ file with reverse reads')]\n consensus_fq2: PathBuf,\n\n #[structopt(\n parse(from_os_str),\n requires_all(&['insert-size', 'std-dev']),\n help = 'Output FASTQ file for overlapping consensus reads (Required for calculating overlapping consensus only)'\n )]\n consensus_fq3: Option<PathBuf>,\n\n #[structopt(\n long,\n short = 'd',\n default_value = '1',\n help = 'Maximum hamming distance between the UMIs of any pair of reads in the same cluster.'\n )]\n max_umi_dist: usize,\n\n #[structopt(\n long,\n short = 'l',\n default_value = '8',\n help = 'Length of UMI in read.'\n )]\n umi_len: usize,\n\n #[structopt(long, short = 'D', possible_values = &['1','2','3','4','5','6','7','8'], default_value = '2', help = 'Maximum hamming distance between the sequences of any pair of reads in the same cluster.')]\n max_seq_dist: usize,\n\n #[structopt(long, short = 'u', help = 'Set if UMI is on reverse read')]\n umi_on_reverse: bool,\n\n #[structopt(\n long,\n help = 'Add list of reads that were merged for each consensus read. Note that this can yield very long FASTQ name lines which cannot be handled by some tools.'\n )]\n verbose_read_names: bool,\n\n #[structopt(\n long,\n short = 'i',\n requires = 'consensus-fq3',\n help = 'Expected insert size of sequenced fragment (Required for calculating overlapping consensus only)'\n )]\n insert_size: Option<usize>,\n\n #[structopt(\n long,\n short = 's',\n requires = 'consensus-fq3',\n help = 'Standard deviation of expected insert size. Defines search space of the most likely overlap. 
(Required for calculating overlapping consensus only)'\n )]\n std_dev: Option<usize>,\n },\n\n /// Tool to merge sets of PCR duplicate reads from a BAM file into one maximum likelihood fragment sequence each with accordingly improved base quality scores.\n ///\n /// Takes a BAM file and returns a BAM file in which all PCR duplicates have been merged into a consensus read.\n /// Duplicates must be marked by Picard Tools using the TAG_DUPLICATE_SET_MEMBERS option.\n ///\n /// Assumptions:\n /// - Reads are of equal length\n /// - Reads are marked by Picard Tools\n #[structopt(author = 'Felix Mölder <felix.moelder@uni-due.de>')]\n Bam {\n #[structopt(parse(from_os_str), help = 'Input BAM file with marked duplicates')]\n bam: PathBuf,\n\n #[structopt(parse(from_os_str), help = 'Output FASTQ file with forward reads')]\n consensus_fq1: PathBuf,\n\n #[structopt(parse(from_os_str), help = 'Output FASTQ file with reverse reads')]\n consensus_fq2: PathBuf,\n\n #[structopt(\n parse(from_os_str),\n help = 'Output FASTQ file for overlapping consensus reads.'\n )]\n consensus_fq_se: PathBuf,\n\n #[structopt(\n parse(from_os_str),\n help = 'Output BAM file for reads that were skipped.'\n )]\n skipped_bam: PathBuf,\n\n #[structopt(\n long,\n help = 'Add list of reads that were merged for each consensus read. Note that this can yield very long FASTQ name lines which cannot be handled by some tools.'\n )]\n verbose_read_names: bool,\n },\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/csv/report.rs
use crate::bcf::report::oncoprint::WriteErr;\nuse anyhow::Context as AnyhowContext;\nuse anyhow::Result;\nuse chrono::{DateTime, Local};\nuse derive_new::new;\nuse itertools::Itertools;\nuse lz_str::compress_to_utf16;\nuse serde_derive::Serialize;\nuse serde_json::json;\nuse std::collections::{HashMap, HashSet};\nuse std::convert::TryInto;\nuse std::fs;\nuse std::fs::File;\nuse std::io::{Read, Write};\nuse std::path::Path;\nuse std::str::FromStr;\nuse tera::{Context, Tera};\nuse xlsxwriter::*;\n\ntype LookupTable = HashMap<String, HashMap<String, Vec<(String, usize, usize)>>>;\n\n#[allow(clippy::too_many_arguments)]\npub(crate) fn csv_report(\n csv_path: &str,\n output_path: &str,\n rows_per_page: usize,\n separator: char,\n sort_column: Option<&str>,\n ascending: Option<bool>,\n formatter: Option<&str>,\n pin_until: Option<&str>,\n) -> Result<()> {\n let mut rdr = csv::ReaderBuilder::new()\n .delimiter(separator as u8)\n .from_path(csv_path)?;\n\n let header = rdr.headers()?.clone();\n let titles = header.iter().collect_vec();\n let mut table = Vec::new();\n let mut numeric = HashMap::new();\n let mut non_numeric = HashMap::new();\n let mut integer = HashMap::new();\n for res in rdr.records() {\n let row = res?;\n let mut table_entry = HashMap::new();\n for (i, tile) in titles.iter().enumerate() {\n table_entry.insert(tile.to_string(), row[i].to_owned());\n match f32::from_str(&row[i]) {\n Ok(_) => {\n let num = numeric.entry(tile.to_owned()).or_insert_with(|| 0);\n *num += 1;\n if i32::from_str(&row[i]).is_ok() {\n let int = integer.entry(tile.to_owned()).or_insert_with(|| 0);\n *int += 1;\n }\n }\n _ => {\n let no_num = non_numeric.entry(tile.to_owned()).or_insert_with(|| 0);\n *no_num += 1;\n }\n }\n }\n table.push(table_entry);\n }\n\n let mut is_numeric = HashMap::new();\n for title in &titles {\n let is_num = match (numeric.get(title), non_numeric.get(title)) {\n (Some(num), Some(no_num)) => num > no_num,\n (Some(_), None) => true,\n _ => false,\n };\n is_numeric.insert(title.to_owned(), is_num);\n }\n\n let mut is_integer = HashMap::new();\n for title in &titles {\n let is_int = match (integer.get(title), non_numeric.get(title)) {\n (Some(num), Some(no_num)) => num > no_num,\n (Some(_), None) => true,\n _ => false,\n };\n is_integer.insert(title.to_owned(), is_int);\n }\n\n let mut plot_data = HashMap::new();\n let mut num_plot_data = HashMap::new();\n let mut reasonable_plot = titles.iter().map(|t| (*t, true)).collect::<HashMap<_, _>>();\n\n for title in &titles {\n match is_numeric.get(title) {\n Some(true) => {\n let plot = num_plot(&table, title.to_string());\n num_plot_data.insert(title, plot);\n }\n Some(false) => {\n if let Some(plot) = nominal_plot(&table, title.to_string()) {\n plot_data.insert(title, plot);\n } else {\n plot_data.insert(title, vec![]);\n reasonable_plot.insert(title, false);\n }\n }\n _ => unreachable!(),\n };\n }\n\n match (sort_column, ascending) {\n (Some(column), Some(true)) => table.sort_by(|a, b| {\n match (\n f32::from_str(a.get(column).unwrap()),\n f32::from_str(b.get(column).unwrap()),\n ) {\n (Ok(float_a), Ok(float_b)) => float_a.partial_cmp(&float_b).unwrap(),\n _ => a.get(column).cmp(&b.get(column)),\n }\n }),\n (Some(column), Some(false)) => table.sort_by(|a, b| {\n match (\n f32::from_str(a.get(column).unwrap()),\n f32::from_str(b.get(column).unwrap()),\n ) {\n (Ok(float_a), Ok(float_b)) => float_b.partial_cmp(&float_a).unwrap(),\n _ => a.get(column).cmp(&b.get(column)),\n }\n }),\n (_, _) => {}\n }\n\n let wb = 
Workbook::new(&(output_path.to_owned() + '/report.xlsx'));\n let mut sheet = wb.add_worksheet(Some('Report'))?;\n for (i, title) in titles.iter().enumerate() {\n sheet.write_string(0, i.try_into()?, title, None)?;\n }\n\n for (i, row) in table.iter().enumerate() {\n for (c, title) in titles.iter().enumerate() {\n sheet.write_string(\n (i + 1).try_into()?,\n c.try_into()?,\n row.get(*title).unwrap(),\n None,\n )?;\n }\n }\n\n wb.close()?;\n\n let pages = if table.len() % rows_per_page == 0 && !table.is_empty() {\n (table.len() / rows_per_page) - 1\n } else {\n table.len() / rows_per_page\n };\n\n let plot_path = output_path.to_owned() + '/plots/';\n fs::create_dir(Path::new(&plot_path)).context(WriteErr::CantCreateDir {\n dir_path: plot_path.to_owned(),\n })?;\n\n for (n, title) in titles.iter().enumerate() {\n let mut templates = Tera::default();\n templates.add_raw_template('plot.js.tera', include_str!('plot.js.tera'))?;\n let mut context = Context::new();\n match is_numeric.get(title) {\n Some(true) => {\n context.insert(\n 'table',\n &json!(num_plot_data.get(title).unwrap()).to_string(),\n );\n context.insert('num', &true);\n }\n Some(false) => {\n context.insert('table', &json!(plot_data.get(title).unwrap()).to_string());\n context.insert('num', &false);\n }\n _ => unreachable!(),\n }\n context.insert('title', &title);\n context.insert('index', &n.to_string());\n let js = templates.render('plot.js.tera', &context)?;\n\n let file_path = plot_path.to_owned() + 'plot_' + &n.to_string() + '.js';\n let mut file = fs::File::create(file_path)?;\n file.write_all(js.as_bytes())?;\n }\n\n let index_path = output_path.to_owned() + '/indexes/';\n fs::create_dir(Path::new(&index_path)).context(WriteErr::CantCreateDir {\n dir_path: index_path.to_owned(),\n })?;\n\n let data_path = output_path.to_owned() + '/data/';\n fs::create_dir(Path::new(&data_path)).context(WriteErr::CantCreateDir {\n dir_path: data_path.to_owned(),\n })?;\n\n let mut prefixes = make_prefixes(\n table\n .clone()\n .into_iter()\n .map(|hm| {\n hm.into_iter()\n .filter(|(k, _)| !is_numeric.get(k.as_str()).unwrap())\n .collect()\n })\n .collect(),\n titles\n .clone()\n .into_iter()\n .filter(|e| !is_numeric.get(e).unwrap())\n .collect(),\n rows_per_page,\n );\n\n let bin = make_bins(\n table\n .clone()\n .into_iter()\n .map(|hm| {\n hm.into_iter()\n .filter(|(k, _)| {\n *is_numeric.get(k.as_str()).unwrap() && !is_integer.get(k.as_str()).unwrap()\n })\n .collect()\n })\n .collect(),\n titles\n .clone()\n .into_iter()\n .filter(|e| *is_numeric.get(e).unwrap() && !is_integer.get(e).unwrap())\n .collect(),\n rows_per_page,\n );\n\n let int_bin = make_bins_for_integers(\n table\n .clone()\n .into_iter()\n .map(|hm| {\n hm.into_iter()\n .filter(|(k, _)| *is_integer.get(k.as_str()).unwrap())\n .collect()\n })\n .collect(),\n titles\n .clone()\n .into_iter()\n .filter(|e| *is_integer.get(e).unwrap())\n .collect(),\n rows_per_page,\n );\n\n for (k, v) in bin.into_iter().chain(int_bin) {\n prefixes.insert(k, v);\n }\n\n let prefix_path = output_path.to_owned() + '/prefixes/';\n fs::create_dir(Path::new(&prefix_path)).context(WriteErr::CantCreateDir {\n dir_path: prefix_path.to_owned(),\n })?;\n\n for (n, title) in titles.iter().enumerate() {\n if let Some(prefix_table) = prefixes.get(title.to_owned()) {\n let mut templates = Tera::default();\n templates.add_raw_template(\n 'prefix_table.html.tera',\n include_str!('prefix_table.html.tera'),\n )?;\n let mut context = Context::new();\n context.insert('title', title);\n context.insert('index', 
&n.to_string());\n context.insert('table', prefix_table);\n context.insert('numeric', is_numeric.get(title).unwrap());\n let html = templates.render('prefix_table.html.tera', &context)?;\n\n let file_path = output_path.to_owned() + '/prefixes/col_' + &n.to_string() + '.html';\n let mut file = fs::File::create(file_path)?;\n file.write_all(html.as_bytes())?;\n\n let title_path = prefix_path.to_owned() + '/col_' + &n.to_string() + '/';\n fs::create_dir(Path::new(&title_path)).context(WriteErr::CantCreateDir {\n dir_path: title_path.to_owned(),\n })?;\n\n for (prefix, values) in prefix_table {\n let mut templates = Tera::default();\n templates.add_raw_template(\n 'lookup_table.html.tera',\n include_str!('lookup_table.html.tera'),\n )?;\n let mut context = Context::new();\n context.insert('title', title);\n context.insert('values', values);\n context.insert('index', &n.to_string());\n let html = templates.render('lookup_table.html.tera', &context)?;\n\n let file_path = title_path.to_owned() + prefix + '.html';\n let mut file = fs::File::create(file_path)?;\n file.write_all(html.as_bytes())?;\n }\n }\n }\n\n let formatter_object = if let Some(f) = formatter {\n let mut file_string = ''.to_string();\n let mut custom_file =\n File::open(f).context('Unable to open given file for formatting colums')?;\n custom_file\n .read_to_string(&mut file_string)\n .context('Unable to read string from formatting file')?;\n\n Some(file_string)\n } else {\n None\n };\n\n let pinned_columns = if let Some(col) = pin_until {\n titles.iter().position(|&r| r == col).context(\n 'Given value for --pin-until did not match any of the columns of your csv file',\n )? + 1\n } else {\n 0\n };\n\n let mut templates = Tera::default();\n templates.add_raw_template('csv_report.js.tera', include_str!('csv_report.js.tera'))?;\n let mut context = Context::new();\n context.insert('titles', &titles);\n context.insert('num', &is_numeric);\n context.insert('formatter', &formatter_object);\n context.insert('pinned_columns', &pinned_columns);\n context.insert('pin', &pin_until.is_some());\n\n let js = templates.render('csv_report.js.tera', &context)?;\n\n let file_path = output_path.to_owned() + '/js/csv_report.js';\n let mut file = fs::File::create(file_path)?;\n file.write_all(js.as_bytes())?;\n\n if table.is_empty() {\n let mut templates = Tera::default();\n templates.add_raw_template('csv_report.html.tera', include_str!('csv_report.html.tera'))?;\n templates.add_raw_template('data.js.tera', include_str!('data.js.tera'))?;\n let mut context = Context::new();\n context.insert('table', &table);\n context.insert('titles', &titles);\n context.insert('current_page', &1);\n context.insert('pages', &1);\n let local: DateTime<Local> = Local::now();\n context.insert('time', &local.format('%a %b %e %T %Y').to_string());\n context.insert('version', &env!('CARGO_PKG_VERSION'));\n context.insert('is_reasonable', &reasonable_plot);\n\n let data: Vec<Vec<&str>> = Vec::new();\n\n context.insert(\n 'data',\n &json!(compress_to_utf16(&json!(data).to_string())).to_string(),\n );\n\n let js = templates.render('data.js.tera', &context)?;\n let js_file_path = output_path.to_owned() + '/data/index1.js';\n let mut js_file = fs::File::create(js_file_path)?;\n js_file.write_all(js.as_bytes())?;\n\n let html = templates.render('csv_report.html.tera', &context)?;\n let file_path = output_path.to_owned() + '/indexes/index1.html';\n let mut file = fs::File::create(file_path)?;\n file.write_all(html.as_bytes())?;\n } else {\n for (i, current_table) in 
table.chunks(rows_per_page).enumerate() {\n let page = i + 1;\n\n let mut templates = Tera::default();\n templates\n .add_raw_template('csv_report.html.tera', include_str!('csv_report.html.tera'))?;\n templates.add_raw_template('data.js.tera', include_str!('data.js.tera'))?;\n let mut context = Context::new();\n context.insert('table', &current_table);\n context.insert('titles', &titles);\n context.insert('current_page', &page);\n context.insert('pages', &(pages + 1));\n let local: DateTime<Local> = Local::now();\n context.insert('time', &local.format('%a %b %e %T %Y').to_string());\n context.insert('version', &env!('CARGO_PKG_VERSION'));\n context.insert('is_reasonable', &reasonable_plot);\n\n let mut data = Vec::new();\n for row in current_table {\n let mut r = Vec::new();\n for title in &titles {\n r.push(row.get(*title).unwrap())\n }\n data.push(r);\n }\n\n context.insert(\n 'data',\n &json!(compress_to_utf16(&json!(data).to_string())).to_string(),\n );\n\n let html = templates.render('csv_report.html.tera', &context)?;\n let js = templates.render('data.js.tera', &context)?;\n\n let file_path = output_path.to_owned() + '/indexes/index' + &page.to_string() + '.html';\n let mut file = fs::File::create(file_path)?;\n file.write_all(html.as_bytes())?;\n\n let js_file_path = output_path.to_owned() + '/data/index' + &page.to_string() + '.js';\n let mut js_file = fs::File::create(js_file_path)?;\n js_file.write_all(js.as_bytes())?;\n }\n }\n Ok(())\n}\n\nfn num_plot(table: &[HashMap<String, String>], column: String) -> Vec<BinnedPlotRecord> {\n let mut values = Vec::new();\n let mut nan = 0;\n for row in table {\n match f32::from_str(row.get(&column).unwrap()) {\n Ok(val) => values.push(val.to_owned()),\n _ => nan += 1,\n }\n }\n let min = values.iter().fold(f32::INFINITY, |a, &b| a.min(b));\n let max = values.iter().fold(f32::NEG_INFINITY, |a, &b| a.max(b));\n let bins = 20;\n let step = (max - min) / bins as f32;\n let mut binned_data = HashMap::new();\n let mut bin_borders = HashMap::new();\n for val in values {\n for i in 0..bins {\n let lower_bound = min + i as f32 * step;\n let upper_bound = lower_bound + step;\n let bin_name = String::from('bin') + &i.to_string();\n bin_borders.insert(bin_name.to_owned(), (lower_bound, upper_bound));\n let entry = binned_data.entry(bin_name.to_owned()).or_insert_with(|| 0);\n if ((i < (bins - 1) && val < upper_bound) || (i < bins && val <= upper_bound))\n && val >= lower_bound\n {\n *entry += 1;\n }\n }\n }\n if nan > 0 {\n bin_borders.insert(\n String::from('bin') + &bins.to_string(),\n (f32::NAN, f32::NAN),\n );\n binned_data.insert(String::from('bin') + &bins.to_string(), nan);\n }\n let mut plot_data = Vec::new();\n for (name, v) in binned_data {\n let (lower_bound, upper_bound) = bin_borders.get(&name).unwrap();\n let plot_record = BinnedPlotRecord {\n bin_start: *lower_bound,\n value: v,\n bin_end: *upper_bound,\n };\n plot_data.push(plot_record);\n }\n plot_data\n}\n\nfn nominal_plot(table: &[HashMap<String, String>], column: String) -> Option<Vec<PlotRecord>> {\n let values = table\n .iter()\n .map(|row| row.get(&column).unwrap().to_owned())\n .filter(|s| !s.is_empty())\n .collect_vec();\n\n let mut count_values = HashMap::new();\n for v in values {\n let entry = count_values.entry(v.to_owned()).or_insert_with(|| 0);\n *entry += 1;\n }\n\n let mut plot_data = count_values\n .iter()\n .map(|(k, v)| PlotRecord {\n key: k.to_owned(),\n value: *v,\n })\n .collect_vec();\n\n if plot_data.len() > 10 {\n let unique_values: HashSet<_> = 
count_values.iter().map(|(_, v)| v).collect();\n if unique_values.len() <= 1 {\n return None;\n };\n plot_data.sort_by(|a, b| b.value.cmp(&a.value));\n plot_data = plot_data.into_iter().take(10).collect();\n }\n\n Some(plot_data)\n}\n\nfn make_prefixes(\n table: Vec<HashMap<String, String>>,\n titles: Vec<&str>,\n rows_per_page: usize,\n) -> LookupTable {\n let mut title_map = HashMap::new();\n for (i, partial_table) in table.chunks(rows_per_page).enumerate() {\n let page = i + 1;\n let prefix_len = 3;\n for (index, row) in partial_table.iter().enumerate() {\n for key in &titles {\n let value = &row[key.to_owned()].trim().to_owned();\n if !value.is_empty() {\n let entry = value.split_whitespace().take(1).collect_vec()[0];\n if entry.len() >= prefix_len {\n let prefix = entry.chars().take(prefix_len).collect::<String>();\n let prefix_map = title_map\n .entry(key.to_string())\n .or_insert_with(HashMap::new);\n let values = prefix_map.entry(prefix).or_insert_with(Vec::new);\n values.push((value.to_owned(), page, index));\n }\n }\n }\n }\n // write stuff to output map with page like so: HashMap<column_title, HashMap<prefix, Vec<(value, page, index)>>>\n }\n title_map\n}\n\nfn make_bins(\n table: Vec<HashMap<String, String>>,\n titles: Vec<&str>,\n rows_per_page: usize,\n) -> LookupTable {\n let mut title_map = HashMap::new();\n for title in titles {\n let mut values = Vec::new();\n for row in &table {\n if let Ok(val) = f32::from_str(row.get(title).unwrap()) {\n values.push(val.to_owned())\n }\n }\n let min = values.iter().fold(f32::INFINITY, |a, &b| a.min(b));\n let max = values.iter().fold(f32::NEG_INFINITY, |a, &b| a.max(b));\n let bins = 20;\n let step = (max - min) / bins as f32;\n let mut bin_data = HashMap::new();\n for val in values {\n for i in 0..bins {\n let lower_bound = min + i as f32 * step;\n let upper_bound = lower_bound + step;\n let bin_name = lower_bound.to_string() + '-' + &upper_bound.to_string();\n let entry = bin_data\n .entry(bin_name.to_owned())\n .or_insert_with(HashSet::new);\n if ((i < (bins - 1) && val < upper_bound) || (i < bins && val <= upper_bound))\n && val >= lower_bound\n {\n entry.insert(val.to_string());\n }\n }\n }\n\n let mut value_on_page = HashMap::new();\n for (i, partial_table) in table.chunks(rows_per_page).enumerate() {\n let page = i + 1;\n for (index, row) in partial_table.iter().enumerate() {\n if let Ok(val) = f32::from_str(row.get(title).unwrap()) {\n let entry = value_on_page\n .entry(val.to_string())\n .or_insert_with(HashSet::new);\n entry.insert((page, index));\n }\n }\n // write stuff to output map with page like so: HashMap<column_title, HashMap<bin, Vec<(value, page, index)>>>\n }\n let mut bin_map = HashMap::new();\n for (bin, values) in bin_data {\n for v in values {\n let entry = bin_map.entry(bin.to_string()).or_insert_with(Vec::new);\n for (page, index) in value_on_page.get(&v).unwrap() {\n entry.push((v.to_string(), *page, *index));\n }\n }\n }\n title_map.insert(title.to_string(), bin_map);\n }\n\n title_map\n}\n\nfn make_bins_for_integers(\n table: Vec<HashMap<String, String>>,\n titles: Vec<&str>,\n rows_per_page: usize,\n) -> LookupTable {\n let mut title_map = HashMap::new();\n for title in titles {\n let mut values = Vec::new();\n for row in &table {\n if let Ok(val) = i32::from_str(row.get(title).unwrap()) {\n values.push(val.to_owned())\n }\n }\n let min = *values.iter().min().unwrap();\n let max = *values.iter().max().unwrap();\n let bins = 20;\n let step = if max - min <= 20 {\n 1\n } else {\n (max - min) / bins\n };\n 
let mut bin_data = HashMap::new();\n for val in values {\n for i in 0..bins {\n let lower_bound = min + i * step;\n let upper_bound = if i == bins { max } else { lower_bound + step };\n let bin_name = lower_bound.to_string() + '-' + &upper_bound.to_string();\n let entry = bin_data\n .entry(bin_name.to_owned())\n .or_insert_with(HashSet::new);\n if ((i < (bins - 1) && val < upper_bound) || (i < bins && val <= upper_bound))\n && val >= lower_bound\n {\n entry.insert(val.to_string());\n }\n }\n }\n\n let mut value_on_page = HashMap::new();\n for (i, partial_table) in table.chunks(rows_per_page).enumerate() {\n let page = i + 1;\n for (index, row) in partial_table.iter().enumerate() {\n if let Ok(val) = i32::from_str(row.get(title).unwrap()) {\n let entry = value_on_page\n .entry(val.to_string())\n .or_insert_with(HashSet::new);\n entry.insert((page, index));\n }\n }\n // write stuff to output map with page like so: HashMap<column_title, HashMap<bin, Vec<(value, page, index)>>>\n }\n let mut bin_map = HashMap::new();\n for (bin, values) in bin_data {\n for v in values {\n let entry = bin_map.entry(bin.to_string()).or_insert_with(Vec::new);\n for (page, index) in value_on_page.get(&v).unwrap() {\n entry.push((v.to_string(), *page, *index));\n }\n }\n }\n title_map.insert(title.to_string(), bin_map);\n }\n\n title_map\n}\n\n#[derive(new, Serialize, Debug, Clone)]\nstruct PlotRecord {\n key: String,\n value: u32,\n}\n\n#[derive(new, Serialize, Debug, Clone)]\nstruct BinnedPlotRecord {\n bin_start: f32,\n bin_end: f32,\n value: u32,\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/csv/mod.rs
//! Tools that work on CSV files.\npub mod report;\n
mit
rust-bio-tools
./rust-bio-tools/src/main.rs
//! Documentation for Rust Bio Tools\nuse anyhow::{Context, Result};\nuse itertools::Itertools;\nuse log::LevelFilter;\nuse rayon::prelude::*;\nuse std::collections::HashMap;\nuse std::fs;\nuse std::path::Path;\nuse structopt::StructOpt;\n\nuse cli::Command::*;\n\npub mod bam;\npub mod bcf;\nmod cli;\npub mod common;\npub mod csv;\npub mod fastq;\npub mod sequences_stats;\n\nfn main() -> Result<()> {\n let args = cli::Rbt::from_args();\n\n fern::Dispatch::new()\n .format(|out, message, _| out.finish(format_args!('{}', message)))\n .level(if args.verbose {\n LevelFilter::Debug\n } else {\n LevelFilter::Info\n })\n .chain(std::io::stderr())\n .apply()\n .unwrap();\n\n match args.cmd {\n FastqSplit { chunks } => {\n fastq::split::split(&chunks.iter().map(|p| p.to_str().unwrap()).collect_vec())?\n }\n FastqFilter { ids } => fastq::filter::filter(&ids).unwrap(),\n BamDepth {\n bam_path,\n max_read_length,\n include_flags,\n exclude_flags,\n min_mapq,\n } => bam::depth::depth(\n &bam_path,\n max_read_length,\n include_flags,\n exclude_flags,\n min_mapq,\n )?,\n VcfToTxt {\n info,\n format,\n genotypes,\n with_filter,\n } => bcf::to_txt::to_txt(\n info.iter().map(|s| s as &str).collect_vec().as_slice(),\n format.iter().map(|s| s as &str).collect_vec().as_slice(),\n genotypes,\n with_filter,\n )?,\n VcfMatch {\n vcf,\n max_dist,\n max_len_diff,\n } => bcf::match_variants::match_variants(vcf, max_dist, max_len_diff)?,\n VcfBaf {} => bcf::baf::calculate_baf()?,\n VcfFixIupacAlleles {} => bcf::fix_iupac_alleles::fix_iupac_alleles()?,\n VcfAnnotateDgidb {\n vcf,\n api_path,\n field,\n datasources,\n genes_per_request,\n } => bcf::annotate_dgidb::annotate_dgidb(\n vcf,\n api_path,\n &*field,\n datasources.as_deref(),\n genes_per_request,\n )?,\n CsvReport {\n csv_path,\n rows_per_page,\n sort_column,\n sort_order,\n separator,\n formatter,\n pin_until,\n output_path,\n } => {\n if !Path::new(&output_path).exists() {\n fs::create_dir_all(Path::new(&output_path))?;\n }\n bcf::report::embed_js(&output_path, false, None, vec![])?;\n bcf::report::embed_css(&output_path, false)?;\n bcf::report::embed_html(&output_path)?;\n\n let order = match sort_order.as_str() {\n 'ascending' => Some(true),\n 'descending' => Some(false),\n _ => None,\n };\n\n csv::report::csv_report(\n &csv_path,\n &output_path,\n rows_per_page as usize,\n separator,\n sort_column.as_deref(),\n order,\n formatter.as_deref(),\n pin_until.as_deref(),\n )?\n }\n PlotBam {\n bam_path,\n reference,\n region,\n max_read_depth,\n } => bam::plot::plot_bam::plot_bam(&bam_path, reference, &region, max_read_depth)?,\n VcfReport {\n fasta,\n vcfs,\n bams,\n cells,\n max_read_depth,\n infos,\n formats,\n plot_info,\n custom_js_template,\n custom_js_files,\n tsv,\n threads,\n annotation_field,\n output_path,\n } => {\n let mut sample_calls = HashMap::new();\n let mut bam_paths = HashMap::new();\n if !Path::new(&output_path).exists() {\n fs::create_dir(Path::new(&output_path)).context(format!(\n 'Couldn't create output directory at {}. 
Please make sure the path exists.',\n output_path\n ))?;\n }\n let js_files_vec = custom_js_files\n .clone()\n .map_or_else(Vec::new, |values| values.into_iter().collect());\n let js_file_names = if let Some(files) = custom_js_files {\n files\n .iter()\n .map(|f| {\n f.split('/')\n .collect_vec()\n .pop()\n .unwrap_or_else(|| {\n panic!('Unable to extract file name from path: {:?}', f)\n })\n .to_owned()\n })\n .collect()\n } else {\n vec![]\n };\n bcf::report::embed_js(\n &output_path,\n true,\n custom_js_template.as_deref(),\n js_files_vec,\n )?;\n bcf::report::embed_css(&output_path, true)?;\n bcf::report::embed_html(&output_path)?;\n let detail_path = output_path.to_owned() + '/details/';\n fs::create_dir(Path::new(&detail_path))?;\n for vcf in vcfs {\n let v: Vec<_> = vcf.split('=').collect();\n match sample_calls.insert(v[0].to_owned(), v[1].to_owned()) {\n None => {}\n _ => panic!('Found duplicate sample name {}. Please make sure the provided sample names are unique.', v[0].to_owned())\n }\n }\n for bam in bams {\n let b: Vec<_> = bam.split('=').collect();\n let c: Vec<_> = b[0].split(':').collect();\n let rec = bam_paths.entry(c[0].to_owned()).or_insert_with(Vec::new);\n rec.push((c[1].to_owned(), b[1].to_owned()))\n }\n\n rayon::ThreadPoolBuilder::new()\n .num_threads(threads)\n .build_global()?;\n\n sample_calls.par_iter().for_each(|(sample, sample_call)| {\n bcf::report::table_report::table_report(\n sample_call,\n &fasta,\n bam_paths\n .get(sample)\n .unwrap_or_else(|| panic!('No bam provided for sample {}', sample)),\n &output_path,\n sample,\n infos.clone(),\n formats.clone(),\n max_read_depth,\n js_file_names.clone(),\n &annotation_field,\n )\n .unwrap_or_else(|e| {\n panic!('Failed building table report for sample {}. {}', sample, e)\n });\n });\n\n bcf::report::oncoprint::oncoprint(\n &sample_calls,\n &output_path,\n cells,\n tsv.as_deref(),\n plot_info,\n &annotation_field,\n )?\n }\n VcfSplit { input, output } => bcf::split::split(input, output.as_ref())?,\n CollapseReadsToFragments { cmd } => match cmd {\n cli::CollapseReadsToFragmentsSubcommand::Fastq {\n fq1,\n fq2,\n consensus_fq1,\n consensus_fq2,\n consensus_fq3,\n umi_len,\n max_seq_dist,\n max_umi_dist,\n umi_on_reverse,\n verbose_read_names,\n insert_size,\n std_dev,\n } => fastq::collapse_reads_to_fragments::call_consensus_reads_from_paths(\n fq1,\n fq2,\n consensus_fq1,\n consensus_fq2,\n consensus_fq3,\n umi_len,\n max_seq_dist,\n max_umi_dist,\n umi_on_reverse,\n verbose_read_names,\n insert_size,\n std_dev,\n )?,\n cli::CollapseReadsToFragmentsSubcommand::Bam {\n bam,\n consensus_fq1,\n consensus_fq2,\n consensus_fq_se,\n skipped_bam,\n verbose_read_names,\n } => bam::collapse_reads_to_fragments::call_consensus_reads_from_paths(\n bam,\n consensus_fq1,\n consensus_fq2,\n consensus_fq_se,\n skipped_bam,\n verbose_read_names,\n )?,\n },\n BamAnonymize {\n bam,\n input_ref,\n output_bam,\n output_ref,\n chr,\n start,\n end,\n keep_only_pairs,\n } => bam::anonymize_reads::anonymize_reads(\n bam,\n input_ref,\n output_bam,\n output_ref,\n chr,\n start - 1..end - 1,\n keep_only_pairs,\n )?,\n SequenceStats { fastq } => sequences_stats::stats(fastq)?,\n }\n Ok(())\n}\n
mit
rust-bio-tools
./rust-bio-tools/src/bcf/split.rs
use std::collections::HashMap;\nuse std::path::Path;\n\nuse anyhow::Context;\nuse anyhow::Result;\nuse itertools::Itertools;\nuse rust_htslib::bcf;\nuse rust_htslib::bcf::Read;\n\npub fn split<P: AsRef<Path>>(input_bcf: P, output_bcfs: &[P]) -> Result<()> {\n let n_records = bcf::Reader::from_path(input_bcf.as_ref())\n .context('error reading input VCF/BCF')?\n .records()\n .fold(0_u64, |count, _| count + 1);\n let mut reader = bcf::Reader::from_path(input_bcf).context('error reading input VCF/BCF')?;\n let header = bcf::Header::from_template(reader.header());\n let mut bnd_cache = HashMap::new();\n\n let chunk_size = n_records / output_bcfs.len() as u64;\n\n let mut writers = output_bcfs\n .iter()\n .map(|path| {\n bcf::Writer::from_path(path, &header, false, bcf::Format::Bcf)\n .context('error creating output VCF/BCF')\n })\n .collect::<Result<Vec<_>>>()?;\n\n for (rec, i) in reader.records().zip(0..) {\n let rec = rec?;\n\n let mut chunk = i / (chunk_size + 1);\n if rec.is_bnd() {\n if let Some(group) = BreakendGroup::from(&rec) {\n let event_chunk = match group {\n BreakendGroup::Event(id) => bnd_cache.entry(id).or_insert(chunk),\n BreakendGroup::Mates(ids) => {\n let ids = ids.clone();\n bnd_cache.entry(ids.concat()).or_insert(chunk)\n }\n };\n chunk = *event_chunk;\n }\n };\n let writer = &mut writers[chunk as usize];\n writer.write(&rec)?;\n }\n\n Ok(())\n}\n\n#[derive(Eq, PartialEq, Hash, Clone, Debug)]\nenum BreakendGroup {\n Event(Vec<u8>),\n Mates(Vec<Vec<u8>>),\n}\n\nimpl BreakendGroup {\n fn from(rec: &bcf::Record) -> Option<Self> {\n if let Some(event) = rec.event() {\n Some(BreakendGroup::Event(event))\n } else if let Some(mut mates) = rec.mateids() {\n let id = rec.id();\n mates.push(id);\n mates.sort();\n Some(BreakendGroup::Mates(mates))\n } else {\n None\n }\n }\n}\n\ntype Id = Vec<u8>;\n\ntrait BndRecord {\n fn is_bnd(&self) -> bool;\n fn event(&self) -> Option<Id>;\n fn mateids(&self) -> Option<Vec<Id>>;\n}\n\nimpl BndRecord for bcf::Record {\n fn is_bnd(&self) -> bool {\n self.info(b'SVTYPE').string().map_or(false, |entries| {\n entries.map_or(false, |entries| entries[0] == b'BND')\n })\n }\n\n fn event(&self) -> Option<Id> {\n if let Ok(Some(event)) = self.info(b'EVENT').string() {\n Some(event[0].to_owned())\n } else {\n None\n }\n }\n\n fn mateids(&self) -> Option<Vec<Id>> {\n match self.info(b'MATEID').string() {\n Ok(Some(s)) => Some(s.clone().into_iter().map(|v| v.to_vec()).collect_vec()),\n _ => None,\n }\n }\n}\n
mit
git clone https://github.com/rust-bio/rust-bio-tools

rm -f RustBioGPT-validate.csv && for i in `find . -name "*.rs"`;do paste -d "," <(echo "rust-bio-tools"|perl -pe "s/(.+)/\"\1\"/g") <(echo $i|perl -pe "s/(.+)/\"\1\"/g") <(perl -pe "s/\n/\\\n/g" $i|perl -pe s"/\"/\'/g" |perl -pe "s/(.+)/\"\1\"/g") <(echo "mit"|perl -pe "s/(.+)/\"\1\"/g") >> RustBioGPT-validate.csv; done

sed -i '1i "repo_name","path","content","license"' RustBioGPT-validate.csv
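For readability, the same extraction could be expressed as a small Rust program instead of the shell/perl one-liner above. The sketch below is hypothetical (the helper name visit is invented for this illustration; the output file name, the fixed "rust-bio-tools" and "mit" fields, and the header row are taken from the commands above). It recursively finds *.rs files, escapes newlines as \n, replaces double quotes with single quotes like the perl step, and writes the quoted header that the sed command prepends:

// Hypothetical Rust re-implementation of the extraction pipeline above.
// It reproduces the same row format: "repo_name","path","content","license",
// with newlines escaped and double quotes turned into single quotes.
use std::fs;
use std::io::Write;
use std::path::Path;

fn visit(dir: &Path, out: &mut fs::File) -> std::io::Result<()> {
    for entry in fs::read_dir(dir)? {
        let path = entry?.path();
        if path.is_dir() {
            // Recurse into subdirectories, mirroring `find . -name "*.rs"`.
            visit(&path, out)?;
        } else if path.extension().map_or(false, |ext| ext == "rs") {
            let content = fs::read_to_string(&path)?
                .replace('"', "'")      // same quote substitution as the perl step
                .replace('\n', "\\n");  // keep each record on a single line
            writeln!(
                out,
                "\"rust-bio-tools\",\"{}\",\"{}\",\"mit\"",
                path.display(),
                content
            )?;
        }
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    let mut out = fs::File::create("RustBioGPT-validate.csv")?;
    // Header row, equivalent to the `sed -i '1i ...'` step.
    writeln!(out, "\"repo_name\",\"path\",\"content\",\"license\"")?;
    visit(Path::new("."), &mut out)
}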