SciCode-Domain-Code / data/dataset_Codon.csv
"keyword","repo_name","file_path","file_extension","file_size","line_count","content","language"
"Codon","LKremer/MSA_trimmer","alignment_trimmer.py",".py","9112","221","#!/usr/bin/env python3
from __future__ import division, print_function
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import SingleLetterAlphabet
import argparse
import os
def gap_proportion(msa_column):
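    # Fraction of rows in this MSA column that are gaps: '-' for amino acid
    # columns, '---' for codon columns.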
gap_counter = 0
for aa_or_codon in msa_column:
if aa_or_codon in ('-', '---'):
gap_counter += 1
return gap_counter / len(msa_column)
class Alignment():
def __init__(self, alignment_fasta, maxgap,
trim_positions, mask, is_cds=False):
self._is_cds = is_cds
self._letter_n = 3 if is_cds else 1
self._maxgap = maxgap
self._fasta = []
self._trimmed_fasta = []
with open(alignment_fasta, 'r') as input_handle:
for fasta_record in SeqIO.parse(input_handle, 'fasta'):
self._fasta.append(fasta_record)
self._trimmed_fasta.append([])
self._first_seq = self._fasta[0]
self._assert_fasta_is_aligned()
self._set_mask_char(mask)
self._parse_trim_positions(trim_positions)
fname, fextension = os.path.splitext(alignment_fasta)
self._output_filepath = '{}_{}{}'.format(
fname,
""masked"" if self._mask_char else ""trimmed"",
fextension,
)
return
def _parse_trim_positions(self, trim_pos_list):
self._trim_positions = set()
if trim_pos_list:
for trim_pos in trim_pos_list:
if '-' in trim_pos: # it's a range!
start_raw, stop_raw = trim_pos.split('-')
start = int(start_raw) - 1
stop = int(stop_raw)
if start >= stop:
raise Exception('Invalid range:', trim_pos)
self._trim_positions |= set(range(start, stop))
else:
pos = self._str_to_pos(trim_pos)
self._trim_positions.add(pos)
return
def _str_to_pos(self, pos_str):
try:
pos = int(pos_str) - 1 # account for zero-indexing
except ValueError:
raise Exception(pos_str, 'is not a valid position!')
self[pos] # raises IndexError when pos is out of range of the MSA
return pos
def _set_mask_char(self, mask_param):
if mask_param:
if len(mask_param) != 1:
raise Exception('Invalid --mask character:', mask_param)
self._mask_char = mask_param * self._letter_n
else:
self._mask_char = False
return
def _assert_fasta_is_aligned(self):
for fa_entry in self._fasta[1:]:
if len(fa_entry) != len(self._first_seq):
raise Exception('The sequences {} and {} have different '
'lengths. Is your fasta really aligned'
'?'.format(fa_entry.id, self._first_seq.id))
return
def __getitem__(self, i):
''' returns the i-th column of the multifasta, works for
CDS and protein fastas '''
n = self._letter_n # the number of characters per position
# three for CDS sequences (codons), one for protein sequences
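        # e.g. for a codon alignment of three sequences, self[1] returns the
        # second codon column, such as ('ATG', '---', 'GCA') (illustrative values)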
start = i * n
end = start + n
if start < 0 or end > len(self._first_seq):
raise IndexError('Cannot access MSA position {} - index out '
'of range'.format(i))
return tuple(str(fa.seq[start:end]) for fa in self._fasta)
def __iter__(self):
''' column-based iteration over MSA fasta, works for
CDS and protein fastas '''
for i in range(int(len(self._first_seq) / self._letter_n)):
yield self[i]
def _trim_column(self, msa_column, col_index):
''' trim an MSA column if the number of gaps exceeds the user-specified
threshold, or if it is within the user-specified trim-range '''
col_str = ' '.join(msa_column)
gap_prop = gap_proportion(msa_column)
if (col_index in self._trim_positions) or (gap_prop > self._maxgap):
if self._mask_char:
action = ' - masked'
else:
action = ' - trimmed'
trim_column = True
else:
action = ''
trim_column = False
for row_i, aa_or_codon in enumerate(msa_column):
if trim_column: # trim (or mask) the column
if self._mask_char: # mask it
self._trimmed_fasta[row_i].append(self._mask_char)
else: # don't add the column at all
break
else: # keep the column as it was
self._trimmed_fasta[row_i].append(aa_or_codon)
print('{: >3} {} ({:.1%} gaps){}'.format(
col_index + 1, col_str, gap_prop, action))
return
def _trim_fasta(self):
for col_index, msa_column in enumerate(self):
self._trim_column(msa_column, col_index)
self._trimmed_seqs = []
for i, trimmed_seq_list in enumerate(self._trimmed_fasta):
trimmed_seq = ''.join(trimmed_seq_list)
input_seq = self._fasta[i]
out_record = SeqRecord(
Seq(trimmed_seq, SingleLetterAlphabet),
id=input_seq.id,
description=input_seq.description
)
self._trimmed_seqs.append(out_record)
def dump_trimmed_fasta(self):
self._trim_fasta()
SeqIO.write(self._trimmed_seqs, self._output_filepath, 'fasta')
if self._mask_char:
print(' Wrote ""{}""-masked fasta to {}\n'.format(
self._mask_char, self._output_filepath))
else:
print(' Wrote trimmed fasta to {}\n'.format(self._output_filepath))
return
def __eq__(self, other):
"""""" defines an equality test with the == operator
returns False when a position of one alignment is a gap
but is not a gap in the other alignment """"""
for col_i, self_column in enumerate(self):
other_column = other[col_i]
for row_i, aa_or_codon in enumerate(self_column):
if aa_or_codon in ('-', '---'):
if other_column[row_i] not in ('-', '---'):
print('\nCodon mismatch in row {} column {}'
'\n""{}"" does not match ""{}""\n'.format(
row_i, col_i, aa_or_codon, other_column[row_i]))
return False
return True
def __ne__(self, other):
"""""" defines a non-equality test with the != operator """"""
return not self.__eq__(other)
def main(pep_alignment_fasta, cds_alignment_fasta,
trim_gappy, trim_positions, mask):
if pep_alignment_fasta:
pep_al = Alignment(pep_alignment_fasta, maxgap=trim_gappy, mask=mask,
trim_positions=trim_positions)
pep_al.dump_trimmed_fasta()
if cds_alignment_fasta:
cds_al = Alignment(cds_alignment_fasta, maxgap=trim_gappy, mask=mask,
trim_positions=trim_positions, is_cds=True)
if pep_alignment_fasta and cds_alignment_fasta:
if pep_al != cds_al:
raise Exception('The two alignments have a mismatch'
' - see printouts above.')
else:
print('Peptide and CDS alignment are in agreement!\n')
cds_al.dump_trimmed_fasta()
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='A program to remove (trim) or mask columns of fasta '
'multiple sequence alignments. Columns are removed if they exceed a '
'user-specified ""gappyness"" threshold, and/or if they are specified by '
'column index. Also works for codon alignments. The trimmed alignment '
'is written to [your_alignment]_trimmed.fa')
parser.add_argument('-p', '--pep_alignment_fasta')
parser.add_argument('-c', '--cds_alignment_fasta')
parser.add_argument('--trim_gappy', type=float, default=9999,
help='the maximum allowed gap proportion for an MSA '
'column, all columns with a higher proportion of gaps '
'will be trimmed (default: off)')
parser.add_argument('--trim_positions', help='specify columns to be '
'trimmed from the alignment, e.g. ""1-10 42"" to trim the '
first ten columns and the 42nd column', nargs='+')
parser.add_argument('--mask', default=False, const='-',
action='store', nargs='?', help='mask the alignment '
'instead of trimming. if a character is specified after'
' --mask, that character will be used for masking '
'(default char: ""-"")')
args = parser.parse_args()
if args.pep_alignment_fasta or args.cds_alignment_fasta:
main(**vars(args))
else:
parser.print_help()
","Python"
"Codon","adelq/dnds","setup.py",".py","1034","30","from setuptools import setup
package = 'dnds'
version = '2.1'
setup(
name=package,
version=version,
description=""Calculate dN/dS ratio precisely (Ka/Ks) using a codon-by-codon counting method."",
long_description=open(""README.rst"").read(),
author=""Adel Qalieh"",
author_email=""adelq@med.umich.edu"",
url=""https://github.com/adelq/dnds"",
license=""MIT"",
py_modules=['dnds', 'codons'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Bio-Informatics',
])
","Python"
"Codon","adelq/dnds","test_dnds.py",".py","3156","97","from __future__ import division
from fractions import Fraction
from nose.tools import assert_equal, assert_almost_equal
from dnds import dnds, pnps, substitutions, dnds_codon, dnds_codon_pair, syn_sum, translate
# From Canvas practice problem
TEST_SEQ1 = 'ACTCCGAACGGGGCGTTAGAGTTGAAACCCGTTAGA'
TEST_SEQ2 = 'ACGCCGATCGGCGCGATAGGGTTCAAGCTCGTACGA'
# From in-class problem set
TEST_SEQ3 = 'ATGCTTTTGAAATCGATCGTTCGTTCACATCGATGGATC'
TEST_SEQ4 = 'ATGCGTTCGAAGTCGATCGATCGCTCAGATCGATCGATC'
# From http://bioinformatics.cvr.ac.uk/blog/calculating-dnds-for-ngs-datasets/
TEST_SEQ5 = 'ATGAAACCCGGGTTTTAA'
TEST_SEQ6 = 'ATGAAACGCGGCTACTAA'
def test_translate():
assert_equal(translate(TEST_SEQ1), 'TPNGALELKPVR')
assert_equal(translate(TEST_SEQ2), 'TPIGAIGFKLVR')
assert_equal(translate(TEST_SEQ3), 'MLLKSIVRSHRWI')
def test_dnds_codon_easy():
assert_equal([0, 0, 1], dnds_codon('ACC'))
def test_dnds_codon_harder1():
assert_equal([0, 0, Fraction(1, 3)], dnds_codon('AAC'))
assert_equal([0, 0, Fraction(2, 3)], dnds_codon('ATC'))
def test_dnds_codon_harder2():
assert_equal([Fraction(1, 3), 0, Fraction(1, 3)], dnds_codon('AGA'))
assert_equal([Fraction(1, 3), 0, 1], dnds_codon('CGA'))
def test_dnds_codon_ngs():
assert_equal([0, 0, 0], dnds_codon('TGG'))
assert_equal([Fraction(1, 3), 0, 1], dnds_codon('CGG'))
def test_dnds_codon_ngs_sums():
assert_equal(sum(dnds_codon('ATG')), 0)
assert_equal(sum(dnds_codon('AAA')), Fraction(1, 3))
assert_equal(sum(dnds_codon('CCC')), 1)
assert_equal(sum(dnds_codon('GGG')), 1)
assert_equal(sum(dnds_codon('TTT')), Fraction(1, 3))
assert_equal(sum(dnds_codon('TAA')), Fraction(2, 3))
def test_dnds_codon_ps():
assert_equal(dnds_codon('CTT'), [0, 0, 1])
assert_equal(dnds_codon('CGT'), [0, 0, 1])
def test_dnds_codon_pair():
assert_equal([Fraction(1, 3), 0, Fraction(2, 3)],
dnds_codon_pair('AGA', 'CGA'))
def test_dnds_codon_pair_harder1():
assert_equal([Fraction(1, 6), 0, Fraction(1, 3)],
dnds_codon_pair('TTG', 'TTC'))
def test_dnds_codon_pair_harder2():
assert_equal([Fraction(1, 6), 0, Fraction(1, 2)],
dnds_codon_pair('TTA', 'ATA'))
def test_syn_sum_ps():
assert_almost_equal(syn_sum(TEST_SEQ3, TEST_SEQ4), 10, delta=1)
def test_syn_subs():
assert_equal(substitutions(TEST_SEQ1, TEST_SEQ2), (5, 5))
assert_equal(substitutions(TEST_SEQ3, TEST_SEQ4), (2, 5))
assert_equal(substitutions(""ACCGGA"", ""ACAAGA""), (1, 1))
assert_equal(substitutions(""TGC"", ""TAT""), (1, 1))
assert_equal(substitutions(""ACC"", ""AAA""), (0.5, 1.5))
assert_equal(substitutions(""CCC"", ""AAC""), (0, 2))
def test_pnps():
assert_almost_equal(pnps(TEST_SEQ1, TEST_SEQ2), 0.269, delta=0.1)
assert_almost_equal(pnps(TEST_SEQ3, TEST_SEQ4), 0.86, delta=0.1)
assert_almost_equal(pnps(TEST_SEQ5, TEST_SEQ6), 0.1364 / 0.6001, delta=1e-4)
def test_dnds():
assert_almost_equal(dnds(TEST_SEQ5, TEST_SEQ6), 0.1247, delta=1e-4)
# DNDS.pdf - wrong based on email conversation with Sean
#
# def test_syn_sum():
# assert_close(syn_sum(TEST_SEQ1, TEST_SEQ2), 7.5833)
","Python"
"Codon","adelq/dnds","dnds.py",".py","5340","169","""""""dnds
This module is a reference implementation for estimating the neutrality of
nucleotide substitutions from the proportions of synonymous and nonsynonymous
mutations.
""""""
from __future__ import print_function, division
from math import log
from fractions import Fraction
import logging
from codons import codons
BASES = {'A', 'G', 'T', 'C'}
def split_seq(seq, n=3):
'''Returns sequence split into chunks of n characters, default is codons'''
return [seq[i:i + n] for i in range(0, len(seq), n)]
def average_list(l1, l2):
""""""Return the average of two lists""""""
return [(i1 + i2) / 2 for i1, i2 in zip(l1, l2)]
def dna_to_protein(codon):
'''Returns single letter amino acid code for given codon'''
return codons[codon]
def translate(seq):
""""""Translate a DNA sequence into the 1-letter amino acid sequence""""""
return """".join([dna_to_protein(codon) for codon in split_seq(seq)])
def is_synonymous(codon1, codon2):
'''Returns boolean whether given codons are synonymous'''
return dna_to_protein(codon1) == dna_to_protein(codon2)
def dnds_codon(codon):
'''Returns list of synonymous counts for a single codon.
Calculations done per the methodology taught in class.
http://sites.biology.duke.edu/rausher/DNDS.pdf
'''
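    # Worked example (matching this repo's test suite): dnds_codon('AAC')
    # returns [0, 0, Fraction(1, 3)], since at the third position of AAC
    # (Asn) only one of the three possible single-base changes, AAC -> AAT,
    # is synonymous.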
syn_list = []
for i in range(len(codon)):
base = codon[i]
other_bases = BASES - {base}
syn = 0
for new_base in other_bases:
new_codon = codon[:i] + new_base + codon[i + 1:]
syn += int(is_synonymous(codon, new_codon))
syn_list.append(Fraction(syn, 3))
return syn_list
def dnds_codon_pair(codon1, codon2):
""""""Get the dN/dS for the given codon pair""""""
return average_list(dnds_codon(codon1), dnds_codon(codon2))
def syn_sum(seq1, seq2):
""""""Get the sum of synonymous sites from two DNA sequences""""""
syn = 0
codon_list1 = split_seq(seq1)
codon_list2 = split_seq(seq2)
for i in range(len(codon_list1)):
codon1 = codon_list1[i]
codon2 = codon_list2[i]
dnds_list = dnds_codon_pair(codon1, codon2)
syn += sum(dnds_list)
return syn
def hamming(s1, s2):
""""""Return the hamming distance between 2 DNA sequences""""""
return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2)) + abs(len(s1) - len(s2))
def codon_subs(codon1, codon2):
""""""Returns number of synonymous substitutions in provided codon pair
Methodology for multiple substitutions from Dr. Swanson, UWashington
https://faculty.washington.edu/wjs18/dnds.ppt
""""""
diff = hamming(codon1, codon2)
if diff < 1:
return 0
elif diff == 1:
return int(translate(codon1) == translate(codon2))
syn = 0
for i in range(len(codon1)):
base1 = codon1[i]
base2 = codon2[i]
if base1 != base2:
new_codon = codon1[:i] + base2 + codon1[i + 1:]
syn += int(is_synonymous(codon1, new_codon))
syn += int(is_synonymous(codon2, new_codon))
return syn / diff
def substitutions(seq1, seq2):
""""""Returns number of synonymous and nonsynonymous substitutions""""""
dna_changes = hamming(seq1, seq2)
codon_list1 = split_seq(seq1)
codon_list2 = split_seq(seq2)
syn = 0
for i in range(len(codon_list1)):
codon1 = codon_list1[i]
codon2 = codon_list2[i]
syn += codon_subs(codon1, codon2)
return (syn, dna_changes - syn)
def clean_sequence(seq):
""""""Clean up provided sequence by removing whitespace.""""""
return seq.replace(' ', '')
def pnps(seq1, seq2):
""""""Main function to calculate pN/pS between two DNA sequences.""""""
# Strip any whitespace from both strings
seq1 = clean_sequence(seq1)
seq2 = clean_sequence(seq2)
# Check that both sequences have the same length
assert len(seq1) == len(seq2)
# Check that sequences are codons
assert len(seq1) % 3 == 0
syn_sites = syn_sum(seq1, seq2)
non_sites = len(seq1) - syn_sites
logging.info('Sites (syn/nonsyn): {}, {}'.format(syn_sites, non_sites))
syn_subs, non_subs = substitutions(seq1, seq2)
logging.info('pN: {} / {}\t\tpS: {} / {}'
.format(non_subs, round(non_sites), syn_subs, round(syn_sites)))
pn = non_subs / non_sites
ps = syn_subs / syn_sites
return pn / ps
def dnds(seq1, seq2):
""""""Main function to calculate dN/dS between two DNA sequences per Nei &
Gojobori 1986. This includes the per site conversion adapted from Jukes &
Cantor 1967.
""""""
# Strip any whitespace from both strings
seq1 = clean_sequence(seq1)
seq2 = clean_sequence(seq2)
# Check that both sequences have the same length
assert len(seq1) == len(seq2)
# Check that sequences are codons
assert len(seq1) % 3 == 0
syn_sites = syn_sum(seq1, seq2)
non_sites = len(seq1) - syn_sites
logging.info('Sites (syn/nonsyn): {}, {}'.format(syn_sites, non_sites))
syn_subs, non_subs = substitutions(seq1, seq2)
pn = non_subs / non_sites
ps = syn_subs / syn_sites
dn = -(3 / 4) * log(1 - (4 * pn / 3))
ds = -(3 / 4) * log(1 - (4 * ps / 3))
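    # Jukes & Cantor (1967) correction: d = -(3/4) * ln(1 - (4/3) * p),
    # converting each observed proportion of changed sites into an estimated
    # number of substitutions per site.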
logging.info('dN: {}\t\tdS: {}'.format(round(dn, 3), round(ds, 3)))
return dn / ds
if __name__ == '__main__':
print(pnps('ACC GTG GGA TGC ACC GGT GTG CCC',
'ACA GTG AGA TAT AAA GGA GAG AAC'))
","Python"
"Codon","adelq/dnds","codons.py",".py","1045","67","codons = {
""TTT"": ""F"",
""TTC"": ""F"",
""TTA"": ""L"",
""TTG"": ""L"",
""TCT"": ""S"",
""TCC"": ""S"",
""TCA"": ""S"",
""TCG"": ""S"",
""TAT"": ""Y"",
""TAC"": ""Y"",
""TAA"": ""STOP"",
""TAG"": ""STOP"",
""TGT"": ""C"",
""TGC"": ""C"",
""TGA"": ""STOP"",
""TGG"": ""W"",
""CTT"": ""L"",
""CTC"": ""L"",
""CTA"": ""L"",
""CTG"": ""L"",
""CCT"": ""P"",
""CCC"": ""P"",
""CCA"": ""P"",
""CCG"": ""P"",
""CAT"": ""H"",
""CAC"": ""H"",
""CAA"": ""Q"",
""CAG"": ""Q"",
""CGT"": ""R"",
""CGC"": ""R"",
""CGA"": ""R"",
""CGG"": ""R"",
""ATT"": ""I"",
""ATC"": ""I"",
""ATA"": ""I"",
""ATG"": ""M"",
""ACT"": ""T"",
""ACC"": ""T"",
""ACA"": ""T"",
""ACG"": ""T"",
""AAT"": ""N"",
""AAC"": ""N"",
""AAA"": ""K"",
""AAG"": ""K"",
""AGT"": ""S"",
""AGC"": ""S"",
""AGA"": ""R"",
""AGG"": ""R"",
""GTT"": ""V"",
""GTC"": ""V"",
""GTA"": ""V"",
""GTG"": ""V"",
""GCT"": ""A"",
""GCC"": ""A"",
""GCA"": ""A"",
""GCG"": ""A"",
""GAT"": ""D"",
""GAC"": ""D"",
""GAA"": ""E"",
""GAG"": ""E"",
""GGT"": ""G"",
""GGC"": ""G"",
""GGA"": ""G"",
""GGG"": ""G""
}
","Python"
"Codon","veg/hyphy-analyses","SCUEAL-WG/SCUEALfullG.sh",".sh","294","10","#!/bin/sh
#module load openmpi-x86_64
module load openmpi-1.10-x86_64
# Run MPI program through Ethernet eth0
mpirun -np 28 /localdisk/software/hyphy/hyphy-2.2.4/HYPHYMPI USEPATH=/dev/null BASEPATH=/localdisk/software/hyphy/lib/hyphy /localdisk/home/PATH_TO_FOLDER_/SCUEALfullG.bf
","Shell"
"Codon","veg/hyphy-analyses","molerate/JSON.md",".md","4938","72","# Molerate JSON Schema Description
The top-level JSON object contains the following keys:
* **`analysis`** (Object): Contains metadata about the analysis performed.
* `authors` (String): Name of the author(s).
* `contact` (String): Contact email address.
* `info` (String): Brief description of the analysis.
* `labeling strategy` (String): The strategy used for labeling branches (e.g., ""all-descendants"").
* `model` (String): The substitution model used (e.g., ""WAG"").
* `rate variation` (String): The model used for rate variation across sites (e.g., ""GDD"").
* `requirements` (String): Description of the input data required.
* `version` (String): Version of the analysis tool.
* **`branch attributes`** (Object): Contains attributes for phylogenetic branches, grouped by partition (e.g., ""0"") and model.
* `<partition_id>` (Object, e.g., ""0""): Represents a data partition.
* `<branch_name>` (Object, e.g., ""HLacaPus1"", ""Node1""): Represents a specific branch in the tree.
* `Proportional` (Number): Estimated branch length for the Proportional model.
* `Proportional Partitioned` (Number): Estimated branch length for the Proportional Partitioned model.
* `Reference` (Number): Branch length from the reference tree.
* `Unconstrained Test` (Number): Estimated branch length for the Unconstrained Test model.
* *( Potentially other model names as keys with numeric values )*
* `attributes` (Object): Defines metadata for the models listed under branch names.
* `<model_name>` (Object, e.g., ""Proportional""):
* `attribute type` (String): Type of attribute (e.g., ""branch length"").
* `display order` (Number): Suggested order for displaying this model's results.
* **`branch level analysis`** (Object): Contains detailed likelihood ratio test results for individual branches designated as 'test'.
* `<branch_name>` (Object, e.g., ""HLamaAes1""): Represents a specific 'test' branch.
* `LogL` (Number): Log-likelihood for the `Proportional+1` model at this branch.
* `alternative` (Object): `Proportional+1` vs `Unconstrained Test`
* `Corrected P-value` (Number): Corrected (Holm-Bonferroni) p-value.
* `LRT` (Number): Likelihood Ratio Test statistic.
* `p-value` (Number): Uncorrected p-value.
* `null` (Object): `Proportional` vs `Proportional+1`
* `Corrected P-value` (Number): Corrected (Holm-Bonferroni) p-value.
* `LRT` (Number): Likelihood Ratio Test statistic.
* `p-value` (Number): Uncorrected p-value.
* **`fits`** (Object): Contains details about the statistical fit of each evolutionary model tested.
* `<model_name>` (Object, e.g., ""Proportional"", ""Unconstrained Test""):
* `AIC-c` (Number): Corrected Akaike Information Criterion value.
* `Log Likelihood` (Number): Log-likelihood value for the model fit.
* `Rate Distributions` (Object): Parameters related to site rate variation.
* *( Keys vary based on the rate variation model, e.g., GDD category rates, mixture weights, branch scalers )* (Number)
* `display order` (Number): Suggested order for displaying this model's fit results.
* `estimated parameters` (Number): Number of parameters estimated for this model.
* **`input`** (Object): Describes the input data used for the analysis.
* `file name` (String): Path to the input alignment file.
* `number of sequences` (Number): Count of sequences in the alignment.
* `number of sites` (Number): Count of sites (columns) in the alignment.
* `partition count` (Number): Number of data partitions (usually 0 or 1 if not partitioned).
* `trees` (Object): Contains the input tree structure(s).
* `<partition_id>` (String, e.g., ""0""): Tree structure in Newick format string.
* **`runtime`** (String): Version of `HyPhy` used to execute the analysis.
* **`test results`** (Object): Contains results of Likelihood Ratio Tests comparing pairs of nested models.
* `<model_comparison>` (Object, e.g., ""Proportional Partitioned:Unconstrained Test""): Represents a comparison between two models (Null:Alternative).
* `Corrected P-value` (Number): Corrected (Holm-Bonferroni) p-value for the comparison.
* `LRT` (Number): Likelihood Ratio Test statistic.
* `Uncorrected P-value` (Number): Uncorrected p-value.
* **`tested`** (Object): Maps each branch name from the input tree to its classification in the analysis.
* `<branch_name>` (String, e.g., ""HLacaPus1""): Value is either ""test"" or ""background"".
* **`timers`** (Object): Records the time duration for specific parts of the analysis.
* `<component_name>` (Object, e.g., ""Overall"", ""Proportional""):
* `order` (Number): Order index for the component.
* `timer` (Number): Time duration in seconds.
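A minimal sketch of pulling a few of these fields out of the JSON with Python's standard `json` module; the file name `molerate_results.json` is a hypothetical placeholder:

```python
import json

# Load a molerate result file (path is illustrative).
with open(""molerate_results.json"") as fh:
    results = json.load(fh)

# Model fits: AIC-c and log likelihood for each fitted model.
for model, fit in results[""fits""].items():
    print(model, fit[""AIC-c""], fit[""Log Likelihood""])

# Branch classification: ""test"" vs ""background"".
test_branches = [b for b, c in results[""tested""].items() if c == ""test""]
```
","Markdown"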
"Codon","Ensembl/ensembl-compara","setup.py",".py","776","22","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""setuptools-based stub for editable installs""""""
from setuptools import setup
if __name__ == ""__main__"":
setup()
","Python"
"Codon","Ensembl/ensembl-compara","pull_request_template.md",".md","521","26","## Description
_Describe the problem you're addressing here._
**Related JIRA tickets:**
- ENSCOMPARASW-XXXX
## Overview of changes
_Give details of what changes were required to solve the problem. Break into sections if applicable._
#### Change 1
- _detail 1.1_
#### Change 2
- _detail 2.1_
## Testing
_How was this tested? Have new unit tests been included?_
## Notes
_Optional extra information._
---
For code reviewers: [code review SOP](https://www.ebi.ac.uk/seqdb/confluence/display/EnsCom/Code+review+SOP)
","Markdown"
"Codon","Ensembl/ensembl-compara","run_all_tests.sh",".sh","1573","32","#!/bin/bash
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -euxo pipefail
cd ""$ENSEMBL_ROOT_DIR""
prove -r ensembl-compara/travisci/all-housekeeping/
prove -r ensembl-compara/travisci/sql-unittest/
ensembl-test/scripts/runtests.pl ensembl-compara/modules/t
ensembl-test/scripts/runtests.pl ensembl/modules/t/compara.t
ensembl-test/scripts/runtests.pl ensembl-rest/t/genomic_alignment.t ensembl-rest/t/info.t ensembl-rest/t/taxonomy.t ensembl-rest/t/homology.t ensembl-rest/t/gene_tree.t ensembl-rest/t/cafe_tree.t ensembl-rest/t/family.t
cd ""$ENSEMBL_ROOT_DIR/ensembl-compara""
./travisci/perl-linter_harness.sh
find docs modules scripts sql travisci -iname '*.t' -print0 | xargs -0 -n 1 perl -c
find docs modules scripts sql travisci -iname '*.pl' -print0 | xargs -0 -n 1 perl -c
find docs modules scripts sql travisci -iname '*.pm' \! -name 'LoadSynonyms.pm' \! -name 'HALAdaptor.pm' \! -name 'HALXS.pm' -print0 | xargs -0 -n 1 perl -c
echo ""All good !""
","Shell"
"Codon","Ensembl/ensembl-compara","DEPRECATED.md",".md","2457","87","> This file contains the list of methods deprecated in the Ensembl Compara
> API. A method is deprecated when it is not functional any more
> (schema/data change) or has been replaced by a better one. Backwards
> compatibility is provided whenever possible. When a method is
> deprecated, a deprecation warning is thrown whenever the method is used.
> The warning also contains instructions on replacing the deprecated method
> and when it will be removed.
----
# Deprecated methods scheduled for deletion
## Ensembl 116
* `Bio::EnsEMBL::Compara::Utils::MasterDatabase::find_overlapping_genome_db_ids`
# Deprecated methods not yet scheduled for deletion
* `AlignedMember::get_cigar_array()`
* `AlignedMember::get_cigar_breakout()`
* `GenomicAlignTree::genomic_align_array()`
* `GenomicAlignTree::get_all_GenomicAligns()`
* `Homology::dn()`
* `Homology::dnds_ratio()`
* `Homology::ds()`
* `Homology::lnl()`
* `Homology::n()`
* `Homology::s()`
* `Homology::threshold_on_ds()`
* `HomologyAdaptor::update_genetic_distance()`
# Methods removed in previous versions of Ensembl
## Ensembl 110
* `DBSQL::'*MemberAdaptor::fetch_by_stable_id()`
## Ensembl 100
* `DBSQL::'*MemberAdaptor::get_source_taxon_count()`
## Ensembl 98
* `DBSQL::DnaFragAdaptor::fetch_all_by_GenomeDB_region()`
## Ensembl 96
* `AlignSlice::Slice::get_all_VariationFeatures_by_VariationSet`
* `AlignSlice::Slice::get_all_genotyped_VariationFeatures`
* `Taggable::get_value_for_XXX()`
* `Taggable::get_all_values_for_XXX()`
* `Taggable::get_XXX_value()`
## Ensembl 93
* `DnaFrag::isMT()`
* `DnaFrag::dna_type()`
* `MethodLinkSpeciesSet::species_set_obj()`
## Ensembl 92
* `Member::print_member()`
* `SyntenyRegion::regions()`
## Ensembl 91
* `Homology::print_homology()`
* `MethodLinkSpeciesSet::get_common_classification()`
* `NCBITaxon::binomial()`
* `NCBITaxon::ensembl_alias_name()`
* `NCBITaxon::common_name()`
* `DnaFragAdaptor::is_already_stored()`
* `GeneMemberAdaptor::fetch_all_by_source_Iterator()`
* `GeneMemberAdaptor::fetch_all_Iterator()`
* `GeneMemberAdaptor::load_all_from_seq_members()`
* `GenomeDBAdaptor::fetch_all_by_low_coverage()`
* `GenomeDBAdaptor::fetch_all_by_taxon_id_assembly()`
* `GenomeDBAdaptor::fetch_by_taxon_id()`
* `SeqMemberAdaptor::fetch_all_by_source_Iterator()`
* `SeqMemberAdaptor::fetch_all_Iterator()`
* `SeqMemberAdaptor::update_sequence()`
* `SequenceAdaptor::fetch_by_dbIDs()`
## Ensembl 89
* `SpeciesTree::species_tree()`
","Markdown"
"Codon","Ensembl/ensembl-compara","conftest.py",".py","3680","88","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Local directory-specific hook implementations.
Since this file is located at the root of all ensembl-compara tests, every test in every subfolder will have
access to the plugins, hooks and fixtures defined here.
""""""
# Disable all the redefined-outer-name violations due to how pytest fixtures work
# pylint: disable=redefined-outer-name
import os
from pathlib import Path
import shutil
import time
import pytest
from _pytest.fixtures import FixtureRequest
from ensembl.compara.filesys import DirCmp
pytest_plugins = (""ensembl.utils.plugin"",)
def pytest_configure() -> None:
""""""Adds global variables and configuration attributes required by Compara's unit tests.
`Pytest initialisation hook
<https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_configure>`_.
""""""
test_data_dir = Path(__file__).parent / 'src' / 'test_data'
pytest.dbs_dir = test_data_dir / 'databases' # type: ignore[attr-defined]
pytest.files_dir = test_data_dir / 'flatfiles' # type: ignore[attr-defined]
@pytest.fixture(scope=""session"")
def dir_cmp(request: FixtureRequest, tmp_path_factory: pytest.TempPathFactory) -> DirCmp:
""""""Returns a directory tree comparison (:class:`DirCmp`) object.
Requires a dictionary with the following keys:
ref (:obj:`PathLike`): Reference root directory path.
target (:obj:`PathLike`): Target root directory path.
passed via `request.param`. In both cases, if a relative path is provided, the starting folder will be
``src/python/tests/flatfiles``. This fixture is intended to be used via indirect parametrization, for
example::
@pytest.mark.parametrize(""dir_cmp"", [{'ref': 'citest/reference', 'target': 'citest/target'}],
indirect=True)
def test_method(..., dir_cmp: DirCmp, ...):
Args:
request: Access to the requesting test context.
tmp_path_factory: Temporary directory path factory fixture.
""""""
tmp_path = tmp_path_factory.mktemp(""dir_cmp_root"")
# Get the source and temporary absolute paths for reference and target root directories
ref = Path(request.param['ref']) # type: ignore[attr-defined]
ref_src = ref if ref.is_absolute() else pytest.files_dir / ref # type: ignore
ref_tmp = tmp_path / str(ref).replace(os.path.sep, '_')
target = Path(request.param['target']) # type: ignore[attr-defined]
target_src = target if target.is_absolute() else pytest.files_dir / target # type: ignore
target_tmp = tmp_path / str(target).replace(os.path.sep, '_')
# Copy directory trees (if they have not been copied already) ignoring file metadata
if not ref_tmp.exists():
shutil.copytree(ref_src, ref_tmp, copy_function=shutil.copy)
# Sleep one second in between to ensure the timestamp differs between reference and target files
time.sleep(1)
if not target_tmp.exists():
shutil.copytree(target_src, target_tmp, copy_function=shutil.copy)
return DirCmp(ref_tmp, target_tmp)
","Python"
"Codon","Ensembl/ensembl-compara","modules/Bio/EnsEMBL/Compara/HAL/HALXS/INLINE.h",".h","1732","44","/*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the ""License"");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an ""AS IS"" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
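/* Aliases mapping the Inline::C stack macro names (in the three
 * capitalisation styles below) onto the underlying Perl XS stack macros. */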
#define Inline_Stack_Vars dXSARGS
#define Inline_Stack_Items items
#define Inline_Stack_Item(x) ST(x)
#define Inline_Stack_Reset sp = mark
#define Inline_Stack_Push(x) XPUSHs(x)
#define Inline_Stack_Done PUTBACK
#define Inline_Stack_Return(x) XSRETURN(x)
#define Inline_Stack_Void XSRETURN(0)
#define INLINE_STACK_VARS Inline_Stack_Vars
#define INLINE_STACK_ITEMS Inline_Stack_Items
#define INLINE_STACK_ITEM(x) Inline_Stack_Item(x)
#define INLINE_STACK_RESET Inline_Stack_Reset
#define INLINE_STACK_PUSH(x) Inline_Stack_Push(x)
#define INLINE_STACK_DONE Inline_Stack_Done
#define INLINE_STACK_RETURN(x) Inline_Stack_Return(x)
#define INLINE_STACK_VOID Inline_Stack_Void
#define inline_stack_vars Inline_Stack_Vars
#define inline_stack_items Inline_Stack_Items
#define inline_stack_item(x) Inline_Stack_Item(x)
#define inline_stack_reset Inline_Stack_Reset
#define inline_stack_push(x) Inline_Stack_Push(x)
#define inline_stack_done Inline_Stack_Done
#define inline_stack_return(x) Inline_Stack_Return(x)
#define inline_stack_void Inline_Stack_Void
","Unknown"
"Codon","Ensembl/ensembl-compara","pipelines/SpeciesTreeFromBusco/scripts/fix_leaf_names.py",".py","1811","58","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
Replace leaf names in a species tree with production names.
Example:
$ python fix_leaf_names.py -t astral_species_tree_neutral_bl.nwk -c input_genomes.csv -o test.nwk
""""""
import sys
import argparse
from os import path
from Bio import Phylo
import pandas as pd
# Parse command line arguments:
parser = argparse.ArgumentParser(
description='Replace leaves of a species tree with production names.')
parser.add_argument(
'-t', metavar='tree', type=str, help=""Input tree."", required=True, default=None)
parser.add_argument(
'-c', metavar='csv', type=str, help=""Input CSV."", required=True, default=None)
parser.add_argument(
'-o', metavar='output', type=str, help=""Output tree."", required=True)
if __name__ == '__main__':
args = parser.parse_args()
df = pd.read_csv(args.c, delimiter=""\t"", header=None)
trans_map = {}
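    # Map column 10 (the fasta/leaf name) to column 2 (the production name);
    # pandas itertuples puts the row index at r[0], so columns are 1-based.
    # The layout matches the TSV written by fetch_genomes_from_db.py in this
    # pipeline.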
for r in df.itertuples():
trans_map[r[10]] = r[2]
tree = Phylo.read(args.t, format=""newick"")
for leaf in tree.get_terminals():
leaf.name = trans_map[leaf.name]
Phylo.write(
trees=tree,
file=args.o,
format=""newick""
)
","Python"
"Codon","Ensembl/ensembl-compara","pipelines/SpeciesTreeFromBusco/scripts/pick_third_site.py",".py","1845","59","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
Pick out every third site from a codon alignment. The input sequences
must have a length divisible by 3.
Example:
$ python pick_third_site.py -i input.fas -o output.fas
""""""
import sys
import argparse
from collections import OrderedDict
import pandas as pd
from Bio import SeqIO
# Parse command line arguments:
parser = argparse.ArgumentParser(
description='Pick out the third sites from a codon alignment.')
parser.add_argument(
'-i', metavar='input', type=str, help=""Input fasta."", required=True)
parser.add_argument(
'-o', metavar='output', type=str, help=""Output fasta."", required=True)
if __name__ == '__main__':
args = parser.parse_args()
fh = open(args.o, ""w"")
for record in SeqIO.parse(args.i, ""fasta""):
# Check if sequence length is divisible by 3:
if len(record.seq) % 3 != 0:
sys.stderr.write(f""The length of sequence {record.id} is not divisible by 3!\n"")
sys.exit(1)
# Pick out every third site:
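        # (slicing with [2::3] keeps indices 2, 5, 8, ...; for example,
        # 'ATGAAACCC'[2::3] == 'GAC', the third position of each codon)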
print(len(record.seq))
record.seq = record.seq[2::3]
# Write out record:
SeqIO.write(record, fh, ""fasta"")
fh.flush()
fh.close()
","Python"
"Codon","Ensembl/ensembl-compara","pipelines/SpeciesTreeFromBusco/scripts/fetch_genomes_from_db.py",".py","4595","120","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
Locate genome dumps of a collection or a species set on disk.
Example:
$ python fetch_genomes_from_db.py -u mysql://ensro@mysql-ens-compara-prod-1:4485/ensembl_compara_master -c pig_breeds -o test.tsv -d /hps/nobackup/flicek/ensembl/compara/shared/genome_dumps/vertebrates
""""""
import sys
import argparse
import csv
from os import path
from sqlalchemy.engine.row import Row
from sqlalchemy import create_engine, text
# Parse command line arguments:
parser = argparse.ArgumentParser(
description='Locate genomes of a species set/collection in the dump directory.')
parser.add_argument('-u', metavar='db_URL', type=str, help=""Compara master db URL."", required=True)
parser.add_argument(
'-d', metavar='dump_dir', type=str, help=""Genome dump directory."", required=True)
parser.add_argument(
'-s', metavar='ssid', type=str, help=""Species set id."", required=False, default=None)
parser.add_argument(
'-c', metavar='cid', type=str, help=""Collection id."", required=False, default=None)
parser.add_argument(
'-o', metavar='output', type=str, help=""Output CSV."", required=True)
def _dir_revhash(gid: int) -> str:
""""""Build directory hash from genome db id.""""""
dir_hash = list(reversed(str(gid)))
dir_hash.pop()
return path.join(*dir_hash) if dir_hash else path.curdir
def _build_dump_path(row: Row) -> str:
gcomp = """"
if row.genome_component is not None:
gcomp = f""comp{row.genome_component}.""
gpath = path.join(args.d, _dir_revhash(row.genome_db_id), f""{row.name}.{row.assembly}.{gcomp}soft.fa"")
return gpath
if __name__ == '__main__':
args = parser.parse_args()
db_url = args.u
ss_id = args.s
if ss_id == """":
ss_id = None
coll_id = args.c
if coll_id == """":
coll_id = None
engine = create_engine(db_url, future=True)
if ss_id is None and coll_id is None:
sys.stderr.write(""Either a collection name or a species set id must be specified!\n"")
sys.exit(1)
if ss_id is not None and coll_id is not None:
sys.stderr.write(""Specify either a collection name or a species set id!\n"")
sys.exit(1)
ss_query = f""""""
SELECT genome_db_id, genome_db.name, assembly, genebuild,
strain_name, display_name, genome_component
FROM species_set JOIN genome_db USING(genome_db_id)
WHERE species_set_id = {ss_id};
""""""
c_query = f""""""
SELECT genome_db_id, genome_db.name, assembly, genebuild,
strain_name, display_name, genome_component
FROM species_set_header JOIN species_set USING(species_set_id)
JOIN genome_db USING(genome_db_id) WHERE species_set_header.name='{coll_id}'
AND species_set_header.last_release is NULL
AND species_set_header.first_release IS NOT NULL;
""""""
query = ss_query
if coll_id is not None:
query = c_query
missing_fas = []
with engine.connect() as conn, open(args.o, ""w"") as ofh:
result = conn.execute(text(query))
writer = csv.writer(ofh, delimiter=""\t"", lineterminator=""\n"")
for row in result:
gpath = _build_dump_path(row)
fa_name = str(row.name) + ""_"" + str(row.assembly)
prod_name = row.name
if row.genome_component is not None:
fa_name += f""_{row.genome_component}""
prod_name += f""_{row.genome_component}""
dump_exists = path.exists(gpath)
if not dump_exists:
missing_fas.append(gpath)
writer.writerow([row.genome_db_id, prod_name, row.assembly, row.genebuild, row.strain_name,
row.display_name, row.genome_component, gpath, dump_exists, fa_name])
if len(missing_fas) > 0:
sys.stderr.write(""Fatal error! The following genome dumps are missing:\n"")
for p in missing_fas:
sys.stderr.write(f""{p}\n"")
sys.exit(1)
","Python"
"Codon","Ensembl/ensembl-compara","pipelines/SpeciesTreeFromBusco/scripts/filter_for_longest_busco.py",".py","3005","95","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
Script for filtering for the longest protein isoform per gene.
Example:
$ python filter_for_longest_busco.py -i input.fas -o output.fas -l output.genes.tsv
""""""
import sys
import argparse
import re
from Bio import SeqIO
# Parse command line arguments:
parser = argparse.ArgumentParser(
description='Filter for longest BUSCO isoform.')
parser.add_argument(
'-i', metavar='input', type=str, help=""Input fasta."", required=True)
parser.add_argument(
'-o', metavar='output', type=str, help=""Output fasta."",
default=""filtered_busco.fas"", required=True)
parser.add_argument(
'-l', metavar='output', type=str, help=""List of BUSCO genes."",
required=True)
parser.add_argument(
'-r', metavar='rep_filter', type=int,
help=""Filter out seqeunces with repeats longer than this."",
required=False, default=None)
parser.add_argument(
'-f', metavar='rep_out', type=str, help=""Fasta of filtered out sequences."",
default=""repetitive_busco.fas"", required=False)
if __name__ == '__main__':
args = parser.parse_args()
db = {}
with open(args.i) as handle:
for record in SeqIO.parse(handle, ""fasta""):
gene = record.id.split(""_"")[0]
if gene not in db:
db[gene] = record
elif len(record.seq) > len(db[gene].seq):
db[gene] = record
if len(db) == 0:
sys.stderr.write(""Empty BUSCO gene set!\n"")
sys.exit(1)
i = 0
repetitive = {}
for k, v in list(db.items()):
seq = str(v.seq)
if args.r is not None:
rg = f""([A-Za-z]+?)\\1{{{args.r},}}""
res = re.search(rg, seq)
if res:
repetitive[k] = v
del db[k]
else:
v.id = f""g{i}""
v.description = """"
i += 1
else:
v.id = f""g{i}""
v.description = """"
i += 1
with open(args.o, ""w"") as output_handle:
SeqIO.write(db.values(), output_handle, ""fasta"")
with open(args.f, ""w"") as output_handle:
SeqIO.write(repetitive.values(), output_handle, ""fasta"")
with open(args.l, ""w"") as oh:
oh.write(""Gene\tGeneId\n"")
for gene, rec in db.items():
gid = rec.id
oh.write(f""{gene}\t{gid}\n"")
","Python"
"Codon","Ensembl/ensembl-compara","pipelines/SpeciesTreeFromBusco/scripts/collate_busco_results.py",".py","5029","141","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
Script for collating per-genome BUSCO cDNA results.
Example:
$ python collate_busco_results.py --input input.fofn --genes busco_genes.txt \
--output per_busco_genes --stats output_stats.tsv --taxa taxa.tsv
""""""
import sys
import argparse
from collections import defaultdict, OrderedDict
from os import path
from typing import Dict, List, Tuple
import pandas as pd
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
# Parse command line arguments:
parser = argparse.ArgumentParser(
description='Collate BUSCO results and build per-gene fasta files.')
parser.add_argument(
'-i', metavar='input', type=str, help=""Input fofn."", required=True)
parser.add_argument(
'-l', metavar='genes', type=str, help=""List of busco genes."", required=True)
parser.add_argument(
'-o', metavar='output', type=str, help=""Output directory."", required=True)
parser.add_argument(
'-s', metavar='stats', type=str, help=""Output stats TSV."", required=True)
parser.add_argument(
'-t', metavar='taxa', type=str, help=""Output taxa TSV."", required=True)
parser.add_argument(
'-m', metavar='min_taxa', type=float, help=""Minimum fraction of taxa."", required=True)
def load_sequences(infile: str) -> Tuple[Dict[str, List[SeqRecord]], int]:
""""""
Load sequences from fasta files and filter out duplicates.
""""""
lgenes = defaultdict(list)
with open(infile) as handle:
for record in SeqIO.parse(handle, ""fasta""):
gene = str(record.id).split(""_"", maxsplit=1)[0]
lgenes[gene].append(record)
filtered = {}
lduplicates = 0
for gene, records in lgenes.items():
if len(records) > 1:
lduplicates += 1
else:
filtered[gene] = records[0]
return filtered, lduplicates
if __name__ == '__main__':
args = parser.parse_args()
all_genes = pd.read_csv(args.l, sep=""\t"")
with open(args.i) as x:
seq_files = [y.strip() for y in x.readlines()]
if len(seq_files) == 0:
sys.stderr.write(""No input cDNA files specified in the fofn file!\n"")
sys.exit(1)
taxa = []
dups = []
usable = []
per_gene = defaultdict(list)
for seq_file in seq_files:
genes, duplicates = load_sequences(seq_file)
taxon = path.basename(seq_file)
for g, s in genes.items():
s.id = taxon # type: ignore
s.description = """" # type: ignore
per_gene[g].append(s)
taxa.append(taxon)
dups.append(duplicates)
usable.append(len(genes))
stat_data = OrderedDict(
[('Taxa', taxa), ('Genes', usable), ('Duplicates', dups)])
stat_df = pd.DataFrame(stat_data)
stat_df[""SetGenes""] = len(all_genes.Gene)
stat_df[""UsablePercent""] = (stat_df.Genes * 100) / stat_df.SetGenes
stat_df.to_csv(args.s, sep=""\t"", index=False)
taxa_df = pd.DataFrame({""Taxa"": taxa})
taxa_df.to_csv(args.t, sep=""\t"", index=False)
if sum(stat_df.Genes == 0) > 0:
sys.stderr.write(""Some genomes have no usable genes annotated! Please check the stats file!\n"")
# sys.exit(1)
# Dump per-gene cDNAs:
for g, s in per_gene.items():
# Filter out sequences with lengths not divisible by 3:
s = [x for x in s if len(x.seq) % 3 == 0]
# Filter out gene if we don't have enough taxa:
if len(s) / len(taxa) < args.m:
continue
with open(path.join(args.o, f""gene_cdna_{g}.fas""), ""w"") as output_handle:
SeqIO.write(s, output_handle, ""fasta"")
ts = []
for x in s: # type: ignore
if (len(x.seq) % 3) != 0:
continue
y = x.translate(stop_symbol='*', to_stop=False, cds=False) # type: ignore
# Filter out if cDNA had too many Ns:
if str(y.seq).count(""X"") > 10:
continue
# Filter out sequences with multiple stop
# codons:
if y.seq.count(""*"") > 1:
continue
y.id = x.id # type: ignore
y.description = """"
ts.append(y)
if len(ts) < 3:
continue
if len(ts) / len(taxa) < args.m:
continue
with open(path.join(args.o, f""gene_prot_{g}.fas""), ""w"") as output_handle:
SeqIO.write(ts, output_handle, ""fasta"")
","Python"
"Codon","Ensembl/ensembl-compara","pipelines/SpeciesTreeFromBusco/scripts/alignments_to_partitions.py",".py","3382","106","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
Merge alignments into partitions.
Example:
$ python alignments_to_partitions.py -i input.fofn -o output.fas -p output_partitions.csv -t input.taxa
""""""
import sys
import os
import argparse
from collections import OrderedDict
import pandas as pd
from Bio import SeqIO
# Parse command line arguments:
parser = argparse.ArgumentParser(
description='Merge alignments with common (possibly partial) taxa.')
parser.add_argument(
'-i', metavar='input', type=str, help=""Input fofn."", required=True)
parser.add_argument(
'-o', metavar='output', type=str, help=""Output fasta."", required=True)
parser.add_argument(
'-p', metavar='output', type=str, help=""Partition file."", required=True)
parser.add_argument(
'-t', metavar='input_list', type=str, help=""Input taxa file."", required=True)
if __name__ == '__main__':
args = parser.parse_args()
# Read in taxa:
taxa_df = pd.read_csv(args.t, sep=""\t"")
taxa = sorted(taxa_df.Taxa)
# Initialize results dict:
merged = OrderedDict()
for t in taxa:
merged[t] = """"
# Open partitions file:
# pylint: disable=consider-using-with
part_fh = open(args.p, ""w"")
    # Slurp the list of input alignments:
with open(args.i) as x:
aln_files = [y.strip() for y in x.readlines()]
if len(aln_files) == 0:
sys.stderr.write(""No alignment files specified in the input fofn file!\n"")
sys.exit(1)
# Total alignment length so far:
total_len = 0
# For each alignment:
for nr_part, aln_file in enumerate(aln_files):
# Check for empty alignments:
if os.stat(aln_file).st_size == 0:
continue
# Read in aligned sequences:
records = {x.id: x for x in SeqIO.parse(aln_file, ""fasta"")}
        # Get the current alignment length:
curr_len = len(list(records.values())[0].seq)
# Define partition start and end:
start = total_len + 1
end = total_len + curr_len
# Define partition number:
shift_part = nr_part + 1
# Write out partition:
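        # e.g. a first alignment of 300 columns yields the RAxML/IQ-TREE style
        # partition line 'LG+F+G, part1 = 1-300' (the LG+F+G model is fixed here).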
part_fh.write(f""LG+F+G, part{shift_part} = {start}-{end}\n"")
# Advance length counter:
total_len += curr_len
        # For each taxon, concatenate its sequence if present, or gaps if missing:
for t in taxa:
if t in records:
merged[t] = merged[t] + str(records[t].seq)
else:
merged[t] = merged[t] + ""-"" * curr_len
part_fh.flush()
part_fh.close()
# Write out merged alignment file:
with open(args.o, ""w"") as fas_fh:
for taxa, seq in merged.items():
if seq.count(""-"") < len(seq):
fas_fh.write(f"">{taxa}\n{seq}\n"")
","Python"
"Codon","Ensembl/ensembl-compara","travisci/perl-external_unittest_harness.sh",".sh","1842","48","#!/bin/bash
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo ""We are running Perl '$TRAVIS_PERL_VERSION', Coverage reporting is set to '$COVERAGE'""
# Setup the environment variables
ENSEMBL_PERL5OPT='-MDevel::Cover=+ignore,bioperl,+ignore,ensembl,+ignore,ensembl-test,+ignore,ensembl-variation,+ignore,ensembl-funcgen'
ENSEMBL_TESTER=""$PWD/ensembl-test/scripts/runtests.pl""
ENSEMBL_TESTER_OPTIONS=()
CORE_SCRIPTS=(""$PWD/ensembl/modules/t/compara.t"")
REST_SCRIPTS=(""$PWD/ensembl-rest/t/genomic_alignment.t"" ""$PWD/ensembl-rest/t/info.t"" ""$PWD/ensembl-rest/t/taxonomy.t"" ""$PWD/ensembl-rest/t/homology.t"" ""$PWD/ensembl-rest/t/gene_tree.t"" ""$PWD/ensembl-rest/t/cafe_tree.t"")
if [ ""$COVERAGE"" = 'true' ]; then
EFFECTIVE_PERL5OPT=""$ENSEMBL_PERL5OPT""
ENSEMBL_TESTER_OPTIONS+=('-verbose')
else
EFFECTIVE_PERL5OPT=""""
fi
echo ""Running ensembl test suite using $PERL5LIB""
PERL5OPT=""$EFFECTIVE_PERL5OPT"" perl ""$ENSEMBL_TESTER"" ""${ENSEMBL_TESTER_OPTIONS[@]}"" ""${CORE_SCRIPTS[@]}""
rt1=$?
echo ""Running ensembl-rest test suite using $PERL5LIB""
PERL5OPT=""$EFFECTIVE_PERL5OPT"" perl ""$ENSEMBL_TESTER"" ""${ENSEMBL_TESTER_OPTIONS[@]}"" ""${REST_SCRIPTS[@]}""
rt2=$?
if [[ ($rt1 -eq 0) && ($rt2 -eq 0) ]]; then
exit 0
else
exit 255
fi
","Shell"
"Codon","Ensembl/ensembl-compara","travisci/python-linter_harness.sh",".sh","2505","76","#!/bin/bash
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Setup the environment variables
# shellcheck disable=SC2155
export PYTHONPATH=$PYTHONPATH:$(python -c 'import sysconfig; print(sysconfig.get_paths()[""purelib""])')
# more info: https://mypy.readthedocs.io/en/stable/running_mypy.html#mapping-file-paths-to-modules
export MYPYPATH=$MYPYPATH:src/python/lib
# Function to run pylint
run_pylint() {
local pylint_output_file=$(mktemp)
# Run pylint, excluding specific files and directories
find ""${PYTHON_SOURCE_LOCATIONS[@]}"" -type f -name ""*.py"" \
\! -name ""Ortheus.py"" \
\! -name ""*citest*.py"" \
\! -path ""*/citest/*"" -print0 |
xargs -0 pylint --rcfile=pyproject.toml --verbose \
--msg-template='COMPARA_PYLINT_MSG:{path}:{line}:{column}: {msg_id}: {msg} ({symbol})' |
tee ""$pylint_output_file""
# Return 1 if pylint messages were found, otherwise 0
# -c option counts the number of matches, -m 1 stops after the first match to optimize performance,
local result=$(grep -c -m 1 -E '^COMPARA_PYLINT_MSG:' ""$pylint_output_file"")
# Cleanup
rm ""$pylint_output_file""
return ""$result""
}
# Function to run mypy, excluding certain files and paths, and capturing the outcome
run_mypy() {
find ""${PYTHON_SOURCE_LOCATIONS[@]}"" -type f -name ""*.py"" \
\! -name ""Ortheus.py"" \
\! -name ""*citest*.py"" \
\! -path ""*/citest/*"" -print0 |
xargs -0 mypy --config-file pyproject.toml --namespace-packages --explicit-package-bases
}
# Define Python source locations
PYTHON_SOURCE_LOCATIONS=('scripts' 'src/python')
# Run pylint and mypy, capturing their return codes
run_pylint
rt1=$?
run_mypy
rt2=$?
# Determine exit code based on results
if [[ $rt1 -eq 0 && $rt2 -eq 0 ]]; then
exit 0 # success
elif [[ $rt1 -ne 0 ]]; then
exit 1 # pylint error
elif [[ $rt2 -ne 0 ]]; then
exit 2 # mypy error
else
exit 3 # error on both
fi
","Shell"
"Codon","Ensembl/ensembl-compara","travisci/trigger-dependent-build.sh",".sh","5531","156","#!/bin/bash
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################
# Global Variables #
####################
# The Travis API endpoint. .com and .org are the commercial and free versions,
# respectively; enterprise users will have their own hostname.
endpoint=https://api.travis-ci.org
#############
# Functions #
#############
# Get this repo ID
repo_id () {
    curl -s -X GET -H ""Authorization: token $AUTH_TOKEN"" -H ""Travis-API-Version: 3"" $endpoint/repo/$1 | python3 -c ""import sys, json; print(json.load(sys.stdin)['id'])""
}
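# Illustrative use (the slug is just an example):
#   repo_id Ensembl%2Fensembl-rest   # prints the numeric repository ID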
# Make an API request using the auth token set above. First argument is the path
# of the API method, all later arguments are passed to curl directly.
travis_api () {
curl -s $endpoint$1 \
-H ""Authorization: token $AUTH_TOKEN"" \
-H 'Content-Type: application/json' \
-H 'Travis-API-Version: 3' \
""${@:2}""
}
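# Illustrative calls (see the loop below for the real uses):
#   travis_api /repo/$dep_repo/builds                        # GET a repo's builds
#   travis_api /repo/$dep_repo/requests -X POST -d ""$body""   # trigger a build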
# Create a new environment variable for the repo and return its ID.
# First argument is the repo id, second is the environment variable
# name, and third is the value.
function env_var {
travis_api /settings/env_vars?repository_id=$1 \
-d ""{\""env_var\"":{\""name\"":\""$2\"",\""value\"":\""$3\"",\""public\"":true}}"" |
sed 's/{""env_var"":{""id"":""\([^""]*\)"",.*/\1/'
}
# print a spinner and terminate it
sp=""/-\|""
sc=0
spin() {
printf ""\b${sp:sc++:1}""
((sc==${#sp})) && sc=0
}
endspin() {
printf ""\r%s\n"" ""$@""
}
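# Typical use, as in the polling loops below ('some_condition' stands for any test):
#   until some_condition; do spin; sleep 5; done; endspin ""Done.""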
# Only run for master builds. Pull request builds have the branch set to master,
# so ignore those too.
if [ ""${TRAVIS_BRANCH}"" != ""master"" ] || [ ""${TRAVIS_PULL_REQUEST}"" != ""false"" ]; then
exit 0
fi
# The list of downstream dependent repos
dep_repos=(""Ensembl%2Fensembl-rest"")
for dep_repo in ""${dep_repos[@]}""; do
# Get the ID of the dependent repo
dep_repo_id=`repo_id $dep_repo`
echo ""Dependent repo: $dep_repo (ID: $dep_repo_id)""
echo ""Checking API triggered builds in the last hour""
if travis_api /repo/$dep_repo/builds?build.event_type=api | python3 travisci/api_build_run_last_hour.py | grep -q ""True""; then
echo ""Detected recent API-triggered build (run in the last hour) ... skip.""
continue
fi
echo ""----------------------------------""
echo ""Triggering build on dependent repo""
echo ""----------------------------------""
body=""{
\""request\"": {
\""message\"": \""Build triggered by upstream $TRAVIS_REPO_SLUG repo (commit: $TRAVIS_COMMIT, branch: $TRAVIS_BRANCH).\"",
\""branch\"": \""master\""
}}""
# Make the request to trigger the build and get the ID of the request
dep_repo_master_build_request_id=`travis_api /repo/$dep_repo/requests -H 'Accept: application/json' -X POST -d ""$body"" | python3 -c ""import sys, json; print(json.load(sys.stdin)['request']['id'])""`
echo ""Build request ID: $dep_repo_master_build_request_id""
# Wait until request is approved or max amount of time has passed
i=0
echo ""Waiting for build request $dep_repo_master_build_request_id to be approved ""
build_request_approved=""""
until travis_api /repo/$dep_repo/request/$dep_repo_master_build_request_id | grep -q '""result"": ""approved""'; do
spin
sleep 5
true $(( i++ ))
if [ $i -eq 100 ]
then
echo "" reached max waiting time ... ABORT""
exit 1
fi
done
endspin
echo ""Build request approved.""
# Get the ID of the master build.
dep_repo_master_build_id=`travis_api /repo/$dep_repo/request/$dep_repo_master_build_request_id | python3 -c ""import sys, json; print(json.load(sys.stdin)['builds'][0]['id'])""`
echo ""Build on $dep_repo master branch created (ID: $dep_repo_master_build_id)""
# # Set the three environment variables needed, and capture their IDs so that they
# # can be removed later.
# env_var_ids=(`env_var $dep_repo_id DEPENDENT_BUILD true`
# `env_var $dep_repo_id TRIGGER_COMMIT $TRAVIS_COMMIT`
# `env_var $dep_repo_id TRIGGER_REPO $TRAVIS_REPO_SLUG`)
# Wait for the build to start using the new environment variables.
i=0
printf ""Waiting for build $dep_repo_master_build_id to start ""
build_started=""""
until travis_api /build/$dep_repo_master_build_id | grep -q '""state"": ""started""'; do
spin
sleep 5
true $(( i++ ))
if [ $i -eq 100 ]
then
echo "" reached max waiting time ... stop waiting""
build_started=""not yet""
break
fi
done
endspin
echo ""Build $dep_repo_master_build_id $build_started started""
# Remove all of the environment variables set above. This does mean that if this
# script is terminated for whatever reason, these will need to be cleaned up
# manually. We can do this either through the API, or by going to Settings ->
# Environment Variables in the Travis web interface.
#
# for env_var_id in ""${env_var_ids[@]}""; do
# travis_api /settings/env_vars/$env_var_id?repository_id=$dep_repo_id -X DELETE
# done
done
","Shell"
"Codon","Ensembl/ensembl-compara","travisci/compile.py",".py","982","27","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Scripts that tests that every module listed as command-line arguments can be compiled""""""
import sys
for f in sys.argv[1:]:
    try:
        # Open the file in a context manager so the handle is closed promptly
        with open(f) as fh:
            compile(fh.read(), f, 'exec', 0, 1)
    except SyntaxError as err:
        print('{}:{}:{}: {}'.format(err.filename, err.lineno, err.offset, err.msg))
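# Illustrative invocation (the file list is just an example):
#   python3 travisci/compile.py scripts/pipeline/*.py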
","Python"
"Codon","Ensembl/ensembl-compara","travisci/api_build_run_last_hour.py",".py","1204","32","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from datetime import datetime
import json
def api_build_run_last_hour(x):
    # 'finished_at' is an ISO-8601 UTC timestamp ('Z' suffix), so compare against the current UTC time
    return x['state'] != 'canceled' and \
        x['event_type'] == 'api' and \
        (x['finished_at'] is None or
         (datetime.utcnow() - datetime.strptime(x['finished_at'], '%Y-%m-%dT%H:%M:%SZ')).total_seconds() / 3600 < 1.0)
builds = list(filter(api_build_run_last_hour, json.load(sys.stdin)['builds']))
print(len(builds) > 0)
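# Expected stdin (sketch): the JSON payload of the Travis /repo/<id>/builds endpoint, e.g.
#   {""builds"": [{""state"": ""passed"", ""event_type"": ""api"", ""finished_at"": ""2020-01-01T00:00:00Z""}]}
# which would print False here (finished more than an hour ago).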
","Python"
"Codon","Ensembl/ensembl-compara","travisci/perl-linter_harness.sh",".sh","1151","32","#!/bin/bash
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
prove -r ./travisci/perl-linter/
rt1=$?
# Check that all the PODs are valid (we don't mind missing PODs at the moment)
# Note the initial ""!"" to negate grep's return code
! find docs modules scripts sql travisci \( -iname '*.t' -o -iname '*.pl' -o -iname '*.pm' \) -print0 | xargs -0 podchecker 2>&1 | grep -v /HALXS/blib/ | grep -v ' pod syntax OK' | grep -v 'does not contain any pod commands'
rt2=$?
if [[ ($rt1 -eq 0) && ($rt2 -eq 0) ]]; then
exit 0
else
exit 255
fi
","Shell"
"Codon","Ensembl/ensembl-compara","travisci/perl-unittest_harness.sh",".sh","1976","51","#!/bin/bash
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo ""We are running Perl '$TRAVIS_PERL_VERSION', Coverage reporting is set to '$COVERAGE'""
# Setup the environment variables
ENSEMBL_PERL5OPT='-MDevel::Cover=+ignore,bioperl,+ignore,ensembl,+ignore,ensembl-test,+ignore,ensembl-variation,+ignore,ensembl-funcgen'
ENSEMBL_TESTER=""$PWD/ensembl-test/scripts/runtests.pl""
ENSEMBL_TESTER_OPTIONS=()
COMPARA_SCRIPTS=(""$PWD/modules/t"")
if [ ""$COVERAGE"" = 'true' ]; then
EFFECTIVE_PERL5OPT=""$ENSEMBL_PERL5OPT""
ENSEMBL_TESTER_OPTIONS+=('-verbose')
else
EFFECTIVE_PERL5OPT=""""
fi
echo ""Running ensembl-compara test suite using $PERL5LIB""
PERL5OPT=""$EFFECTIVE_PERL5OPT"" perl ""$ENSEMBL_TESTER"" ""${ENSEMBL_TESTER_OPTIONS[@]}"" ""${COMPARA_SCRIPTS[@]}""
rt1=$?
# Check that all the Perl files can be compiled
find docs modules scripts sql travisci -iname '*.t' -print0 | xargs -0 -n 1 perl -c
rt2=$?
find docs modules scripts sql travisci -iname '*.pl' \! -name 'sample_genomic_regions.pl' \! -name 'test_hal_gab_access.pl' -print0 | xargs -0 -n 1 perl -c
rt3=$?
find docs modules scripts sql travisci -iname '*.pm' \! -name 'LoadHalMapping.pm' \! -name 'LoadSynonyms.pm' \! -name 'HALAdaptor.pm' \! -name 'HALXS.pm' -print0 | xargs -0 -n 1 perl -c
rt4=$?
if [[ ($rt1 -eq 0) && ($rt2 -eq 0) && ($rt3 -eq 0) && ($rt4 -eq 0) ]]; then
exit 0
else
exit 255
fi
","Shell"
"Codon","Ensembl/ensembl-compara","src/python/examples/unit_tests/test_orthology_benchmark.py",".py","12907","304","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Unit testing of `orthology_benchmark.py` script.
Typical usage example::
$ pytest test_orthology_benchmark.py
""""""
from contextlib import nullcontext as does_not_raise
from importlib.abc import Loader
from importlib.machinery import ModuleSpec
from importlib.util import module_from_spec, spec_from_file_location
import os
from pathlib import Path
import sys
from typing import ContextManager, Dict, List
import sqlalchemy
import pytest
from pytest import FixtureRequest, raises, warns
from ensembl.compara.filesys import file_cmp
script_path = Path(__file__).parents[3] / ""scripts"" / ""pipeline"" / ""orthology_benchmark.py""
script_name = script_path.stem
script_spec = spec_from_file_location(script_name, script_path)
if not isinstance(script_spec, ModuleSpec):
raise ImportError(f""ModuleSpec not created for module file '{script_path}'"")
if not isinstance(script_spec.loader, Loader):
raise ImportError(f""no loader found for module file '{script_path}'"")
orthology_benchmark_module = module_from_spec(script_spec)
sys.modules[script_name] = orthology_benchmark_module
script_spec.loader.exec_module(orthology_benchmark_module)
# pylint: disable=import-error,wrong-import-order,wrong-import-position
import orthology_benchmark # type: ignore
# pylint: enable=import-error,wrong-import-order,wrong-import-position
@pytest.mark.parametrize(
""multi_dbs"",
[
[{'src': 'core/gallus_gallus_core_99_6'}, {'src': 'core/homo_sapiens_core_99_38'}]
],
indirect=True
)
class TestDumpGenomes:
""""""Tests :func:`orthology_benchmark.dump_genomes()` function.
Attributes:
core_dbs: A set of test core databases.
host: Host of the test database server.
port: Port of the test database server.
username: Username to access the test `host:port`
""""""
core_dbs: dict = {}
host: str | None = None
port: int | None = None
username: str | None = None
# autouse=True makes this fixture be executed before any test_* method of this class, and scope='class' to
# execute it only once per class parametrization
@pytest.fixture(scope='class', autouse=True)
def setup(self, request: FixtureRequest, multi_dbs: Dict) -> None:
""""""Loads the required fixtures and values as class attributes.
Args:
request: Access to the requesting test context.
multi_dbs: Dictionary of unit test databases (fixture).
""""""
type(self).core_dbs = multi_dbs
server_url = sqlalchemy.engine.url.make_url(request.config.getoption('server'))
type(self).host = server_url.host
type(self).port = server_url.port
type(self).username = ""ensro"" if server_url.username == ""ensadmin"" else server_url.username
@pytest.mark.skipif(os.environ['USER'] == 'travis',
reason=""The test requires both Perl and Python which is not supported by Travis."")
@pytest.mark.parametrize(
""core_list, species_set_name, id_type, expectation"",
[
(
[f""{os.environ['USER']}_gallus_gallus_core_99_6"",
f""{os.environ['USER']}_homo_sapiens_core_99_38""],
""default"", ""protein"", does_not_raise()
),
([], ""test"", ""gene"", raises(ValueError, match=r""No cores to dump.""))
]
)
def test_dump_genomes(self, core_list: List[str], species_set_name: str,
tmp_path: Path, id_type: str, expectation: ContextManager) -> None:
""""""Tests :func:`orthology_benchmark.dump_genomes()` when server connection can be established.
Args:
core_list: A list of core database names.
species_set_name: Species set (collection) name.
            tmp_path: Unit test temp directory (fixture).
            id_type: Type of identifier to use in the dumps.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.nullcontext` if no exception is expected.
""""""
with expectation:
orthology_benchmark.dump_genomes(core_list, species_set_name, self.host, self.port, tmp_path,
id_type)
out_files = tmp_path / species_set_name
# pylint: disable-next=no-member
exp_out = pytest.files_dir / ""orth_benchmark"" # type: ignore[attr-defined,operator]
for db_name, unittest_db in self.core_dbs.items():
assert file_cmp(out_files / f""{unittest_db.dbc.db_name}.fasta"", exp_out / f""{db_name}.fasta"")
def test_dump_genomes_fake_connection(self, tmp_path: Path) -> None:
""""""Tests :func:`orthology_benchmark.dump_genomes()` with fake server details.
Args:
tmp_path: Unit test temp directory (fixture).
""""""
with raises(RuntimeError):
orthology_benchmark.dump_genomes([""mus_musculus"", ""naja_naja""], ""fake"",
""fake-host"", 65536, tmp_path, ""protein"")
def test_dump_genomes_fake_output_path(self) -> None:
""""""Tests :func:`orthology_benchmark.dump_genomes()` with fake output path.""""""
with raises(OSError, match=r""Failed to create '/nonexistent/path/default' directory.""):
orthology_benchmark.dump_genomes([""mus_musculus"", ""naja_naja""], ""default"",
self.host, self.port, ""/nonexistent/path"", ""protein"")
@pytest.mark.parametrize(
""core_names, exp_output, expectation"",
[
([""mus_musculus_core_105_1"", ""mus_musculus_core_52_105_3"", ""mus_musculus_core_104_4""],
""mus_musculus_core_52_105_3"", does_not_raise()),
([], None, raises(ValueError,
match=r""Empty list of core databases. Cannot determine the latest one.""))
]
)
def test_find_latest_core(core_names: List[str], exp_output: str, expectation: ContextManager) -> None:
""""""Tests :func:`orthology_benchmark.find_latest_core()` function.
Args:
core_names: A list of core database names.
exp_output: Expected return value of the function.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.nullcontext` if no exception is expected.
""""""
with expectation:
assert orthology_benchmark.find_latest_core(core_names) == exp_output
@pytest.mark.parametrize(
""multi_dbs"",
[
[{'src': 'core/danio_rerio_core_105_11'}, {'src': 'core/mus_musculus_cbaj_core_107_1'},
{'src': 'core/mus_musculus_core_106_39'}]
],
indirect=True
)
class TestGetCoreNames:
""""""Tests :func:`orthology_benchmark.get_core_names()` function.
Attributes:
core_dbs: A set of test core databases.
host: Host of the test database server.
port: Port of the test database server.
username: Username to access the test `host:port`
""""""
core_dbs: Dict = {}
host: str | None = None
port: int | None = None
username: str | None = None
# autouse=True makes this fixture be executed before any test_* method of this class, and scope='class' to
# execute it only once per class parametrization
@pytest.fixture(scope='class', autouse=True)
def setup(self, request: FixtureRequest, multi_dbs: Dict) -> None:
""""""Loads the required fixtures and values as class attributes.
Args:
request: Access to the requesting test context.
multi_dbs: Dictionary of unit test databases (fixture).
""""""
type(self).core_dbs = multi_dbs
server_url = sqlalchemy.engine.url.make_url(request.config.getoption('server'))
type(self).host = server_url.host
type(self).port = server_url.port
type(self).username = ""ensro"" if server_url.username == ""ensadmin"" else server_url.username
@pytest.mark.parametrize(
""species_names, exp_output, expectation"",
[
([""danio_rerio"", ""mus_musculus"", ""zea_mays""],
{""danio_rerio"": os.environ['USER'] + ""_danio_rerio_core_105_11"",
""mus_musculus"": os.environ['USER'] + ""_mus_musculus_core_106_39""},
does_not_raise()),
([], None, raises(ValueError,
match=r""Empty list of species names. Cannot search for core databases.""))
]
)
def test_get_core_names(self, species_names: List[str], exp_output: Dict[str, str],
expectation: ContextManager) -> None:
""""""Tests :func:`orthology_benchmark.get_core_names()` when server connection can be established.
Args:
species_names: Species (genome) names.
exp_output: Expected return value of the function.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.nullcontext` if no exception is expected.
""""""
with expectation:
assert orthology_benchmark.get_core_names(species_names, self.host, self.port,
self.username) == exp_output
def test_get_core_names_fake_connection(self) -> None:
""""""Tests :func:`orthology_benchmark.get_core_names()` with fake server details.""""""
with raises(sqlalchemy.exc.OperationalError):
orthology_benchmark.get_core_names([""danio_rerio"", ""mus_musculus""], ""fake-host"", 65536, ""compara"")
@pytest.mark.parametrize(
""core_name, expectation"",
[
(""juglans_regia_core_51_104_1"", does_not_raise()),
(""ensembl_compara_core_53_106_30"", warns(UserWarning,
match=r""GTF file for 'ensembl_compara_core_53_106_30' ""
r""not found.""))
]
)
def test_get_gtf_file(core_name: str, tmp_path: Path, expectation: ContextManager) -> None:
""""""Tests :func:`orthology_benchmark.get_gtf_file()` function.
Args:
core_name: Core db name.
tmp_path: Unit test temp directory (fixture).
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.nullcontext` if no exception is expected.
""""""
# pylint: disable-next=no-member
test_source_dir = pytest.files_dir / ""orth_benchmark"" # type: ignore[attr-defined,operator]
with expectation:
orthology_benchmark.get_gtf_file(core_name, test_source_dir, tmp_path)
exp_out = test_source_dir / ""release-51"" / ""plants"" / ""gtf"" / ""juglans_regia"" / \
""Juglans_regia.Walnut_2.0.51.gtf.gz""
    assert file_cmp(tmp_path / ""Juglans_regia.Walnut_2.0.51.gtf.gz"", exp_out)
@pytest.mark.parametrize(
""core_names, expectation"",
[
([""juglans_regia_core_51_104_1"", ""anopheles_albimanus_core_51_104_2""], does_not_raise()),
([], raises(ValueError, match=r""Empty list of core db names. Cannot search for GTF files.""))
]
)
def test_prepare_gtf_files(core_names: List[str], tmp_path: Path, expectation: ContextManager) -> None:
""""""Tests :func:`orthology_benchmark.prepare_gtf_files()` function.
Args:
core_names: Core db names.
tmp_path: Unit test temp directory (fixture).
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.nullcontext` if no exception is expected.
""""""
# pylint: disable-next=no-member
test_source_dir = pytest.files_dir / ""orth_benchmark"" # type: ignore[attr-defined,operator]
with expectation:
orthology_benchmark.prepare_gtf_files(core_names, test_source_dir, tmp_path)
rel_dir = test_source_dir / ""release-51""
exp_out1 = rel_dir / ""plants"" / ""gtf"" / ""juglans_regia"" / ""Juglans_regia.Walnut_2.0.51.gtf""
exp_out2 = rel_dir / ""metazoa"" / ""gtf"" / ""anopheles_albimanus"" / ""Anopheles_albimanus.AalbS2.51.gtf""
    assert file_cmp(tmp_path / ""Juglans_regia.Walnut_2.0.51.gtf"", exp_out1)
    assert file_cmp(tmp_path / ""Anopheles_albimanus.AalbS2.51.gtf"", exp_out2)
","Python"
"Codon","Ensembl/ensembl-compara","src/python/examples/unit_tests/test_data_dumps.py",".py","7478","194","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Unit testing of `data_dumps.py` script.
Typical usage example::
$ pytest test_data_dumps.py
""""""
from contextlib import nullcontext as does_not_raise
from importlib.abc import Loader
from importlib.machinery import ModuleSpec
from importlib.util import module_from_spec, spec_from_file_location
import os
from pathlib import Path
import sys
from typing import ContextManager, Dict, List
import sqlalchemy
import pytest
from ensembl.compara.filesys import file_cmp
script_path = Path(__file__).parents[3] / ""scripts"" / ""pipeline"" / ""data_dumps.py""
script_name = script_path.stem
script_spec = spec_from_file_location(script_name, script_path)
if not isinstance(script_spec, ModuleSpec):
raise ImportError(f""ModuleSpec not created for module file '{script_path}'"")
if not isinstance(script_spec.loader, Loader):
raise ImportError(f""no loader found for module file '{script_path}'"")
data_dumps_module = module_from_spec(script_spec)
sys.modules[script_name] = data_dumps_module
script_spec.loader.exec_module(data_dumps_module)
# pylint: disable=import-error,wrong-import-order,wrong-import-position
import data_dumps # type: ignore
# pylint: enable=import-error,wrong-import-order,wrong-import-position
def test_find_latest_core_naive() -> None:
""""""Tests :func:`data_dumps.find_latest_core()` function when it does not raise an error.
Args:
core_names: A list of core database names.
""""""
core_names = [""mus_musculus_core_105_1"", ""mus_musculus_core_52_105_3"", ""mus_musculus_core_104_4""]
assert data_dumps.find_latest_core(core_names) == ""mus_musculus_core_52_105_3""
def test_find_latest_core_naive_error() -> None:
""""""Tests :func:`data_dumps.find_latest_core()` function when it raises an error.
Args:
core_names: A list of core database names.
""""""
with pytest.raises(ValueError):
data_dumps.find_latest_core([])
@pytest.mark.parametrize(
""core_names, exp_output, expectation"",
[
([""mus_musculus_core_105_1"", ""mus_musculus_core_52_105_3"", ""mus_musculus_core_104_4""],
""mus_musculus_core_52_105_3"", does_not_raise()),
([], None, pytest.raises(ValueError))
]
)
def test_find_latest_core(core_names: List[str], exp_output: str, expectation: ContextManager) -> None:
""""""Tests :func:`data_dumps.find_latest_core()` function.
Args:
core_names: A list of core database names.
exp_output: Expected return value of the function.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.nullcontext` if no exception is expected.
""""""
with expectation:
assert data_dumps.find_latest_core(core_names) == exp_output
@pytest.mark.parametrize(
""multi_dbs"",
[
[{'src': 'core/gallus_gallus_core_99_6'}, {'src': 'core/homo_sapiens_core_99_38'}]
],
indirect=True
)
class TestDumpGenomes:
""""""Tests :func:`data_dumps.dump_genomes()` function.
Attributes:
core_dbs: A set of test core databases.
host: Host of the test database server.
port: Port of the test database server.
username: Username to access the test `host:port`
""""""
core_dbs: dict = {}
host: str | None = None
port: int | None = None
username: str | None = None
# autouse=True makes this fixture be executed before any test_* method of this class, and scope='class'
# to execute it only once per class parametrization
@pytest.fixture(scope='class', autouse=True)
def setup(self, request: pytest.FixtureRequest, multi_dbs: Dict) -> None:
""""""Loads the required fixtures and values as class attributes.
Args:
request: Access to the requesting test context.
multi_dbs: Dictionary of unit test databases (fixture).
""""""
type(self).core_dbs = multi_dbs
server_url = sqlalchemy.engine.url.make_url(request.config.getoption('server'))
type(self).host = server_url.host
type(self).port = server_url.port
type(self).username = ""ensro"" if server_url.username == ""ensadmin"" else server_url.username
@pytest.mark.skipif(os.environ['USER'] == 'travis',
reason=""The test requires both Perl and Python which is not supported by Travis."")
@pytest.mark.parametrize(
""core_list, species_set_name, id_type, expectation"",
[
(
[f""{os.environ['USER']}_gallus_gallus_core_99_6"",
f""{os.environ['USER']}_homo_sapiens_core_99_38""],
""vertebrates"", ""protein"", does_not_raise()
),
([], ""test"", ""gene"", pytest.raises(ValueError))
]
)
def test_dump_genomes(self, core_list: List[str], species_set_name: str,
tmp_path: Path, id_type: str, expectation: ContextManager) -> None:
""""""Tests :func:`data_dumps.dump_genomes()` when server connection can be established.
Args:
core_list: A list of core db names.
species_set_name: Species set (collection) name.
tmp_path: Unit test temp directory (fixture).
id_type: Type of identifier to use in the dumps.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.nullcontext` if no exception is expected.
""""""
with expectation:
data_dumps.dump_genomes(core_list, species_set_name, self.host, self.port, tmp_path,
id_type)
out_files = tmp_path / species_set_name
# pylint: disable-next=no-member
exp_out = pytest.files_dir / ""dump_genomes"" # type: ignore[attr-defined,operator]
for db_name, unittest_db in self.core_dbs.items():
assert file_cmp(out_files / f""{unittest_db.dbc.db_name}.fasta"", exp_out / f""{db_name}.fasta"")
def test_dump_genomes_fake_connection(self, tmp_path: Path) -> None:
""""""Tests :func:`data_dumps.dump_genomes()` with fake server details.
Args:
tmp_path: Unit test temp directory (fixture).
""""""
with pytest.raises(RuntimeError):
data_dumps.dump_genomes([""mus_musculus"", ""naja_naja""], ""fake"",
""fake-host"", 65536, tmp_path, ""protein"")
def test_dump_genomes_fake_output_path(self) -> None:
""""""Tests :func:`data_dumps.dump_genomes()` with fake output path.""""""
with pytest.raises(OSError):
data_dumps.dump_genomes([""mus_musculus"", ""naja_naja""], ""default"",
self.host, self.port, ""/nonexistent/path"", ""protein"")
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/__init__.py",".py","687","16","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Ensembl Compara namespace""""""
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/utils/tools.py",".py","1248","42","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Collection of utils methods targeted to diverse topics and applications.
Typical usage examples::
>>> from ensembl.compara.utils.tools import *
>>> print(to_list('3'))
['3']
""""""
__all__ = ['to_list']
from typing import Any, Optional, List
def to_list(x: Optional[Any]) -> List:
""""""Returns the list version of `x`.
Returns:
`x` if `x` is a list, a list containing `x` if `x` is not a list and ``bool(x)`` is True, and an empty
list otherwise.
""""""
if not x:
return []
if not isinstance(x, list):
return [x]
return x
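# Illustrative behaviour, matching the docstring above:
#   to_list(None) -> [], to_list('3') -> ['3'], to_list(['a', 'b']) -> ['a', 'b']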
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/utils/__init__.py",".py","697","18","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Utils module.""""""
from .tools import *
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/citest/_citest.py",".py","2443","68","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""CITest abstract classes.""""""
__all__ = ['CITestMeta', 'CITestItem']
from abc import ABC, ABCMeta, abstractmethod
from collections import OrderedDict
from typing import Dict, Optional, Tuple, Union
import py
import pytest
class CITestMeta(ABCMeta, type(pytest.Item)): # type: ignore
""""""Metaclass required to solve the metaclass conflict in CITestItem.""""""
class CITestItem(ABC, pytest.Item, metaclass=CITestMeta):
""""""Abstract class of the test set to compare two (analogous) Ensembl Compara elements.
Args:
name: Name of the test to run.
parent: The parent collector node.
args: Arguments to pass to the test call.
Attributes:
args (Dict): Arguments to pass to the test call.
error_info (OrderedDict): Additional information provided when a test fails.
""""""
def __init__(self, name: str, parent: pytest.Item, args: Dict) -> None:
super().__init__(name, parent)
self.args = args
self.error_info = OrderedDict() # type: OrderedDict
def runtest(self) -> None:
""""""Executes the selected test function with the given arguments.
Raises:
SyntaxError: If the test function to call does not exist.
""""""
test_method = 'test_' + self.name
if not hasattr(self, test_method):
raise SyntaxError(f""Test '{self.name}' not found"")
getattr(self, test_method)(**self.args)
def reportinfo(self) -> Tuple[Union[py.path.local, str], Optional[int], str]:
""""""Returns the location, the exit status and the header of the report section.""""""
return self.fspath, None, self.get_report_header()
@abstractmethod
def get_report_header(self) -> str:
""""""Returns the header to display in the error report.""""""
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/citest/pytest_citest.py",".py","7621","170","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""CITest plugin for pytest.""""""
from collections import OrderedDict
import json
import os
from typing import Dict, Iterator, Optional
import py
import pytest
from _pytest.config.argparsing import Parser
from _pytest.runner import TestReport
from ensembl.database import DBConnection
from ..filesys import DirCmp
from .testdb import CITestDBItem
from .testfiles import CITestFilesItem
@pytest.hookimpl()
def pytest_addoption(parser: Parser) -> None:
""""""Registers argparse-style options for CITest.""""""
group = parser.getgroup(""continuous integration test (citest)"")
group.addoption('--reference-db', action='store', metavar='URL', dest='reference_db',
help=""URL to reference database"")
group.addoption('--reference-dir', action='store', metavar='PATH', dest='reference_dir',
help=""Path to reference root directory"")
group.addoption('--target-db', action='store', metavar='URL', dest='target_db',
help=""URL to target database"")
group.addoption('--target-dir', action='store', metavar='PATH', dest='target_dir',
help=""Path to target root directory"")
def pytest_collect_file(parent: pytest.Session, path: py.path.local) -> Optional[pytest.File]:
""""""Returns the collection of tests to run as indicated in the given JSON file.""""""
if path.ext == '.json':
return JsonFile(path, parent)
return None
def pytest_sessionstart(session: pytest.Session) -> None:
""""""Adds required variables to the session before entering the run test loop.""""""
session.report = {}
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: pytest.Item) -> TestReport:
""""""Returns the test report updated with custom information.""""""
outcome = yield
report = outcome.get_result()
if report.when == 'call':
item.session.report[item] = report
def pytest_sessionfinish(session: pytest.Session) -> None:
""""""Generates a custom report before returning the exit status to the system.""""""
# Use the configuration JSON file as template for the report
config_filename = session.config.getoption('file_or_dir')[0]
with open(config_filename) as f:
full_report = json.load(f, object_pairs_hook=OrderedDict)
# Update/add global information
for arg in ['reference_db', 'reference_dir', 'target_db', 'target_dir']:
full_report[arg] = session.config.getoption(arg, full_report.get(arg), True)
# Add the reported information of each test
failed = 0
for item, report in session.report.items():
if isinstance(item, CITestDBItem):
test_list = full_report['database_tests'][item.table]
else:
test_list = full_report['files_tests']
for test in test_list:
# Find the test entry corresponding to this item
if (test['test'] == item.name) and (test['args'] == item.args):
test['status'] = report.outcome.capitalize()
if report.failed:
failed += 1
test['error'] = OrderedDict([('message', report.longreprtext)])
if item.error_info:
test['error']['details'] = item.error_info
break
# Save full report in a JSON file with the same name as the citest JSON file
report_filename = os.path.basename(config_filename).rsplit(""."", 1)[0] + "".report.json""
# Make sure not to overwrite previous reports
if os.path.isfile(report_filename):
i = 1
while os.path.isfile(f""{report_filename}.{i}""):
i += 1
report_filename = f""{report_filename}.{i}""
with open(report_filename, ""w"") as f:
json.dump(full_report, f, indent=4)
# Print summary in STDOUT
total = len(session.report)
print(f""\n{total - failed} out of {total} tests ok"")
class JsonFile(pytest.File):
""""""Test collector from CITest JSON files.""""""
def collect(self) -> Iterator:
""""""Parses the JSON file and loads all the tests.
Returns:
Iterator of :class:`testdb.CITestDBItem` or :class:`testfiles.CITestFilesItem` objects (depending
                on the tests included in the JSON file).
Raises:
AttributeError: If ``test`` or ``args`` keys are missing in any test.
""""""
# Load the JSON file
with self.fspath.open() as f:
pipeline_tests = json.load(f)
# Parse each test and load it
if 'database_tests' in pipeline_tests:
# Load the reference and target DBs
ref_url = self._get_arg(pipeline_tests, 'reference_db')
target_url = self._get_arg(pipeline_tests, 'target_db')
ref_dbc = DBConnection(ref_url)
target_dbc = DBConnection(target_url)
for table, test_list in pipeline_tests['database_tests'].items():
for test in test_list:
# Ensure required keys are present in every test
if 'test' not in test:
raise AttributeError(f""Missing argument 'test' in database_tests['{table}']"")
if 'args' not in test:
raise AttributeError(
f""Missing argument 'args' in database_tests['{table}']['{test['test']}']"")
yield CITestDBItem(test['test'], self, ref_dbc, target_dbc, table, test['args'])
if 'files_tests' in pipeline_tests:
# Load the reference and target directory paths
ref_path = os.path.expandvars(self._get_arg(pipeline_tests, 'reference_dir'))
target_path = os.path.expandvars(self._get_arg(pipeline_tests, 'target_dir'))
dir_cmp = DirCmp(ref_path=ref_path, target_path=target_path)
for i, test in enumerate(pipeline_tests['files_tests'], 1):
# Ensure required keys are present in every test
if 'test' not in test:
raise AttributeError(f""Missing argument 'test' in files_tests #{i}"")
if 'args' not in test:
raise AttributeError(f""Missing argument 'args' in files_tests #{i}"")
yield CITestFilesItem(test['test'], self, dir_cmp, test['args'])
def _get_arg(self, pipeline_tests: Dict, name: str) -> str:
""""""Returns the requested parameter from the command line (priority) or the JSON configuration file.
Args:
pipeline_tests: Pipeline tests and their configuration.
name: Parameter name.
Raises:
ValueError: If the parameter has not been set in neither the command line nor the JSON
configuration file.
""""""
argument = self.config.getoption(name, pipeline_tests.get(name, ''), True)
if not argument:
raise ValueError(f""Required argument '--{name.replace('_', '-')}' or '{name}' key in JSON file"")
return argument
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/citest/__init__.py",".py","724","19","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""CITest module.""""""
from .testdb import *
from .testfiles import *
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/citest/testdb.py",".py","11221","231","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""CITest database comparison module.""""""
__all__ = ['CITestDBItem', 'CITestDBError', 'CITestDBContentError', 'CITestDBGroupingError',
'CITestDBNumRowsError']
from typing import Any, Dict, List, Union
import pandas
import pytest
from _pytest._code.code import ExceptionChainRepr, ExceptionInfo, ReprExceptionInfo
from _pytest.fixtures import FixtureLookupErrorRepr
from sqlalchemy import func
from sqlalchemy.sql.expression import select, text
from ensembl.database import Query, DBConnection
from ..utils import to_list
from ._citest import CITestItem
class CITestDBItem(CITestItem):
""""""Generic tests to compare a table in two (analogous) Ensembl Compara MySQL databases.
Args:
name: Name of the test to run.
parent: The parent collector node.
ref_dbc: Reference database connection handler.
target_dbc: Target database connection handler.
table: Table to be tested.
args: Arguments to pass to the test call.
Attributes:
ref_dbc (DBConnection): Reference database connection handler.
target_dbc (DBConnection): Target database connection handler.
table (str): Table to be tested.
""""""
def __init__(self, name: str, parent: pytest.Item, ref_dbc: DBConnection, target_dbc: DBConnection,
table: str, args: Dict) -> None:
super().__init__(name, parent, args)
self.ref_dbc = ref_dbc
self.target_dbc = target_dbc
self.table = table
def repr_failure(self, excinfo: ExceptionInfo, style: str = None
) -> Union[str, ReprExceptionInfo, ExceptionChainRepr, FixtureLookupErrorRepr]:
""""""Returns the failure representation that will be displayed in the report section.
Note:
This method is called when :meth:`CITestDBItem.runtest()` raises an exception.
Args:
excinfo: Exception information with additional support for navigating and traceback.
style: Traceback print mode (``auto``/``long``/``short``/``line``/``native``/``no``).
""""""
if isinstance(excinfo.value, CITestDBError):
self.error_info['expected'] = excinfo.value.expected
self.error_info['found'] = excinfo.value.found
self.error_info['query'] = excinfo.value.query
return excinfo.value.args[0] + ""\n""
if isinstance(excinfo.value, TypeError):
return excinfo.value.args[0] + ""\n""
return super().repr_failure(excinfo, style)
def get_report_header(self) -> str:
""""""Returns the header to display in the error report.""""""
return f""Database table: {self.table}, test: {self.name}""
def test_num_rows(self, variation: float = 0.0, group_by: Union[str, List] = None,
filter_by: Union[str, List] = None) -> None:
""""""Compares the number of rows between reference and target tables.
If `group_by` is provided, the same variation will be applied to each group.
Args:
variation: Allowed variation between reference and target tables.
group_by: Group rows by column(s), and count the number of rows per group.
filter_by: Filter rows by one or more conditions (joined by the AND operator).
        Raises:
CITestDBGroupingError: If `group_by` is provided and the groups returned are different.
            CITestDBNumRowsError: If the number of rows differs more than the expected variation for at least
one group.
""""""
# Compose the SQL query from the given parameters (both databases should have the same table schema)
table = self.ref_dbc.tables[self.table]
group_by = to_list(group_by)
columns = [table.columns[col] for col in group_by]
# Use primary key (if any) in count to improve the query performance
primary_keys = self.ref_dbc.get_primary_key_columns(self.table)
primary_key_col = table.columns[primary_keys[0]] if primary_keys else None
query = select(columns + [func.count(primary_key_col).label('nrows')]).select_from(table)
if columns:
# ORDER BY to ensure that the results are always in the same order (for the same groups)
query = query.group_by(*columns).order_by(*columns)
for clause in to_list(filter_by):
query = query.where(text(clause))
# Get the number of rows for both databases
ref_data = pandas.read_sql(query, self.ref_dbc.connect())
target_data = pandas.read_sql(query, self.target_dbc.connect())
if group_by:
# Check if the groups returned are the same
merged_data = ref_data.merge(target_data, on=group_by, how='outer', indicator=True)
if not merged_data[merged_data['_merge'] != 'both'].empty:
# Remove columns ""nrows_x"", ""nrows_y"" and ""_merge"" in the dataframes to include in the report
ref_only = merged_data[merged_data['_merge'] == 'left_only'].iloc[:, :-3]
target_only = merged_data[merged_data['_merge'] == 'right_only'].iloc[:, :-3]
raise CITestDBGroupingError(self.table, ref_only, target_only, query)
# Check if the number of rows (per group) are within the allowed variation
difference = abs(ref_data['nrows'] - target_data['nrows'])
allowed_variation = ref_data['nrows'] * variation
failing_rows = difference > allowed_variation
if failing_rows.any():
raise CITestDBNumRowsError(self.table, ref_data.loc[failing_rows], target_data.loc[failing_rows],
query)
def test_content(self, *, columns: Union[str, List] = None, ignore_columns: Union[str, List] = None,
filter_by: Union[str, List] = None) -> None:
""""""Compares the content between reference and target tables.
The data and the data type of each column have to be the same in both tables in order to be considered
equal.
Args:
columns: Columns to take into account in the comparison.
ignore_columns: Columns to exclude in the comparison, i.e. all columns but those included in this
parameter will be compared.
filter_by: Filter rows by one or more conditions (joined by the AND operator).
        Raises:
TypeError: If both `columns` and `ignore_columns` are provided.
            CITestDBNumRowsError: If the number of rows differs.
CITestDBContentError: If one or more rows have different content.
""""""
if columns and ignore_columns:
raise TypeError(""Expected either 'columns' or 'ignore_columns', not both"")
# Compose the SQL query from the given parameters (both databases should have the same table schema)
table = self.ref_dbc.tables[self.table]
if columns:
columns = to_list(columns)
db_columns = [table.columns[col] for col in columns]
else:
ignore_columns = to_list(ignore_columns)
db_columns = [col for col in table.columns if col.name not in ignore_columns]
columns = [col.name for col in db_columns]
query = select(db_columns)
for clause in to_list(filter_by):
query = query.where(text(clause))
# Get the table content for the selected columns
ref_data = pandas.read_sql(query, self.ref_dbc.connect())
target_data = pandas.read_sql(query, self.target_dbc.connect())
# Check if the size of the returned tables are the same
# Note: although not necessary, this control provides a better error message
if ref_data.shape != target_data.shape:
raise CITestDBNumRowsError(self.table, ref_data.shape[0], target_data.shape[0], query)
# Compare the content of both dataframes
merged_data = ref_data.merge(target_data, how='outer', indicator=True)
if not merged_data[merged_data['_merge'] != 'both'].empty:
# Remove column ""_merge"" in the dataframes to include in the report
ref_only = merged_data[merged_data['_merge'] == 'left_only'].iloc[:, :-1]
target_only = merged_data[merged_data['_merge'] == 'right_only'].iloc[:, :-1]
raise CITestDBContentError(self.table, ref_only, target_only, query)
class CITestDBError(Exception):
""""""Exception subclass created to handle test failures separatedly from unexpected exceptions.
Args:
message: Error message to display.
expected: Expected value(s) (reference database).
found: Value(s) found (target database).
query: SQL query used to retrieve the information.
Attributes:
expected (Any): Expected value(s) (reference database).
found (Any): Value(s) found (target database).
query (Query): SQL query used to retrieve the information.
""""""
def __init__(self, message: str, expected: Any, found: Any, query: Query) -> None:
super().__init__(message)
self.expected = self._parse_data(expected)
self.found = self._parse_data(found)
        self.query = str(query).replace('\n', ' ').strip()
@staticmethod
def _parse_data(data: Any) -> Any:
""""""Returns a list representation of `data` if it is a dataframe, `data` otherwise.""""""
if isinstance(data, pandas.DataFrame):
# Avoid the default list representation for empty dataframes:
# ['Empty DataFrame', 'Columns: []', 'Index: []']
return [] if data.empty else data.to_string(index=False).splitlines()
return data
class CITestDBContentError(CITestDBError):
""""""Exception raised when `table` has different content in reference and target databases.""""""
def __init__(self, table: str, *args: Any) -> None:
message = f""Different content found in table '{table}'""
super().__init__(message, *args)
class CITestDBGroupingError(CITestDBError):
""""""Exception raised when `table` returns different groups for reference and target databases.""""""
def __init__(self, table: str, *args: Any) -> None:
message = f""Different groups found for table '{table}'""
super().__init__(message, *args)
class CITestDBNumRowsError(CITestDBError):
""""""Exception raised when `table` has different number of rows in reference and target databases.""""""
def __init__(self, table: str, *args: Any) -> None:
message = f""Different number of rows for table '{table}'""
super().__init__(message, *args)
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/citest/testfiles.py",".py","8489","174","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""CITest files system comparison module.""""""
__all__ = ['CITestFilesItem', 'CITestFilesError', 'CITestFilesContentError', 'CITestFilesSizeError',
'CITestFilesTreeError']
import os
from typing import Dict, List, Union
import pytest
from _pytest.fixtures import FixtureLookupErrorRepr
from _pytest._code.code import ExceptionChainRepr, ExceptionInfo, ReprExceptionInfo
from ..filesys import DirCmp, PathLike, file_cmp
from ..utils import to_list
from ._citest import CITestItem
class CITestFilesItem(CITestItem):
""""""Generic tests to compare two (analogous) Ensembl Compara files (or directories).
Args:
name: Name of the test to run.
parent: The parent collector node.
dir_cmp: Directory comparison object to run the test against.
args: Arguments to pass to the test call.
Attributes:
dir_cmp (DirCmp): Directory comparison object to run the test against.
""""""
def __init__(self, name: str, parent: pytest.Item, dir_cmp: DirCmp, args: Dict) -> None:
super().__init__(name, parent, args)
self.dir_cmp = dir_cmp
def repr_failure(self, excinfo: ExceptionInfo, style: str = None
) -> Union[str, ReprExceptionInfo, ExceptionChainRepr, FixtureLookupErrorRepr]:
""""""Returns the failure representation that will be displayed in the report section.
Note:
This method is called when :meth:`CITestFilesItem.runtest()` raises an exception.
Args:
excinfo: Exception information with additional support for navigating and traceback.
style: Traceback print mode (``auto``/``long``/``short``/``line``/``native``/``no``).
""""""
if isinstance(excinfo.value, CITestFilesError):
self.error_info['mismatches'] = excinfo.value.mismatches
self.error_info['reference_only'] = excinfo.value.ref_only
self.error_info['target_only'] = excinfo.value.target_only
return excinfo.value.args[0] + ""\n""
return super().repr_failure(excinfo, style)
def get_report_header(self) -> str:
""""""Returns the header to display in the error report.""""""
return f""File test: {self.name}""
def test_size(self, variation: float = 0.0, patterns: Union[str, List] = None,
paths: Union[PathLike, List] = None) -> None:
""""""Compares the size (in bytes) between reference and target files.
Args:
variation: Allowed size variation between reference and target files.
            patterns: Glob patterns the filenames need to match (at least one).
paths: Relative directory/file path(s) to be compared (including their subdirectories).
Raises:
CITestFilesTreeError: If reference and target directory trees differ (for any selected path).
            CITestFilesSizeError: If at least one file differs in size between reference and target.
""""""
paths = to_list(paths)
# Nested function (closure) to compare the reference and target file sizes
def cmp_file_size(ref_filepath: PathLike, target_filepath: PathLike) -> bool:
""""""Returns True if `target_filepath` size is larger than allowed variation, False otherwise.""""""
ref_size = os.path.getsize(ref_filepath)
target_size = os.path.getsize(target_filepath)
return abs(ref_size - target_size) > (ref_size * variation)
# Traverse the common directory tree, comparing every reference and target file sizes
mismatches = self.dir_cmp.apply_test(cmp_file_size, patterns, paths)
# Check if there are files either in the reference or the target (but not in both)
ref_only = self.dir_cmp.ref_only_list(patterns, paths)
target_only = self.dir_cmp.target_only_list(patterns, paths)
if mismatches:
raise CITestFilesSizeError(mismatches, ref_only, target_only)
if ref_only or target_only:
raise CITestFilesTreeError(ref_only, target_only)
def test_content(self, patterns: Union[str, List] = None, paths: Union[PathLike, List] = None) -> None:
""""""Compares the content between reference and target files.
Args:
patterns: Glob patterns the filenames need to match (at least one).
paths: Relative directory/file path(s) to be compared (including their subdirectories).
Raises:
CITestFilesTreeError: If reference and target directory trees differ (for any selected path).
            CITestFilesContentError: If at least one file differs between reference and target.
""""""
paths = to_list(paths)
# Nested function (closure) to compare the reference and target files
def cmp_file_content(ref_filepath: PathLike, target_filepath: PathLike) -> bool:
""""""Returns True if `ref_filepath` and `target_filepath` differ, False otherwise.""""""
return not file_cmp(ref_filepath, target_filepath)
# Traverse the common directory tree, comparing every reference and target files
mismatches = self.dir_cmp.apply_test(cmp_file_content, patterns, paths)
# Check if there are files either in the reference or the target (but not in both)
ref_only = self.dir_cmp.ref_only_list(patterns, paths)
target_only = self.dir_cmp.target_only_list(patterns, paths)
if mismatches:
raise CITestFilesContentError(mismatches, ref_only, target_only)
if ref_only or target_only:
raise CITestFilesTreeError(ref_only, target_only)
class CITestFilesError(Exception):
""""""Exception subclass created to handle test failures separatedly from unexpected exceptions.
Args:
message: Error message to display.
ref_only: Files/directories only found in the reference directory tree.
target_only: Files/directories only found in the target directory tree.
mismatches: Files that differ between reference and target directory trees.
Attributes:
ref_only (List[str]): Files/directories only found in the reference directory tree.
target_only (List[str]): Files/directories only found in the target directory tree.
mismatches (List[str]): Files that differ between reference and target directory trees.
""""""
def __init__(self, message: str, mismatches: List, ref_only: List, target_only: List) -> None:
super().__init__(message)
self.mismatches = mismatches
self.ref_only = ref_only
self.target_only = target_only
class CITestFilesContentError(CITestFilesError):
""""""Exception raised when comparing the file contents between reference and target directory trees.""""""
def __init__(self, mismatches: List, ref_only: List, target_only: List) -> None:
num_mms = len(mismatches)
message = f""Found {num_mms} file{'s' if num_mms > 1 else ''} with different content""
super().__init__(message, mismatches, ref_only, target_only)
class CITestFilesSizeError(CITestFilesError):
""""""Exception raised when comparing the file sizes between reference and target directory trees.""""""
def __init__(self, mismatches: List, ref_only: List, target_only: List) -> None:
num_mms = len(mismatches)
message = (f""Found {num_mms} file{'s' if num_mms > 1 else ''} that differ in size more than the ""
""allowed variation"")
super().__init__(message, mismatches, ref_only, target_only)
class CITestFilesTreeError(CITestFilesError):
""""""Exception raised when comparing the file sizes between reference and target directory trees.""""""
def __init__(self, ref_only: List, target_only: List) -> None:
super().__init__(""Reference and target directory trees are not the same"", [], ref_only, target_only)
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/filesys/dircmp.py",".py","8409","183","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Folder tree comparison methods.""""""
from __future__ import annotations
__all__ = ['DirCmp']
from collections import deque
import fnmatch
import functools
import itertools
import os
from pathlib import Path
from typing import Callable, Deque, Iterator
from ensembl.compara.utils import to_list
from ensembl.utils import StrPath
class DirCmp:
""""""Directory comparison object to compare reference and target directory trees.
Args:
ref_path: Reference root path, e.g. ``/home/user/pipelines/reference``.
target_path: Target root path, e.g. ``/home/user/pipelines/target``.
Attributes:
ref_path (Path): Reference directory path.
target_path (Path): Target directory path.
common_files (Set[str]): Files shared between reference and target directories.
ref_only (Set[str]): Files/subdirectories only found in the reference directory.
target_only (Set[str]): Files/subdirectories only found in the target directory.
subdirs (Dict[Path, DirCmp]): Shared subdirectories between reference and target directories.
Raises:
OSError: If either reference or target directories do not exist.
""""""
def __init__(self, ref_path: StrPath, target_path: StrPath) -> None:
self.ref_path = Path(ref_path)
if not self.ref_path.exists():
raise OSError(f""Reference directory '{ref_path}' not found"")
self.target_path = Path(target_path)
if not self.target_path.exists():
raise OSError(f""Target directory '{target_path}' not found"")
ref_dirnames, ref_filenames = next(os.walk(self.ref_path))[1:]
ref_dnames = set(ref_dirnames)
ref_fnames = set(ref_filenames)
target_dirnames, target_filenames = next(os.walk(self.target_path))[1:]
target_dnames = set(target_dirnames)
target_fnames = set(target_filenames)
self.common_files = ref_fnames & target_fnames
# Get files/subdirectories only present in the reference directory
self.ref_only = ref_fnames - target_fnames
for ref_only_dname in ref_dnames - target_dnames:
for path, _, files in os.walk(self.ref_path / ref_only_dname):
rel_path = os.path.relpath(path, self.ref_path)
self.ref_only |= {os.path.join(rel_path, fname) for fname in files}
# Get files/subdirectories only present in the target directory
self.target_only = target_fnames - ref_fnames
for target_only_dname in target_dnames - ref_dnames:
for path, _, files in os.walk(self.target_path / target_only_dname):
rel_path = os.path.relpath(path, self.target_path)
self.target_only |= {os.path.join(rel_path, fname) for fname in files}
self.subdirs: dict[Path, DirCmp] = {}
for dirname in ref_dnames & target_dnames:
self.subdirs[Path(dirname)] = DirCmp(self.ref_path / dirname, self.target_path / dirname)
def _traverse(
self, attr: str, patterns: str | list | None = None, paths: StrPath | list | None = None
) -> Iterator[str]:
""""""Yields each element of the requested attribute found in the directory trees.
This method traverses the shared directory tree in breadth-first order.
Args:
attr: Attribute to return, i.e. ``common_files``, ``ref_only`` or ``target_only``.
patterns: Filenames yielded will match at least one of these glob patterns.
paths: Relative directory/file paths to traverse.
Raises:
ValueError: If one of `paths` is not part of the shared directory tree.
""""""
nodes_left: Deque[tuple[Path, DirCmp]] = deque()
# Fetch and append the root node of each relative path
for rel_path in to_list(paths):
try:
node = functools.reduce(lambda x, y: x.subdirs[Path(y)], Path(rel_path).parts, self)
except KeyError:
# Suppress exception context to display only the ValueError
raise ValueError(f""Path '{rel_path}' not found in shared directory tree"") from None
nodes_left.append((Path(rel_path), node))
# If no nodes were added, add the root as the starting point
if not nodes_left:
nodes_left.append((Path(), self))
# Prefix each pattern with ""**"" so that it also matches files within subdirectories (for
# reference-/target-only files)
patterns = [f""**{glob}"" for glob in to_list(patterns)]
while nodes_left:
dirname, node = nodes_left.popleft()  # popleft() keeps the documented breadth-first order
# Append subdirectories to the list of directories left to traverse
nodes_left.extend([(dirname / subdir, subnode) for subdir, subnode in node.subdirs.items()])
if patterns:
# Get every element of the requested attribute that matches at least one of the patterns
mapping = map(functools.partial(fnmatch.filter, getattr(node, attr)), patterns)
# Remove element repetitions, result of its name matching more than one pattern
elements = set(itertools.chain(*mapping))
else:
elements = getattr(node, attr)
for ename in elements:
yield str(dirname / str(ename))
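# Illustration of the pattern handling above (hedged): 'a*' becomes '**a*', which also
# matches entries stored with a relative path such as 'sub/dir/a.txt', since fnmatch's
# '*' matches path separators as well.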
def apply_test(
self, test_func: Callable, patterns: str | list | None = None, paths: StrPath | list | None = None
) -> list[str]:
""""""Returns the files in the shared directory tree for which the test function returns True.
Args:
test_func: Test function applied to each tuple reference- / target-file. It has to expect two
``StrPath`` parameters and return a boolean, like::
def test_func(ref_filepath: StrPath, target_filepath: StrPath) -> bool:
patterns: Filenames returned will match at least one of these glob patterns.
paths: Relative directory/file paths to evaluate (including their subdirectories).
""""""
positives = []
for filepath in self._traverse('common_files', patterns, paths):
if test_func(self.ref_path / filepath, self.target_path / filepath):
positives.append(filepath)
return positives
def common_list(
self, patterns: str | list | None = None, paths: StrPath | list | None = None
) -> list[str]:
""""""Returns the files/directories found in the shared directory tree.
Args:
patterns: Filenames returned will match at least one of these glob patterns.
paths: Relative directory/file paths to return (including their subdirectories).
""""""
return list(self._traverse('common_files', patterns, paths))
def ref_only_list(
self, patterns: str | list | None = None, paths: StrPath | list | None = None
) -> list[str]:
""""""Returns the files/directories only found in the reference directory tree.
Args:
patterns: Filenames returned will match at least one of these glob patterns.
paths: Relative directory/file paths to return (including their subdirectories).
""""""
return list(self._traverse('ref_only', patterns, paths))
def target_only_list(
self, patterns: str | list | None = None, paths: StrPath | list | None = None
) -> list[str]:
""""""Returns the files/directories only found in the target directory tree.
Args:
patterns: Filenames returned will match at least one of these glob patterns.
paths: Relative directory/file paths to return (including their subdirectories).
""""""
return list(self._traverse('target_only', patterns, paths))
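# Minimal usage sketch (hedged; the paths below are illustrative assumptions):
#
#     dir_cmp = DirCmp('/home/user/pipelines/reference', '/home/user/pipelines/target')
#     shared = dir_cmp.common_list(patterns='*.txt')
#     extra = dir_cmp.target_only_list()
#     changed = dir_cmp.apply_test(lambda ref, tgt: os.path.getsize(ref) != os.path.getsize(tgt))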
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/filesys/__init__.py",".py","748","19","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""File system-related classes and methods.""""""
from .dircmp import *
from .filecmp import *
","Python"
"Codon","Ensembl/ensembl-compara","src/python/lib/ensembl/compara/filesys/filecmp.py",".py","2833","76","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""File comparison methods for different file formats.
This module provides a main file comparison method, :meth:`file_cmp()`, that compares two files and returns
True if they are equivalent, False otherwise. The comparison is made differently depending on the file format.
For instance, two Newick files are considered equal if one tree is the result of a permutation of the other.
Typical usage examples::
file_cmp('a/homo_sapiens.fa', 'b/homo_sapiens.fa')
from pathlib import Path
file_cmp(Path('a', 'tree1.nw'), Path('b', 'tree2.nw'))
""""""
__all__ = ['NEWICK_EXT', 'file_cmp']
import filecmp
from pathlib import Path
from Bio import Phylo
from ensembl.utils import StrPath
# File extensions that should be interpreted as the same file format:
NEWICK_EXT = {'.nw', '.nwk', '.newick', '.nh'}
def file_cmp(fpath1: StrPath, fpath2: StrPath) -> bool:
""""""Returns True if files `fpath1` and `fpath2` are equivalent, False otherwise.
Args:
fpath1: First file path.
fpath2: Second file path.
""""""
fext1 = Path(fpath1).suffix
fext2 = Path(fpath2).suffix
if (fext1 in NEWICK_EXT) and (fext2 in NEWICK_EXT):
return _tree_cmp(fpath1, fpath2)
# Resort to a shallow binary file comparison (files with identical os.stat() signatures are taken to be
# equal)
return filecmp.cmp(str(fpath1), str(fpath2))
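# Note (hedged): the tree comparison is used only when *both* extensions are in
# NEWICK_EXT; e.g. comparing 'tree.nw' against 'tree.txt' falls through to the shallow
# filecmp.cmp() above.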
def _tree_cmp(fpath1: StrPath, fpath2: StrPath, tree_format: str = 'newick') -> bool:
""""""Returns True if trees stored in `fpath1` and `fpath2` are equivalent, False otherwise.
Args:
fpath1: First tree file path.
fpath2: Second tree file path.
tree_format: Tree format, i.e. ``newick``, ``nexus``, ``phyloxml`` or ``nexml``.
""""""
ref_tree = Phylo.read(fpath1, tree_format)
target_tree = Phylo.read(fpath2, tree_format)
# Both trees are considered equal if they have the same leaves and the same distance from each to the root
ref_dists = {leaf.name: ref_tree.distance(leaf) for leaf in ref_tree.get_terminals()}
target_dists = {leaf.name: target_tree.distance(leaf) for leaf in target_tree.get_terminals()}
return ref_dists == target_dists
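# Hedged illustration of the distance-based equivalence: two files containing the
# permuted trees '((a:1,b:2):1,c:3);' and '(c:3,(b:2,a:1):1);' compare as equal, since
# each leaf keeps the same root-to-leaf distance (a: 2, b: 3, c: 3) in both trees.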
","Python"
"Codon","Ensembl/ensembl-compara","src/python/tests/test_hal_gene_liftover.py",".py","7069","164","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Unit testing of `hal_gene_liftover.py` script.
Typical usage example::
$ pytest test_hal_gene_liftover.py
""""""
from contextlib import nullcontext as does_not_raise
import filecmp
from importlib.abc import Loader
from importlib.machinery import ModuleSpec
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
import sys
from types import ModuleType
from typing import ContextManager, Iterable, Mapping, Union
import pytest
from pytest import raises
def import_module_from_file(module_file: Union[Path, str]) -> ModuleType:
""""""Import module from file path.
The name of the imported module is the basename of the specified module
file without its extension.
In addition to being returned by this function, the imported module is
loaded into the sys.modules dictionary, allowing for commands such as
:code:`from <module> import <class>`.
Args:
module_file: File path of module to import.
Returns:
The imported module.
""""""
if not isinstance(module_file, Path):
module_file = Path(module_file)
module_name = module_file.stem
module_spec = spec_from_file_location(module_name, module_file)
if not isinstance(module_spec, ModuleSpec):
raise ImportError(f""ModuleSpec not created for module file '{module_file}'"")
if not isinstance(module_spec.loader, Loader):
raise ImportError(f""no loader found for module file '{module_file}'"")
module = module_from_spec(module_spec)
sys.modules[module_name] = module
module_spec.loader.exec_module(module)
return module
script_path = Path(__file__).parents[3] / 'scripts' / 'hal_alignment' / 'hal_gene_liftover.py'
import_module_from_file(script_path)
# pylint: disable=import-error,wrong-import-position
import hal_gene_liftover # type: ignore
from hal_gene_liftover import SimpleRegion
# pylint: enable=import-error,wrong-import-position
class TestHalGeneLiftover:
""""""Tests script hal_gene_liftover.py""""""
ref_file_dir = None # type: Path
@pytest.fixture(scope='class', autouse=True)
def setup(self) -> None:
""""""Loads necessary fixtures and values as class attributes.""""""
# pylint: disable-next=no-member
type(self).ref_file_dir = pytest.files_dir / 'hal_alignment' # type: ignore
@pytest.mark.parametrize(
""region, exp_output, expectation"",
[
('chr1:16-18:1', SimpleRegion('chr1', 15, 18, '+'), does_not_raise()),
('chrX:23-25:-1', SimpleRegion('chrX', 22, 25, '-'), does_not_raise()),
('chr1:0-2:1', None, raises(ValueError,
match=r""region start must be greater than or equal to 1: 0"")),
('chr1:2-1:1', None, raises(ValueError,
match=r""region 'chr1:2-1:1' has inverted/empty interval"")),
('chr1:1-1:+', None, raises(ValueError,
match=r""region 'chr1:1-1:\+' has invalid strand: '\+'"")),
('dummy', None, raises(ValueError, match=r""region 'dummy' could not be parsed""))
]
)
def test_parse_region(self, region: str, exp_output: SimpleRegion,
expectation: ContextManager) -> None:
""""""Tests :func:`hal_gene_liftover.parse_region()` function.
Args:
region: Region string.
exp_output: Expected return value of the function.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.nullcontext` if no exception is expected.
""""""
with expectation:
obs_output = hal_gene_liftover.parse_region(region)
assert obs_output == exp_output
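# Region strings follow '<chrom>:<start>-<end>:<strand>' with 1-based inclusive
# coordinates and strand 1 or -1; e.g. 'chr1:16-18:1' is parsed into the 0-based
# half-open SimpleRegion('chr1', 15, 18, '+') used internally (hedged reading of the
# cases above).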
@pytest.mark.parametrize(
""regions, chr_sizes, bed_file, flank_length, expectation"",
[
([SimpleRegion('chr1', 15, 18, '+')], {'chr1': 33}, 'a2b.one2one.plus.flank0.src.bed', 0,
does_not_raise()),
([SimpleRegion('chr1', 15, 18, '+')], {'chr1': 33}, 'a2b.one2one.plus.flank1.src.bed', 1,
does_not_raise()),
([SimpleRegion('chr1', 0, 2, '+')], {'chr1': 33}, 'a2b.chr_start.flank1.src.bed', 1,
does_not_raise()),
([SimpleRegion('chr1', 31, 33, '+')], {'chr1': 33}, 'a2b.chr_end.flank1.src.bed', 1,
does_not_raise()),
([SimpleRegion('chr1', 15, 18, '+')], {'chr1': 33}, 'a2b.negative_flank.src.bed', -1,
raises(ValueError, match=r""'flank_length' must be greater than or equal to 0: -1"")),
([SimpleRegion('chrN', 0, 3, '+')], {'chr1': 33}, 'a2b.unknown_chr.src.bed', 0,
raises(ValueError, match=r""chromosome ID not found in input file: 'chrN'"")),
([SimpleRegion('chr1', 31, 34, '+')], {'chr1': 33}, 'a2b.chr_end.oor.src.bed', 0,
raises(ValueError, match=r""region end \(34\) must not be greater than the""
r"" corresponding chromosome length \(chr1: 33\)"")),
([SimpleRegion('chr1', -4, 18, '+')], {'chr1': 33}, 'a2b.chr_start.oor.src.bed', 0,
raises(ValueError, match=r""region start must be greater than or equal to 0: -4""))
]
)
def test_make_src_region_file(self, regions: Iterable[SimpleRegion],
chr_sizes: Mapping[str, int], bed_file: str, flank_length: int,
expectation: ContextManager, tmp_path: Path) -> None:
""""""Tests :func:`hal_gene_liftover.make_src_region_file()` function.
Args:
regions: Regions to write to output file.
chr_sizes: Mapping of chromosome names to their lengths.
bed_file: Path of BED file to output.
flank_length: Length of upstream/downstream flanking regions to request.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.nullcontext` if no exception is expected.
tmp_path: Unit test temp directory (fixture).
""""""
with expectation:
out_file_path = tmp_path / bed_file
hal_gene_liftover.make_src_region_file(regions, chr_sizes, out_file_path, flank_length)
ref_file_path = self.ref_file_dir / bed_file
assert filecmp.cmp(out_file_path, ref_file_path)
","Python"
"Codon","Ensembl/ensembl-compara","src/python/tests/test_filesys.py",".py","9499","217","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Unit testing of :mod:`filesys` module.
The unit testing is divided into one test class per submodule/class found in this module, and one test method
per public function/class method. A base test class has been added to load the basic attributes required by
the unit test classes.
Typical usage example::
$ pytest test_filesys.py
""""""
from contextlib import ExitStack as does_not_raise
import filecmp
from pathlib import Path
from typing import ContextManager, Dict, Set
import pytest
from pytest import raises
from ensembl.compara.filesys import DirCmp, file_cmp
from ensembl.utils import StrPath
class BaseTestFilesys:
""""""Base class to configure all the attributes required by the test classes of this module.
Attributes:
dir_cmp (DirCmp): Directory tree comparison.
""""""
dir_cmp: DirCmp = None # type: ignore
# autouse=True makes this fixture be executed before any test_* method of this class, and scope='class' to
# execute it only once per class parametrization
@pytest.fixture(scope='class', autouse=True)
def setup(self, dir_cmp: DirCmp) -> None:
""""""Loads the required fixtures and values as class attributes.
Args:
dir_cmp: Directory tree comparison (fixture).
""""""
# Use type(self) instead of self as a workaround to @classmethod decorator (unsupported by pytest and
# required when scope is set to ""class"" <https://github.com/pytest-dev/pytest/issues/3778>)
type(self).dir_cmp = dir_cmp
@pytest.mark.parametrize(""dir_cmp"", [{'ref': 'citest/reference', 'target': 'citest/target'}], indirect=True)
class TestDirCmp(BaseTestFilesys):
""""""Tests :class:`DirCmp` class.""""""
@pytest.mark.dependency(name='test_init', scope='class')
def test_init(self) -> None:
""""""Tests that the object :class:`DirCmp` is initialised correctly.""""""
assert ""citest_reference"" == self.dir_cmp.ref_path.name, ""Unexpected reference root path""
assert ""citest_target"" == self.dir_cmp.target_path.name, ""Unexpected target root path""
# Check the files at the root
assert self.dir_cmp.common_files == set(), ""Found unexpected files at the root of both trees""
assert self.dir_cmp.ref_only == {'3/a.txt'}, ""Expected '3/a.txt' at reference tree's root""
assert self.dir_cmp.target_only == {'4/a.txt'}, ""Expected '4/a.txt' at target tree's root""
# Check each subdirectory
expected = {
1: {'common_files': {'b.txt', 'c.txt'}},
2: {'common_files': {'a.nw', 'b.nwk'}},
}
for i, value in expected.items():
key = Path(str(i))
for attr in ['common_files', 'ref_only', 'target_only', 'subdirs']:
if attr in value:
assert getattr(self.dir_cmp.subdirs[key], attr) == value[attr], \
f""Expected {attr} '{', '.join(value[attr])}' at '{i}/'""
else:
assert not getattr(self.dir_cmp.subdirs[key], attr), \
f""Found unexpected {attr} elements at '{i}/'""
@pytest.mark.dependency(depends=['test_init'], scope='class')
@pytest.mark.parametrize(
""kwargs, output, expectation"",
[
({}, {'0/a.txt'}, does_not_raise()),
({'patterns': 'a*'}, {'0/a.txt'}, does_not_raise()),
({'patterns': ['b*', 'c*']}, set(), does_not_raise()),
({'paths': '3'}, None, raises(ValueError)),
({'paths': ['1', '2']}, set(), does_not_raise()),
({'patterns': 'a*', 'paths': ['1', '2']}, set(), does_not_raise()),
],
)
def test_apply_test(self, kwargs: Dict, output: Set[str], expectation: ContextManager) -> None:
""""""Tests :meth:`DirCmp.apply_test()` method.
Args:
kwargs: Named arguments to be passed to the method.
output: Expected file paths returned by the method.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.ExitStack` if no exception is expected.
""""""
with expectation:
assert set(self.dir_cmp.apply_test(filecmp.cmp, **kwargs)) == output
@pytest.mark.dependency(depends=['test_init'], scope='class')
@pytest.mark.parametrize(
""kwargs, output, expectation"",
[
({}, {'0/a.txt', '1/b.txt', '1/c.txt', '2/a.nw', '2/b.nwk'}, does_not_raise()),
({'patterns': 'a*'}, {'0/a.txt', '2/a.nw'}, does_not_raise()),
({'patterns': ['b*', 'c*']}, {'1/b.txt', '1/c.txt', '2/b.nwk'}, does_not_raise()),
({'paths': '3'}, None, raises(ValueError)),
({'paths': ['1', '2']}, {'1/b.txt', '1/c.txt', '2/a.nw', '2/b.nwk'}, does_not_raise()),
({'patterns': 'a*', 'paths': ['1', '2']}, {'2/a.nw'}, does_not_raise()),
],
)
def test_common_list(self, kwargs: Dict, output: Set[str], expectation: ContextManager) -> None:
""""""Tests :meth:`DirCmp.common_list()` method.
Args:
kwargs: Named arguments to be passed to the method.
output: Expected file paths returned by the method.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.ExitStack` if no exception is expected.
""""""
with expectation:
assert set(self.dir_cmp.common_list(**kwargs)) == output
@pytest.mark.dependency(depends=['test_init'], scope='class')
@pytest.mark.parametrize(
""kwargs, output, expectation"",
[
({}, {'3/a.txt'}, does_not_raise()),
({'patterns': 'a*'}, {'3/a.txt'}, does_not_raise()),
({'patterns': ['b*', 'c*']}, set(), does_not_raise()),
({'paths': '3'}, None, raises(ValueError)),
({'paths': ['1', '2']}, set(), does_not_raise()),
({'patterns': 'a*', 'paths': ['1', '2']}, set(), does_not_raise()),
],
)
def test_ref_only_list(self, kwargs: Dict, output: Set[str], expectation: ContextManager) -> None:
""""""Tests :meth:`DirCmp.ref_only_list()` method.
Args:
kwargs: Named arguments to be passed to the method.
output: Expected file paths returned by the method.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.ExitStack` if no exception is expected.
""""""
with expectation:
assert set(self.dir_cmp.ref_only_list(**kwargs)) == output
@pytest.mark.dependency(depends=['test_init'], scope='class')
@pytest.mark.parametrize(
""kwargs, output, expectation"",
[
({}, {'0/b.txt', '4/a.txt'}, does_not_raise()),
({'patterns': 'a*'}, {'4/a.txt'}, does_not_raise()),
({'patterns': ['b*', 'c*']}, {'0/b.txt'}, does_not_raise()),
({'paths': '3'}, None, raises(ValueError)),
({'paths': ['1', '2']}, set(), does_not_raise()),
({'patterns': 'a*', 'paths': ['1', '2']}, set(), does_not_raise()),
],
)
def test_target_only_list(self, kwargs: Dict, output: Set[str], expectation: ContextManager) -> None:
""""""Tests :meth:`DirCmp.target_only_list()` method.
Args:
kwargs: Named arguments to be passed to the method.
output: Expected file paths returned by the method.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.ExitStack` if no exception is expected.
""""""
with expectation:
assert set(self.dir_cmp.target_only_list(**kwargs)) == output
@pytest.mark.parametrize(""dir_cmp"", [{'ref': 'citest/reference', 'target': 'citest/target'}], indirect=True)
class TestFileCmp(BaseTestFilesys):
""""""Tests :mod:`filecmp` module.""""""
@pytest.mark.parametrize(
""filepath, output"",
[
(Path('0', 'a.txt'), True),
(Path('1', 'b.txt'), False),
(Path('1', 'c.txt'), False),
(Path('2', 'a.nw'), True),
(Path('2', 'b.nwk'), False),
],
)
def test_file_cmp(self, filepath: StrPath, output: bool) -> None:
""""""Tests :meth:`filecmp.file_cmp()` method.
Args:
filepath: Relative file path to compare between reference and target directory trees.
output: Expected returned boolean value.
""""""
assert file_cmp(self.dir_cmp.ref_path / filepath, self.dir_cmp.target_path / filepath) == output, \
f""Files should be {'equivalent' if output else 'different'}""
","Python"
"Codon","Ensembl/ensembl-compara","src/python/tests/test_pick_third_site.py",".py","2000","57","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Testing of `pick_third_site.py` script.
Typical usage example::
$ pytest test_pick_third_site.py
""""""
import sys
import subprocess
from pathlib import Path
from ensembl.compara.filesys import file_cmp
class TestPickThirdSite:
""""""Tests for the `pick_third_site.py` script.
""""""
def test_pick_third_site(self, tmp_path: Path) -> None:
""""""Tests the output of `collate_busco_results.py` script.
Args:
tmp_path: Unit test temp directory (fixture).
""""""
input_file = str(Path(__file__).parents[2] / 'test_data' /
'flatfiles' / 'SpeciesTreeFromBusco' / 'pick_third_site_input.fas')
output_fas = str(tmp_path / ""pick_third_site_output.fas"")
# Run the command
cmd = [sys.executable, str(Path(__file__).parents[3] / 'pipelines' /
'SpeciesTreeFromBusco' / 'scripts' / 'pick_third_site.py'),
'-i', input_file, '-o', output_fas]
location = str(Path(__file__).parents[0])
subprocess.check_call(cmd, cwd=location)
# Compare with expected output:
expected_fas = str(Path(__file__).parents[2] / 'test_data' / ""flatfiles"" /
""SpeciesTreeFromBusco"" / ""pick_third_site_expected.fas"")
assert file_cmp(output_fas, expected_fas)
","Python"
"Codon","Ensembl/ensembl-compara","src/python/tests/test_alignments_to_partitions.py",".py","3512","87","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Testing of `alignments_to_partitions.py` script.
Typical usage example::
$ pytest test_alignments_to_partitions.py
""""""
import sys
import subprocess
from pathlib import Path
from pytest import raises
from ensembl.compara.filesys import file_cmp
class TestAlignmentsToPartitions:
""""""Tests for the `alignments_to_partitions.py` script.
""""""
def test_merge_output(self, tmp_path: Path) -> None:
""""""Tests the output of `alignments_to_partitions.py` script.
Args:
tmp_path: Unit test temp directory (fixture).
""""""
input_file = str(Path(__file__).parents[2] /
'test_data' / 'flatfiles' / 'SpeciesTreeFromBusco' / 'busco_merge_input_fofn.txt')
input_taxa = str(Path(__file__).parents[2] /
'test_data' / 'flatfiles' / 'SpeciesTreeFromBusco' / 'collate_output_taxa.tsv')
output_fasta = str(tmp_path / ""merged_fasta.tsv"")
output_parts = str(tmp_path / ""partitions.tsv"")
# Run the command
cmd = [sys.executable, str(Path(__file__).parents[3] / 'pipelines' /
'SpeciesTreeFromBusco' / 'scripts' /
'alignments_to_partitions.py'),
'-i', input_file,
'-o', output_fasta, '-p', output_parts, '-t', input_taxa]
location = str(Path(__file__).parents[0])
subprocess.check_call(cmd, cwd=location)
# Compare with expected output:
expected_fasta = str(Path(__file__).parents[2] / 'test_data' / ""flatfiles"" / ""SpeciesTreeFromBusco""
/ ""busco_merged.fas"")
assert file_cmp(output_fasta, expected_fasta)
def test_merge_for_empty_input(self, tmp_path: Path) -> None:
""""""Tests the `alignments_to_partitions.py` script when input is empty.
Args:
tmp_path: Unit test temp directory (fixture).
""""""
input_file = str(Path(__file__).parents[2] /
'test_data' / 'flatfiles' / 'SpeciesTreeFromBusco' / 'empty_file.txt')
input_taxa = str(Path(__file__).parents[2] /
'test_data' / 'flatfiles' / 'SpeciesTreeFromBusco' / 'collate_output_taxa.tsv')
output_fasta = str(tmp_path / ""merged_fasta.tsv"")
output_parts = str(tmp_path / ""partitions.tsv"")
# Run the command
cmd = [sys.executable, str(Path(__file__).parents[3] / 'pipelines' /
'SpeciesTreeFromBusco' / 'scripts' /
'alignments_to_partitions.py'),
'-i', input_file,
'-o', output_fasta, '-p', output_parts, '-t', input_taxa]
location = str(Path(__file__).parents[0])
with raises(subprocess.CalledProcessError):
subprocess.check_call(cmd, cwd=location)
","Python"
"Codon","Ensembl/ensembl-compara","src/python/tests/test_filter_for_longest_busco.py",".py","3944","97","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Testing of `filter_for_longest_busco.py` script.
Typical usage example::
$ pytest test_filter_for_longest_busco.py
""""""
import sys
import subprocess
from pathlib import Path
from pytest import raises
from ensembl.compara.filesys import file_cmp
class TestFilterForLongestBusco:
""""""Tests for the `filter_for_longest_busco.py` script.
""""""
def test_filter_for_longest_output(self, tmp_path: Path) -> None:
""""""Tests the output of `filter_for_longest_busco.py` script.
Args:
tmp_path: Unit test temp directory (fixture).
""""""
input_file = str(Path(__file__).parents[2] /
'test_data' / 'flatfiles' / 'SpeciesTreeFromBusco' / 'busco_filter_test.fas')
output_file = str(tmp_path / ""longest_busco.fas"")
output_genes = str(tmp_path / ""busco_genes.tsv"")
# Run the command
cmd = [sys.executable, str(Path(__file__).parents[3] / 'pipelines' /
'SpeciesTreeFromBusco' / 'scripts' / 'filter_for_longest_busco.py'),
'-i', input_file,
'-o', output_file, '-l', output_genes]
subprocess.check_call(cmd)
# Compare with expected output:
expected_genes = str(Path(__file__).parents[2] / 'test_data' / 'flatfiles' /
'SpeciesTreeFromBusco' / 'busco_filter_test_genes.tsv')
expected_output = str(Path(__file__).parents[2] / 'test_data' / ""flatfiles"" /
""SpeciesTreeFromBusco"" / ""busco_filter_test_output.fas"")
assert file_cmp(tmp_path / ""longest_busco.fas"", expected_output) # type: ignore
assert file_cmp(tmp_path / ""busco_genes.tsv"", expected_genes) # type: ignore
def test_filter_for_longest_missing_input(self) -> None:
""""""Tests `filter_for_longest_busco.py` script when input file is missing.
Args:
""""""
input_file = """"
output_file = ""dummy.fas""
output_genes = ""dummy.tsv""
# Run the command
cmd = [sys.executable, str(Path(__file__).parents[3] / 'pipelines' /
'SpeciesTreeFromBusco' / 'scripts' / 'filter_for_longest_busco.py'),
'-i', input_file,
'-o', output_file, '-l', output_genes]
with raises(subprocess.CalledProcessError):
subprocess.check_call(cmd)
def test_filter_for_longest_empty_input(self, tmp_path: Path) -> None:
""""""Tests `filter_for_longest_busco.py` script when input file is empty.
Args:
tmp_path: Unit test temp directory (fixture).
""""""
input_file = """"
input_file = str(Path(__file__).parents[2] / 'test_data' /
'flatfiles' / 'SpeciesTreeFromBusco' / 'empty_file.txt')
output_file = str(tmp_path / ""dummy.fas"")
output_genes = str(tmp_path / ""dummy.tsv"")
# Run the command
cmd = [sys.executable, str(Path(__file__).parents[3] / 'pipelines' /
'SpeciesTreeFromBusco' / 'scripts' / 'filter_for_longest_busco.py'),
'-i', input_file,
'-o', output_file, '-l', output_genes]
with raises(subprocess.CalledProcessError):
subprocess.check_call(cmd)
","Python"
"Codon","Ensembl/ensembl-compara","src/python/tests/test_collate_busco_results.py",".py","4187","97","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Testing of `collate_busco_results.py` script.
Typical usage example::
$ pytest test_collate_busco_results.py
""""""
import sys
import subprocess
from pathlib import Path
from pytest import raises
from ensembl.compara.filesys import file_cmp
class TestCollateBusco:
""""""Tests for the `collate_busco_results.py` script.
""""""
def test_collate_output(self, tmp_path: Path) -> None:
""""""Tests the output of `collate_busco_results.py` script.
Args:
tmp_path: Unit test temp directory (fixture).
""""""
input_file = str(Path(__file__).parents[2] /
'test_data' / 'flatfiles' / 'SpeciesTreeFromBusco' / 'busco_collate_fofn.txt')
input_genes = str(Path(__file__).parents[2] /
'test_data' / 'flatfiles' / 'SpeciesTreeFromBusco' / 'busco_collate_genes.tsv')
output_stats = str(tmp_path / ""stats.tsv"")
output_taxa = str(tmp_path / ""taxa.tsv"")
# Run the command
cmd = [sys.executable, str(Path(__file__).parents[3] / 'pipelines' /
'SpeciesTreeFromBusco' / 'scripts' / 'collate_busco_results.py'),
'-i', input_file, '-m', '0.6',
'-o', str(tmp_path), '-l', input_genes, '-s', output_stats, '-t', output_taxa]
location = str(Path(__file__).parents[0])
subprocess.check_call(cmd, cwd=location)
# Compare with expected output:
expected_stats = str(Path(__file__).parents[2] / 'test_data' / ""flatfiles"" / ""SpeciesTreeFromBusco""
/ ""collate_output_stats.tsv"")
expected_taxa = str(Path(__file__).parents[2] / 'test_data' / ""flatfiles"" / ""SpeciesTreeFromBusco""
/ ""collate_output_taxa.tsv"")
expected_gene1 = str(Path(__file__).parents[2] / 'test_data' / ""flatfiles"" / ""SpeciesTreeFromBusco""
/ ""collate_gene_prot_gene1.fas"")
expected_gene3 = str(Path(__file__).parents[2] / 'test_data' / ""flatfiles"" / ""SpeciesTreeFromBusco""
/ ""collate_gene_prot_gene3.fas"")
# Compare stats and taxa:
assert file_cmp(output_stats, expected_stats)
assert file_cmp(output_taxa, expected_taxa)
# Compare per-gene output:
assert file_cmp(str(tmp_path / ""gene_prot_gene1.fas""), expected_gene1)
assert file_cmp(str(tmp_path / ""gene_prot_gene3.fas""), expected_gene3)
def test_collate_for_empty_input(self, tmp_path: Path) -> None:
""""""Tests the `collate_busco_results.py` script when input is empty.
Args:
tmp_path: Unit test temp directory (fixture).
""""""
input_file = str(Path(__file__).parents[2] /
'test_data' / 'flatfiles' / 'SpeciesTreeFromBusco' / 'empty_file.txt')
input_genes = str(Path(__file__).parents[2] /
'test_data' / 'flatfiles' / 'SpeciesTreeFromBusco' / 'busco_collate_genes.tsv')
output_stats = str(tmp_path / ""stats.tsv"")
output_taxa = str(tmp_path / ""taxa.tsv"")
# Run the command
cmd = [sys.executable, str(Path(__file__).parents[3] / 'pipelines' /
'SpeciesTreeFromBusco' / 'scripts' / 'collate_busco_results.py'),
'-i', input_file, '-m', '0.5',
'-o', str(tmp_path), '-l', input_genes, '-s', output_stats, '-t', output_taxa]
with raises(subprocess.CalledProcessError):
subprocess.check_call(cmd)
","Python"
"Codon","Ensembl/ensembl-compara","src/python/tests/test_utils.py",".py","1580","55","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Unit testing of :mod:`utils` module.
The unit testing is divided into one test class per submodule/class found in this module, and one test method
per public function/class method.
Typical usage example::
$ pytest test_utils.py
""""""
from typing import Any, List
import pytest
from ensembl.compara.utils import to_list
class TestTools:
""""""Tests :mod:`tools` submodule.""""""
@pytest.mark.parametrize(
""arg, output"",
[
(None, []),
('', []),
(0, []),
('a', ['a']),
(['a', 'b'], ['a', 'b'])
],
)
def test_to_list(self, arg: Any, output: List[Any]) -> None:
""""""Tests :meth:`tools.to_list()` method.
Args:
arg: Element to be converted to a list.
output: Expected returned list.
""""""
assert to_list(arg) == output, ""List returned differs from the one expected""
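# A minimal to_list() sketch consistent with the cases above (hypothetical; the actual
# implementation lives in ensembl.compara.utils):
#
#     def to_list(arg):
#         if not arg:
#             return []
#         if isinstance(arg, list):
#             return arg
#         return [arg]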
","Python"
"Codon","Ensembl/ensembl-compara","src/python/tests/test_citest.py",".py","8909","205","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Unit testing of :mod:`citest` module.
The unit testing is divided into one test class per submodule/class found in this module, and one test method
per public function/class method.
Typical usage example::
$ pytest test_citest.py
""""""
from contextlib import ExitStack as does_not_raise
from typing import ContextManager, Dict
import pytest
from pytest import raises
from _pytest.fixtures import FixtureRequest
from ensembl.compara.citest import CITestDBItem, CITestDBContentError, CITestDBGroupingError, \
CITestDBNumRowsError, CITestFilesItem, CITestFilesContentError, CITestFilesSizeError, CITestFilesTreeError
from ensembl.compara.filesys import DirCmp
@pytest.mark.parametrize(""multi_dbs"", [[{'src': 'citest/reference'}, {'src': 'citest/target'}]],
indirect=True)
class TestCITestDBItem:
""""""Tests CITest's :class:`CITestDBItem` class.
Attributes:
db_item (CITestDBItem): Set of integration tests to compare a table in two (analogous) databases.
""""""
db_item = None # type: CITestDBItem
# autouse=True makes this fixture be executed before any test_* method of this class, and scope='class' to
# execute it only once per class parametrization
@pytest.fixture(scope='class', autouse=True)
def setup(self, request: FixtureRequest, multi_dbs: Dict) -> None:
""""""Loads the required fixtures and values as class attributes.
Args:
request: Access to the requesting test context.
multi_dbs: Dictionary of unit test databases (fixture).
""""""
# Use type(self) instead of self as a workaround to @classmethod decorator (unsupported by pytest and
# required when scope is set to ""class"" <https://github.com/pytest-dev/pytest/issues/3778>)
type(self).db_item = CITestDBItem('', request.session, multi_dbs['reference'].dbc,
multi_dbs['target'].dbc, 'main_table', {})
def test_missing_test(self):
""""""Tests CITestDBItem's error handling if an unknown test is passed.""""""
self.db_item.name = 'dummy'
with raises(SyntaxError):
self.db_item.runtest()
@pytest.mark.parametrize(
""kwargs, expectation"",
[
({}, does_not_raise()),
({'group_by': 'grp'}, raises(CITestDBNumRowsError)),
({'variation': 0.5, 'group_by': 'grp'}, does_not_raise()),
({'group_by': ['grp', 'value']}, raises(CITestDBGroupingError)),
({'filter_by': 'value < 30'}, does_not_raise()),
({'filter_by': ['value < 30', 'grp = ""grp2""']}, raises(CITestDBNumRowsError)),
({'variation': 0.25, 'filter_by': ['value < 30', 'grp = ""grp2""']}, does_not_raise()),
({'group_by': 'grp', 'filter_by': 'value < 24'}, does_not_raise()),
],
)
def test_num_rows_test(self, kwargs: Dict, expectation: ContextManager) -> None:
""""""Tests :meth:`CITestDBItem.test_num_rows()` method.
Args:
kwargs: Named arguments to be passed to the method.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.ExitStack` if no exception is expected.
""""""
with expectation:
self.db_item.test_num_rows(**kwargs)
@pytest.mark.parametrize(
""kwargs, expectation"",
[
({}, raises(CITestDBContentError)),
({'columns': 'value'}, does_not_raise()),
({'columns': ['value', 'comment']}, raises(CITestDBContentError)),
({'ignore_columns': 'grp'}, raises(CITestDBContentError)),
({'ignore_columns': ['id', 'grp', 'comment']}, does_not_raise()),
({'columns': 'value', 'ignore_columns': 'grp'}, raises(TypeError)),
({'filter_by': 'grp = ""grp2""'}, raises(CITestDBNumRowsError)),
({'filter_by': ['value < 23', 'comment = ""Second group""']}, does_not_raise()),
({'columns': 'grp', 'filter_by': 'grp = ""grp3""'}, raises(CITestDBNumRowsError)),
({'ignore_columns': 'id', 'filter_by': 'value != 24'}, does_not_raise()),
],
)
def test_content_test(self, kwargs: Dict, expectation: ContextManager) -> None:
""""""Tests :meth:`CITestDBItem.test_content()` method.
Args:
kwargs: Named arguments to be passed to the method.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.ExitStack` if no exception is expected.
""""""
with expectation:
self.db_item.test_content(**kwargs)
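# Note (hedged): the TypeError case above suggests that `columns` and `ignore_columns`
# are mutually exclusive ways of selecting which columns are compared.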
@pytest.mark.parametrize(""dir_cmp"", [{'ref': 'citest/reference', 'target': 'citest/target'}], indirect=True)
class TestCITestFilesItem:
""""""Tests CITest's :class:`CITestFilesItem` class.
Attributes:
files_item (CITestFilesItem): Set of integration tests to compare two (analogous) files (or
directories).
""""""
files_item = None # type: CITestFilesItem
# autouse=True makes this fixture be executed before any test_* method of this class, and scope='class' to
# execute it only once per class parametrization
@pytest.fixture(scope='class', autouse=True)
def setup(self, request: FixtureRequest, dir_cmp: DirCmp) -> None:
""""""Loads the required fixtures and values as class attributes.
Args:
request: Access to the requesting test context.
dir_cmp: Directory tree comparison (fixture).
""""""
# Use type(self) instead of self as a workaround to @classmethod decorator (unsupported by pytest and
# required when scope is set to ""class"" <https://github.com/pytest-dev/pytest/issues/3778>)
type(self).files_item = CITestFilesItem('', request.session, dir_cmp, {})
def test_missing(self):
""""""Tests CITestFilesItem's error handling if an unknown test is passed.""""""
self.files_item.name = 'dummy'
with raises(SyntaxError):
self.files_item.runtest()
@pytest.mark.parametrize(
""kwargs, expectation"",
[
({}, raises(CITestFilesSizeError)),
({'variation': 1.0}, raises(CITestFilesTreeError)),
({'patterns': 'c*'}, does_not_raise()),
({'patterns': ['a*', 'c*']}, raises(CITestFilesSizeError)),
({'paths': '1'}, raises(CITestFilesSizeError)),
({'paths': ['0', '2']}, raises(CITestFilesSizeError)),
({'patterns': 'c*', 'paths': '1'}, does_not_raise()),
({'variation': 1.0, 'patterns': 'b*', 'paths': '1'}, does_not_raise()),
],
)
def test_size_test(self, kwargs: Dict, expectation: ContextManager) -> None:
""""""Tests :meth:`CITestFilesItem.test_size()` method.
Args:
kwargs: Named arguments to be passed to the method.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.ExitStack` if no exception is expected.
""""""
with expectation:
self.files_item.test_size(**kwargs)
@pytest.mark.parametrize(
""kwargs, expectation"",
[
({}, raises(CITestFilesContentError)),
({'patterns': 'a*'}, raises(CITestFilesTreeError)),
({'patterns': ['a*', 'c*']}, raises(CITestFilesContentError)),
({'paths': '0'}, raises(CITestFilesTreeError)),
({'paths': ['0', '2']}, raises(CITestFilesContentError)),
({'patterns': 'a*', 'paths': '0'}, does_not_raise()),
],
)
def test_content_test(self, kwargs: Dict, expectation: ContextManager) -> None:
""""""Tests :meth:`CITestFilesItem.test_content()` method.
Args:
kwargs: Named arguments to be passed to the method.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.ExitStack` if no exception is expected.
""""""
with expectation:
self.files_item.test_content(**kwargs)
","Python"
"Codon","Ensembl/ensembl-compara","src/python/tests/test_repair_mlss_tags.py",".py","6541","148","# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Unit testing of `repair_mlss_tags.py` script.
Typical usage example::
$ pytest test_repair_mlss_tags.py
""""""
from contextlib import nullcontext as does_not_raise
from pathlib import Path
import subprocess
from typing import ContextManager
import pytest
from sqlalchemy import text
from ensembl.utils.database import DBConnection, UnitTestDB
@pytest.mark.parametrize(""test_dbs"", [[{'src': 'pan'}]], indirect=True)
class TestRepairMLSSTags:
""""""Tests `repair_mlss_tags.py` script.
Attributes:
dbc (DBConnection): Database connection to the unit test database.
""""""
dbc: DBConnection = None # type: ignore
# autouse=True makes this fixture be executed before any test_* method of this class, and scope='class' to
# execute it only once per class parametrization
@pytest.fixture(scope='class', autouse=True)
def setup(self, test_dbs: dict[str, UnitTestDB]) -> None:
""""""Loads the required fixtures and values as class attributes.
Args:
test_dbs: Unit test databases (fixture).
""""""
# Use type(self) instead of self as a workaround to @classmethod decorator (unsupported by pytest and
# required when scope is set to ""class"" <https://github.com/pytest-dev/pytest/issues/3778>)
type(self).dbc = test_dbs[""pan""].dbc
@pytest.mark.parametrize(
""mlss_tag, alt_queries, exp_stdout, exp_tag_value, expectation"",
[
('', [], set(['No repair option has been selected: Nothing to do']), {}, does_not_raise()),
('max_align', [], set(['']),
{1: 161, 2: 163, 3: 139, 4: 52068, 5: 2452, 6: 37683, 7: 13002, 8: 825240, 9: 996143, 10: 9708},
does_not_raise()),
('msa_mlss_id', [], set(['']), {5: 4, 7: 6, 9: 8, 50001: 4, 50002: 6, 50003: 8},
does_not_raise()),
(
'max_align',
[
""UPDATE method_link_species_set_tag SET value = 1 ""
""WHERE method_link_species_set_id = 2 AND tag = 'max_align'"",
""DELETE FROM method_link_species_set_tag ""
""WHERE method_link_species_set_id = 6 AND tag = 'max_align'"",
""INSERT INTO method_link_species_set_tag VALUES (404, 'max_align', 1)""
],
set([
""Repaired MLSS tag 'max_align' for MLSS id '2'"",
""Added missing MLSS tag 'max_align' for MLSS id '6'"",
""Deleted unexpected MLSS tag 'max_align' for MLSS id '404'""
]),
{1: 161, 2: 163, 3: 139, 4: 52068, 5: 2452, 6: 37683, 7: 13002, 8: 825240, 9: 996143,
10: 9708},
does_not_raise()
),
(
'msa_mlss_id',
[
""UPDATE method_link_species_set_tag SET value = 1 ""
""WHERE method_link_species_set_id = 5 AND tag = 'msa_mlss_id'"",
""DELETE FROM method_link_species_set_tag ""
""WHERE method_link_species_set_id = 50001 AND tag = 'msa_mlss_id'"",
""INSERT INTO method_link_species_set_tag VALUES (404, 'msa_mlss_id', 1)""
],
set([
""Repaired MLSS tag 'msa_mlss_id' for MLSS id '5'"",
""Added missing MLSS tag 'msa_mlss_id' for MLSS id '50001'"",
""Deleted unexpected MLSS tag 'msa_mlss_id' for MLSS id '404'""
]),
{5: 4, 7: 6, 9: 8, 50001: 4, 50002: 6, 50003: 8},
does_not_raise()
),
]
)
def test_repair_mlss_tag(
self,
mlss_tag: str,
alt_queries: list[str],
exp_stdout: set[str],
exp_tag_value: dict[int, int],
expectation: ContextManager
) -> None:
""""""Tests `repair_mlss_tags.py` script, including its output.
Args:
mlss_tag: MLSS tag as found in the ``method_link_species_set_tag`` table.
alt_queries: MySQL queries to alter the content of the database before running the test.
exp_stdout: Expected messages printed in STDOUT.
exp_tag_value: Expected MLSS id - value pairs for the given `mlss_tag` after the script is run.
expectation: Context manager for the expected exception, i.e. the test will only pass if that
exception is raised. Use :class:`~contextlib.nullcontext` if no exception is expected.
""""""
# Alter the MLSS tags table so there is something to repair
with self.dbc.begin() as connection:
connection.execute(text(""SET FOREIGN_KEY_CHECKS = 0""))
for sql in alt_queries:
connection.execute(text(sql))
connection.execute(text(""SET FOREIGN_KEY_CHECKS = 1""))
# Run the repair_mlss_tags.py command
cmd = [str(Path(__file__).parents[3] / 'scripts' / 'production' / 'repair_mlss_tags.py'),
'--url', self.dbc.url]
if mlss_tag:
cmd.append(f'--{mlss_tag}')
with expectation:
# Check the information printed in STDOUT is as expected
output = subprocess.check_output(cmd)
assert set(output.decode().strip().split(""\n"")) == exp_stdout
if exp_tag_value:
# Check the database has the expected information
with self.dbc.begin() as connection:
result = connection.execute(text(
f""SELECT method_link_species_set_id AS mlss_id, value ""
f""FROM method_link_species_set_tag WHERE tag = '{mlss_tag}'""
))
curr_tag_value = {row.mlss_id: int(row.value) for row in result.fetchall()}
assert curr_tag_value == exp_tag_value
","Python"
"Codon","Ensembl/ensembl-compara","docs/rtd_upgrade.sh",".sh","2732","54","#!/bin/bash
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Bail out if anything goes wrong
set -e
# Restart from a clean state
rm -rf ""$1""
mkdir -p ""$1""
cd ""$1""
echo ISSUE
cat /etc/issue
echo PERLVERSION
perl --version
mkdir packages
cd packages
# List of extra packages we need
echo http://archive.ubuntu.com/ubuntu/pool/main/libd/libdbi-perl/libdbi-perl_1.640-1_amd64.deb \
http://archive.ubuntu.com/ubuntu/pool/universe/libd/libdbd-sqlite3-perl/libdbd-sqlite3-perl_1.58-1_amd64.deb \
http://archive.ubuntu.com/ubuntu/pool/universe/libj/libjson-xs-perl/libjson-xs-perl_3.040-1_amd64.deb \
http://archive.ubuntu.com/ubuntu/pool/main/libj/libjson-perl/libjson-perl_2.90-1_all.deb \
http://archive.ubuntu.com/ubuntu/pool/universe/libc/libcommon-sense-perl/libcommon-sense-perl_3.74-2build2_amd64.deb \
http://archive.ubuntu.com/ubuntu/pool/main/libt/libtypes-serialiser-perl/libtypes-serialiser-perl_1.0-1_all.deb \
http://archive.ubuntu.com/ubuntu/pool/universe/libx/libxml-xpath-perl/libxml-xpath-perl_1.30-1_all.deb \
http://archive.ubuntu.com/ubuntu/pool/universe/libp/libparse-recdescent-perl/libparse-recdescent-perl_1.967013+dfsg-1_all.deb \
http://archive.ubuntu.com/ubuntu/pool/main/libi/libipc-run-perl/libipc-run-perl_0.94-1_all.deb \
http://archive.ubuntu.com/ubuntu/pool/main/libi/libio-pty-perl/libio-pty-perl_1.08-1.1build1_amd64.deb \
http://archive.ubuntu.com/ubuntu/pool/universe/libg/libgraphviz-perl/libgraphviz-perl_2.20-1_all.deb \
http://archive.ubuntu.com/ubuntu/pool/universe/d/doxypy/doxypy_0.4.2-1.1_all.deb \
http://archive.ubuntu.com/ubuntu/pool/universe/libp/libproc-daemon-perl/libproc-daemon-perl_0.23-1_all.deb \
http://archive.ubuntu.com/ubuntu/pool/universe/libd/libdbd-mysql-perl/libdbd-mysql-perl_4.046-1_amd64.deb \
http://security.ubuntu.com/ubuntu/pool/main/m/mysql-5.7/libmysqlclient20_5.7.28-0ubuntu0.18.04.4_amd64.deb \
http://archive.ubuntu.com/ubuntu/pool/main/m/mysql-defaults/mysql-common_5.8+1.0.4_all.deb \
| xargs -n 1 curl -O
mkdir ../root
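# Unpack each .deb into a local root tree with dpkg -x; no root privileges
# or dependency resolution are needed for a docs build.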
for i in *.deb; do dpkg -x ""$i"" ../root/; done
","Shell"
"Codon","Ensembl/ensembl-compara","docs/conf.py",".py","12623","398","# -*- coding: utf-8 -*-
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ensembl-compara documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 15 12:59:35 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
import subprocess
import datetime
import shutil
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.graphviz',
'xhive.code_doc',
'xhive.analysis_diagram',
'xhive.misc',
'xhive.pipeline',
'xhive.sql_schema',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Add markdown support
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
authors = [u'Ensembl']
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ensembl Compara documentation'
project_lc = u'ensembl_compara_doc'
copyright_owner = u'EMBL-European Bioinformatics Institute'
copyright_dates = u'[2016-%d]' % datetime.datetime.now().year
copyright = copyright_dates + ' ' + copyright_owner
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = subprocess.check_output([""git"", ""name-rev"", ""--name-only"", ""HEAD""]).decode().strip()
# The full version, including alpha/beta/rc tags.
release = version
if not os.path.islink('xhive'):
ehive_target_dir = os.path.join(os.environ[""PWD""], os.path.pardir, ""ehive"")
shutil.rmtree(ehive_target_dir, True)
subprocess.check_call(['git', 'clone', '--branch', 'master', '--depth', '1', 'https://github.com/Ensembl/ensembl-hive.git', ehive_target_dir])
os.symlink(os.path.join(ehive_target_dir, ""docs"", ""xhive""), ""xhive"")
else:
    subprocess.check_call(['git', 'pull'], cwd=""xhive"")
ehive_root_dir = os.path.join(os.path.realpath('xhive'), os.path.pardir, os.path.pardir)
os.environ.setdefault(""ENSEMBL_ROOT_DIR"", """")
from xhive import setup_if_needed
setup_if_needed(release, False)
os.environ[""EHIVE_ROOT_DIR""] = ehive_root_dir
os.environ[""PERL5LIB""] = os.path.join(os.environ[""EHIVE_ROOT_DIR""], ""modules"") + os.path.pathsep + os.environ[""PERL5LIB""]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as ""system message"" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# ""<project> v<release> documentation"".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named ""default.css"" will overwrite the builtin ""default.css"".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, ""Created using Sphinx"" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, ""(C) Copyright ..."" is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. "".xhtml"").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = project_lc
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r'''
\usepackage{pdflscape}
\usepackage{charter}
\usepackage[defaultsans]{lato}
\usepackage{inconsolata}
\usepackage{verbatim}
''',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, project_lc+'.tex', project,
authors[0], 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For ""manual"" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project_lc, project,
authors, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project_lc, project,
authors[0], project_lc, 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the ""Top"" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = authors[0]
epub_publisher = copyright_owner
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
epub_basename = project_lc
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Add overriding stylesheets, searching in html_static_path
# (see https://github.com/rtfd/sphinx_rtd_theme/issues/117)
def setup(app):
app.add_stylesheet(""theme_overrides.css"")
","Python"
"Codon","Ensembl/ensembl-compara","docs/api.md",".md","1061","30","# Perl API
There is extensive documentation of our Perl modules on the [Ensembl
website](https://www.ensembl.org/info/docs/Doxygen/compara-api/namespaceBio_1_1EnsEMBL_1_1Compara.html),
together with a
[tutorial](https://www.ensembl.org/info/docs/api/compara/compara_tutorial.html).
Materials from our latest Perl API courses can be found on
[GitHub](https://github.com/Ensembl/ensembl-presentation/tree/master/API/Compara)
(course based on Ensembl 84) and the [Ensembl training
site](https://training.ensembl.org/events/2017/2017-02-09-APITaiwan) (course
based on Ensembl 87).
# REST API
There is documentation about each endpoint on our [REST
server](https://rest.ensembl.org/).
Materials from our latest REST API courses can be found on
the [Ensembl training
site](https://training.ensembl.org/events/2017/2017-11-27-REST_API_EBI_Nov)
(course based on Ensembl 90).
A Python-oriented course is now available on the [Ensembl training
site](https://training.ensembl.org/events/2018/2018-07-12-REST_API_EBI).
![e!Compara word cloud](ebang-wordcloud.png)
","Markdown"
"Codon","Ensembl/ensembl-compara","docs/production/READMEs/test_db.md",".md","2661","59","# Instructions to make a new test database for homologies
You'll need the ensembl-hive scripts to be set up in your $PATH, and the environment variables $ENSADMIN\_PSW and $ENSEMBL\_CVS\_ROOT\_DIR.
## Test database for the REST API
1. Define a url
```bash
export _TEST_DB_URL=mysql://ensadmin:${ENSADMIN_PSW}@compara5/mm14_homology_test_db_85
```
2. Create the database
```bash
db_cmd.pl -url ${_TEST_DB_URL} -sql 'DROP DATABASE IF EXISTS'
db_cmd.pl -url ${_TEST_DB_URL} -sql 'CREATE DATABASE'
db_cmd.pl -url ${_TEST_DB_URL} < ${ENSEMBL_ROOT_DIR}/ensembl-compara/sql/table.sql
```
3. Import the data
This will copy data from cc21\_ensembl\_compara\_84 to the test database, following the foreign keys defined in cc21\_compara\_nctrees\_85 (this is needed because neither the source nor the target database uses InnoDB, so they have no foreign-key constraints of their own).
We copy two gene-trees: ENSGT00390000003602 -BRCA2- and RF01299 -SNORD2-.
```bash
standaloneJob.pl Bio::EnsEMBL::Compara::RunnableDB::CopyDataWithFK -foreign_keys_db mysql://ensro@compara3/cc21_compara_nctrees_85 -db_conn mysql://ensro@compara5/cc21_ensembl_compara_84 -rfam_model_id RF01299 -protein_tree_stable_id ENSGT00390000003602 -compara_db $_TEST_DB_URL
```
RF00012 is a good example of a gene-tree with a super-tree, but it is obviously pretty big.
ENSFM00730001521062 is a good example of a Family. TODO: replace with PTHR
4. Delete unwanted stuff
```bash
for i in species_tree_node_attr species_tree_node_tag method_link_species_set_tag gene_tree_root_attr
do
echo ""TRUNCATE $i;""
done | db_cmd.pl -url $_TEST_DB_URL
db_cmd.pl -url $_TEST_DB_URL -sql 'DELETE FROM gene_tree_root_tag WHERE tag NOT LIKE ""model\_%""'
db_cmd.pl -url $_TEST_DB_URL -sql 'DELETE FROM other_member_sequence WHERE seq_type != ""cds""'
db_cmd.pl -url $_TEST_DB_URL -sql 'DELETE gene_align_member FROM gene_align JOIN gene_align_member USING (gene_align_id) WHERE seq_type != ""cds""'
db_cmd.pl -url $_TEST_DB_URL -sql 'DELETE FROM gene_align WHERE seq_type != ""cds""'
```
5. Dump
```bash
db_cmd.pl -url $_TEST_DB_URL -executable $ENSEMBL_ROOT_DIR/ensembl/misc-scripts/db/dump_mysql.pl -- --database mm14_homology_test_db_85 --verbose --testcompatible --directory dump_directory
```
Then edit the sql file and add `CHARSET=latin1` to the meta and external\_db tables.
6. Manual edits
* To match the genes found in the human core database, I renamed a gene stable ID in the compara database
* To make some aligned strings different, I manually edited some cigar lines
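The edits above were applied by hand; as a rough sketch, the gene rename amounts to a statement like the following (both stable IDs are placeholders, not the ones actually used):
```bash
db_cmd.pl -url $_TEST_DB_URL -sql 'UPDATE gene_member SET stable_id = ""ENSG00000000001"" WHERE stable_id = ""ENSG00000000002""'
```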
","Markdown"
"Codon","Ensembl/ensembl-compara","scripts/hal_alignment/hal_cov_one_seq_chunk.py",".py","6659","203","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Calculate genomic coverage for a sequence chunk in a HAL file.""""""
from argparse import ArgumentParser
import json
from pathlib import Path
import re
import subprocess
from tempfile import SpooledTemporaryFile
from typing import Dict, Iterable, TextIO, Union
def hal_genomic_coverage(
hal_path: Union[Path, str],
ref_genome: str,
ref_sequence: str,
start: int,
length: int,
target_genomes: Iterable[str],
hal_alignment_depth_exe: Union[Path, str] = ""halAlignmentDepth"",
) -> Dict:
""""""Uses halAlignmentDepth to get genomic coverage for the given sequence chunk.
Args:
hal_path: Input HAL file.
ref_genome: Reference genome against which coverage is calculated.
ref_sequence: Sequence in reference genome for which coverage is calculated.
start: Start of sequence chunk for which coverage is calculated.
length: Length of sequence chunk for which coverage is calculated.
target_genomes: Target genomes to be considered for genomic coverage calculation.
hal_alignment_depth_exe: Path of halAlignmentDepth executable. By default, assumed
to be available via the PATH environment variable.
Returns:
Dictionary of genomic coverage stats for the
specified reference genome sequence chunk.
""""""
cmd_args = [
hal_alignment_depth_exe,
hal_path,
ref_genome,
""--noAncestors"",
""--refSequence"",
ref_sequence,
""--targetGenomes"",
"","".join(target_genomes),
""--start"",
str(start),
""--length"",
str(length),
]
rollover_limit = 10_485_760
with SpooledTemporaryFile(max_size=rollover_limit, mode=""w+t"") as tmp_file_obj:
try:
subprocess.run(
cmd_args, stdout=tmp_file_obj, text=True, encoding=""ascii"", check=True
)
except subprocess.CalledProcessError as exc:
status_type = ""exit code"" if exc.returncode > 0 else ""signal""
raise RuntimeError(
f""halAlignmentDepth terminated with {status_type} {abs(exc.returncode)}""
f"" for sequence '{ref_sequence}' of genome 'ref_genome'""
) from exc
tmp_file_obj.flush()
tmp_file_obj.seek(0)
result_part = load_genomic_coverage_wiggle(tmp_file_obj)
return result_part[ref_sequence]
def load_genomic_coverage_wiggle(
wiggle_file_obj: Union[SpooledTemporaryFile, TextIO]
) -> Dict:
""""""Load data from wiggle file generated by halAlignmentDepth.
Args:
wiggle_file_obj: Input wiggle file object.
Returns:
Nested dictionary of coverage stats, with the key being the sequence name,
and the value being a dictionary of coverage stats for that sequence.
""""""
declaration_line_re = re.compile(
r""fixedStep chrom=(?P<chrom>\S+) start=(?P<start>[0-9]+) step=(?P<step>[0-9]+)\s*""
)
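    # Illustrative fixedStep fragment of the kind parsed below (values made up):
    #   fixedStep chrom=chr1 start=1 step=1
    #   0
    #   2
    #   1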
cov_stats = {}
curr_seq_name = None
for line in wiggle_file_obj:
try:
aligned_genome_count = int(line)
except ValueError as exc:
if match := declaration_line_re.fullmatch(line):
curr_seq_name = match[""chrom""]
if curr_seq_name in cov_stats:
raise ValueError(
f""multiple occurrences of sequence '{curr_seq_name}' found""
) from exc
cov_stats[curr_seq_name] = {
""num_aligned_positions"": 0,
""num_positions"": 0,
""start"": int(match[""start""]),
""step"": int(match[""step""]),
}
elif line.startswith(""variableStep""):
raise ValueError(""variableStep blocks not supported"") from exc
else:
raise ValueError(f""failed to parse wiggle line: {line}"") from exc
else:
if aligned_genome_count > 0:
cov_stats[curr_seq_name][""num_aligned_positions""] += 1
cov_stats[curr_seq_name][""num_positions""] += 1
return cov_stats
def main() -> None:
""""""Main function of script.""""""
parser = ArgumentParser(description=__doc__)
parser.add_argument(""hal_path"", help=""Input HAL file."")
parser.add_argument(
""ref_genome"", help=""Name of genome for which genomic coverage is calculated.""
)
parser.add_argument(
""--ref-sequence"",
metavar=""STR"",
help=""Name of sequence for which genomic coverage is calculated."",
)
parser.add_argument(
""--start"",
metavar=""INT"",
required=True,
type=int,
help=""Start of sequence chunk (0-based)."",
)
parser.add_argument(
""--length"",
metavar=""INT"",
required=True,
type=int,
help=""Length of sequence chunk."",
)
parser.add_argument(
""--target-genomes"",
metavar=""STR"",
required=True,
help=""Comma-separated list of target genomes."",
)
    parser.add_argument(
        ""--hal_alignment_depth_exe"",
        metavar=""STR"",
        default=""halAlignmentDepth"",
        help=""Path of halAlignmentDepth executable. By default, assumed""
        "" to be available via the PATH environment variable."",
    )
args = parser.parse_args()
hal_cov_result = hal_genomic_coverage(
args.hal_path,
args.ref_genome,
args.ref_sequence,
args.start,
args.length,
args.target_genomes.split("",""),
hal_alignment_depth_exe=args.hal_alignment_depth_exe,
)
obs_num_positions = hal_cov_result[""num_positions""]
if obs_num_positions != args.length:
raise ValueError(
f""sequence-length mismatch: {obs_num_positions} vs {args.length}""
)
output = {
""num_positions"": obs_num_positions,
""num_aligned_positions"": hal_cov_result[""num_aligned_positions""],
}
print(json.dumps(output))
if __name__ == ""__main__"":
main()
","Python"
"Codon","Ensembl/ensembl-compara","scripts/hal_alignment/process_cactus_maf.py",".py","8227","213","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Process Cactus MAF file according to specified parameters.""""""
import argparse
import json
import os
import shutil
from tempfile import TemporaryDirectory
from typing import Iterator, TextIO
from Bio.Align import MultipleSeqAlignment
from Bio.AlignIO.MafIO import MafIterator, MafWriter
from Bio.Seq import Seq
import numpy as np
def _make_overhang_column_mask(aln_block: MultipleSeqAlignment, block_arr: np.ndarray) -> np.ndarray:
""""""Returns an overhang column mask for the input alignment block.
The overhang column mask is expected to be used to filter overhang columns
at either end of the alignment block. If an overhang is identified at the
start of the block, this function will also update the annotations of the
input alignment block so that they reflect the overhang removal.
Args:
aln_block: Input multiple sequence alignment block.
block_arr: A NumPy array of sequence data for ``aln_block``.
Due to the way this internal function is used, this array does
not necessarily have the same number of columns as ``aln_block``.
Returns:
An overhang column mask, which may be used to remove
overhang columns from the input alignment.
""""""
gap_mask = block_arr.transpose() == b""-""
gap_mask_first_col = gap_mask[0]
gap_mask_final_col = gap_mask[-1]
num_rows = len(aln_block)
num_cols = len(gap_mask)
if num_rows > 1 and num_cols > 1:
left_overhang_found = gap_mask_first_col.sum() == num_rows - 1
right_overhang_found = gap_mask_final_col.sum() == num_rows - 1
else:
left_overhang_found = right_overhang_found = False
overhang_mask = np.zeros((num_cols,), dtype=bool)
if left_overhang_found or right_overhang_found:
col_idxs = range(num_cols)
if left_overhang_found:
for col_idx in col_idxs:
if not np.array_equal(gap_mask[col_idx], gap_mask_first_col):
first_non_overhang_col_idx = col_idx
break
overhang_mask[:first_non_overhang_col_idx] = True
left_overhang_length = first_non_overhang_col_idx
left_overhang_row_idx = int(np.flatnonzero(~gap_mask_first_col)[0])
left_overhang_row = aln_block[left_overhang_row_idx]
left_overhang_row.annotations[""size""] -= left_overhang_length
left_overhang_row.annotations[""start""] += left_overhang_length
if right_overhang_found:
for col_idx in reversed(col_idxs):
if not np.array_equal(gap_mask[col_idx], gap_mask_final_col):
final_non_overhang_col_idx = col_idx
break
overhang_mask[final_non_overhang_col_idx + 1 :] = True
right_overhang_length = num_cols - (final_non_overhang_col_idx + 1)
right_overhang_row_idx = int(np.flatnonzero(~gap_mask_final_col)[0])
right_overhang_row = aln_block[right_overhang_row_idx]
right_overhang_row.annotations[""size""] -= right_overhang_length
return overhang_mask
def trimming_maf_iterator(stream: TextIO) -> Iterator[MultipleSeqAlignment]:
""""""Yields a MAF block with gap-only and overhang columns trimmed out.""""""
for aln_block in MafIterator(stream):
        gap_arrays = [np.array([b""-""]) for _ in range(len(aln_block))]
gap_column = np.vstack(gap_arrays)
block_arr = np.array(aln_block, dtype=bytes)
gap_col_mask = (block_arr == gap_column).all(axis=0)
gap_cols_found = gap_col_mask.any()
if gap_cols_found:
block_arr = block_arr[:, ~gap_col_mask]
overhang_col_mask = _make_overhang_column_mask(aln_block, block_arr)
overhang_found = overhang_col_mask.any()
if overhang_found:
block_arr = block_arr[:, ~overhang_col_mask]
if gap_cols_found or overhang_found:
for row_arr, aln_row in zip(block_arr, aln_block):
aln_row.seq = Seq(row_arr.tobytes().decode(""ascii""))
yield aln_block
if __name__ == ""__main__"":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(""input_maf"", help=""Input MAF file."")
parser.add_argument(""processed_maf"", help=""Output processed MAF file."")
parser.add_argument(
""--min-block-rows"",
metavar=""INT"",
type=int,
default=2,
help=""Minimum number of alignment rows per block."",
)
parser.add_argument(
""--min-block-cols"",
metavar=""INT"",
type=int,
default=20,
help=""Minimum number of alignment columns per block."",
)
parser.add_argument(
""--min-seq-length"",
metavar=""INT"",
type=int,
default=5,
help=""Minimum unaligned sequence length of each aligned sequence."",
)
parser.add_argument(
""--expected-block-count"",
metavar=""INT"",
type=int,
help=""Expected number of alignment blocks in input MAF file."",
)
parser.add_argument(
""--dataflow-file"",
help=""Optional dataflow JSON file."",
)
args = parser.parse_args()
stats_col_names = [
""block_count_before_processing"",
""block_count_after_processing"",
""seq_count_after_processing"",
]
stats = dict.fromkeys(stats_col_names, 0)
with TemporaryDirectory() as tmp_dir:
temp_maf = os.path.join(tmp_dir, ""temp.maf"")
with (
open(args.input_maf, encoding=""utf-8"") as in_file_obj,
open(temp_maf, ""w"", encoding=""utf-8"") as out_file_obj,
):
writer = MafWriter(out_file_obj)
writer.write_header()
# The trimming_maf_iterator is not the most elegant or speedy solution, but
# we need to remove gap-only columns before we apply any other filters.
for maf_block in trimming_maf_iterator(in_file_obj):
stats[""block_count_before_processing""] += 1
if maf_block.get_alignment_length() < args.min_block_cols:
continue
processed_block = MultipleSeqAlignment([])
for rec in maf_block:
if rec.annotations[""size""] < args.min_seq_length:
continue
processed_block.append(rec)
if len(processed_block) < args.min_block_rows:
continue
writer.write_alignment(processed_block)
stats[""seq_count_after_processing""] += len(processed_block)
stats[""block_count_after_processing""] += 1
if args.expected_block_count:
if stats[""block_count_before_processing""] != args.expected_block_count:
raise RuntimeError(
f""Number of input blocks ({stats['block_count_before_processing']}) does""
f"" not match expected block count ({args.expected_block_count})""
)
shutil.move(temp_maf, args.processed_maf)
if args.dataflow_file:
dataflow_branch = 2
dataflow_json = json.dumps(
{
""maf_file"": args.processed_maf,
""maf_block_count"": stats[""block_count_after_processing""],
""maf_seq_count"": stats[""seq_count_after_processing""],
}
)
dataflow_event = f""{dataflow_branch} {dataflow_json}""
with open(args.dataflow_file, ""w"", encoding=""utf-8"") as out_file_obj:
print(dataflow_event, file=out_file_obj)
","Python"
"Codon","Ensembl/ensembl-compara","scripts/hal_alignment/hal_gene_liftover.py",".py","7773","211","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Do a liftover between two haplotypes in a HAL file.
Examples::
# Do a liftover from GRCh38 to CHM13 of the human INS gene
# along with 5 kb upstream and downstream flanking regions.
python hal_gene_liftover.py --src-region chr11:2159779-2161221:-1 \
--flank 5000 input.hal GRCh38 CHM13 output.psl
    # Do a liftover from GRCh38 to CHM13 of the features specified in an
    # input BED file (the --src-bed-file option is currently disabled in
    # this script; see the commented-out code below).
python hal_gene_liftover.py --src-bed-file input.bed \
--flank 5000 input.hal GRCh38 CHM13 output.psl
""""""
from argparse import ArgumentParser
import os
from pathlib import Path
import re
from subprocess import PIPE, Popen, run
from tempfile import TemporaryDirectory
from typing import Dict, Iterable, Mapping, NamedTuple, Union
#import pybedtools # type: ignore
class SimpleRegion(NamedTuple):
""""""A simple region.""""""
chrom: str
start: int
end: int
strand: str
def load_chr_sizes(hal_file: Union[Path, str], genome_name: str) -> Dict[str, int]:
""""""Load chromosome sizes from an input HAL file.
Args:
hal_file: Input HAL file.
genome_name: Name of the genome to get the chromosome sizes of.
Returns:
Dictionary mapping chromosome names to their lengths.
""""""
cmd = ['halStats', '--chromSizes', genome_name, hal_file]
process = run(cmd, check=True, capture_output=True, text=True, encoding='ascii')
chr_sizes = {}
for line in process.stdout.splitlines():
chr_name, chr_size = line.rstrip().split('\t')
chr_sizes[chr_name] = int(chr_size)
return chr_sizes
# pylint: disable-next=c-extension-no-member
def make_src_region_file(regions: Iterable[SimpleRegion],
chr_sizes: Mapping[str, int], bed_file: Union[Path, str],
flank_length: int = 0) -> None:
""""""Make source region file.
Args:
regions: Regions to write to output file.
chr_sizes: Mapping of chromosome names to their lengths.
bed_file: Path of BED file to output.
flank_length: Length of upstream/downstream flanking regions to request.
Raises:
ValueError: If any region has an unknown chromosome or invalid coordinates,
or if `flank_length` is negative.
""""""
if flank_length < 0:
raise ValueError(f""'flank_length' must be greater than or equal to 0: {flank_length}"")
with open(bed_file, 'w') as f:
name = '.'
score = 0 # halLiftover requires an integer score in BED input
for region in regions:
try:
chr_size = chr_sizes[region.chrom]
except KeyError as e:
raise ValueError(f""chromosome ID not found in input file: '{region.chrom}'"") from e
if region.start < 0:
raise ValueError(f'region start must be greater than or equal to 0: {region.start}')
if region.end > chr_size:
raise ValueError(f'region end ({region.end}) must not be greater than the'
f' corresponding chromosome length ({region.chrom}: {chr_size})')
flanked_start = max(0, region.start - flank_length)
flanked_end = min(region.end + flank_length, chr_size)
fields = [region.chrom, flanked_start, flanked_end, name, score, region.strand]
print('\t'.join(str(x) for x in fields), file=f)
def parse_region(region: str) -> SimpleRegion:
""""""Parse a region string.
Args:
region: Region string.
Returns:
A SimpleRegion object.
Raises:
ValueError: If `region` is an invalid region string.
""""""
seq_region_regex = re.compile(
r'^(?P<chr>[^:]+):(?P<start>[0-9]+)-(?P<end>[0-9]+):(?P<strand>.+)$'
)
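    # e.g. 'chr11:2159779-2161221:-1' (1-based start, strand 1/-1) parses to
    # SimpleRegion(chrom='chr11', start=2159778, end=2161221, strand='-').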
match = seq_region_regex.match(region)
try:
region_chr = match['chr'] # type: ignore
match_start = int(match['start']) # type: ignore
region_end = int(match['end']) # type: ignore
match_strand = match['strand'] # type: ignore
except TypeError as e:
raise ValueError(f""region '{region}' could not be parsed"") from e
if match_start < 1:
raise ValueError(f'region start must be greater than or equal to 1: {match_start}')
region_start = match_start - 1
if match_strand == '1':
region_strand = '+'
elif match_strand == '-1':
region_strand = '-'
else:
raise ValueError(f""region '{region}' has invalid strand: '{match_strand}'"")
if region_start >= region_end:
raise ValueError(f""region '{region}' has inverted/empty interval"")
return SimpleRegion(region_chr, region_start, region_end, region_strand)
if __name__ == '__main__':
parser = ArgumentParser(description='Performs a gene liftover between two haplotypes in a HAL file.')
parser.add_argument('hal_file', help=""Input HAL file."")
parser.add_argument('src_genome', help=""Source genome name."")
parser.add_argument('dest_genome', help=""Destination genome name."")
parser.add_argument('output_file', help=""Output file."")
parser.add_argument('--src-region', required=True, help=""Region to liftover."")
#group = parser.add_mutually_exclusive_group(required=True)
#group.add_argument('--src-region', help=""Region to liftover."")
#group.add_argument('--src-bed-file', help=""BED file containing regions to liftover."")
parser.add_argument('--flank', default=0, type=int,
help=""Requested length of upstream/downstream""
"" flanking regions to include in query."")
args = parser.parse_args()
with TemporaryDirectory() as tmp_dir:
query_bed_file = os.path.join(tmp_dir, 'src_regions.bed')
src_regions = [parse_region(args.src_region)]
#if args.src_region is not None:
# src_regions = [parse_region(args.src_region)]
#else: # i.e. bed_file is not None
# src_regions = pybedtools.BedTool(args.src_bed_file)
src_chr_sizes = load_chr_sizes(args.hal_file, args.src_genome)
make_src_region_file(src_regions, src_chr_sizes, query_bed_file, flank_length=args.flank)
# halLiftover --outPSL in.hal GRCh38 in.bed CHM13 stdout | pslPosTarget stdin out.psl
cmd1 = ['halLiftover', '--outPSL', args.hal_file, args.src_genome, query_bed_file,
args.dest_genome, 'stdout']
cmd2 = ['pslPosTarget', 'stdin', args.output_file]
with Popen(cmd1, stdout=PIPE) as p1:
with Popen(cmd2, stdin=p1.stdout) as p2:
p2.wait()
if p2.returncode != 0:
status_type = 'exit code' if p2.returncode > 0 else 'signal'
raise RuntimeError(
f'pslPosTarget terminated with {status_type} {abs(p2.returncode)}')
p1.wait()
if p1.returncode != 0:
status_type = 'exit code' if p1.returncode > 0 else 'signal'
raise RuntimeError(
f'halLiftover terminated with {status_type} {abs(p1.returncode)}')
","Python"
"Codon","Ensembl/ensembl-compara","scripts/production/list_configured_genomes.py",".py","2873","67","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Update an allowed-species JSON file from an MLSS config file.
This script requires that all the relevant genomes in the
MLSS config file have been configured with genome elements.
""""""
import argparse
import json
from lxml import etree
def make_allowed_species_from_mlss_conf(mlss_conf_file: str, allowed_sp_file: str) -> None:
""""""Make allowed species from mlss_conf.xml file.
Args:
mlss_conf_file: Input mlss_conf.xml file in which collections
are exclusively configured with 'genome' elements.
allowed_sp_file: Output allowed-species JSON file.
""""""
prod_names = set()
with open(mlss_conf_file, encoding=""ascii"") as in_file_obj:
xml_tree = etree.parse(in_file_obj) # pylint: disable=c-extension-no-member
root_elem = xml_tree.getroot()
for collection_elem in root_elem.findall("".//collection""):
collection_name = collection_elem.attrib[""name""]
            for child_elem in collection_elem:
if child_elem.tag is etree.Comment: # pylint: disable=c-extension-no-member
continue
if child_elem.tag == ""genome"":
if child_elem.attrib.get(""exclude"", False):
continue
prod_names.add(child_elem.attrib[""name""])
else:
raise ValueError(
f""cannot list allowed species - child of collection '{collection_name}'""
f"" is a '{child_elem.tag}' element, but must be a 'genome' element""
)
allowed_species = sorted(prod_names)
with open(allowed_sp_file, ""w"", encoding=""ascii"") as out_file_obj:
json.dump(allowed_species, out_file_obj, indent=4)
out_file_obj.write(""\n"")
if __name__ == ""__main__"":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(""mlss_conf_file"", metavar=""PATH"", help=""Input MLSS config file."")
parser.add_argument(""allowed_species_file"", metavar=""PATH"", help=""Output allowed-species file."")
args = parser.parse_args()
make_allowed_species_from_mlss_conf(args.mlss_conf_file, args.allowed_species_file)
","Python"
"Codon","Ensembl/ensembl-compara","scripts/production/lastz_coverage_stats.py",".py","7374","236","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""This script obtains lastz coverage statistics for a release or lastz db.
To get relevant statistics from release DB use the --release flag.
Example command:
    # Lastz db:
    ./lastz_coverage_stats.py url output_file
    # Release db:
    ./lastz_coverage_stats.py url output_file --release release_version
""""""
import argparse
import csv
from collections import defaultdict
from sqlalchemy import create_engine, text
get_lastz_mlss_from_first_release = """"""
SELECT
method_link_species_set_id
FROM
method_link_species_set
WHERE
method_link_id = 16
AND
first_release = :release;
""""""
# In this script, method_link_id 16 corresponds to the LASTZ_NET method_link_type.
get_mlss_for_rerun_tags = """"""
SELECT
mlss_tag.method_link_species_set_id
FROM
method_link_species_set_tag AS mlss_tag
INNER JOIN
method_link_species_set AS mlss
USING
(method_link_species_set_id)
WHERE
mlss_tag.tag IN (:rerun_tag, :patched_tag)
AND
mlss.method_link_id = 16;
""""""
get_mlss_from_lastz_db = """"""
SELECT
method_link_species_set_id
FROM
method_link_species_set
WHERE
method_link_id = 16;
""""""
get_query_for_results = """"""
SELECT
method_link_species_set_id,
tag,
value
FROM
method_link_species_set_tag
WHERE
method_link_species_set_id IN :mlss_ids
AND
tag IN (
'reference_species',
'ref_genome_coverage',
'ref_genome_length',
'non_reference_species',
'non_ref_genome_coverage',
'non_ref_genome_length'
);
""""""
def calculate_coverage_ratios(results):
""""""
Calculate the coverage ratios from the query results.
Args:
results: The query results.
Returns:
A list of dictionaries with the calculated coverage ratios.
""""""
data = defaultdict(dict)
for mlss, tag, value in results:
data[mlss][tag] = value
calculated_results = []
for mlss, tags in data.items():
ref_genome_coverage = tags.get(""ref_genome_coverage"")
ref_genome_length = tags.get(""ref_genome_length"")
if ref_genome_coverage is not None and ref_genome_length is not None:
ref_coverage_ratio = (
int(ref_genome_coverage) / int(ref_genome_length)
) * 100
else:
ref_coverage_ratio = float(""nan"")
non_ref_genome_coverage = tags.get(""non_ref_genome_coverage"")
non_ref_genome_length = tags.get(""non_ref_genome_length"")
if non_ref_genome_coverage is not None and non_ref_genome_length is not None:
non_ref_coverage_ratio = (
int(non_ref_genome_coverage) / int(non_ref_genome_length)
) * 100
else:
non_ref_coverage_ratio = float(""nan"")
calculated_results.append(
{
""mlss"": mlss,
""reference_species"": tags.get(""reference_species""),
""ref_genome_coverage"": tags.get(""ref_genome_coverage""),
""ref_coverage_ratio"": round(ref_coverage_ratio, 4),
""non_reference_species"": tags.get(""non_reference_species""),
""non_ref_genome_coverage"": tags.get(""non_ref_genome_coverage""),
""non_ref_coverage_ratio"": round(non_ref_coverage_ratio, 4),
}
)
return calculated_results
def write_to_tsv(filename, results):
""""""
Write the calculated results to a TSV file.
Args:
filename: The name of the TSV file.
results: The calculated results to write.
""""""
with open(filename, ""w"", newline="""", encoding=""utf-8"") as tsvfile:
writer = csv.writer(tsvfile, delimiter=""\t"")
writer.writerow(
[
""mlss"",
""reference_species"",
""ref_genome_coverage"",
""ref_coverage_ratio"",
""non_reference_species"",
""non_ref_genome_coverage"",
""non_ref_coverage_ratio"",
]
)
for row in results:
writer.writerow(
[
row[""mlss""],
row[""reference_species""],
row[""ref_genome_coverage""],
row[""ref_coverage_ratio""],
row[""non_reference_species""],
row[""non_ref_genome_coverage""],
row[""non_ref_coverage_ratio""],
]
)
def process_database(url, output_file, release=None):
""""""
Process the database to retrieve coverage ratios and write the result to a TSV file.
Args:
url: The database URL.
output_file: The name of the output TSV file.
release: The release version (optional).
""""""
engine = create_engine(url)
with engine.connect() as connection:
if release is None:
initial_result = connection.execute(text(get_mlss_from_lastz_db))
mlss_ids = [row[0] for row in initial_result]
else:
param_release = {""release"": release}
get_first_release_ids = connection.execute(
text(get_lastz_mlss_from_first_release), param_release
)
mlss_ids = [row[0] for row in get_first_release_ids]
params_rerun_tags = {
""rerun_tag"": f""rerun_in_{release}"",
""patched_tag"": f""patched_in_{release}"",
}
get_rerun_mlsses = connection.execute(
text(get_mlss_for_rerun_tags), params_rerun_tags
)
mlss_ids.extend([row[0] for row in get_rerun_mlsses])
if mlss_ids:
param_ids = {""mlss_ids"": mlss_ids}
result_query = connection.execute(text(get_query_for_results), param_ids)
calculated_results = calculate_coverage_ratios(result_query)
write_to_tsv(output_file, calculated_results)
def main():
""""""
Main function to parse user input and process the database.
""""""
parser = argparse.ArgumentParser(
description=""Process database and write coverage ratios to TSV file.""
)
parser.add_argument(""url"", type=str, help=""The database URL."")
parser.add_argument(
""output_file"", type=str, help=""The name of the output TSV file.""
)
parser.add_argument(
""--release"", type=int, default=None, help=""The release version (optional).""
)
args = parser.parse_args()
process_database(args.url, args.output_file, args.release)
if __name__ == ""__main__"":
main()
","Python"
"Codon","Ensembl/ensembl-compara","scripts/production/list_must_reuse_species.py",".py","4155","111","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Generate list of must-reuse species.
Example::
# To list species that must be reused in release 111:
${ENSEMBL_ROOT_DIR}/ensembl-compara/scripts/production/list_must_reuse_species.py \
--input-file ${ENSEMBL_ROOT_DIR}/ensembl-compara/conf/metazoa/must_reuse_collections.json \
--mlss-conf-file ${ENSEMBL_ROOT_DIR}/ensembl-compara/conf/metazoa/mlss_conf.xml \
--ensembl-release 111 \
--output-file must_reuse_species.json
# To list species that must always be reused, assuming the
# current collections are updated in alternating releases:
${ENSEMBL_ROOT_DIR}/ensembl-compara/scripts/production/list_must_reuse_species.py \
--input-file ${ENSEMBL_ROOT_DIR}/ensembl-compara/conf/metazoa/must_reuse_collections.json \
--mlss-conf-file ${ENSEMBL_ROOT_DIR}/ensembl-compara/conf/metazoa/mlss_conf.xml \
--ensembl-release all \
--output-file always_reuse_species.json
""""""
import argparse
from collections import defaultdict
import itertools
import json
from lxml import etree
if __name__ == ""__main__"":
parser = argparse.ArgumentParser(description=""Generate list of must-reuse species."")
parser.add_argument(
""-i"",
""--input-file"",
metavar=""PATH"",
required=True,
help=""Input reused collection config file."",
)
parser.add_argument(
""--mlss-conf-file"",
metavar=""PATH"",
required=True,
help=""Input MLSS conf file."",
)
parser.add_argument(
""--ensembl-release"",
metavar=""VALUE"",
required=True,
help=""Ensembl release, or 'all' to list 'always-reused' species."",
)
parser.add_argument(
""-o"",
""--output-file"",
metavar=""PATH"",
required=True,
help=""Output JSON file listing must-reuse species."",
)
args = parser.parse_args()
with open(args.input_file) as in_file_obj:
reused_collection_conf = json.load(in_file_obj)
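    # The config file is assumed to map release parity to collection names,
    # e.g. {""odd"": [""collection_a""], ""even"": [""collection_b""]} (names made up).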
reused_collection_names = sorted(itertools.chain.from_iterable(reused_collection_conf.values()))
with open(args.mlss_conf_file) as in_file_obj:
xml_tree = etree.parse(in_file_obj) # pylint: disable=c-extension-no-member
xml_root = xml_tree.getroot()
reused_collection_genomes = defaultdict(set)
for collection_name in reused_collection_names:
collection = xml_root.find(f"".//collection[@name='{collection_name}']"")
for genome in collection.findall(""genome""):
if genome.attrib.get(""exclude"", False):
continue
reused_collection_genomes[collection_name].add(genome.attrib[""name""])
reused_genomes_by_parity = {}
for parity in [""odd"", ""even""]:
reused_genomes_by_parity[parity] = set.union(
*[reused_collection_genomes[collection] for collection in reused_collection_conf[parity]]
)
if args.ensembl_release == ""all"":
must_reuse_genomes = reused_genomes_by_parity[""odd""] & reused_genomes_by_parity[""even""]
else:
try:
ensembl_release = int(args.ensembl_release)
except ValueError as exc:
raise ValueError(f""invalid/unsupported Ensembl release: {args.ensembl_release}"") from exc
parity = ""even"" if ensembl_release % 2 == 0 else ""odd""
must_reuse_genomes = reused_genomes_by_parity[parity]
with open(args.output_file, ""w"") as out_file_obj:
json.dump(sorted(must_reuse_genomes), out_file_obj)
","Python"
"Codon","Ensembl/ensembl-compara","scripts/production/enable_mysql_keys.sh",".sh","995","28","#!/bin/bash
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[ $# -ne 4 ] && { echo ""Usage: $0 --host=mysql-server --port=port --user=user database_name""; exit 1; }
set -euo pipefail
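# Each ALTER TABLE ... ENABLE KEYS rebuilds a table's non-unique indexes,
# typically after a bulk load performed with the keys disabled.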
mysql ""$@"" --column-names=false -e ""SHOW FULL TABLES WHERE TABLE_TYPE = 'BASE TABLE'"" | cut -f1 | while read -r table; do
echo ""$table""
mysql ""$@"" -e ""ALTER TABLE $table ENABLE KEYS""
done
","Shell"
"Codon","Ensembl/ensembl-compara","scripts/production/link_cactus_pw_mlsses.py",".py","11813","356","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Link each pairwise Cactus MLSS to a suitable reference Cactus MLSS.""""""
import argparse
from collections import defaultdict
from itertools import combinations
import re
import sys
from typing import DefaultDict, Dict, FrozenSet, List, Sequence
import warnings
import sqlalchemy
from sqlalchemy import create_engine, text
def _calculate_pairwise_genomic_coverage(
connection: sqlalchemy.engine.Connection, msa_mlss_ids: Sequence[int], mlss_to_gdbs: Dict[int, FrozenSet]
) -> Dict:
pw_cov_query = text(
""""""\
SELECT
genome_db_id,
tag,
value
FROM
species_tree_node
JOIN
species_tree_node_tag USING(node_id)
JOIN
species_tree_root USING(root_id)
WHERE
method_link_species_set_id = :mlss_id
AND
(tag = 'total_genome_length' OR tag REGEXP '^genome_coverage_[0-9]+$')
""""""
)
pw_cov_tag_re = re.compile(""genome_coverage_(?P<gdb_id>[0-9]+)"")
pairwise_coverage: Dict = {}
for mlss_id in msa_mlss_ids:
pairwise_coverage[mlss_id] = {}
gdb_to_length = {}
genome_db_to_pairwise_coverage: DefaultDict = defaultdict(lambda: defaultdict(int))
for gdb1_id, tag, value in connection.execute(pw_cov_query, {""mlss_id"": mlss_id}):
if match := pw_cov_tag_re.fullmatch(tag):
gdb2_id = int(match[""gdb_id""])
genome_db_to_pairwise_coverage[gdb1_id][gdb2_id] = float(value)
else: # tag == 'total_genome_length'
gdb_to_length[gdb1_id] = float(value)
gdb_to_pw_cov = {k: dict(x) for k, x in genome_db_to_pairwise_coverage.items()}
for gdb1_id, gdb2_id in combinations(mlss_to_gdbs[mlss_id], 2):
try:
pw_cov_on_gdb1 = gdb_to_pw_cov[gdb1_id][gdb2_id]
pw_cov_on_gdb2 = gdb_to_pw_cov[gdb2_id][gdb1_id]
gdb1_genome_length = gdb_to_length[gdb1_id]
gdb2_genome_length = gdb_to_length[gdb2_id]
except KeyError: # any key error here would prevent calculation of pairwise coverage
continue
gdb_pair = frozenset([gdb1_id, gdb2_id])
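            # Combined coverage fraction for the pair: the two reciprocal
            # 'genome_coverage' tag values divided by the pair's summed
            # 'total_genome_length' values.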
pairwise_coverage[mlss_id][gdb_pair] = (pw_cov_on_gdb1 + pw_cov_on_gdb2) / (
gdb1_genome_length + gdb2_genome_length
)
return pairwise_coverage
def _fetch_all_current_mlsses_by_method_link_type(
connection: sqlalchemy.engine.Connection, method_link_type: str, current_release: int
) -> List[int]:
query = text(
""""""\
SELECT
method_link_species_set_id
FROM
method_link_species_set
JOIN
method_link USING(method_link_id)
WHERE
method_link.type = :method_link_type
AND
(first_release IS NOT NULL AND first_release <= :release)
AND
(last_release IS NULL OR last_release >= :release)
ORDER BY
method_link_species_set_id
""""""
)
mlss_ids = []
params = {""method_link_type"": method_link_type, ""release"": current_release}
for (mlss_id,) in connection.execute(query, params):
mlss_ids.append(mlss_id)
return mlss_ids
def _fetch_mlss_gdb_map(connection: sqlalchemy.engine.Connection, mlss_ids: Sequence[int]) -> Dict:
query = text(
""""""\
SELECT
genome_db_id
FROM
method_link_species_set
JOIN
species_set USING(species_set_id)
WHERE
method_link_species_set_id = :mlss_id
""""""
)
mlss_gdb_map = {}
for mlss_id in mlss_ids:
gdb_set = set()
for (genome_db_id,) in connection.execute(query, {""mlss_id"": mlss_id}):
gdb_set.add(genome_db_id)
mlss_gdb_map[mlss_id] = frozenset(gdb_set)
return mlss_gdb_map
def _get_original_url(connection: sqlalchemy.engine.Connection, mlss_id: int) -> str:
mlss_url_query = text(
""""""\
SELECT
url
FROM
method_link_species_set
JOIN
method_link USING(method_link_id)
WHERE
method_link_species_set_id = :mlss_id
""""""
)
return connection.execute(mlss_url_query, {""mlss_id"": mlss_id}).scalar_one()
def _link_cactus_pairwise_mlsses(
connection: sqlalchemy.engine.Connection, reference_mlsses: Dict, mlss_to_url: Dict, dry_run: bool = False
) -> None:
tag_delete_statement = text(
""""""\
DELETE FROM
method_link_species_set_tag
WHERE
method_link_species_set_id = :pw_mlss_id
AND
tag = 'alt_hal_mlss'
""""""
)
tag_insert_statement = text(
""""""\
INSERT INTO
method_link_species_set_tag (method_link_species_set_id, tag, value)
VALUES
(:pw_mlss_id, 'alt_hal_mlss', :ref_mlss_id)
""""""
)
url_update_statement = text(
""""""\
UPDATE
method_link_species_set
SET
url = :mlss_url
WHERE
method_link_species_set_id = :mlss_id;
""""""
)
num_singletons = 0
action = ""Would link"" if dry_run else ""Linking""
for pw_mlss_id, ref_mlss_info in reference_mlsses.items():
ref_mlss_id = ref_mlss_info[""ref_mlss_id""]
reason = ref_mlss_info[""reason""]
if reason == ""a single candidate"":
num_singletons += 1
else:
print(
f""{action} pairwise Cactus MLSS {pw_mlss_id} to reference MLSS {ref_mlss_id}""
f"" on the basis of {reason} ...""
)
if dry_run:
continue
connection.execute(
tag_delete_statement,
{""pw_mlss_id"": pw_mlss_id},
)
connection.execute(
tag_insert_statement,
{""pw_mlss_id"": pw_mlss_id, ""ref_mlss_id"": ref_mlss_id},
)
connection.execute(
url_update_statement,
{""mlss_id"": pw_mlss_id, ""mlss_url"": mlss_to_url[ref_mlss_id]},
)
if num_singletons > 0:
quantifier = ""all"" if num_singletons == len(reference_mlsses) else ""remaining""
print(
f""{action} {quantifier} {num_singletons} pairwise Cactus MLSSes""
f"" to their single available reference MLSS ...""
)
def _select_reference_mlsses(
pairwise_mlss_ids: Sequence[int],
msa_mlss_ids: Sequence[int],
mlss_to_gdbs: Dict,
pairwise_coverage: Dict,
on_missing_ref_mlss: str = ""raise"",
) -> Dict:
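    # Tie-breaking cascade, as implemented below: a single candidate wins
    # outright; otherwise prefer the highest pairwise coverage, then the
    # smallest MLSS (fewest genomes), then the highest MLSS ID.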
reference_mlsses = {}
for pw_mlss_id in pairwise_mlss_ids:
cand_ref_mlss_ids = [
msa_mlss_id
for msa_mlss_id in msa_mlss_ids
if mlss_to_gdbs[pw_mlss_id] <= mlss_to_gdbs[msa_mlss_id]
]
if len(cand_ref_mlss_ids) == 0:
msg = f""no candidate reference MLSS found for pairwise Cactus MLSS {pw_mlss_id}""
if on_missing_ref_mlss == ""warn"":
warnings.warn(msg)
continue
if on_missing_ref_mlss == ""raise"":
raise RuntimeError(msg)
raise RuntimeError(f""'on_missing_ref_mlss' has unsupported value : {on_missing_ref_mlss}"")
if len(cand_ref_mlss_ids) == 1:
reference_mlsses[pw_mlss_id] = {
""ref_mlss_id"": cand_ref_mlss_ids[0],
""reason"": ""a single candidate"",
}
continue
gdb_pair = mlss_to_gdbs[pw_mlss_id]
if all(
mlss_id in pairwise_coverage and gdb_pair in pairwise_coverage[mlss_id]
for mlss_id in cand_ref_mlss_ids
):
max_pw_cov = max(pairwise_coverage[mlss_id][gdb_pair] for mlss_id in cand_ref_mlss_ids)
cand_ref_mlss_ids = [
mlss_id for mlss_id in cand_ref_mlss_ids if pairwise_coverage[mlss_id][gdb_pair] == max_pw_cov
]
if len(cand_ref_mlss_ids) == 1:
reference_mlsses[pw_mlss_id] = {
""ref_mlss_id"": cand_ref_mlss_ids[0],
""reason"": ""pairwise coverage"",
}
continue
min_mlss_size = min(len(mlss_to_gdbs[mlss_id]) for mlss_id in cand_ref_mlss_ids)
cand_ref_mlss_ids = [
mlss_id for mlss_id in cand_ref_mlss_ids if len(mlss_to_gdbs[mlss_id]) == min_mlss_size
]
if len(cand_ref_mlss_ids) == 1:
reference_mlsses[pw_mlss_id] = {
""ref_mlss_id"": cand_ref_mlss_ids[0],
""reason"": ""MLSS size"",
}
else:
reference_mlsses[pw_mlss_id] = {
""ref_mlss_id"": max(cand_ref_mlss_ids),
""reason"": ""MLSS ID"",
}
return reference_mlsses
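# Illustrative sketch of the tie-break cascade above (IDs, coverage values and
# sizes assumed): for a pairwise MLSS over genome pair {1, 2} with three
# candidate reference MSAs:
#   candidates = [10, 20, 30]               # all contain genomes 1 and 2
#   coverage = {10: 0.9, 20: 0.9, 30: 0.5}  # 30 dropped: lower pairwise coverage
#   sizes = {10: 5, 20: 5}                  # still tied on MLSS size...
#   winner = max([10, 20])                  # ...so the highest MLSS ID (20) wins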
def main():
""""""Main function to link each pairwise Cactus MLSS to a suitable reference Cactus MLSS.""""""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
""--url"",
help=""URL of Compara database in which pairwise Cactus MLSSes are to be linked."",
)
parser.add_argument(
""--release"",
type=int,
required=True,
help=""Current Ensembl release."",
)
parser.add_argument(
""--on-missing-ref-mlss"",
default=""raise"",
choices=[""raise"", ""warn""],
help=""What to do if a pairwise Cactus MLSS has no suitable reference MLSS."",
)
parser.add_argument(
""--dry-run"",
action=""store_true"",
help=""Print how pairwise Cactus MLSSes would be linked, but do not update the database."",
)
args = parser.parse_args()
engine = create_engine(args.url)
with engine.connect() as conn:
cactus_pw_mlss_ids = _fetch_all_current_mlsses_by_method_link_type(
conn, ""CACTUS_HAL_PW"", args.release
)
if not cactus_pw_mlss_ids:
print(""No pairwise Cactus MLSSes found; exiting"")
sys.exit(0)
cactus_msa_mlss_ids = _fetch_all_current_mlsses_by_method_link_type(conn, ""CACTUS_HAL"", args.release)
cactus_msa_mlss_to_url = {
mlss_id: _get_original_url(conn, mlss_id) for mlss_id in cactus_msa_mlss_ids
}
mlss_to_gdb_set = _fetch_mlss_gdb_map(conn, cactus_msa_mlss_ids + cactus_pw_mlss_ids)
mlss_pw_cov_info = _calculate_pairwise_genomic_coverage(conn, cactus_msa_mlss_ids, mlss_to_gdb_set)
pw_to_ref_mlss_info = _select_reference_mlsses(
cactus_pw_mlss_ids,
cactus_msa_mlss_ids,
mlss_to_gdb_set,
mlss_pw_cov_info,
on_missing_ref_mlss=args.on_missing_ref_mlss,
)
with engine.begin() as conn:
_link_cactus_pairwise_mlsses(conn, pw_to_ref_mlss_info, cactus_msa_mlss_to_url, dry_run=args.dry_run)
if __name__ == ""__main__"":
main()
","Python"
"Codon","Ensembl/ensembl-compara","scripts/production/homology_dump_cleanup.py",".py","12817","351","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""""" Clean-up Homology dumps """"""
import argparse
import logging
import os
import shutil
import sys
import textwrap
from typing import Any, List, Optional, Tuple
from sqlalchemy import create_engine, text
from sqlalchemy.exc import SQLAlchemyError
# Type aliases for readability
SQLResult = List[Tuple[Any, ...]]
UniqueCollections = List[str]
def mysql_query(
query: str,
db_url: str,
params: Optional[Any] = None,
) -> SQLResult:
""""""
Execute a MySQL query and return the result.
This function takes a SQL query as a string and a database URL,
executes the query, and returns the result as SQLResult.
It optionally accepts parameters to bind to the query.
Parameters:
query : The SQL query to be executed.
db_url : The database URL for connecting to the MySQL database.
params : Optional parameters to bind to the SQL query.
If not provided, an empty dictionary is used.
Returns:
A list of tuples containing the rows returned by the query.
Raises:
SQLAlchemyError: If an error occurs during the execution of the query.
""""""
if not params:
params = {}
try:
engine = create_engine(db_url)
with engine.connect() as conn:
info = conn.execute(text(query), params)
return [tuple(x) for x in info]
except SQLAlchemyError:
logging.exception(""MySQL Error"")
raise
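# Illustrative usage (URL, query and key are assumptions for this example only):
#   rows = mysql_query(
#       'SELECT meta_key, meta_value FROM meta WHERE meta_key = :key',
#       'mysql://ensro@example-host:4615/example_compara_db',
#       params={'key': 'division'},
#   )
#   # rows would be a list of tuples, e.g. [('division', 'plants')]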
def get_collections(db_url: str, current_release: int) -> UniqueCollections:
""""""
Retrieves unique collection names first released on or before a specified Ensembl release.
This function connects to a MySQL database using the provided database URL, executes a query
to retrieve distinct collection names. It processes these names to remove the 'collection-'
prefix if present and returns a sorted list of unique names.
Args:
db_url: The database URL in the format 'mysql://user:password@host:port/database'.
current_release: The current Ensembl release.
Returns:
A sorted list of unique collection names.
""""""
collections_query = (
""SELECT DISTINCT ssh.name ""
""FROM method_link_species_set mlss ""
""JOIN method_link ml USING(method_link_id) ""
""JOIN species_set_header ssh USING(species_set_id) ""
""WHERE ml.type IN ('PROTEIN_TREES', 'NC_TREES') ""
""AND mlss.first_release IS NOT NULL ""
""AND mlss.first_release <= :current_release;""
)
params = {""current_release"": current_release}
sql = mysql_query(collections_query, db_url, params)
result = sorted(set(collection.removeprefix(""collection-"") for collection, in sql))
return result
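# Illustrative sketch (collection names assumed, not from any real database):
#   rows = [('collection-fungi',), ('murinae',)]
#   sorted(set(name.removeprefix('collection-') for name, in rows))
#   # -> ['fungi', 'murinae']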
def get_division_info(db_url: str) -> tuple[str, int]:
""""""
Retrieves division information from a MySQL database.
This function connects to a MySQL database using the provided database URL and executes
a query to retrieve values for the 'division' and 'schema_version' keys from the 'meta' table.
Args:
db_url: The database URL in the format 'mysql://user:password@host:port/database'.
Returns:
Tuple containing the division and release of the MySQL database.
""""""
div_rel_query = (
""SELECT meta_key, meta_value FROM meta WHERE meta_key IN ('division', 'schema_version');""
)
metadata: dict[str, Any] = dict(mysql_query(div_rel_query, db_url))
return metadata[""division""], int(metadata[""schema_version""])
def remove_directory(dir_path: str, dry_run: bool) -> None:
""""""
Removes a directory if dry_run is False.
This function attempts to remove the specified directory. If `dry_run` is True, it logs the
action that would have been taken without actually deleting the directory. If `dry_run` is
False, it deletes the directory and logs the deletion. If an error occurs during the deletion
process, it logs the error and raises the exception.
Args:
dir_path: The path to the directory to be removed.
dry_run: If True, the directory will not be removed, and the action will only be logged.
""""""
if not dry_run:
try:
shutil.rmtree(dir_path)
logging.info(""Removed directory: %s"", dir_path)
except Exception:
logging.exception(""Error removing directory: %s"", dir_path)
raise
else:
logging.info(""Dry run mode: Would have removed directory: %s"", dir_path)
def process_collection_directory(
collection_path: str, num_releases_to_keep: int, dry_run: bool
) -> bool:
""""""
Processes a collection directory and removes subdirectories.
This function scans the specified collection directory for subdirectories with numeric names.
If the numeric value of the subdirectory name is not in the set of releases to keep (as determined
by the num_releases_to_keep parameter), the subdirectory is removed, unless dry_run is True.
In dry_run mode, the function logs the actions it would take without
actually performing any deletions.
Args:
collection_path: Path to the collection directory to process.
num_releases_to_keep: Number of most recent releases to keep; numerically
named subdirectories for all older releases will be considered for removal.
dry_run: If True, performs a dry run without actually deleting directories.
Returns:
True if any directories were removed; False otherwise.
""""""
dirs_removed = False
rel_to_cleanup_path = {}
with os.scandir(collection_path) as coll_path:
for k in coll_path:
try:
release = int(k.name)
except ValueError:
continue
rel_to_cleanup_path[release] = k.path
releases = sorted(rel_to_cleanup_path.keys(), reverse=True)
releases_to_keep = releases[:num_releases_to_keep]
for release_to_keep in releases_to_keep:
del rel_to_cleanup_path[release_to_keep]
for cleanup_path in rel_to_cleanup_path.values():
if os.path.isdir(cleanup_path):
remove_directory(cleanup_path, dry_run)
dirs_removed = True
if not dirs_removed and not dry_run:
logging.info(
""No directories found for removal in %s collection."", collection_path
)
return dirs_removed
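# Illustrative sketch (release numbers assumed): given subdirectories named
# '110', '109', '108' plus a non-numeric 'README', and num_releases_to_keep=2:
#   rel_to_cleanup_path = {110: ..., 109: ..., 108: ...}  # 'README' is skipped
#   releases_to_keep = [110, 109]                         # two most recent
#   # only the release-108 directory is removed (or just logged in dry-run mode)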
def iterate_collection_dirs(
div_path: str, collections: UniqueCollections, num_releases_to_keep: int, dry_run: bool
) -> None:
""""""
Iterates over collection directories within a division path and processes each collection.
This function scans the specified division directory for subdirectories that match the names
in the provided collections. For each matching collection directory, it calls
`process_collection_directory` to remove subdirectories according to specified parameters, unless
dry_run is True. In dry_run mode, the function logs the actions it would take without
actually performing any deletions.
Args:
div_path: Path to the division directory containing collection directories.
collections: A list of collection names to look for within the division directory.
num_releases_to_keep: Number of most recent releases to keep; numerically
named subdirectories within each collection directory for all older
releases will be considered for removal.
dry_run: If True, performs a dry run without actually deleting directories.
""""""
with os.scandir(div_path) as div_dir:
for j in div_dir:
if j.is_dir() and j.name in collections:
collection_path = os.path.join(div_path, j.name)
process_collection_directory(collection_path, num_releases_to_keep, dry_run)
def cleanup_homology_dumps(
homology_dumps_dir: str,
num_releases_to_keep: int,
dry_run: bool,
collections: UniqueCollections,
division: str,
log_file: Optional[str] = None,
) -> None:
""""""
Cleans up homology dump directories based on specified criteria.
This function coordinates the cleanup process for homology dump directories.
It iterates through collection directories within the specified `div_path`,
processing each collection using `iterate_collection_dirs`. For each matching
collection directory, it calls `process_collection_directory` to remove
subdirectories representing a release prior to the releases being kept, unless dry_run is True.
In dry run mode, the function logs the actions it would take without
performing any deletions.
Args:
homology_dumps_dir : Path to the homology dumps directory.
num_releases_to_keep : Number of most recent releases to keep; numerically
named subdirectories within each collection directory for all older
releases will be considered for removal.
dry_run : If True, performs a dry run without actually deleting directories.
collections : A list of collection names to look for within each division directory.
division : Division name.
log_file : Optional path to the log file; if omitted, logs go to stdout.
""""""
logging_kwargs: dict = {
""format"": ""%(asctime)s - %(message)s"",
""level"": logging.INFO,
}
if log_file:
logging_kwargs[""filename""] = log_file
else:
logging_kwargs[""stream""] = sys.stdout
logging.basicConfig(**logging_kwargs)
div_path = os.path.join(homology_dumps_dir, division)
iterate_collection_dirs(div_path, collections, num_releases_to_keep, dry_run)
if not dry_run:
logging.info(""Cleanup process completed."")
else:
logging.info(""Dry run mode: Cleanup process completed."")
def parse_args() -> argparse.Namespace:
""""""
Returns command-line arguments for the homology dumps cleanup script.
""""""
description = textwrap.dedent(
""""""\
Homology dumps cleanup script
Example command to run this script from command line:
>>> ./homology_dump_cleanup.py
--homology_dumps_dir /hps/nobackup/flicek/ensembl/compara/sbhurji/scripts/homology_dumps
--master_db_url mysql://ensro@mysql-ens-compara-prod-5:4615/ensembl_compara_master_plants
--num_releases_to_keep 1 --dry_run
--log /hps/nobackup/flicek/ensembl/compara/sbhurji/scripts/clean.log
""""""
)
parser = argparse.ArgumentParser(
description=description, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
""--homology_dumps_dir"",
required=True,
help=""Root directory of the homology dumps."",
)
parser.add_argument(
""--master_db_url"", type=str, required=True, help=""URL of the master database.""
)
parser.add_argument(
""--num_releases_to_keep"",
type=int,
required=True,
help=(
""Keep homology dumps only for this number of releases,""
"" and delete all homology dumps from releases prior to that.""
),
)
parser.add_argument(
""--dry_run"",
action=""store_true"",
help=""Perform a dry run without deleting files."",
)
parser.add_argument(
""--log"", type=str, help=""Optional log file to record deleted files.""
)
return parser.parse_args()
def main() -> None:
""""""
This is the main function to parse arguments, retrieve database collections,
obtain division information, validate input, and initiate the
cleanup of homology dumps.
Raises:
ValueError: If `num_releases_to_keep` is not greater than zero.
""""""
args = parse_args()
division, release = get_division_info(args.master_db_url)
collections = get_collections(args.master_db_url, release)
if args.num_releases_to_keep < 1:
raise ValueError(""num_releases_to_keep must be greater than zero"")
cleanup_homology_dumps(
args.homology_dumps_dir,
args.num_releases_to_keep,
args.dry_run,
collections,
division,
args.log,
)
if __name__ == ""__main__"":
main()
","Python"
"Codon","Ensembl/ensembl-compara","scripts/production/make_a_master.sh",".sh","1973","73","#!/bin/bash
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
prod_cmd=mysql-eg-prod-1
prod_db=ensembl_compara_plants_40_93
mast_cmd=mysql-ens-compara-prod-2-ensadmin
mast_db=plants_compara_master_41_94
# ## TESTING
# $prod_cmd $prod_db
# $mast_cmd $mast_db
## Make the new master database
echo ""Creating master db""
$mast_cmd mysqladmin CREATE $mast_db
## Careful with this, but let's clean the production database...
echo ""Cleaning the production db""
${prod_cmd}-ensrw $prod_db --show-warnings -vv -e '
DELETE g, d FROM genome_db g
INNER JOIN dnafrag d
USING (genome_db_id)
WHERE last_release IS NOT NULL;
DELETE m, t FROM method_link_species_set m
INNER JOIN method_link_species_set_tag t
USING (method_link_species_set_id)
WHERE last_release IS NOT NULL;
DELETE s, h FROM species_set s
INNER JOIN species_set_header h
USING (species_set_id)
WHERE last_release IS NOT NULL;
'
## Each of these tables should be copied over...
table_list=(
dnafrag
genome_db
mapping_session
meta
method_link
method_link_species_set
method_link_species_set_attr
method_link_species_set_tag
ncbi_taxa_name
ncbi_taxa_node
species_set
species_set_header
species_set_tag
)
echo ""Copying tables""
$prod_cmd mysqldump $prod_db ""${table_list[@]}"" | $mast_cmd $mast_db
echo ""Done! $mast_cmd $mast_db""
","Shell"
"Codon","Ensembl/ensembl-compara","scripts/production/repair_mlss_tags.py",".py","6119","140","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Repairs MLSS tags that may have the wrong value, may be missing or belong to undefined MLSSs.
The Method Link Species Set (MLSS) tags supported are:
* `max_align`
* `msa_mlss_id`
Examples::
$ python repair_mlss_tags.py --max_align \
--url mysql://ensadmin:${ENSADMIN_PSW}@mysql-ens-compara-prod-6:4616/ensembl_compara_metazoa_51_104
$ python repair_mlss_tags.py --msa_mlss_id \
--url mysql://ensadmin:${ENSADMIN_PSW}@mysql-ens-compara-prod-5:4615/ensembl_compara_plants_51_104
$ python repair_mlss_tags.py --url $(cp1-w details url)ensembl_compara_104 --msa_mlss_id --max_align
""""""
from argparse import ArgumentParser
from sqlalchemy import text
from ensembl.utils.database import DBConnection
EXPECTED_VALUES_SQL = {
'max_align': """"""
SELECT
method_link_species_set_id AS mlss_id,
MAX(dnafrag_end - dnafrag_start) + 2 AS value
FROM
constrained_element
GROUP BY method_link_species_set_id
UNION SELECT
method_link_species_set_id AS mlss_id,
MAX(dnafrag_end - dnafrag_start) + 2 AS value
FROM
genomic_align
GROUP BY method_link_species_set_id
"""""",
'msa_mlss_id': """"""
SELECT
mlss1.method_link_species_set_id AS mlss_id,
mlss2.method_link_species_set_id AS value
FROM
method_link_species_set mlss1
JOIN
method_link_species_set mlss2 ON mlss1.species_set_id = mlss2.species_set_id
JOIN
method_link ml1 ON mlss1.method_link_id = ml1.method_link_id
JOIN
method_link ml2 ON mlss2.method_link_id = ml2.method_link_id
WHERE
(ml1.class = 'ConservationScore.conservation_score'
OR ml1.class = 'ConstrainedElement.constrained_element')
AND (ml2.class = 'GenomicAlignBlock.multiple_alignment'
OR ml2.class LIKE 'GenomicAlignTree.%%')
AND ml1.type NOT LIKE 'pGERP%%'
AND ml2.type NOT LIKE 'pEPO%%'
""""""
}
def repair_mlss_tag(dbc: DBConnection, mlss_tag: str) -> None:
""""""Repairs a given method_link_species_set tag in the database by recomputing expected values.
This function uses the predefined query in `EXPECTED_VALUES_SQL` for a given
``method_link_species_set_tag`` to extract the expected value for each ``method_link_species_set_id``.
It additionally inserts possible missing tags and removes rows that may contain an invalid
``method_link_species_set_id``.
Args:
dbc: Compara database connection handler.
mlss_tag: MLSS tag as found in the ``method_link_species_set_tag`` table.
""""""
with dbc.begin() as connection:
# Get the MLSS tags in method_link_species_set_tag table
mlss_tag_values = connection.execute(text(
f""SELECT method_link_species_set_id AS mlss_id, value ""
f""FROM method_link_species_set_tag WHERE tag = '{mlss_tag}'""
))
mlss_tags = {row.mlss_id: row.value for row in mlss_tag_values.fetchall()}
# Extract the expected tag value based on the source data
expected_values = connection.execute(text(EXPECTED_VALUES_SQL[mlss_tag]))
# Check that each tag has the correct value, fixing those that do not, and add any missing tags
for row in expected_values:
if row.mlss_id in mlss_tags:
# NOTE: due to internal conversions, we need to ensure both sides have the same type
if str(mlss_tags[row.mlss_id]) != str(row.value):
connection.execute(text(
f'UPDATE method_link_species_set_tag SET value = {row.value} '
f'WHERE method_link_species_set_id = {row.mlss_id} AND tag = ""{mlss_tag}""'
))
print(f""Repaired MLSS tag '{mlss_tag}' for MLSS id '{row.mlss_id}'"")
del mlss_tags[row.mlss_id]
else:
connection.execute(text(
f'INSERT INTO method_link_species_set_tag '
f'VALUES ({row.mlss_id}, ""{mlss_tag}"", {row.value})'
))
print(f""Added missing MLSS tag '{mlss_tag}' for MLSS id '{row.mlss_id}'"")
# Delete those MLSS tags that do not match any MLSS in method_link_species_set table
for mlss_id in mlss_tags.keys():
connection.execute(text(
f'DELETE FROM method_link_species_set_tag WHERE method_link_species_set_id = {mlss_id}'
))
print(f""Deleted unexpected MLSS tag '{mlss_tag}' for MLSS id '{mlss_id}'"")
if __name__ == '__main__':
parser = ArgumentParser(description='Repairs the requested MLSS tag(s).')
parser.add_argument('--url', required=True, help='URL to the Compara database')
parser.add_argument('--max_align', action='store_true', help='Fix the ""max_align"" MLSS tag')
parser.add_argument('--msa_mlss_id', action='store_true', help='Fix the ""msa_mlss_id"" MLSS tag')
args = parser.parse_args()
if args.max_align or args.msa_mlss_id:
compara_dbc = DBConnection(args.url)
if args.max_align:
repair_mlss_tag(compara_dbc, 'max_align')
if args.msa_mlss_id:
repair_mlss_tag(compara_dbc, 'msa_mlss_id')
else:
print('No repair option has been selected: Nothing to do')
","Python"
"Codon","Ensembl/ensembl-compara","scripts/taxonomy/check_ncbi_taxa_consistency.py",".py","2478","75","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Check consistency of Ensembl NCBI Taxonomy databases.""""""
import argparse
from collections import defaultdict
import subprocess
from sqlalchemy import create_engine, text
if __name__ == ""__main__"":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(""--release"", type=int, help=""Current Ensembl release."")
parser.add_argument(
""--hosts"", help=""Comma-separated list of hosts on which the NCBI Taxonomy database may be found.""
)
args = parser.parse_args()
db_name = f""ncbi_taxonomy_{args.release}""
hosts = args.hosts.split("","")
db_query = text(""""""SHOW DATABASES LIKE :db_name"""""")
ncbi_taxa_urls = []
for host in hosts:
cmd_args = [host, ""details"", ""url""]
output = subprocess.check_output(cmd_args, text=True)
host_url = output.rstrip()
engine = create_engine(host_url)
with engine.connect() as conn:
db_found = conn.execute(db_query, {""db_name"": db_name}).scalar_one_or_none()
if db_found:
ncbi_taxa_urls.append(f""{host_url}{db_name}"")
if not ncbi_taxa_urls:
raise RuntimeError(""no NCBI Taxonomy databases found"")
import_date_query = text(
""""""\
SELECT name FROM ncbi_taxa_name
WHERE name_class = 'import date'
""""""
)
dbs_by_import_date = defaultdict(list)
for ncbi_taxa_url in ncbi_taxa_urls:
engine = create_engine(ncbi_taxa_url)
with engine.connect() as conn:
import_date = conn.execute(import_date_query).scalar_one()
dbs_by_import_date[import_date].append(engine.url)
if len(dbs_by_import_date) > 1:
raise RuntimeError(
f""NCBI Taxonomy databases have inconsistent import dates: {sorted(dbs_by_import_date)}""
)
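# Illustrative sketch (dates and URLs assumed): dbs_by_import_date groups each
# database URL under its 'import date' name, e.g.
#   {'1 Jul 2023': [url_a, url_b], '15 Aug 2023': [url_c]}
# More than one key means the database copies were imported at different
# times, which triggers the RuntimeError above.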
","Python"
"Codon","Ensembl/ensembl-compara","scripts/species_tree/reroot_newick.py",".py","1273","46","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Script to root a tree given an outgroup""""""
import argparse
import os
import sys
from ete3 import Tree
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tree')
parser.add_argument('-o', '--outgroup')
opts = parser.parse_args(sys.argv[1:])
# check arguments
if opts.tree is None or not os.path.isfile(opts.tree):
sys.stderr.write(f""File {opts.tree} not found"")
sys.exit(1)
if opts.outgroup is None:
sys.stderr.write(""Outgroup must be defined (--outgroup)"")
sys.exit(1)
t = Tree(opts.tree)
t.set_outgroup(opts.outgroup)
print(t.get_tree_root().write(format=5))
","Python"
"Codon","Ensembl/ensembl-compara","scripts/species_tree/unroot_newick.py",".py","1736","60","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Script to unroot a tree""""""
import argparse
import os
import sys
from ete3 import Tree
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tree')
parser.add_argument('-bl', '--branch_lengths', action='store_true')
parser.add_argument('-v', '--verbose', action='store_true')
opts = parser.parse_args(sys.argv[1:])
if opts.tree is None or not os.path.isfile(opts.tree):
sys.stderr.write(f""File {opts.tree} not found"")
sys.exit(1)
t = Tree(opts.tree)
if opts.verbose:
orig_root = t.get_tree_root()
sys.stderr.write(""ORIGINAL TREE:\n"" + orig_root.write(format=9) + ""\n\n\n"")
# initial unroot
t.unroot()
# reroot by midpoint to force unrooting later
midpoint = t.get_midpoint_outgroup()
t.set_outgroup(midpoint)
if opts.verbose:
sys.stderr.write(""MIDPOINT ROOTING:\n"" + t.write(format=9) + ""\n\n\n"")
# final forced unrooting of tree to be absolutely sure
t.unroot()
if opts.verbose:
sys.stderr.write(""UNROOTED:\n"" + t.write(format=9) + ""\n\n\n"")
if opts.branch_lengths:
print(t.write(format=5))
else:
print(t.write(format=9))
","Python"
"Codon","Ensembl/ensembl-compara","scripts/dumps/bulk_genome_dump_from_core.py",".py","9858","323","#!/usr/bin/env python
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
Wrapper of dump_genome_from_core.pl to dump a list of FASTA files
""""""
import argparse
import subprocess
import os
import sys
import logging
import shlex
import yaml
def setup_logging():
""""""
Sets up logging configuration.
""""""
logging.basicConfig(
level=logging.INFO, format=""%(asctime)s - %(levelname)s - %(message)s""
)
def parse_arguments():
""""""
Parses command-line arguments.
Returns:
argparse.Namespace: The parsed arguments.
""""""
parser = argparse.ArgumentParser(
description=""Wrapper of dump_genome_from_core.pl to dump a list of FASTA files.""
)
parser.add_argument(""--yaml"", required=True, type=str, help=""YAML input file"")
parser.add_argument(""--output"", help=""Processed output directory"")
return parser.parse_args()
def subprocess_call(
command,
stdout_file=""/dev/null"",
stderr_file=""/dev/null"",
use_job_scheduler=False,
job_name=None,
):
""""""
Subprocess function to execute the given command line.
Args:
command (list): The command that the subprocess will execute.
stdout_file (str): Job scheduler standard output file (default: '/dev/null').
stderr_file (str): Job scheduler standard error file (default: '/dev/null').
use_job_scheduler (bool): If True, the command will be submitted to the Slurm job scheduler.
job_name (str): If using the job scheduler, this sets the job name.
Returns:
str: The subprocess output or None otherwise.
Raises:
RuntimeError if the subprocess return code is nonzero.
""""""
if use_job_scheduler:
if not job_name:
job_name = os.path.basename(__file__)
command = [
""sbatch"",
""--time=1-00"",
""--mem-per-cpu=4gb"",
""--cpus-per-task=1"",
""--export=ALL"",
f""--output={stdout_file}"",
f""--error={stderr_file}"",
f""--job-name={job_name}"",
f""--wrap={shlex.join(command)}"",
]
logging.info(""Running: %s"", "" "".join(command))
with subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
) as process:
output, stderr = process.communicate()
if process.returncode != 0:
out = f""stdout={output}, stderr={stderr}""
logging.error(
""Command %s exited %d: %s"", "" "".join(command), process.returncode, out
)
raise RuntimeError(
f""Command {' '.join(command)} exited {process.returncode}: {out}""
)
logging.info(""Successfully ran: %s"", "" "".join(command))
return output.strip()
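# Illustrative sketch (paths and command assumed): with use_job_scheduler=True,
# a command such as ['perl', 'dump_genome_from_core.pl', '--host', 'myhost'] is
# rewritten into a single sbatch submission roughly equivalent to:
#   sbatch --time=1-00 --mem-per-cpu=4gb --cpus-per-task=1 --export=ALL \
#       --output=/dev/null --error=/dev/null --job-name=my_job \
#       --wrap='perl dump_genome_from_core.pl --host myhost'
# In that case the returned string is the sbatch submission message, not the
# output of the wrapped Perl command.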
def download_file(
host,
port,
core_db,
fasta_filename,
stdout_file=""/dev/null"",
stderr_file=""/dev/null"",
genome_component="""",
mask=""soft"",
):
""""""
Download the FASTA file from the core DB using the Perl script `dump_genome_from_core.pl`.
Args:
host (str): The DB host name.
port (str): The port number.
core_db (str): The core_db name.
fasta_filename (str): The name given for the FASTA file.
stdout_file (str): Job scheduler standard output file (default: '/dev/null').
stderr_file (str): Job scheduler standard error file (default: '/dev/null').
genome_component (str): Optional subgenome component of polyploid genome.
mask (str): The mask format for the FASTA file.
Returns:
str: The output of the subprocess call to download the FASTA file.
Raises:
KeyError if environment variable ENSEMBL_ROOT_DIR is not set.
""""""
try:
ensembl_root_dir = os.environ[""ENSEMBL_ROOT_DIR""]
except KeyError:
logging.exception(""Environment variable ENSEMBL_ROOT_DIR not set"")
raise
script_dir = os.path.join(
ensembl_root_dir,
""ensembl-compara"",
""scripts"",
""dumps"",
)
script = ""dump_genome_from_core.pl""
try:
perl_call = [
""perl"",
os.path.join(script_dir, script),
""--core_db"",
core_db,
""--host"",
host,
""--port"",
str(port),
""--mask"",
mask,
""-user"",
""ensro"",
""--outfile"",
fasta_filename,
]
# Conditionally add the genome component argument
if genome_component:
perl_call += [""--genome-component"", genome_component]
logging.info(""perl_call=%s"", perl_call)
return subprocess_call(
command=perl_call,
use_job_scheduler=True,
job_name=f""{core_db}_{genome_component}"",
stderr_file=stderr_file,
stdout_file=stdout_file,
)
except Exception:
logging.exception(""An unexpected error occurred"")
raise
def query_coredb(host, core_db, query):
""""""
Get the correct meta production name.
Args:
host (str): The DB host name.
core_db (str): The core_db name.
query (str): The query to be executed.
Returns:
str: The output of the subprocess call to query the database.
""""""
mysql_call = [host, core_db, ""-N"", ""-e"", query]
return subprocess_call(command=mysql_call)
def parse_yaml(file):
""""""
YAML parser.
Args:
file (file object): The file object.
Returns:
list[dict]: A list of dictionaries, each with information
about a genome (or genome component) to be downloaded.
""""""
content = yaml.safe_load(file)
download_content = []
for data in content:
host = data[""host""]
port = data[""port""]
include_gca_number = data.get(""gca_number"", False)
for core_db in data[""core_db""]:
fasta_file_name = query_coredb(
host=host,
core_db=core_db,
query=""SELECT meta_value FROM meta WHERE meta_key='species.production_name';"",
)
if include_gca_number:
gca_number = query_coredb(
host=host,
core_db=core_db,
query=""SELECT meta_value FROM meta WHERE meta_key='assembly.accession';"",
)
gca_number = gca_number.replace(""."", ""v"").replace(""_"", """").lower()
fasta_file_name = f""{fasta_file_name}_{gca_number}""
# Query the genome components and split the result
genome_components = [
component
for component in query_coredb(
host=host,
core_db=core_db,
query=(
""SELECT DISTINCT value FROM seq_region_attrib ""
""JOIN attrib_type USING (attrib_type_id) ""
""WHERE attrib_type.code='genome_component';""
),
).split(""\n"")
if component
]
# Generate dump filenames
dump_filenames = (
[
(f""{fasta_file_name}_{component}"", component)
for component in genome_components
]
if genome_components
else [(fasta_file_name, """")]
)
# Collect download information
for filename, genome_component in dump_filenames:
logging.info(""fasta_file_name=%s"", filename)
logging.info(""genome_component=%s"", genome_component)
download_content.append(
{
""host"": host,
""port"": port,
""core_db"": core_db,
""genome_component"": genome_component,
""fasta_filename"": filename,
}
)
return download_content
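# Illustrative sketch of the expected YAML layout (host and database names
# assumed, not from any real configuration):
#   - host: mysql-ens-example-1
#     port: 4615
#     gca_number: true
#     core_db:
#       - homo_sapiens_core_110_38
# Each core_db entry yields one download dict per genome component, or a
# single dict with an empty component for non-polyploid genomes.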
def main():
""""""
Main function to parse arguments and handle the processing of a YAML file to dump a list of FASTA files.
""""""
setup_logging()
args = parse_arguments()
with open(args.yaml, mode=""r"", encoding=""utf-8"") as f:
if args.output is None:
args.output = os.path.dirname(os.path.realpath(f.name))
else:
args.output = os.path.abspath(args.output)
if not os.path.isdir(args.output):
logging.error(
""%s does not exist for output, please create it first"", args.output
)
sys.exit(1)
download_content = parse_yaml(file=f)
for content in download_content:
download_file(
host=content[""host""],
port=content[""port""],
core_db=content[""core_db""],
genome_component=content[""genome_component""],
fasta_filename=os.path.join(args.output, f""{content['fasta_filename']}.fa""),
stdout_file=os.path.join(args.output, f""{content['fasta_filename']}.out""),
stderr_file=os.path.join(args.output, f""{content['fasta_filename']}.err""),
)
if __name__ == ""__main__"":
main()
","Python"
"Codon","Ensembl/ensembl-compara","scripts/debug/Ortheus.py",".py","4503","72","#!/usr/bin/env python2
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Script to mimic Ortheus and return the output of a previous run""""""
import os
import subprocess
import sys
# Set here the expected inputs, which you can find in the Ortheus command line
species_tree = '(((((((327:0.0585842,457:0.0598399):0.0310988,(180:0.00823412,179:0.0124659):0.09194331):0.00755855,(((334:0.0638217,214:0.064933836):0.0036732,190:0.0702538):0.0105362,((((174:0.01004235,134:0.010315324):0.0109081,212:0.0204159):0.016895,213:0.0365203):0.0231325,155:0.0629363):0.016747061):0.02896091):0.00360012,108:0.1033556):0.000861311,((((((((221:0.00217461,210:0.00336539):0.00431221,150:0.00659779):0.00185473,209:0.00857453):0.00841622,60:0.0171585):0.00275526,199:0.0196199):0.0111247,(((222:0.00407792,317:0.00410208):0.00351067,(198:0.00206424,361:0.00219576):0.0052648):0.00408468,153:0.0116608):0.01847121):0.0177918,225:0.0540072):0.0276482537,206:0.07598296):0.0211581):0.0001,(((293:0.08132458,383:0.08146022):0.00503641,(((((((443:0.00217466,147:0.00243534):0.00869776,(224:0.00173498,456:0.00390502):0.00956724):0.0169365,(((342:0.00249523,337:0.00262477):0.0001,286:0.00189147):0.0031008,392:0.00583909):0.0225753):0.00548926,435:0.0321916):0.0376598,((((445:0.00277412,422:0.00333588):0.00495005,452:0.00838995):0.01316161,397:0.020255):9.06327e-06,449:0.018974):0.0433334):0.0108219,(211:0.046385437,394:0.0474265):0.028304):0.0001,407:0.0773769):0.00881201):0.00204002,(((434:0.0434658,429:0.04457849):0.01562144,(((379:0.000795536,135:0.00108446):0.000328819,372:0.000971181):9.97128e-05,285:0.00108401):0.0568382):0.00695254,(((387:0.008825,240:0.009225):0.00225289,(416:0.00238575,237:0.00274425):0.00852647):0.0378966,396:0.054702):0.0156824):0.0167101):0.0172095):0.00431128,98:0.1029919);'
species_list = ""60 98 98 108 108 108 134 134 135 135 135 147 153 174 179 180 190 190 198 211 212 222 237 285 285 285 317 327 327 334 334 361 372 372 372 379 379 379 387 394 396 407 407 407 416 429 434 434 434 435 443"".split()
pid = 85677
# This is where you have saved Ortheus' output from a previous run
ref_fasta_dir = '/path/to/worker_muffato_mammals_epo_with_ext_104.95260'
def read_file(filename):
""""""Helper method to read a whole file""""""
with open(filename, 'r') as fh:
return fh.read()
# A typical command-line ends with:
# -f <output_alignment_path> -g <output_tree_path>
output_tree_path = sys.argv[-1]
output_alignment_path = sys.argv[-3]
# Before that there should be:
# -s <semphy_path> -z <species_tree_newick> -A <species_1> <species_2> ... <species_N>
# And the values should match
assert sys.argv[-4-len(species_list):-4] == species_list
assert sys.argv[-4-len(species_list)-2] == species_tree
# A typical command-line starts with:
# python2 /path/to/Ortheus.py -l '#-j 0' -e <fasta_1> <fasta_2> .. <fasta_n>
# Check that the Fasta files are identical
for fasta_path in sys.argv[4:4+len(species_list)]:
new_file_content = read_file(fasta_path)
ref_file_content = read_file(os.path.join(ref_fasta_dir, os.path.basename(fasta_path)))
assert new_file_content == ref_file_content
# Copy some of the reference output files
subprocess.check_call(['cp', os.path.join(ref_fasta_dir, 'output.%d.mfa' % pid), output_alignment_path])
subprocess.check_call(['cp', os.path.join(ref_fasta_dir, 'output.score'), os.path.curdir])
# Extract the temp directory in which the data are expected
expected_tmpdir = os.path.dirname(sys.argv[4])
# And edit the reference tree file
with open(os.path.join(ref_fasta_dir, 'output.%d.tree' % pid), 'r') as fh:
tl = fh.readlines()
ref_tmpdir = os.path.dirname(tl[1].split()[0])
with open(output_tree_path, 'w') as fh:
# The first line is the tree and should remain the same
print >> fh, tl[0],
# The second line has paths, which have to be edited
print >> fh, tl[1].replace(ref_tmpdir, expected_tmpdir),
","Python"
"Codon","Ensembl/ensembl-compara","scripts/synteny/BuildSynteny.java",".java","12491","403","/*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the ""License"");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an ""AS IS"" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Vector;
import apollo.datamodel.FeaturePair;
import apollo.datamodel.SeqFeature;
import apollo.seq.io.GFFFile;
import apollo.util.QuickSort;
public class BuildSynteny {
public static void main (String[] args) {
Vector<FeaturePair> fset = new Vector<FeaturePair>();
if (args.length < 3 || args.length > 6) {
System.err.println(""Usage: BuildSynteny <gff file> <maxDist> <minSize> [orientFlag]"");
System.err.println(""Usage: BuildSynteny <gff file> <maxDist1> <minSize1> <maxDist2> <minSize2> [orientFlag]"");
System.exit(1);
}
int maxDist1 = Integer.parseInt(args[1]);
int minSize1 = Integer.parseInt(args[2]);
int maxDist2;
int minSize2;
int orientFlagIndex = 3;
if (args.length > 4) {
maxDist2 = Integer.parseInt(args[3]);
minSize2 = Integer.parseInt(args[4]);
orientFlagIndex += 2;
} else {
maxDist2 = maxDist1;
minSize2 = minSize1;
}
boolean orientFlag = true;
if (args.length > orientFlagIndex) {
if (args[orientFlagIndex].equals(""true"") || args[orientFlagIndex].equals(""1"")) {
orientFlag = true;
} else if (args[orientFlagIndex].equals(""false"") || args[orientFlagIndex].equals(""0"")) {
orientFlag = false;
} else {
System.err.println(""Error: arg "" + orientFlagIndex + "" not a boolean"");
System.err.println(""Usage: BuildSynteny <gff file> <maxDist> <minSize> [orientFlag]"");
System.err.println(""Usage: BuildSynteny <gff file> <maxDist1> <minSize1> <maxDist2> <minSize2> [orientFlag]"");
System.exit(1);
}
}
try {
GFFFile gff = new GFFFile(args[0],""File"");
for (int i = 0; i < gff.seqs.size(); i++) {
if (gff.seqs.elementAt(i) instanceof FeaturePair) {
fset.addElement((FeaturePair)gff.seqs.elementAt(i));
}
}
} catch (Exception e) {
System.out.println(""Exception "" + e);
}
groupLinks(fset, maxDist1, minSize1, maxDist2, minSize2, orientFlag);
System.exit(0);
}
public static void groupLinks (Vector<FeaturePair> fset, int maxDist1, int minSize1, int maxDist2, int minSize2, boolean orientFlag) {
Vector<FeaturePair> newfset = new Vector<FeaturePair>();
if ((maxDist1 == 0) || (maxDist2 == 0)) {
return;
}
// First sort the links by start coordinate on the query (main) species
long[] featStart = new long[fset.size()];
FeaturePair[] feat = new FeaturePair[fset.size()];
for (int i = 0; i < fset.size(); i++) {
FeaturePair sf = fset.get(i);
feat[i] = sf;
featStart[i] = sf.getLow();
}
QuickSort.sort(featStart,feat);
FeaturePair prev = null;
int minStart = 1000000000;
int minHStart = 1000000000;
int maxStart = -1;
int maxHStart = -1;
long forwardCount = 0;
long reverseCount = 0;
Vector<FeaturePair> featHolder = new Vector<FeaturePair>();
//=================================================================
// FIRST LOOP: group links. maxDist is twice original maxDist
// feat -> newfset
//=================================================================
for (int i= 0; i < feat.length; i++) {
// System.err.println(""Feature is "" + feat[i].getName() + "" "" + feat[i].getLow() + "" "" + feat[i].getHigh()
// + "" "" + feat[i].getHname() + "" "" + feat[i].getHlow() + "" "" + feat[i].getHhigh());
if (prev != null) {
// dist1 is the distance between this feature and the previous one on the query (main) species.
double dist1 = (1.0*Math.abs(feat[i].getLow() - prev.getLow()));
// dist2 is the distance between this feature and the previous one on the target (secondary) species.
double dist2 = (1.0*Math.abs(feat[i].getHlow() - prev.getHlow()));
// System.err.println(""Dist is "" + feat[i].getHname() + "" "" + dist1 + "" "" + dist2);
// We've reached the end of a block
if ((dist1 > maxDist1*2) || (dist2 > maxDist2*2) || !feat[i].getHname().equals(prev.getHname())) {
//if ((dist1 > maxDist*2) || (dist2 > maxDist*2)) {
double size1 = Math.abs(maxStart - minStart);
double size2 = Math.abs(maxHStart - minHStart);
// Is the block big enough to keep?
if (size1 > minSize1 && size2 > minSize2 && featHolder.size() > 1) {
SeqFeature sf1 = new SeqFeature(minStart,maxStart,prev.getFeatureType());
SeqFeature sf2 = new SeqFeature(minHStart,maxHStart,prev.getFeatureType());
sf1.setName(prev.getName());
sf2.setName(prev.getHname());
if (Math.abs(forwardCount-reverseCount) > 5) {
if (forwardCount > reverseCount) {
sf1.setStrand(1);
sf2.setStrand(1);
} else {
sf1.setStrand(-1);
sf2.setStrand(-1);
}
} else {
sf1.setStrand(prev.getHstrand());
sf2.setStrand(prev.getHstrand());
}
FeaturePair fp = new FeaturePair(sf1,sf2);
newfset.addElement(fp);
}
prev = null;
minStart = 1000000000;
minHStart = 1000000000;
maxStart = -1;
maxHStart = -1;
forwardCount = 0;
reverseCount = 0;
featHolder = new Vector<FeaturePair>();
// System.err.println(""Starting new block "" + feat[i].getName());
} else if (!feat[i].getHname().equals(prev.getHname())) {
System.err.println(""ERROR: Should have switched from "" + prev.getHname() + "" to "" + feat[i].getHname());
}
}
if (feat[i].getLow() < minStart) {
minStart = feat[i].getLow();
}
if (feat[i].getHlow() < minHStart) {
minHStart = feat[i].getHlow();
}
if (feat[i].getHigh() > maxStart) {
maxStart = feat[i].getHigh();
}
if (feat[i].getHhigh() > maxHStart) {
maxHStart = feat[i].getHhigh();
}
// System.err.println(""New region bounds "" + minStart + "" "" + maxStart + "" "" + minHStart + "" "" + maxHStart);
if (prev != null) {
if ((feat[i].getStart() - prev.getEnd())*(feat[i].getHstart() - prev.getHend()) < 0) {
reverseCount++;
} else {
forwardCount++;
}
}
// System.err.println(""minStart = "" + minStart + ""; minHStart = "" + minHStart + ""; maxStart = "" + maxStart + ""; maxHStart "" + maxHStart + "" fwdCnt = "" + forwardCount + "" rvsCnt = "" + reverseCount);
featHolder.addElement(feat[i]);
prev = feat[i];
}
double size1 = Math.abs(maxStart - minStart);
double size2 = Math.abs(maxHStart - minHStart);
if (size1 > minSize1 && size2 > minSize2 && feat.length > 0 && featHolder.size() > 1) {
SeqFeature sf1 = new SeqFeature(minStart,maxStart,feat[feat.length-1].getFeatureType());
SeqFeature sf2 = new SeqFeature(minHStart,maxHStart,feat[feat.length-1].getFeatureType());
sf1.setName(feat[feat.length-1].getName());
sf2.setName(feat[feat.length-1].getHname());
// System.err.println(""ForwardCount = "" + forwardCount + "" ReverseCount = "" + reverseCount);
if (forwardCount > 0 || reverseCount > 0) {
if (forwardCount > reverseCount) {
sf1.setStrand(1);
sf2.setStrand(1);
} else {
sf1.setStrand(-1);
sf2.setStrand(-1);
}
} else {
sf1.setStrand(feat[feat.length-1].getHstrand());
sf2.setStrand(feat[feat.length-1].getHstrand());
}
FeaturePair fp = new FeaturePair(sf1,sf2);
newfset.addElement(fp);
}
if (newfset.size() == 0) {
return;
}
//=================================================================
// SECOND LOOP: group previous groups. maxDist is 30x the original maxDist
// newfset -> tmpfset
//=================================================================
// System.err.println(""Grouping groups"");
Vector<FeaturePair> tmpfset = new Vector<FeaturePair>();
FeaturePair[] farr = newfset.toArray(new FeaturePair[newfset.size()]);
minStart = 1000000000;
minHStart = 1000000000;
maxStart = -1;
maxHStart = -1;
prev = null;
String curHname = null;
for (int i=0; i < newfset.size(); i++) {
FeaturePair fp = newfset.get(i);
// System.err.println(""Processing feature "" + fp.getHname() + "" "" +
// fp.getLow() + "" "" + fp.getHigh() + "" - "" +
// fp.getHlow() + "" "" + fp.getHhigh());
if (prev != null) {
// int internum = find_internum(fp,prev,farr);
int ori = fp.getHstrand() * prev.getHstrand();
double dist1 = Math.abs(fp.getLow() - prev.getHigh());
double dist2 = Math.abs(fp.getHlow() - prev.getHhigh());
if (fp.getHstrand() == -1) {
dist2 = Math.abs(fp.getHhigh() - prev.getHlow());
}
// System.err.println(""Distances "" + dist1 + "" "" + dist2 + "" "" + (Math.abs(dist1 - dist2)));
// System.err.println(""Pog "" + internum + "" "" + ori);
if (! curHname.equals(fp.getHname()) ||
dist1 > maxDist1*30 ||
dist2 > maxDist2*30 ||
find_internum(fp,prev,farr) > 2 || (orientFlag && ori == -1)) { // No ori check in old code
// System.err.println(""New block "" + Math.abs(dist1 - dist2) + "" "" + minStart + "" "" + prev.getHname());
SeqFeature sf1 = new SeqFeature(minStart ,maxStart ,""synten"");
SeqFeature sf2 = new SeqFeature(minHStart,maxHStart,""synten"");
sf1.setName(prev.getName());
sf2.setName(prev.getHname());
FeaturePair newfp = new FeaturePair(sf1,sf2);
// System.err.println(""Setting group strand "" + prev.getStrand());
newfp.setStrand(prev.getStrand());
tmpfset.addElement(newfp);
minStart = 1000000000;
minHStart = 1000000000;
maxStart = -1;
maxHStart = -1;
prev = null;
}
}
if (fp.getLow() < minStart) {
minStart = fp.getLow();
}
if (fp.getHlow() < minHStart) {
minHStart = fp.getHlow();
}
if (fp.getHigh() > maxStart) {
maxStart = fp.getHigh();
}
if (fp.getHhigh() > maxHStart) {
maxHStart = fp.getHhigh();
}
if (prev == null) {
curHname = fp.getHname();
}
prev = fp;
}
SeqFeature sf1 = new SeqFeature(minStart,maxStart,""synten"");
SeqFeature sf2 = new SeqFeature(minHStart,maxHStart,""synten"");
sf1.setName(prev.getName());
sf2.setName(prev.getHname());
FeaturePair newfp = new FeaturePair(sf1,sf2);
newfp.setStrand(prev.getStrand());
tmpfset.addElement(newfp);
for (int i=0; i < tmpfset.size(); i++) {
FeaturePair fp = tmpfset.get(i);
if (Math.abs(fp.getHigh() - fp.getLow()) > minSize1) {
System.out.println(fp.getName() + ""\tcluster\tsimilarity\t"" +
fp.getLow() + ""\t"" +
fp.getHigh() + ""\t100\t"" +
fp.getStrand() + ""\t.\t"" +
fp.getHname() + ""\t"" +
fp.getHlow() + ""\t"" +
fp.getHhigh());
}
}
return;
}
public static int find_internum(FeaturePair f1, FeaturePair prev, FeaturePair[] feat) {
long start = prev.getHhigh();
long end = f1.getHlow();
if (f1.getHlow() < prev.getHhigh()) {
start = prev.getHlow();
end = f1.getHhigh();
}
int count = 0;
//System.err.println(""Feature start end "" + start + "" "" + end);
if (f1.getHlow() < prev.getHhigh()) {
start = prev.getHlow();
end = f1.getHhigh();
}
for (int i = 0; i < feat.length; i++) {
FeaturePair fp = feat[i];
if (!(feat[i].getHlow() > end || feat[i].getHhigh() < start)) {
System.out.println(fp.getName() + ""\tinternum\tsimilarity\t"" +
fp.getLow() + ""\t"" +
fp.getHigh() + ""\t100\t"" +
fp.getStrand() + ""\t.\t"" +
fp.getHname() + ""\t"" +
fp.getHlow() + ""\t"" +
fp.getHhigh());
count++;
}
if (feat[i].getHlow() > end) {
return count;
}
}
return count;
}
}
","Java"
"Codon","Ensembl/ensembl-compara","scripts/synteny/apollo/seq/io/GFFFile.java",".java","5653","176","/* Jalview - a java multiple alignment editor
* Copyright (C) 1998 Michele Clamp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* This is a trimmed version of Apollo's module that only supplies the
* fields needed by the BuildSynteny program.
*/
package apollo.seq.io;
import java.lang.String;
import java.io.IOException;
import java.util.Vector;
import java.util.StringTokenizer;
import apollo.io.FileParse;
import apollo.datamodel.SeqFeature;
import apollo.datamodel.FeaturePair;
public class GFFFile extends FileParse {
int noSeqs;
int maxLength = 0;
public Vector<SeqFeature> seqs;
long start;
long end;
public GFFFile(String inFile, String type) throws IOException {
//Read in the file first
super(inFile,type);
seqs = new Vector<SeqFeature>();
//Read lines from file
System.err.println(""Reading Ensembl-style GFF file "" + inFile + ""...."");
start = System.currentTimeMillis();
readLines();
end = System.currentTimeMillis();
// System.err.println(""done"");
//System.err.println(""Total time taken = "" + (end-start) + ""ms"");
// System.err.println(""Parsing file...."");
start = System.currentTimeMillis();
parse();
}
public void parse() {
//System.err.println(""parse"");
for (int i = 0; i < lineArray.size(); i++) {
String line = lineArray.elementAt(i);
//System.err.println(""LINE "" + line);
if (line.indexOf(""#"") == -1 ) {
// SMJS Added delimiter argument to call. GFF has 8 tab delimited fields.
// The last field is a fairly free text format field, which needs
// special handling.
StringTokenizer st = new StringTokenizer(line,""\t"");
if (st.countTokens() >= 8) {
try {
String s = st.nextToken();
String type = st.nextToken();
String prim = st.nextToken();
int qstart = Integer.parseInt(st.nextToken());
int qend = Integer.parseInt(st.nextToken());
double score = 0;
try {
score = (Double.valueOf(st.nextToken())).doubleValue();
} catch (Exception e) {
System.err.println(""Error parsing score : "" + e);
}
String strand = st.nextToken();
String frame = st.nextToken();
String id = prim;
SeqFeature se = new SeqFeature(qstart,qend,id);
if (strand.equals(""-"")) {
se.setStrand(-1);
} else {
se.setStrand(1);
}
se.setFeatureType(type);
//se.setScore(score);
se.setName(s);
se.setId(s);
/*if (!(frame.equals("".""))) {
se.setPhase(Integer.parseInt(frame));
} else {
// se.setPhase(-1);
}*/
if (st.hasMoreTokens() && prim.equals(""similarity"")) {
try {
// SMJS Get remainder of string by setting
// delimiter to nothing ("""")
String htok = st.nextToken("""");
// System.err.println(""htok = "" + htok);
// SMJS Setup a new tokenizer which doesn't require tabs
StringTokenizer sth = new StringTokenizer(htok);
String hid = sth.nextToken();
int hstart = Integer.parseInt(sth.nextToken());
int hend = Integer.parseInt(sth.nextToken());
int hitStrand = 0;
SeqFeature f2 = null;
if (hstart < hend){
hitStrand = 1;
f2 = new SeqFeature(hstart, hend, hid, hitStrand);
}else{
hitStrand = -1;
f2 = new SeqFeature(hend, hstart, hid, hitStrand);
}//end if
f2.setName(hid);
f2.setId(hid);
se.setId(hid);
FeaturePair fp = new FeaturePair(se,f2);
seqs.addElement(fp);
} catch (Exception e) {
System.err.println(""Can't add line - "" + line + "" "" + e);
}
} else if (st.hasMoreTokens() && prim.equals(""exon"")) {
// SMJS Modified for BDGP GFF file (get rest of string
String hid = st.nextToken(""\t"");
se.setName(hid);
se.setId(hid);
// Try Id instead of name
seqs.addElement(se);
} else {
if (!(prim.equals(""intron"") ||
prim.equals(""sequence"") ||
prim.equals(""coding_exon""))) {
seqs.addElement(se);
}
}
} catch (NumberFormatException nfe) {
System.err.println(""NumberFormatException "" + nfe);
System.err.println(""ERROR: parsing line "" + line);
}
}
}
}
noSeqs = seqs.size();
}
}
","Java"
"Codon","Ensembl/ensembl-compara","scripts/synteny/apollo/datamodel/FeaturePair.java",".java","5251","213","
/*
* This is a trimmed version of Apollo's module that only supplies the
* fields needed by the BuildSynteny program.
*/
package apollo.datamodel;
import java.util.*;
public class FeaturePair extends SeqFeature {
/** query feature, do we really need to store query separately, couldnt query
just be this and just the associated hit would be a separate SeqFeature?
Thats really the way to go. how hard a change would this be? */
SeqFeature query;
/** hit feature */
SeqFeature hit;
/** f1 is query feature, f2 is hit feature */
public FeaturePair(SeqFeature f1, SeqFeature f2) {
this.query = f1;
setHitFeature(f2);
}
public void setQueryFeature(SeqFeature feature) {
this.query = feature;
}
public SeqFeature getQueryFeature() {
return query;
}
public void setHitFeature(SeqFeature feature) {
this.hit = feature;
// needs access to cigar for on-demand parsing with getAlignment()
hit.setQueryFeature(this);
}
public SeqFeature getHitFeature() {
return hit;
}
/** from SeqFeature */
public boolean hasHitFeature() { return hit != null; }
public void setLow(int low) {
query.setLow(low);
}
public int getLow() {
return query.getLow();
}
public void setHigh(int high) {
query.setHigh(high);
}
public int getHigh() {
return query.getHigh();
}
public void setStart(int start) {
query.setStart(start);
}
public int getStart() {
return query.getStart();
}
public void setEnd(int end) {
query.setEnd(end);
}
public int getEnd() {
return query.getEnd();
}
public void setStrand(int strand) {
query.setStrand(strand);
}
public int getStrand() {
return query.getStrand();
}
public void setName(String name) {
query.setName(name);
}
public String getName() {
return query.getName();
}
public void setId(String id) {
query.setId(id);
}
public String getId() {
return query.getId();
}
public void setFeatureType(String type) {
query.setFeatureType(type);
}
public String getTopLevelType() {
return query.getTopLevelType();
}
// setBioType??
public String getFeatureType() {
return query.getFeatureType();
}
public String getHname() {
return hit.getName();
}
public void setHname(String name) {
hit.setName(name);
}
public int getHstart() {
return hit.getStart();
}
public void setHstart(int start) {
hit.setStart(start);
}
public int getHend() {
return hit.getEnd();
}
public void setHend(int end) {
hit.setEnd(end);
}
public void setHlow(int low) {
hit.setLow(low);
}
public int getHlow() {
return hit.getLow();
}
public void setHhigh(int high) {
hit.setHigh(high);
}
public int getHhigh() {
return hit.getHigh();
}
public void setHstrand(int strand) {
hit.setStrand(strand);
}
public int getHstrand() {
return hit.getStrand();
}
/** Gets the index into the hit string's explicit alignment for a genomic position. */
public int getHitIndex(int genomicPosition) {
int index = 0;
if (isForwardStrand()) {
index = genomicPosition - query.getLow();
} else {
index = query.getHigh() - genomicPosition;
}
return index;
}
public int insertionsBefore(int hitIndex, String alignment) {
int count = 0;
String query =
alignment.substring(0, Math.min(alignment.length(), hitIndex+1));
int index = query.indexOf('-', 0);
while (index != -1) {
count++;
query = alignment.substring(0, Math.min(alignment.length(), hitIndex+count+1));
index = query.indexOf('-', index+1);
}
return count;
}
public Range getInsertionRange(int hitIndex, String alignment) {
int start = -1;
int end = -1;
for (int hi = hitIndex; hi >= 0 && alignment.charAt(hi) == '-'; hi--) {
start = hi;
}
for(int hi = hitIndex;
hi < alignment.length() && alignment.charAt(hi) == '-'; hi++) {
end = hi;
}
// end is exclusive to make substr easier to use.
if (start != -1) {
end++;
}
return new Range(start, end);
}
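/* Illustrative example (added; not part of the original Apollo source):
for a hit alignment string ""AC--GT"" with gap characters '-',
insertionsBefore(3, ""AC--GT"") returns 2 (the gaps at indices 2 and 3),
and getInsertionRange(2, ""AC--GT"") returns Range(2, 4), the surrounding
gap run with the exclusive end noted above. */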
/*public static void main(String[] args) {
SeqFeature sf1 = new SeqFeature(100,200,""pog"",1);
SeqFeature sf2 = new SeqFeature(100,200,""pog"",-1);
sf1.setName(""query"");
sf2.setName(""hit"");
System.err.println(""Features "" + sf1);
System.err.println(""Features "" + sf2);
FeaturePair fp = new FeaturePair(sf1,sf2);
System.err.println(""Feature is "" + fp);
System.err.println(""Left/right overlaps "" + fp.getLeftOverlap(sf1) + "" "" + fp.getRightOverlap(sf1));
System.err.println(""Overlap "" + fp.isExactOverlap(sf1) + "" "" + fp.isExactOverlap(sf2));
//fp.invert();
System.err.println(""Feature is "" + fp);
System.err.println(""Overlap "" + fp.isExactOverlap(sf1) + "" "" + fp.isExactOverlap(sf2));
}*/
private boolean isEmptyOrNull(String s) {
return s == null || s.equals("""");
}
}
","Java"
"Codon","Ensembl/ensembl-compara","scripts/synteny/apollo/datamodel/SeqFeature.java",".java","4070","140","
/*
* This is a trimmed version of Apollo's module that only supplies the
* fields needed by the BuildSynteny program.
*/
package apollo.datamodel;
import java.util.Hashtable;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Vector;
import java.util.Enumeration;
import apollo.util.QuickSort;
public class SeqFeature extends Range {
// -----------------------------------------------------------------------
// Class/static variables
// -----------------------------------------------------------------------
// -----------------------------------------------------------------------
// Instance variables
// -----------------------------------------------------------------------
protected String id;
protected String refId;
protected SeqFeature refFeature;
private SeqFeature analogousOppositeStrandFeature=null;
protected String biotype = null;
// Actually keep the default score outside the hash so we don't have to do
// a hash table lookup every time we want it!!!!
protected double score;
protected byte phase = 0;
/** When translating, the offset for the stop codon in genome
coordinates needs to be adjusted to account for edits to
the mRNA that alter the relative position of the stop codon
on the mRNA vs. the genome (e.g. from a translational frame
shift, or genomic sequencing errors). */
protected int edit_offset_adjust;
// ADDED by TAIR: user object to be used by any data adapter that needs it
private Object userObject = null;
private String syntenyLinkInfo = null;
private SeqFeature cloneSource = null;
public SeqFeature() {
}
public SeqFeature(int low, int high, String type) {
init(low,high,type);
}
public SeqFeature(int low, int high, String type, int strand) {
init(low,high,type,strand);
}
private void init(int low, int high, String type) {
setLow (low);
setHigh (high);
setFeatureType (type);
}
private void init(int low, int high, String type, int strand) {
init(low,high,type);
setStrand(strand);
}
/** Returns the feature type (biotype handling removed in this trimmed version) */
public String getTopLevelType() {
String retType;
retType = getFeatureType();
return retType;
}
public void setId(String id) {
this.id = id;
}
public String getId() {
return this.id;
}
/** FeatureSet overrides - merge with getNumberOfChildren */
public int size() { return 0; }
/** SeqFeature has no children, so this returns -1 by default. */
public int getFeatureIndex(SeqFeature sf) {
return -1;
}
/** no-op. SeqFeatures with children should override this (e.g. FeatureSet);
a SeqFeature without children needn't do anything */
public void addFeature(SeqFeature child) {}
/** no-op - overridden by FeatureSet */
public void addFeature(SeqFeature feature, boolean sort){}
/**
* The number of descendants (direct and indirect) in this FeatureSet.
* This method should find each child, and invoke numChildFeatures for each
* child that is a FeatureSet, and add 1 to the count for all others.
* FeatureSet implementors should not count themselves, but only the
* leaf SeqFeature implementations.
* This should be renamed numDescendants. numChild can lead one to think it's
* just the kids and not further descendants.
* In fact there should be 2 methods: numDescendants, numChildren
*
* @return the number of features contained anywhere under this FeatureSet
*/
public int getNumberOfDescendents() {
return 0;
}
private SeqFeature queryFeature;
/** Query feats hold cigars. This gives hit feats access to query feat & its cigar */
public void setQueryFeature(SeqFeature queryFeat) {
this.queryFeature = queryFeat;
}
public int getHstart() { return getStart(); }
public int getHend () { return getEnd(); }
public int getHlow() { return getLow(); }
public int getHhigh() { return getHigh(); }
public int getHstrand() { return getStrand(); }
}
","Java"
"Codon","Ensembl/ensembl-compara","scripts/synteny/apollo/datamodel/Range.java",".java","5748","224","
/*
* This is a trimmed version of Apollo's module that only supplies the
* fields needed by the BuildSynteny program.
*/
package apollo.datamodel;
public class Range {
// -----------------------------------------------------------------------
// Instance variables
// -----------------------------------------------------------------------
protected int low = -1;
protected int high = -1;
protected byte strand = 0;
protected String name = null;
protected String type = null;
public Range () {}
/** Range with a null name */
public Range(int start, int end) {
this(null,start,end);
}
public Range (String name, int start, int end) {
setName (name);
setStrand (start <= end ? 1 : -1);
setStart(start);
setEnd(end);
}
/** Returns true if same start, end, type and name. This could potentially
be changed to equals; there are implications there for hashing. A range
and its clone will be identical barring modifications. */
public boolean isIdentical(Range range) {
if (this == range)
return true;
// Features have to have same type,range, AND name
return (range.getFeatureType().equals(getFeatureType()) && sameRange(range) &&
range.getName().equals(getName()));
}
public void setName(String name) {
this.name = name;
/* if (name == null) {
throw new NullPointerException(""Range.setName: can't accept feature name of null. "" +
""Use Range.NO_NAME instead."");
} else if (!name.equals(""""))
this.name = name;*/
}
public String getName() {
return name;
}
public boolean hasName() {
return (name != null) && !name.equals("""");
}
/** getFeatureType is not the ""visual"" type,
i.e. the type one sees in the EvidencePanel.
getFeatureType returns the ""logical"" type (the type from the data).
These are the types in the squiggly brackets in the tiers
file that map to the visual type listed before the squigglies.
gui.scheme.FeatureProperty maps logical types
to visual types (convenience function in DetailInfo.getPropertyType) */
public String getFeatureType() {
return this.type;
}
public void setFeatureType(String type) {
if(type == null) {
throw new NullPointerException(""Range.setFeatureType: can't accept feature type of null. "" +
""Use SeqFeature.NO_TYPE or 'SeqFeature.NO_TYPE' instead."");
} else if (!type.equals(""""))
this.type = type;
}
public boolean hasFeatureType() {
return ! (getFeatureType() == null);
}
/** @return 1 for forward strand, -1 for reverse strand, 0 for strandless */
public int getStrand() {
return (int)this.strand;
}
/** Convenience method for getStrand() == 1 */
public boolean isForwardStrand() {
return getStrand() == 1;
}
public void setStrand(int strand) {
this.strand = (byte)strand;
}
public void setStart(int start) {
// check if strand is proper given start value?
if (getStrand() == -1) {
high = start;
} else {
low = start;
}
}
public int getStart() {
return (getStrand() == -1 ? high : low);
}
public void setEnd(int end) {
if (getStrand() == -1) {
low = end;
} else {
high = end;
}
}
public int getEnd() {
return (getStrand() == -1 ? low : high);
}
public int getLow() {
return this.low;
}
public void setLow(int low) {
// check if low < high - if not switch, and switch strand?
this.low = low;
}
public int getHigh() {
return this.high;
}
public void setHigh(int high) {
this.high = high;
}
public String getStartAsString() {
return String.valueOf(getStart());
}
public String getEndAsString() {
return String.valueOf(getEnd());
}
// These are all overlap methods
public int getLeftOverlap(Range sf) {
return (getLow() - sf.getLow());
}
public int getRightOverlap(Range sf) {
return (sf.getHigh() - getHigh());
}
public boolean isExactOverlap (Range sf) {
if (getLeftOverlap(sf) == 0 &&
getRightOverlap(sf) == 0 &&
getStrand() == sf.getStrand()) {
return true;
} else {
return false;
}
}
public boolean contains(Range sf) {
if (overlaps(sf) &&
getLeftOverlap(sf) <= 0 &&
getRightOverlap(sf) <= 0 &&
getStrand() == sf.getStrand()) {
return true;
} else {
return false;
}
}
public boolean contains(int position) {
return (position >= getLow() && position <= getHigh());
}
public boolean overlaps(Range sf) {
return (getLow() <= sf.getHigh() &&
getHigh() >= sf.getLow() &&
getStrand() == sf.getStrand());
}
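/* Illustrative note (added; not in the original Apollo source): two ranges
overlap only when their [low, high] intervals intersect AND they lie on
the same strand. For forward-strand ranges a=[100,200] and b=[150,250]:
a.overlaps(b) is true, a.getLeftOverlap(b) == -50 and
a.getRightOverlap(b) == 50. */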
/** Return true if start and end are equal */
public boolean sameRange(Range r) {
return getStart() == r.getStart() && getEnd() == r.getEnd();
}
public int length() {
return (getHigh() - getLow() + 1);
}
/** Returns true if this SeqFeature is an instanceof FeatureSet and
FeatureSet.hasChildFeatures is true.
Basically a convenience method that does the awkward instanceof for you. */
public boolean canHaveChildren() {
return false;
}
/** Return true if range has not been assigned high & low */
public boolean rangeIsUnassigned() {
return low == -1 && high == -1;
}
public void convertFromBaseOrientedToInterbase() {
--low;
}
public void convertFromInterbaseToBaseOriented() {
++low;
}
public String toString() {
return ""Range[name="" + name + "",type="" + type + "",low="" + low + "",high="" + high + "",strand="" + strand + ""]"";
}
}
","Java"
"Codon","Ensembl/ensembl-compara","scripts/synteny/apollo/io/FileParse.java",".java","4431","156","/* Jalview - a java multiple alignment editor
* Copyright (C) 1998 Michele Clamp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
* This is a trimmed version of Apollo's module that only supplies the
* fields needed by the BuildSynteny program.
*/
package apollo.io;
import java.lang.String;
import java.util.StringTokenizer;
import java.util.Vector;
import java.io.File;
import java.io.IOException;
import java.io.DataInputStream;
import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URLConnection;
import java.net.MalformedURLException;
public class FileParse {
public File inFile;
public int fileSize;
int bytes_read = 0;
public byte[] dataArray;
public Vector<String> lineArray;
public int noLines;
String inType;
URL url;
URLConnection urlconn;
public FileParse() {}
public FileParse(String fileStr, String type) throws MalformedURLException, IOException {
this.inType = type;
//System.err.println(""Input type = "" + type);
//System.err.println(""Input name = "" + fileStr);
if (type.equals(""File"")) {
this.inFile = new File(fileStr);
this.fileSize = (int)inFile.length();
// System.err.println(""File: "" + inFile);
// System.err.println(""Bytes: "" + fileSize);
}
if (type.equals(""URL"")) {
url = new URL(fileStr);
this.fileSize = 0;
urlconn = url.openConnection();
// printinfo(urlconn);
}
}
public void readLines(String inStr) {
StringTokenizer str = new StringTokenizer(inStr,""\n"");
lineArray = new Vector<String>();
while (str.hasMoreTokens()) {
lineArray.addElement(str.nextToken());
}
noLines = lineArray.size();
}
public void readLines() throws IOException {
String line;
this.lineArray = new Vector<String>();
BufferedReader dataIn;
if (inType.equals(""File"")) {
//Using a buffered reader reduces the file reading time by about a factor of 3
dataIn = new BufferedReader(new InputStreamReader(new FileInputStream(inFile)));
} else {
dataIn = new BufferedReader(new InputStreamReader(urlconn.getInputStream()));
}
while ((line = dataIn.readLine()) != null) {
lineArray.addElement(line);
}
noLines = lineArray.size();
}
public Vector<String> splitLine(char splitChar, int element) {
Vector<String> wordVector = new Vector<String>();
String line = lineArray.elementAt(element);
char[] charArray = line.toCharArray();
int i = 0;
int letter = 0;
char[] word = new char[line.length()];
char prev_char = '\n';
//System.err.println(""\nBefore loop"");
// System.err.println(""line "" + line + ""\nsplitChar :"" + splitChar + "":"");
//System.err.println(line.length());
for (i = 0; i < line.length() ; i++ ) {
if (charArray[i] != splitChar) {
word[letter] = charArray[i];
prev_char = charArray[i];
letter++;
} else {
if ((prev_char != splitChar) && (prev_char != '\n')) {
wordVector.addElement(new String(word, 0, letter)); // drop unused trailing NUL chars
letter = 0;
word = null;
word = new char[line.length()];
prev_char = charArray[i];
// System.err.println(""word: "" + wordVector.lastElement() + "":"");
}
}
}
//Tack on the last word into the vector - unless we have an empty line
//or if we have a splitchar at the end of the line
if (line.length() != 0) {
if (charArray[line.length() - 1] != splitChar) {
wordVector.addElement(new String(word, 0, letter)); // drop unused trailing NUL chars
}
} else {
//returns null vector if empty line
return(null);
}
return(wordVector);
}
}
","Java"
"Codon","Ensembl/ensembl-compara","scripts/synteny/apollo/util/QuickSort.java",".java","4516","241","
/*
* This is a trimmed version of Apollo's module that only supplies the
* fields needed by the BuildSynteny program.
*/
package apollo.util;
public class QuickSort {
public static void sort(double[] arr,Object[] s) {
doubleSort(arr,0,arr.length-1,s);
}
public static void sort(double[] arr,Object[] s, int len) {
doubleSort(arr,0,len-1,s);
}
public static void sort(float[] arr,Object[] s) {
sort(arr,0,arr.length-1,s);
}
public static void sort(long[] arr,Object[] s) {
longSort(arr,0,arr.length-1,s);
}
public static void sort(String[] arr,Object[] s) {
stringSort(arr,0,arr.length-1,s);
}
public static void sort(int[] arr,Object[] s) {
intSort(arr,0,arr.length-1,s);
}
public static void reverse(Object[] s) {
int length = s.length;
if(length>0) {
int middle;
if(length%2 >0)
middle = (length-1)/2;
else
middle = length / 2;
length--;
for(int i=0;i<middle;i++) {
Object tmp = s[i];
s[i] = s[length-i];
s[length-i] = tmp;
}
}
}
public static void stringSort(String[] arr,int p, int r,Object[] s) {
int q;
if (p < r) {
q = stringPartition(arr,p,r,s);
stringSort(arr,p,q,s);
stringSort(arr,q+1,r,s);
}
}
public static void intSort(int[] arr,int p,int r,Object[] s) {
int q;
if (p < r) {
q = intPartition(arr,p,r,s);
intSort(arr,p,q,s);
intSort(arr,q+1,r,s);
}
}
public static void longSort(long[] arr,int p, int r,Object[] s) {
int q;
if (p < r) {
q = longPartition(arr,p,r,s);
longSort(arr,p,q,s);
longSort(arr,q+1,r,s);
}
}
public static void sort(float[] arr,int p, int r,Object[] s) {
int q;
if (p < r) {
q = partition(arr,p,r,s);
sort(arr,p,q,s);
sort(arr,q+1,r,s);
}
}
public static void doubleSort(double[] arr,int p, int r,Object[] s) {
int q;
if (p < r) {
q = doublePartition(arr,p,r,s);
doubleSort(arr,p,q,s);
doubleSort(arr,q+1,r,s);
}
}
private static int doublePartition(double[] arr, int p, int r,Object[] s) {
double x = arr[p];
int i = p-1;
int j = r+1;
while(true) {
do {
j = j-1;
} while (arr[j] > x);
do {
i = i+1;
} while (arr[i] < x);
if ( i < j) {
double tmp = arr[i];
arr[i] = arr[j];
arr[j] = tmp;
Object tmp2 = s[i];
s[i] = s[j];
s[j] = tmp2;
} else {
return j;
}
}
}
private static int partition(float[] arr, int p, int r,Object[] s) {
float x = arr[p];
int i = p-1;
int j = r+1;
while(true) {
do {
j = j-1;
} while (arr[j] > x);
do {
i = i+1;
} while (arr[i] < x);
if ( i < j) {
float tmp = arr[i];
arr[i] = arr[j];
arr[j] = tmp;
Object tmp2 = s[i];
s[i] = s[j];
s[j] = tmp2;
} else {
return j;
}
}
}
private static int longPartition(long[] arr, int p, int r,Object[] s) {
long x = arr[p]; // use long, not float, to avoid precision loss on large values
int i = p-1;
int j = r+1;
while(true) {
do {
j = j-1;
} while (arr[j] > x);
do {
i = i+1;
} while (arr[i] < x);
if ( i < j) {
long tmp = arr[i];
arr[i] = arr[j];
arr[j] = tmp;
Object tmp2 = s[i];
s[i] = s[j];
s[j] = tmp2;
} else {
return j;
}
}
}
private static int intPartition(int[] arr, int p, int r,Object[] s) {
int x = arr[p];
int i = p-1;
int j = r+1;
while(true) {
do {
j = j-1;
} while (arr[j] > x);
do {
i = i+1;
} while (arr[i] < x);
if ( i < j) {
int tmp = arr[i];
arr[i] = arr[j];
arr[j] = tmp;
Object tmp2 = s[i];
s[i] = s[j];
s[j] = tmp2;
} else {
return j;
}
}
}
private static int stringPartition(String[] arr, int p, int r,Object[] s) {
String x = arr[p];
int i = p-1;
int j = r+1;
while(true) {
do {
j = j-1;
} while (arr[j].compareTo(x) < 0);
do {
i = i+1;
} while (arr[i].compareTo(x) > 0);
if ( i < j) {
String tmp = arr[i];
arr[i] = arr[j];
arr[j] = tmp;
Object tmp2 = s[i];
s[i] = s[j];
s[j] = tmp2;
} else {
return j;
}
}
}
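/* Usage sketch (added for illustration; not part of the original code):
each sort reorders the key array and the companion Object[] in step,
ascending for the numeric overloads, e.g.
int[] keys = {3, 1, 2};
String[] names = {""c"", ""a"", ""b""};
QuickSort.sort(keys, names); // keys -> {1,2,3}, names -> {""a"",""b"",""c""}
Note that sort(String[], Object[]) orders keys in descending order,
because stringPartition's comparisons are inverted relative to the
numeric partitions. */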
}
","Java"
"Codon","Ensembl/ensembl-compara","scripts/homology/plotLorentzCurve.r",".r","1536","43","#!/usr/bin/env Rscript
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#How to plot the Lorenz curve:
#Rscript plotLorentzCurve.r <INPUT> <OUTPUT.pdf>
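#Input format (assumed from the read.table call below): a headerless,
#tab-separated file with two numeric columns, where column 1 (V1) holds the
#previous release's cluster sizes and column 2 (V2) the current release's.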
args = commandArgs(trailingOnly=TRUE)
if (length(args)==0) {
stop(""Missing arguments: Rscript plotLorentzCurve.r <INPUT> <OUTPUT.pdf>"", call.=FALSE)
}
library(ineq)
library(scales)
pdf(args[2])
A <- read.table(args[1],header=FALSE, sep=""\t"")
current<-as.numeric(A$V2)
previous<-as.numeric(A$V1)
lorentz_curve_current<-Lc(current, n = rep(1,length(current)), plot =F)
lorentz_curve_previous<-Lc(previous, n = rep(1,length(previous)), plot =F)
plot(lorentz_curve_previous, col=""red"",lty=1,lwd=3,main=""Lorenz Curve of cluster size distributions"",xlab=""percentage of clusters"", ylab=""percentage of cluster size"")
lines(lorentz_curve_current,lty=1, lwd=3,col=""blue"")
legend(""topleft"", c( ""previous"", ""current"" ), lty=c(1,1), lwd=3, col=c(""red"", ""blue""))
dev.off()
","R"
"Codon","Ensembl/ensembl-compara","scripts/homology/plotGocData.r",".r","15583","317","#!/usr/bin/env Rscript
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Data can be extracted from the database by running the generateGocBreakout.pl script:
# perl generateGocBreakout.pl -outdir /homes/mateus/goc -user ensro -database mateus_tuatara_86 -hostname mysql-treefam-prod:4401
#How to plot the data:
# Rscript plotGocData.r your_tree.newick /your/output_directory/ your_reference_species_file
args = commandArgs(trailingOnly = TRUE)
if (length(args) != 3) {
stop('Three arguments are required: tree_file out_dir reference_species_file')
}
tree_file = args[1]
out_dir = args[2]
reference_species_file = args[3] #File with one reference species per line
library(ape)
library(reshape2)
library(ggplot2)
heatmap.phylo = function(x_mat, tree_row, tree_col, filename, maintitle, ...) {
# x_mat: numeric matrix, with rows and columns labelled with species names
# tree_row: phylogenetic tree (class phylo) to be used in rows
# tree_col: phylogenetic tree (class phylo) to be used in columns
# filename: path to PDF for saving plot
# maintitle: title for plot
# ... additional arguments to be passed to image function
pdf(filename, width=10, height=10)
# The matrix needs to be re-ordered, to match the order of the tree tips.
tree_row_is_tip = tree_row$edge[,2] <= length(tree_row$tip)
tree_row_tip_index = tree_row$edge[tree_row_is_tip, 2]
tree_row_tip_names = tree_row$tip[tree_row_tip_index]
tree_col_is_tip = tree_col$edge[,2] <= length(tree_col$tip)
tree_col_tip_index = tree_col$edge[tree_col_is_tip, 2]
tree_col_tip_names = tree_col$tip[tree_col_tip_index]
x_mat = x_mat[tree_row_tip_names, tree_col_tip_names]
# Work out the axes limits, then set up a 3x3 grid for plotting
x_lim = c(0.5, ncol(x_mat)+0.5)
y_lim = c(0.5, nrow(x_mat)+0.5)
layout(matrix(c(0,1,2,3,4,5,0,6,0), nrow=3, byrow=TRUE), width=c(1,3,1.5), height=c(1,3,1.5))
# Plot tree downwards, at top of plot
par(mar=c(0,0,2,0))
plot(tree_col, direction='downwards', show.tip.label=FALSE, xaxs='i', x.lim=x_lim, main=maintitle)
# Add legend
plot(NA, axes=FALSE, ylab='', xlab='', ylim=c(0,1), xlim=c(0,1))
legend('center', c('10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%', '100%'), ncol=2, ...)
# Plot tree on left side of plot
par(mar=rep(0,4))
plot(tree_row, direction='rightwards', show.tip.label=FALSE, yaxs='i', y.lim=y_lim)
# Plot heatmap
par(mar=rep(0,4), xpd=TRUE)
image((1:nrow(x_mat))-0.5, (1:ncol(x_mat))-0.5, x_mat, xaxs='i', yaxs='i', axes=FALSE, xlab='',ylab='', ...)
# Plot names on right side of plot
par(mar=rep(0,4))
plot(NA, axes=FALSE, ylab='', xlab='', yaxs='i', xlim=c(0,2), ylim=y_lim)
text(rep(0, nrow(x_mat)), 1:nrow(x_mat), gsub('_', ' ', tree_row_tip_names), pos=4)
# Plot names on bottom of plot
par(mar=rep(0,4))
plot(NA, axes=FALSE, ylab='', xlab='', xaxs='i', ylim=c(0,2), xlim=x_lim)
text(1:ncol(x_mat), rep(2,ncol(x_mat)), gsub('_', ' ', tree_col_tip_names), srt=90, pos=2, offset=0)
dev.off()
}
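# Usage sketch (illustrative; mirrors the calls further below): given a square
# percentage matrix m whose row and column names match the tip labels of a
# tree of class phylo, one would call e.g.
# heatmap.phylo(m, tree, tree, 'out.pdf', 'My title', col=rev(heat.colors(10)))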
#-------------------------------------------------------------------------
# Barplots with topology
#-------------------------------------------------------------------------
barplot.phylo <- function(x_df, x_df_cols, x_df_labels, species, tree_row, filename, ...) {
# x_df: dataframe with columns name1 and name2 with species names
# x_df_cols: list of column names from the data frame to plot
# x_df_labels: list of labels to use in legend
# species: species name
# tree_row: phylogenetic tree (class phylo) to be used in rows
# filename: path to PDF for saving plot
# ... additional arguments to be passed to image function
pdf(filename, width=10, height=6)
# The dataframe needs to be re-ordered, to match the order of the tree tips.
tree_row_is_tip = tree_row$edge[,2] <= length(tree_row$tip)
tree_row_tip_index = tree_row$edge[tree_row_is_tip, 2]
tree_row_tip_names = tree_row$tip[tree_row_tip_index]
x_df = subset(x_df, name2 == species, select=c('name1', x_df_cols))
x_df = x_df[match(tree_row_tip_names, x_df$name1),]
x_df = subset(x_df, select=x_df_cols)
x_mat = data.matrix(x_df)
# Work out the axis limits, then set up a 2x3 grid for plotting
maintitle = paste('GOC score distribution for', gsub('_', ' ', species))
y_lim = c(0.5, nrow(x_df)+0.5)
layout(matrix(c(0,1,0,2,3,4),nrow=2, byrow=TRUE), width=c(1,3,1), height=c(0.2,3))
# Add title
par(mar=c(0,0,2,0))
plot(NA, axes=FALSE, main=maintitle, xlim=c(0,1), ylim=c(0,1))
# Plot tree on left side of plot
par(mar=c(2,0,0,0))
plot(tree_row, direction='rightwards', show.tip.label=FALSE, yaxs='i', y.lim=y_lim)
# Add legend
legend('topleft', x_df_labels, ...)
# Plot bar chart
par(mar=c(2,0,0,0))
barplot(t(x_mat/rowSums(x_mat)), horiz=TRUE, xaxs='i', yaxs='i', axisnames=FALSE, xlab='',ylab='', ...)
# Plot names on right side of plot
par(mar=c(2,0,0,0))
plot(NA, axes=FALSE, ylab='', xlab='', yaxs='i', xlim=c(0,2), ylim=y_lim)
text(rep(0, nrow(x_mat)), 1:nrow(x_mat), gsub('_', ' ', tree_row_tip_names), pos=4)
dev.off()
}
phylo_tree = read.tree(paste(out_dir, tree_file, sep='/'))
phylo_tree = ladderize(collapse.singles(phylo_tree), FALSE)
goc_summary = read.delim(paste(out_dir, ""heatmap.data"", sep='/'), sep=""\t"", header=TRUE, na.strings=c('NULL'))
goc_0_matrix = as.matrix(acast(goc_summary, name1~name2, value.var='goc_eq_0'))
goc_25_matrix = as.matrix(acast(goc_summary, name1~name2, value.var='goc_gte_25'))
goc_50_matrix = as.matrix(acast(goc_summary, name1~name2, value.var='goc_gte_50'))
goc_75_matrix = as.matrix(acast(goc_summary, name1~name2, value.var='goc_gte_75'))
goc_100_matrix = as.matrix(acast(goc_summary, name1~name2, value.var='goc_eq_100'))
n_goc_cols = c('n_goc_0', 'n_goc_25', 'n_goc_50', 'n_goc_75', 'n_goc_100')
n_goc_labels = c('GOC score = 0', 'GOC score = 25', 'GOC score = 50', 'GOC score = 75', 'GOC score = 100')
heatmap_col = rev(heat.colors(10))
barplot_col = rainbow(length(n_goc_cols))
#-------------------------------------------------------------------------
# Heatmaps
#-------------------------------------------------------------------------
heatmap.phylo(goc_0_matrix, phylo_tree, phylo_tree, paste(out_dir, 'goc_0.pdf', sep='/'), 'Percentage of orthologs with GOC score = 0', col=heatmap_col, fill=heatmap_col, border=heatmap_col)
heatmap.phylo(goc_25_matrix, phylo_tree, phylo_tree, paste(out_dir, 'goc_25.pdf', sep='/'), 'Percentage of orthologs with GOC score >= 25', col=heatmap_col, fill=heatmap_col, border=heatmap_col)
heatmap.phylo(goc_50_matrix, phylo_tree, phylo_tree, paste(out_dir, 'goc_50.pdf', sep='/'), 'Percentage of orthologs with GOC score >= 50', col=heatmap_col, fill=heatmap_col, border=heatmap_col)
heatmap.phylo(goc_75_matrix, phylo_tree, phylo_tree, paste(out_dir, 'goc_75.pdf', sep='/'), 'Percentage of orthologs with GOC score >= 75', col=heatmap_col, fill=heatmap_col, border=heatmap_col)
heatmap.phylo(goc_100_matrix, phylo_tree, phylo_tree, paste(out_dir, 'goc_100.pdf', sep='/'), 'Percentage of orthologs with GOC score = 100', col=heatmap_col, fill=heatmap_col, border=heatmap_col)
for (species in levels(goc_summary$name1)) {
filename = paste(out_dir, paste('goc_', species, '.pdf', sep=''), sep='/')
barplot.phylo(goc_summary, n_goc_cols, n_goc_labels, species, phylo_tree, filename, fill=barplot_col, col=barplot_col)
}
#-------------------------------------------------------------------------
# Barplots sorted by GOC scores
#-------------------------------------------------------------------------
# Gene count
#---------------------------------------------------------------------------------------------------
pdf(paste(out_dir, 'gene_count.pdf', sep='/'),width=6,height=4,paper='special')
num_of_genes_dat = read.delim(paste(out_dir, 'gene_count.data', sep='/'), sep=""\t"", header=TRUE, na.strings=c('NULL'))
num_of_genes_plot <- melt(num_of_genes_dat, id.vars='species')
ggplot(num_of_genes_plot, aes(x=species, y=value)) + geom_bar(stat='identity') + facet_grid(.~variable) + coord_flip() + labs(x='',y='') + theme(text = element_text(size=5)) + theme(axis.text.x = element_text(size=rel(0.4)))
# Orthologues count
#---------------------------------------------------------------------------------------------------
pdf(paste(out_dir, 'number_of_orthologues.pdf', sep='/'),width=6,height=4,paper='special')
num_of_orthologues_dat = read.delim(paste(out_dir, 'homology.data', sep='/'), sep=""\t"", header=TRUE, na.strings=c('NULL'))
num_of_orthologues_plot <- melt(num_of_orthologues_dat, id.vars='species')
options(scipen=10000)
ggplot(num_of_orthologues_plot, aes(x=species, y=value)) + geom_bar(stat='identity') + facet_grid(.~variable) + coord_flip() + labs(x='',y='') + theme(text = element_text(size=5)) + theme(axis.text.x = element_text(size=rel(0.8)))
# References above 100
#---------------------------------------------------------------------------------------------------
reference_species = read.delim(reference_species_file, sep=""\n"", header=FALSE, na.strings=c('NULL'))
file_name = paste(out_dir, 'ordered_goc_100_references.pdf', sep='/')
pdf(file_name,width=6,height=4,paper='special')
for (ref_species in levels(reference_species$V1)) {
raw_data = read.delim( paste(paste(out_dir,ref_species,sep='/'), ""_ref.dat"", sep=''), header = TRUE, sep = "";"")
names(raw_data) <- c(""Species"", ""Proportion"", ""GOC"", ""taxon"")
raw_data$GOC = as.factor(sapply(raw_data$GOC , function(x){as.numeric(strsplit(as.character(x), split = ""X_"")[[1]][2])}))
# Capitalize and replace the underscore with a space
raw_data$Species = as.factor(sapply(raw_data$Species, function(x){paste(toupper(substring(x, 1,1)), chartr(old=""_"", new="" "", substring(x, 2)), sep="""")}))
x = raw_data[raw_data$GOC == ""100"",]
s = x$Proportion
#y = raw_data[raw_data$GOC == ""75"",]
#z = raw_data[raw_data$GOC == ""50"",]
#s = x$Proportion + y$Proportion + z$Proportion
species_list = x[rev(order(s)),]$Species
taxon_list = x[rev(order(s)),]$taxon
sorted_species_list = rev(species_list)
sorted_taxon_list = rev(taxon_list)
list = c(""Crocodylia"" = ""chartreuse4""
, ""Aves"" = ""blue""
, ""Squamata"" = ""darkorange2""
, ""Mammalia"" = ""red""
, ""Neopterygii"" = ""darkcyan""
, ""Testudines"" = ""black""
, ""Amphibia"" = ""deeppink"")
raw_data$Species = factor(raw_data$Species, levels = sorted_species_list)
raw_data$Taxonomy = sapply(raw_data$taxon, function(x){attributes(list[list == x])[[1]]})
if (ref_species == ""homo_sapiens"") {
ref_species = ""human""
} else if (ref_species == ""sphenodon_punctatus"") {
ref_species = ""tuatara""
}
graph_title = paste(""GOC score distribution (reference: "", ref_species, ""), ordered by GOC=100"",sep='')
#graph_title = paste(""GOC score distribution (reference: "", ref_species, ""), ordered by GOC>=50"",sep='')
print (ggplot(data = raw_data, aes(x = Species, y = Proportion, fill = GOC, colour = Taxonomy))
+ geom_bar(stat=""identity"", size = 0)
+ coord_flip()
+ theme(axis.text.y = element_text(colour = as.character(sorted_taxon_list)) , axis.text=element_text(size=7))
+ ggtitle(graph_title) + theme(plot.title = element_text(size = 7, face = ""bold""))
+ guides(colour = guide_legend(override.aes = list(size=1)))
+ scale_colour_manual(values = list)
)
}
# References above threshold with splits
#---------------------------------------------------------------------------------------------------
file_name = paste(out_dir, 'above_with_splits_references.pdf', sep='/')
pdf(file_name,width=6,height=4,paper='special')
for (ref_species in levels(reference_species$V1)) {
raw_data = read.delim( paste(paste(out_dir,ref_species,sep='/'), ""_above_with_splits.dat"", sep=''), header = TRUE, sep = "";"")
x <- raw_data[raw_data$threshold == ""above"",]
species_list <- x[rev(order(x$goc)),]$species
taxon_list <- x[rev(order(x$goc)),]$taxon
sorted_species_list <- rev(species_list)
sorted_taxon_list <- rev(taxon_list)
list <- c(""Crocodylia"" = ""chartreuse4""
, ""Aves"" = ""blue""
, ""Squamata"" = ""darkorange2""
, ""Mammalia"" = ""red""
, ""Neopterygii"" = ""darkcyan""
, ""Testudines"" = ""black""
, ""Amphibia"" = ""deeppink"")
raw_data$species <- factor(raw_data$species, levels = sorted_species_list)
raw_data$taxonomy <- sapply(raw_data$taxon, function(x){attributes(list[list == x])[[1]]})
if (ref_species == ""homo_sapiens"") {
ref_species = ""human""
} else if (ref_species == ""sphenodon_punctatus"") {
ref_species = ""tuatara""
}
graph_title = paste(""GOC scores above and under 50, reference: "",ref_species,sep='')
print (ggplot(data = raw_data, aes(x = species, y = goc, fill = threshold, colour = taxonomy))
+ geom_bar(stat=""identity"", size = 0) + coord_flip()
+ theme(axis.text.y = element_text(colour = as.character(sorted_taxon_list)) , axis.text=element_text(size=7))
+ ggtitle(graph_title) + theme(plot.title = element_text(size = 7, face = ""bold""))
+ guides(colour = guide_legend(override.aes = list(size=1)))
+ scale_colour_manual(values = list)
)
}
# References above threshold without splits
#---------------------------------------------------------------------------------------------------
file_name = paste(out_dir, 'above_threshold_references.pdf', sep='/')
pdf(file_name,width=6,height=4,paper='special')
for (ref_species in levels(reference_species$V1)) {
raw_data = read.delim( paste(paste(out_dir,ref_species,sep='/'), ""_ref_above_threshold.dat"", sep=''), header = TRUE, sep = "";"")
species_list <- raw_data[rev(order(raw_data$perc_orth_above_goc_thresh)),]$species
taxon_list <- raw_data[rev(order(raw_data$perc_orth_above_goc_thresh)),]$taxon
sorted_species_list <- rev(species_list)
sorted_taxon_list <- rev(taxon_list)
raw_data$species <- factor(raw_data$species, levels = sorted_species_list)
graph_title = paste(""Number of GOC>=50, reference: "",ref_species,sep='')
print(ggplot(data = raw_data[,c(1:2)], aes(x = species, y = perc_orth_above_goc_thresh, fill = perc_orth_above_goc_thresh))
+ geom_bar(stat=""identity"") + coord_flip()
+ theme(axis.text.y = element_text(colour = c(sorted_taxon_list)), axis.text=element_text(size=7))
+ ggtitle(""Number of GOC>=50: Tuatara as reference"") + theme(plot.title = element_text(size = 7, face = ""bold""))
)
}
","R"
"Codon","Ensembl/ensembl-compara","scripts/homology/prep_qfo_proteomes.py",".py","12052","280","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Prepare QfO reference proteome files for processing.""""""
import argparse
import json
import logging
import os
from pathlib import Path
import re
import string
from tarfile import TarFile
from tempfile import TemporaryDirectory
from typing import Dict, Union
from Bio import SeqIO
import pandas as pd
def generate_production_name(species_name: str) -> str:
""""""Returns a production name generated from the given species name.""""""
unqualified_species_name = re.sub(r""\(.+$"", """", species_name).rstrip()
underscored_species_name = re.sub(r""\s+"", ""_"", unqualified_species_name)
lowercased_species_name = underscored_species_name.lower()
return re.sub(""[^a-z0-9_]"", """", lowercased_species_name)
def parse_qfo_proteome_table(qfo_readme_file: Union[Path, str]) -> pd.DataFrame:
""""""Parse table in QfO reference proteome README file.
Args:
qfo_readme_file: Input QfO reference proteome README file.
Returns:
A pandas DataFrame containing QfO reference proteome metadata.
""""""
# This dict holds key meta-info about the format of the table in the QfO README file. By default, the
# table format meta-info of the most recent known release is used, so it should be possible to use this
# script on subsequent releases, provided that the table format has not changed since the most recent
# release included here. If there have been changes to the table format, you may need to update this dict
# with the new header line and updated sanitised column names. (Why sanitise the column names? Keeping
# them consistent, valid as Python identifiers etc. makes it easier to handle the proteome metadata.)
release_meta = {
""2015_to_2020"": {
""header"": ""Proteome_ID Tax_ID OSCODE #(1) #(2) #(3) Species Name"",
""columns"": [
""proteome_id"",
""tax_id"",
""oscode"",
""num_canonical"",
""num_additional"",
""num_gene2acc"",
""species_name""
],
""releases"": [""2015_04"", ""2016_04"", ""2017_04"", ""2018_04"", ""2019_04""]
},
""2020_to_date"": {
""header"": ""Proteome_ID Tax_ID OSCODE SUPERREGNUM #(1) #(2) #(3) Species_Name"",
""columns"": [
""proteome_id"",
""tax_id"",
""oscode"",
""superregnum"",
""num_canonical"",
""num_additional"",
""num_gene2acc"",
""species_name""
],
""releases"": [""2020_04"", ""2021_03"", ""2022_02""]
},
}
release_to_header: Dict = {}
release_to_columns: Dict = {}
for meta in release_meta.values():
release_to_header.update(dict.fromkeys(meta[""releases""], meta[""header""]))
release_to_columns.update(dict.fromkeys(meta[""releases""], meta[""columns""]))
# Release '2020_04' README contains a table with
# post-2020 columns underneath a pre-2020 header.
release_to_header[""2020_04""] = release_meta[""2015_to_2020""][""header""]
release_re = re.compile(r""Release (?P<release>[0-9]{4}_[0-9]{2}), [0-9]{2}-[A-Z][a-z]+-[0-9]{4}"")
release = None
table_lines = []
with open(qfo_readme_file) as file_obj:
exp_header_line = None
reading_table = False
for line in file_obj:
line = line.rstrip(""\n"")
if not reading_table:
release_line_match = release_re.fullmatch(line)
if release_line_match:
release = release_line_match[""release""]
try:
exp_header_line = release_to_header[release]
except KeyError:
release = max(release_to_header.keys())
exp_header_line = release_to_header[release]
elif exp_header_line and line == exp_header_line:
reading_table = True
continue
else:
if not line:
break
table_lines.append(line)
if not table_lines:
raise RuntimeError(f""failed to extract QfO proteome table from '{qfo_readme_file}'"")
max_split = len(release_to_columns[release]) - 1
rows = [x.split(maxsplit=max_split) for x in table_lines]
proteome_meta = pd.DataFrame(rows, columns=release_to_columns[release])
prod_names = proteome_meta[""species_name""].apply(generate_production_name)
if prod_names.duplicated().any():
dup_prod_names = set(prod_names[prod_names.duplicated()])
raise ValueError(f""duplicate species production name(s): {','.join(dup_prod_names)}"")
proteome_meta[""production_name""] = prod_names
# With consistently ordered proteome metadata,
# it will be easier to track meaningful changes.
proteome_meta.sort_values(by=""production_name"", inplace=True)
return proteome_meta
if __name__ == ""__main__"":
parser = argparse.ArgumentParser(description=""Prepare QfO reference proteome files for processing."")
parser.add_argument(""qfo_archive"",
help=""Input QfO archive file."")
parser.add_argument(""meta_file"",
help=""Output JSON file of proteome metadata."")
parser.add_argument(""output_dir"",
help=""Directory to which proteome data will be output."")
parser.add_argument(""--disallow-ambiguity-codes"", action=""store_true"",
help=""Filter out CDS FASTA records containing symbols""
"" other than 'A', 'C', 'G', 'T' or 'N'."")
parser.add_argument(""--skip-invalid-cds"", action=""store_true"",
help=""Skip CDS FASTA records with invalid DNA sequence."")
parser.add_argument(""--stats-file"", metavar=""PATH"",
help=""Output TSV file of proteome prep stats."")
args = parser.parse_args()
valid_cds_symbols = set(""ABCDGHKMNRSTVWY""
""abcdghkmnrstvwy"")
strict_cds_symbols = set(""ACGTN""
""acgtn"")
valid_aa_symbols = set(string.ascii_letters)
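# Note (added for clarity): valid_cds_symbols is the set of IUPAC nucleotide
# codes (the four bases plus ambiguity codes such as R, Y and N), while
# strict_cds_symbols allows only A, C, G, T and N. Symbols outside the former
# mark a record as invalid; symbols between the two sets count as ambiguous.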
with TarFile.open(args.qfo_archive) as tar_file, TemporaryDirectory() as tmp_dir:
tar_file.extract(""README"", tmp_dir)
readme_file = os.path.join(tmp_dir, ""README"")
uniprot_meta = parse_qfo_proteome_table(readme_file)
fa_name_to_tar_info = {}
for tar_info in tar_file.getmembers():
if tar_info.isfile() and tar_info.name.endswith("".fasta""):
fa_name = os.path.basename(tar_info.name)
fa_name_to_tar_info[fa_name] = tar_info
out_dir = os.path.abspath(args.output_dir)
os.makedirs(out_dir, exist_ok=True)
prep_stats = []
source_meta = []
for row in uniprot_meta.itertuples():
exp_cds_fa_name = f""{row.proteome_id}_{row.tax_id}_DNA.fasta""
cds_member = fa_name_to_tar_info[exp_cds_fa_name]
exp_prot_fa_name = f""{row.proteome_id}_{row.tax_id}.fasta""
prot_member = fa_name_to_tar_info[exp_prot_fa_name]
tar_file.extractall(tmp_dir, [cds_member, prot_member])
in_cds_file_path = os.path.join(tmp_dir, cds_member.name)
out_cds_file_path = os.path.join(out_dir, f""{row.tax_id}_{row.production_name}.cds.fasta"")
cds_ids = set()
num_part_ambig_cds = 0
num_invalid_cds = 0
skipped_ambig_cds_ids = set()
skipped_invalid_cds_ids = set()
with open(in_cds_file_path) as in_file_obj, open(out_cds_file_path, ""w"") as out_file_obj:
for rec in SeqIO.parse(in_file_obj, ""fasta""):
db_name, uniq_id, entry_name = rec.id.split(""|"")
seq_symbols = set(str(rec.seq))
issues = []
action = ""keeping""
if (seq_symbols & valid_cds_symbols) - strict_cds_symbols:
if args.disallow_ambiguity_codes:
skipped_ambig_cds_ids.add(uniq_id)
issues.append(""disallowed ambiguity codes"")
action = ""skipping""
num_part_ambig_cds += 1
if seq_symbols - valid_cds_symbols:
if args.skip_invalid_cds:
skipped_invalid_cds_ids.add(uniq_id)
issues.append(""invalid sequence"")
action = ""skipping""
num_invalid_cds += 1
if issues:
logging.warning(""FASTA record '%s' in '%s' has %s, %s"",
rec.id, cds_member.name, "" and "".join(issues), action)
if action == ""skipping"":
continue
SeqIO.write([rec], out_file_obj, ""fasta"")
cds_ids.add(uniq_id)
skipped_cds_ids = skipped_ambig_cds_ids | skipped_invalid_cds_ids
os.remove(in_cds_file_path)
in_prot_file_path = os.path.join(tmp_dir, prot_member.name)
out_prot_file_path = os.path.join(out_dir, f""{row.tax_id}_{row.production_name}.prot.fasta"")
num_canonical = 0
num_without_cds = 0
num_prepped = 0
with open(in_prot_file_path) as in_file_obj, open(out_prot_file_path, ""w"") as out_file_obj:
for rec in SeqIO.parse(in_file_obj, ""fasta""):
db_name, uniq_id, entry_name = rec.id.split(""|"")
if not set(str(rec.seq)) <= valid_aa_symbols:
raise ValueError(
f""FASTA record '{rec.id}' in '{prot_member.name}' has invalid AA sequence"")
if uniq_id in cds_ids:
SeqIO.write([rec], out_file_obj, ""fasta"")
num_prepped += 1
elif uniq_id not in skipped_cds_ids:
logging.warning(""FASTA record '%s' in '%s' has no CDS, skipping"",
rec.id, prot_member.name)
num_without_cds += 1
num_canonical += 1
os.remove(in_prot_file_path)
source_meta.append({
""production_name"": row.production_name,
""taxonomy_id"": int(row.tax_id),
""cds_fasta"": out_cds_file_path,
""prot_fasta"": out_prot_file_path,
""source"": ""uniprot""
})
prep_stats.append({
""production_name"": row.production_name,
""num_canonical"": num_canonical,
""num_part_ambig_cds"": num_part_ambig_cds,
""num_invalid_cds"": num_invalid_cds,
""num_skipped_cds"": len(skipped_cds_ids),
""num_without_cds"": num_without_cds,
""num_prepped"": num_prepped
})
with open(args.meta_file, ""w"") as out_file_obj:
json.dump(source_meta, out_file_obj, indent=4)
if args.stats_file:
stats_df = pd.DataFrame(prep_stats)
stats_df.sort_values(by=""production_name"", inplace=True)
stats_df.to_csv(args.stats_file, sep=""\t"", index=False)
","Python"
"Codon","Ensembl/ensembl-compara","scripts/homology/plotJaccardIndex.r",".r","1321","33","#!/usr/bin/env Rscript
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#How to plot the Jaccard index:
#Rscript plotJaccardIndex.r <INPUT> <OUTPUT.pdf>
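#Input format (assumed from the read.delim call below): a headerless,
#tab-separated file whose second column (V2) holds the Jaccard index values
#plotted as a density; the first column is not used by this plot.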
args = commandArgs(trailingOnly=TRUE)
if (length(args)==0) {
stop(""Missing arguments: Rscript plotJaccardIndex.r <INPUT> <OUTPUT.pdf>"", call.=FALSE)
}
library(ggplot2)
pdf(args[2])
ji_vertebrate = read.delim(args[1], sep=""\t"", header=FALSE)
ggplot(ji_vertebrate, aes(ji_vertebrate$V2)) + geom_density() + geom_vline(aes(xintercept=0)) + xlim(0, 1.25) + theme(legend.text=element_text(size=10)) + theme(axis.text.x=element_text(size=10),axis.text.y=element_text(size=10),axis.title.x=element_text(size=10),axis.title.y=element_text(size=10))
dev.off()
","R"
"Codon","Ensembl/ensembl-compara","scripts/pipeline/symlink_prev_dump.py",".py","6329","145","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Create symlinks to previously dumped Compara data files.""""""
import argparse
import json
import os
from pathlib import Path
import shutil
from tempfile import TemporaryDirectory
if __name__ == ""__main__"":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
""--curr_ftp_dump_root"", required=True, help=""Main dump root directory of the current Ensembl release.""
)
parser.add_argument(
""--prev_ftp_dump_root"", required=True, help=""Main dump root directory of previous Ensembl release.""
)
parser.add_argument(
""--curr_ftp_pub_root"", required=True, help=""FTP publication root directory of current release.""
)
parser.add_argument(
""--prev_ftp_pub_root"", required=True, help=""FTP publication root directory of previous release.""
)
parser.add_argument(
""--mlss_path_type"", required=True, choices=[""archive"", ""directory""], help=""MLSS path type.""
)
parser.add_argument(
""--mlss_path"", required=True, help=""MLSS dump path relative to the main dump root directory.""
)
parser.add_argument(""--mlss_id"", required=True, help=""MLSS ID of dumped data."")
parser.add_argument(""--dataflow_file"", required=True, help=""Output JSON file to dataflow missing MLSSes."")
args = parser.parse_args()
curr_ftp_dump_root = Path(args.curr_ftp_dump_root)
prev_ftp_dump_root = Path(args.prev_ftp_dump_root)
curr_ftp_pub_root = Path(args.curr_ftp_pub_root)
prev_ftp_pub_root = Path(args.prev_ftp_pub_root)
mlss_path = Path(args.mlss_path)
dataflow_file = Path(args.dataflow_file)
known_standin_mlss_ids = [""ANCESTRAL_ALLELES""]
try:
mlss_id = int(args.mlss_id)
except ValueError as exc:
if args.mlss_id in known_standin_mlss_ids:
mlss_id = args.mlss_id
else:
raise ValueError(f""invalid MLSS ID: {args.mlss_id}"") from exc
if not curr_ftp_dump_root.is_absolute():
raise ValueError(
f""value of --curr_ftp_dump_root must be an absolute path,""
f"" but appears to be relative: {str(curr_ftp_dump_root)!r}""
)
if not prev_ftp_dump_root.is_absolute():
raise ValueError(
f""value of --prev_ftp_dump_root must be an absolute path,""
f"" but appears to be relative: {str(prev_ftp_dump_root)!r}""
)
if not curr_ftp_pub_root.is_absolute():
raise ValueError(
f""value of --curr_ftp_pub_root must be an absolute path,""
f"" but appears to be relative: {str(curr_ftp_pub_root)!r}""
)
if not prev_ftp_pub_root.is_absolute():
raise ValueError(
f""value of --prev_ftp_pub_root must be an absolute path,""
f"" but appears to be relative: {str(prev_ftp_pub_root)!r}""
)
if args.mlss_path_type == ""archive"":
root_to_mlss_dir_path = mlss_path.parent
path_spec = f""{mlss_path.name}*""
elif args.mlss_path_type == ""directory"":
root_to_mlss_dir_path = mlss_path
path_spec = ""*""
else:
raise ValueError(f""unknown MLSS path type: {args.mlss_path_type}"")
prev_mlss_dir_path = prev_ftp_dump_root / root_to_mlss_dir_path
prev_mlss_file_paths = list(prev_mlss_dir_path.glob(path_spec))
if args.mlss_path_type == ""archive"" and len(prev_mlss_file_paths) > 1:
raise RuntimeError(
f""path spec {path_spec!r} matches multiple archive files in directory {str(prev_mlss_dir_path)!r}""
)
dataflow_events = []
if prev_mlss_file_paths:
curr_to_prev_root_path = Path(os.path.relpath(prev_ftp_pub_root, start=curr_ftp_pub_root))
curr_mlss_dir_path = curr_ftp_dump_root / root_to_mlss_dir_path
mlss_dir_to_root_path = os.path.relpath(curr_ftp_dump_root, start=curr_mlss_dir_path)
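# The symlink target is assembled from three relative legs: from the current
# MLSS directory up to the current dump root, across to the previous FTP
# publication root, and back down to the file's dump-root-relative path.
# Keeping every leg relative means the links should survive a move of the
# whole FTP tree, provided both releases move together.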
symlink_pairs = []
for prev_mlss_file_path in prev_mlss_file_paths:
root_to_mlss_file_path = root_to_mlss_dir_path / prev_mlss_file_path.name
new_symlink_target = mlss_dir_to_root_path / curr_to_prev_root_path / root_to_mlss_file_path
if prev_mlss_file_path.is_symlink():
prev_symlink_target = Path(os.readlink(prev_mlss_file_path))
if not prev_symlink_target.is_absolute():
new_symlink_target = Path(
os.path.normpath(new_symlink_target.parent / prev_symlink_target)
)
curr_mlss_file_path = curr_ftp_dump_root / root_to_mlss_file_path
symlink_pairs.append((new_symlink_target, curr_mlss_file_path))
with TemporaryDirectory(dir=curr_mlss_dir_path, prefix="".symlink_tmp_"") as tmp_dir:
for new_symlink_target, curr_mlss_file_path in symlink_pairs:
tmp_mlss_file_path = os.path.join(tmp_dir, curr_mlss_file_path.name)
os.symlink(new_symlink_target, tmp_mlss_file_path)
shutil.move(tmp_mlss_file_path, curr_mlss_file_path)
else:
# We cannot currently dataflow standin MLSS IDs.
if mlss_id in known_standin_mlss_ids:
raise RuntimeError(
f""cannot symlink {mlss_id} data - file not found in""
f"" previous release dump {str(prev_ftp_dump_root)!r}""
)
dataflow_branch = 2
dataflow_json = json.dumps({""missing_mlss_id"": args.mlss_id})
dataflow_events.append(f""{dataflow_branch} {dataflow_json}"")
os.makedirs(dataflow_file.parent, mode=0o775, exist_ok=True)
with open(dataflow_file, ""w"") as out_file_obj:
for dataflow_event in dataflow_events:
print(dataflow_event, file=out_file_obj)
","Python"
"Codon","Ensembl/ensembl-compara","scripts/pipeline/xmlschema_validate.py",".py","2042","57","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Script to validate XML data file against the given schema.
Unknown options are ignored.
""""""
import argparse
import sys
from warnings import warn
import xmlschema
if __name__ == ""__main__"":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(""--schema"", dest=""schema_file"", metavar=""schema_file"", required=True,
help=""Schema file against which to validate."")
parser.add_argument(""data_file"", help=""XML data file to be validated against the schema."")
known_args, other_args = parser.parse_known_args()
schema = xmlschema.XMLSchema(known_args.schema_file)
max_attempts = 3
default_recursion_limit = sys.getrecursionlimit()
for attempt in range(1, max_attempts + 1):
# A few trees hit the default recursion limit, so
# bump it up if we are retrying after a RecursionError.
curr_recursion_limit = attempt * default_recursion_limit
sys.setrecursionlimit(curr_recursion_limit)
try:
schema.validate(known_args.data_file)
except RecursionError as exc:
if attempt < max_attempts:
warn(
f""XML schema validation hit recursion limit {curr_recursion_limit}""
f"" in attempt {attempt}, retrying""
)
continue
raise exc
else:
# Validation succeeded; no need to retry with a higher recursion limit.
break
","Python"
"Codon","Ensembl/ensembl-compara","scripts/pipeline/add_hmm_lib.py",".py","7662","196","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Add HMM library archive to flat-file dump.""""""
import argparse
import os
from pathlib import Path
import subprocess
import shutil
from tempfile import TemporaryDirectory
if __name__ == ""__main__"":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
""--curr_ftp_dump_root"",
required=True,
help=""Main dump root directory of the current Ensembl release."",
)
parser.add_argument(
""--prev_ftp_dump_root"",
required=True,
help=""Main dump root directory of previous Ensembl release."",
)
parser.add_argument(
""--curr_ftp_pub_root"",
required=True,
help=""FTP publication root directory of current release."",
)
parser.add_argument(
""--prev_ftp_pub_root"",
required=True,
help=""FTP publication root directory of previous release."",
)
parser.add_argument(
""--hmm_library_basedir"",
required=True,
help=""Path of HMM library base directory."",
)
parser.add_argument(
""--ref_tar_path_templ"",
required=True,
help=""Template of reference HMM library tar archive path."",
)
parser.add_argument(
""--tar_dir_path"",
required=True,
help=""Path of directory containing archive, relative to the main dump root directory."",
)
args = parser.parse_args()
curr_ftp_dump_root = Path(args.curr_ftp_dump_root)
prev_ftp_dump_root = Path(args.prev_ftp_dump_root)
curr_ftp_pub_root = Path(args.curr_ftp_pub_root)
prev_ftp_pub_root = Path(args.prev_ftp_pub_root)
hmm_library_basedir = Path(args.hmm_library_basedir)
tar_dir_path = Path(args.tar_dir_path)
library_name = hmm_library_basedir.name
ref_tar_path = Path(args.ref_tar_path_templ % library_name)
ref_tar_md5sum_path = ref_tar_path.with_suffix("".gz.md5sum"")
if not curr_ftp_dump_root.is_absolute():
raise ValueError(
f""value of --curr_ftp_dump_root must be an absolute path,""
f"" but appears to be relative: {str(curr_ftp_dump_root)!r}""
)
if not prev_ftp_dump_root.is_absolute():
raise ValueError(
f""value of --prev_ftp_dump_root must be an absolute path,""
f"" but appears to be relative: {str(prev_ftp_dump_root)!r}""
)
if not curr_ftp_pub_root.is_absolute():
raise ValueError(
f""value of --curr_ftp_pub_root must be an absolute path,""
f"" but appears to be relative: {str(curr_ftp_pub_root)!r}""
)
if not prev_ftp_pub_root.is_absolute():
raise ValueError(
f""value of --prev_ftp_pub_root must be an absolute path,""
f"" but appears to be relative: {str(prev_ftp_pub_root)!r}""
)
if tar_dir_path.is_absolute():
raise ValueError(
f""value of --tar_dir_path must be a relative path,""
f"" but appears to be absolute: {str(tar_dir_path)!r}""
)
with TemporaryDirectory() as tmp_dir:
tmp_dir_path = Path(tmp_dir)
tar_file_confirmed_ok = False
if ref_tar_path.is_file():
md5sum_check_cmd_args = [""md5sum"", ""--check"", str(ref_tar_md5sum_path)]
try:
output = subprocess.check_output(
md5sum_check_cmd_args, cwd=ref_tar_md5sum_path.parent, encoding=""utf-8""
)
except subprocess.CalledProcessError:
pass
else:
output = output.rstrip()
if output == f""{ref_tar_path.name}: OK"":
tar_file_confirmed_ok = True
if not tar_file_confirmed_ok:
tmp_ref_tar_path = tmp_dir_path / ref_tar_path.name
tmp_ref_tar_md5sum_path = tmp_dir_path / f""{ref_tar_path.name}.md5sum""
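# Build the archive and its checksum in the temporary directory first, so an
# interrupted run never leaves a half-written file at the reference path.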
tar_czf_cmd_args = [
""tar"",
""czf"",
str(tmp_ref_tar_path),
""-C"",
str(hmm_library_basedir.parent),
library_name,
]
subprocess.run(tar_czf_cmd_args, check=True)
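# 'gzip --test' checks the integrity of the compressed stream without
# writing any decompressed output.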
gzip_test_cmd_args = [
""gzip"",
""--test"",
str(tmp_ref_tar_path),
]
subprocess.run(gzip_test_cmd_args, check=True)
md5sum_gen_cmd_args = [
""md5sum"",
tmp_ref_tar_path.name,
]
with open(tmp_ref_tar_md5sum_path, mode=""w"", encoding=""utf-8"") as out_file_obj:
subprocess.run(
md5sum_gen_cmd_args, stdout=out_file_obj, cwd=tmp_dir_path, encoding=""utf-8"", check=True
)
shutil.move(tmp_ref_tar_path, ref_tar_path)
shutil.move(tmp_ref_tar_md5sum_path, ref_tar_md5sum_path)
prev_compara_dir_path = prev_ftp_dump_root / tar_dir_path
prev_hmm_tar_file_path = None
if prev_compara_dir_path.is_dir():
path_spec = ""multi_division_hmm_lib*.tar.gz""
prev_hmm_tar_file_paths = list(prev_compara_dir_path.glob(path_spec))
if len(prev_hmm_tar_file_paths) == 1:
prev_hmm_tar_file_path = prev_hmm_tar_file_paths[0]
elif len(prev_hmm_tar_file_paths) > 1:
raise RuntimeError(
f""path spec {path_spec!r} matches multiple archive""
f"" files in directory {str(prev_compara_dir_path)!r}""
)
curr_compara_dir_path = curr_ftp_dump_root / tar_dir_path
curr_hmm_tar_file_path = curr_compara_dir_path / ref_tar_path.name
os.makedirs(curr_compara_dir_path, mode=0o775, exist_ok=True)
if prev_hmm_tar_file_path and (
ref_tar_path.name == prev_hmm_tar_file_path.name # pylint: disable=consider-using-in
# HMM lib files will retain the library name from e114 onwards,
# so from e115, it should be safe to delete the following line.
or prev_hmm_tar_file_path.name == ""multi_division_hmm_lib.tar.gz""
):
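# The previous release already ships this archive, so link to it rather than
# copy: build a relative path from the current compara directory up to the
# dump root, then across to the previous release's publication tree.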
compara_dir_to_root_path = Path(os.path.relpath(curr_ftp_dump_root, start=curr_compara_dir_path))
curr_to_prev_root_path = Path(os.path.relpath(prev_ftp_pub_root, start=curr_ftp_pub_root))
new_symlink_target = (
compara_dir_to_root_path / curr_to_prev_root_path / tar_dir_path / prev_hmm_tar_file_path.name
)
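# If the previous archive is itself a relative symlink, rebase its target so
# the new link points at the underlying file rather than chaining symlinks.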
if prev_hmm_tar_file_path.is_symlink():
prev_symlink_target = Path(os.readlink(prev_hmm_tar_file_path))
if not prev_symlink_target.is_absolute():
new_symlink_target = Path(os.path.normpath(new_symlink_target.parent / prev_symlink_target))
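# Create the symlink under a temporary name, then move it into place; the
# temp dir lives inside the destination directory, so the final rename is
# atomic and readers never see a half-made link.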
with TemporaryDirectory(dir=curr_compara_dir_path, prefix="".symlink_tmp_"") as symlink_tmp_dir:
tmp_hmm_tar_file_path = os.path.join(symlink_tmp_dir, curr_hmm_tar_file_path.name)
os.symlink(new_symlink_target, tmp_hmm_tar_file_path)
shutil.move(tmp_hmm_tar_file_path, curr_hmm_tar_file_path)
else:
shutil.copyfile(ref_tar_path, curr_hmm_tar_file_path)
","Python"
"Codon","Ensembl/ensembl-compara","scripts/pipeline/time_pipeline.py",".py","6711","185","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""""" Script to calculate real time duration of a pipeline """"""
import sys
import re
import argparse
from typing import Union
from datetime import datetime, timedelta
from sqlalchemy import create_engine, text
def die_with_help() -> None:
"""""" print helptext and exit """"""
helptext = """"""
time_pipeline.py -url <database_url> [options]
-url | --database_url URL for pipeline database
-a | --analyses_pattern include only some analyses (format: ""1"", ""1..10"", ""1,2,3"")
-l | --analyses_list file containing list of logic_names
-g | --gap_list print list summary of gaps found
-h | --help this help menu
""""""
die_with_message(helptext)
def die_with_message(message: str) -> None:
"""""" print message and exit """"""
print(message)
sys.exit(1)
def formulate_condition(analyses_pattern: str, analyses_list: str) -> str:
"""""" formulate WHERE SQL condition from analyses_pattern """"""
condition = ''
if analyses_pattern:
a_range = re.match(r'(\d+)\.\.(\d+)', analyses_pattern)
if a_range:
condition = f"" WHERE analysis_id BETWEEN {a_range.group(1)} AND {a_range.group(2)}""
else:
condition = f"" WHERE analysis_id IN ({analyses_pattern})""
elif analyses_list:
try:
with open(analyses_list) as f:
logic_names = [f""'{x.strip()}'"" for x in f.readlines()]
if len(logic_names) < 1:
die_with_message(f""File '{analyses_list}' is empty"")
condition = f"" WHERE logic_name IN ({','.join(logic_names)})""
except FileNotFoundError:
die_with_message(f""Cannot find analyses_list file: {analyses_list}"")
return condition
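# For example (illustrative values, not from a real pipeline database):
# analyses_pattern '1..10' yields: WHERE analysis_id BETWEEN 1 AND 10
# analyses_pattern '5,7,9' yields: WHERE analysis_id IN (5,7,9)
# an analyses_list file of logic_names yields: WHERE logic_name IN ('a','b')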
def parse_args(argv: list) -> argparse.Namespace:
"""""" parse the command-line arguments """"""
parser = argparse.ArgumentParser()
parser.add_argument('-url', '--database_url')
parser.add_argument('-a', '--analyses_pattern')
parser.add_argument('-l', '--analyses_list')
parser.add_argument('-g', '--gap_list', action='store_true', default=False)
opts = parser.parse_args(argv[1:])
if not opts.database_url:
die_with_help()
if opts.analyses_pattern and opts.analyses_list:
print(""--analyses_pattern and --analyses_list are mutually exclusive\n"")
die_with_help()
return opts
def main(opts: argparse.Namespace) -> None:
"""""" main """"""
# figure out analyses_pattern
condition = formulate_condition(opts.analyses_pattern, opts.analyses_list)
# set up db connection and fetch role data
engine = create_engine(opts.database_url, future=True)
connection = engine.connect()
sql = ""SELECT role_id, logic_name, when_started, when_finished FROM role""
sql += "" JOIN analysis_base USING(analysis_id)""
sql += condition + "" ORDER BY role_id""
result = connection.execute(text(sql))
# loop through roles and find runtime gaps
runtime_gaps = []
mins15 = timedelta(minutes=15)
prev_role = {}
now = datetime.now()
pipeline_start = '' # type: Union[str, datetime]
pipeline_total_runtime = timedelta()
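# Walk the roles in role_id order: record the pipeline start, track the
# latest finish time seen so far, flag any inter-role gap longer than 15
# minutes, and sum each role's runtime (using 'now' for unfinished roles).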
for result_mapping in result.mappings():
role = dict(result_mapping)
# Initialize start/finish times
if pipeline_start == '':
pipeline_start = role['when_started']
prev_role = role
# Skip gap detection if the previous role is still running
elif prev_role['when_finished'] is not None:
# Gap detection
if role['when_started'] > prev_role['when_finished']:
this_gap = role['when_started'] - prev_role['when_finished']
if this_gap > mins15:
gap_desc = {
'role_id_a': prev_role['role_id'],
'analysis_a': prev_role['logic_name'],
'role_id_b': role['role_id'],
'analysis_b': role['logic_name'],
'gap': this_gap
}
runtime_gaps.append(gap_desc)
if (role['when_finished'] is None) or (role['when_finished'] > prev_role['when_finished']):
prev_role = dict(role)
pipeline_total_runtime += (role['when_finished'] or now) - role['when_started']
if pipeline_start == '':
print(""Pipeline hasn't started yet !"")
sys.exit(1)
# get overall timings
pipeline_finish = prev_role['when_finished']
pipeline_gross_time = (pipeline_finish or now) - pipeline_start
gaps_total = timedelta(minutes=0)
for gap in runtime_gaps:
gaps_total += gap['gap']
pipeline_net_time = pipeline_gross_time - gaps_total
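# Summed per-role runtime divided by gross wall-clock time approximates the
# average number of jobs running concurrently.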
average_running_jobs = pipeline_total_runtime.total_seconds() / pipeline_gross_time.total_seconds()
# print summaries
print(""\nPipeline duration summary:"")
if pipeline_finish:
print(f""\t- began at {pipeline_start} and ended at {pipeline_finish}"")
else:
print(f""\t- began at {pipeline_start} and still running"")
print(f""\t- {pipeline_gross_time} including runtime gaps"")
print(f""\t- {pipeline_net_time} excluding runtime gaps"")
print(f""\t- {pipeline_total_runtime} total runtime"")
print(f""\t- {average_running_jobs:.1f} running jobs on average"")
print(f""\t- {len(runtime_gaps)} gaps detected, totalling {gaps_total}"")
if opts.gap_list:
print_gaps(runtime_gaps)
print()
def print_gaps(runtime_gaps: list) -> None:
""""""print the runtime gaps identified above""""""
print(""\nGaps list:"")
for gap in runtime_gaps:
analysis_str = ''
if gap['analysis_a'] == gap['analysis_b']:
analysis_str = f""during {gap['analysis_a']}""
else:
analysis_str = f""between {gap['analysis_a']} and {gap['analysis_b']}""
print(
f""\t- {gap['gap']} between role_ids {gap['role_id_a']} and {gap['role_id_b']} ({analysis_str})""
)
if __name__ == ""__main__"":
main(parse_args(sys.argv))
","Python"
"Codon","Ensembl/ensembl-compara","scripts/pipeline/symlink_fasta.py",".py","2565","67","#!/usr/bin/env python3
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an ""AS IS"" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""Script to create and optionally cleanup symlinks in a central location""""""
import argparse
import glob
import os
import re
import sys
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cleanup_symlinks', action='store_true')
parser.add_argument('-s', '--symlink_dir', required=True)
group = parser.add_mutually_exclusive_group(required=True)
# Only one of target_dir or target_file can be specified
group.add_argument('-d', '--target_dir')
group.add_argument('-t', '--target_file')
opts = parser.parse_args(sys.argv[1:])
target_dir = opts.target_dir
symlink_dir = opts.symlink_dir
target_file = opts.target_file
# Create the symlink directory if it doesn't exist
Path(symlink_dir).mkdir(parents=True, exist_ok=True)
# Clean up the broken symlinks - these should be due to genome retirement
if opts.cleanup_symlinks:
for link in glob.glob(os.path.join(symlink_dir, '**/*.fasta'), recursive=True):
# os.path.exists() follows symlinks and returns False for broken ones;
# checking os.readlink() output directly would resolve relative targets
# against the current working directory instead of the link's directory.
if not os.path.exists(link):
print(f'Broken symlink: {link} to be removed')
os.remove(link)
# Collect all the genome fasta files and symlink them
if target_dir:
for fasta_file in glob.glob(os.path.join(target_dir, '**/*.fasta'), recursive=True):
# Skip split fasta files
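# (the regex matches 'split' at a word boundary, e.g. files under a
# hypothetical 'split' subdirectory or with a '.split.fasta' suffix)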
if re.search(r'split\b', fasta_file):
continue
file_name = os.path.basename(fasta_file)
symlink_path = os.path.join(symlink_dir, file_name)
if not os.path.exists(symlink_path):
print(f'New symlink: {symlink_path} created for target: {fasta_file}')
os.symlink(fasta_file, symlink_path)
else:
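# Single-file mode: symlink the one given file under its own basename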
file_prefix = os.path.basename(target_file)
symlink_path = os.path.join(symlink_dir, file_prefix)
if not os.path.exists(symlink_path):
print(f'New symlink: {symlink_path} created for target: {target_file}')
os.symlink(target_file, symlink_path)
","Python"