#!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
# ==========================================================
# ARAGORN pipeline for genomes listed in ftp_urls.txt
# Adds: GC content + normalized 4-mers + TypePerAcc to NDJSON
# Keeps original file operations and structure
# ==========================================================

# --- Configuration ---
# THREADS and FTP_LIST may be overridden from the environment;
# all other paths are relative to the launch directory.
THREADS="${THREADS:-16}"              # parallel download/annotation jobs
FTP_LIST="${FTP_LIST:-ftp_urls.txt}"  # one genome URL per line
GENOMES_DIR="genomes"
OUTDIR="aragorn_out"                  # raw ARAGORN reports, one per accession
LOGDIR="logs"
TMPDIR_BASE="tmp_aragorn"             # per-genome scratch dirs
LINES_DIR="json_lines" # per-genome NDJSON lines (new)
AGG_JSON="FEATURES_ALL.ndjson" # merged NDJSON (new)

# --- Setup ---
mkdir -p "$GENOMES_DIR" "$OUTDIR" "$LOGDIR" "$TMPDIR_BASE" "$LINES_DIR"
: > "$AGG_JSON"  # truncate the aggregate so reruns start clean
# --- Helpers: feature calculators ---
# 1) GC content
# 1) GC content
# Emit tab-separated stats for the FASTA in $1:
#   gc_fraction, gc_percent, length_acgt, count_A/C/G/T.
# Case-insensitive; U is treated as T; all other symbols are ignored.
gc_content() {
  local fasta="$1"
  awk '
  BEGIN{A=0;C=0;G=0;T=0}
  /^>/ {next}   # skip FASTA headers
  {
    s=toupper($0)
    gsub(/U/,"T",s)   # RNA -> DNA alphabet
    # gsub returns the number of substitutions made, so each base is
    # counted with one awk call per line instead of the original
    # per-character substr() loop — same totals, far less work.
    A+=gsub(/A/,"",s)
    C+=gsub(/C/,"",s)
    G+=gsub(/G/,"",s)
    T+=gsub(/T/,"",s)
  }
  END{
    total=A+C+G+T
    gc=(total>0)?(G+C)/total:0   # guard empty/headers-only input
    printf("gc_fraction\t%.6f\n", gc)
    printf("gc_percent\t%.3f\n", gc*100)
    printf("length_acgt\t%d\n", total)
    printf("count_A\t%d\ncount_C\t%d\ncount_G\t%d\ncount_T\t%d\n", A,C,G,T)
  }' "$fasta"
}
# 2) Normalized 4-mers (256 combos), freq = count / sum(L_i-3)
# Print 256 "<KMER>\t<freq>" lines (fixed lexicographic generation order)
# plus a final "windows_total\t<N>" line for the FASTA in $1.
# freq = count(kmer) / sum over sequences of (len_i - 3); windows containing
# a masked (non-ACGT) base are skipped in the numerator but still counted in
# the denominator, matching the normalization described above.
tetra_norm() {
  local fasta="$1"
  awk '
  # Count every 4-mer window of one complete sequence record.
  # n/i/k are declared as extra params to keep them awk-function-local.
  function count_seq(seq,   n, i, k){
    n=length(seq)
    if(n<4) return
    for(i=1;i<=n-3;i++){
      k=substr(seq,i,4)
      # seq holds only [ACGTN] (masked below), so "no N" is exactly
      # "all ACGT". Avoids interval expressions ({4}), which are not
      # supported by every awk implementation.
      if(k !~ /N/) cnt[k]++
    }
    win_total += (n-3)
  }
  BEGIN{
    # Pre-seed all 256 combinations so absent k-mers still print 0.
    split("A C G T", B, " ")
    for(i1=1;i1<=4;i1++)
    for(i2=1;i2<=4;i2++)
    for(i3=1;i3<=4;i3++)
    for(i4=1;i4<=4;i4++){
      cnt[B[i1] B[i2] B[i3] B[i4]]=0
    }
    win_total=0
  }
  /^>/{
    # New record: flush the accumulated previous sequence.
    if(seq!=""){ count_seq(seq) }
    seq=""
    next
  }
  {
    s=toupper($0)
    gsub(/U/,"T",s)        # RNA -> DNA alphabet
    gsub(/[^ACGT]/,"N",s)  # mask ambiguity codes
    seq=seq s              # concatenate so k-mers span line breaks
  }
  END{
    if(seq!=""){ count_seq(seq) }   # flush the last record
    denom = (win_total>0)?win_total:1   # avoid division by zero
    split("A C G T", B, " ")
    for(i1=1;i1<=4;i1++)
    for(i2=1;i2<=4;i2++)
    for(i3=1;i3<=4;i3++)
    for(i4=1;i4<=4;i4++){
      k=B[i1] B[i2] B[i3] B[i4]
      printf("%s\t%.8f\n", k, cnt[k]/denom)
    }
    printf("windows_total\t%d\n", win_total)
  }' "$fasta"
}
# 3) Type Per Acc from ARAGORN output (like your example)
# 3) Type Per Acc from ARAGORN output (like your example)
# Parse an ARAGORN text report ($1) and count tRNA types per accession ($2).
# Output: "<count> <ACC>_genome_<Aa>_<AC>" lines, sorted by count desc,
# then label asc.
# NOTE(review): uses the 3-argument form of match() and IGNORECASE, which
# are gawk extensions — this function requires gawk, not mawk/BusyBox awk.
trna_type_per_acc() {
local aragorn_txt="$1" acc="$2"
awk -v ACC="$acc" '
BEGIN{ IGNORECASE=1 }
{
line=$0
if(line ~ /tRNA/){
# Amino acid: prefer "tRNA-Xxx"; fall back to a bare 3-letter code
# preceding an opening parenthesis at the start of the line.
aa=""
if(match(line, /tRNA-([A-Za-z]+)/, m)){ aa=m[1] }
else if(match(line, /^[[:space:]]*([A-Za-z]{3})[[:space:]]*\(/, m)){ aa=m[1] }
# Anticodon: first parenthesized 3-letter group on the line.
ac=""
if(match(line, /\(([A-Za-z]{3})\)/, n)){ ac=n[1] }
if(ac!=""){
# Normalize anticodon: U -> T, uppercase, mask anything non-ACGT.
gsub(/u/,"T",ac); gsub(/U/,"T",ac); ac=toupper(ac); gsub(/[^ACGT]/,"N",ac)
}
# Normalize amino acid to Title case, truncated to 3 chars (e.g. "Ala").
if(aa!=""){ aa=tolower(aa); aa=toupper(substr(aa,1,1)) substr(aa,2,2) }
if(aa!="" && ac!=""){
key = ACC "_genome_" aa "_" ac
counts[key]++
}
}
}
END{ for(k in counts) printf("%d %s\n", counts[k], k) }
' "$aragorn_txt" | sort -k1,1nr -k2,2
}
# 4) Build one NDJSON line into json_lines/<ACC>.ndjson
# 4) Build one NDJSON line into json_lines/<ACC>.ndjson
# Assemble {"acc","gc":{...},"tetra_norm":{...},"trna_type_per_acc":[...]}
# from the three feature files and write it as a single line to $5.
emit_ndjson_line() {
  local acc="$1" gc_fp="$2" tetra_fp="$3" tpa_fp="$4" out_line_fp="$5"
  local gc_obj tetra_obj tpa_arr

  # GC sub-object: one fixed key per tag emitted by gc_content.
  gc_obj=$(awk '
    $1=="gc_fraction"{printf("\"fraction\":%s", $2); next}
    $1=="gc_percent"{printf(",\"percent\":%s", $2); next}
    $1=="length_acgt"{printf(",\"length\":%s", $2); next}
    $1=="count_A"{printf(",\"A\":%s", $2); next}
    $1=="count_C"{printf(",\"C\":%s", $2); next}
    $1=="count_G"{printf(",\"G\":%s", $2); next}
    $1=="count_T"{printf(",\"T\":%s", $2); next}
  ' "$gc_fp")

  # 4-mer sub-object: every k-mer line as-is, windows_total appended last.
  tetra_obj=$(awk '
    BEGIN{sep=""}
    {
      if($1=="windows_total"){ wt=$2; next }
      printf("%s\"%s\":%s", sep, $1, $2)
      sep=","
    }
    END{ printf("%s\"windows_total\":%s", sep, (wt ? wt : 0)) }
  ' "$tetra_fp")

  # tRNA type array: "<count> <label>" lines; empty file -> empty array.
  tpa_arr=""
  if [ -s "$tpa_fp" ]; then
    tpa_arr=$(awk '
      BEGIN{sep=""}
      {
        printf("%s{\"count\":%d,\"label\":\"%s\"}", sep, $1, $2)
        sep=","
      }' "$tpa_fp")
  fi

  printf '{"acc":"%s","gc":{%s},"tetra_norm":{%s},"trna_type_per_acc":[%s]}\n' \
    "$acc" "$gc_obj" "$tetra_obj" "$tpa_arr" > "$out_line_fp"
}
# --- Function for processing one genome (kept as in your script, extended) ---
# Download one genome ($1 = URL of *_genomic.fna.gz), run ARAGORN on it,
# compute features (GC, 4-mers, tRNA types) and write one NDJSON line.
# Runs inside a GNU parallel job; failures are logged and the job returns.
process_one() {
  local url="$1"
  local acc root tmpdir fasta f out_file log_file
  acc=$(basename "$url" | sed 's/_genomic.*//')
  # Anchor everything to the launch directory: the original used paths
  # relative to the cwd, so after cd-ing into the tmp dir the error-branch
  # "rm -rf $tmpdir" pointed at a non-existent relative path and the
  # scratch dirs leaked.
  root=$(pwd)
  tmpdir="$root/$TMPDIR_BASE/${acc}_tmp"
  mkdir -p "$tmpdir"
  cd "$tmpdir" || return 1
  echo "[INFO] Processing $acc" >&2

  # Download genome (non-fatal: the size check below decides)
  wget -q -O "${acc}.fna.gz" "$url" || true
  if [ ! -s "${acc}.fna.gz" ]; then
    echo "[ERROR] Failed to download $acc" >&2
    cd "$root"
    rm -rf "$tmpdir"
    return
  fi

  # Unpack and locate the FASTA (glob instead of parsing ls)
  gunzip -f "${acc}.fna.gz"
  fasta=""
  for f in *.fna *.fa *.fasta; do
    if [ -f "$f" ] && [ -z "$fasta" ]; then fasta="$f"; fi
  done
  if [ -z "$fasta" ]; then
    echo "[ERROR] No FASTA found for $acc after extraction" >&2
    cd "$root"
    rm -rf "$tmpdir"
    return
  fi

  # Run ARAGORN (options unchanged); absolute output paths
  out_file="$root/$OUTDIR/${acc}.txt"
  log_file="$root/$LOGDIR/${acc}.log"
  echo "[RUN] ARAGORN on $acc" >&2
  aragorn -t -l -gc1 -w -o "$out_file" "$fasta" >"$log_file" 2>&1 || true
  if [ -s "$out_file" ]; then
    echo "[OK] $acc done" >&2
  else
    echo "[FAIL] ARAGORN empty output for $acc" >&2
    echo "$acc" >> "$root/$LOGDIR/failed_genomes.txt"
    : > "$out_file" # keep an empty file to be parsable
  fi

  # Compute features and emit the per-genome NDJSON line
  local gc_fp="${acc}.gc.tsv" tetra_fp="${acc}.tetra.tsv" tpa_fp="${acc}.tpa.txt"
  gc_content "$fasta" > "$gc_fp"
  tetra_norm "$fasta" > "$tetra_fp"
  trna_type_per_acc "$out_file" "$acc" > "$tpa_fp" || true

  local json_line="$root/$LINES_DIR/${acc}.ndjson"
  emit_ndjson_line "$acc" "$gc_fp" "$tetra_fp" "$tpa_fp" "$json_line"

  # Cleanup tmp (now safe: tmpdir is absolute and we are outside it)
  cd "$root"
  rm -rf "$tmpdir"
}
# Export worker function + helpers and the path config so GNU parallel's
# child shells can see them.
export -f process_one gc_content tetra_norm trna_type_per_acc emit_ndjson_line
export OUTDIR LOGDIR TMPDIR_BASE LINES_DIR

# --- Parallel execution ---
echo "[PIPELINE] Starting with $(wc -l < "$FTP_LIST") genomes using $THREADS threads..."
parallel -j "$THREADS" process_one {} < "$FTP_LIST"

# --- Merge NDJSON lines to a single file (deterministic order by filename) ---
# NUL-delimited find/sort/xargs instead of parsing ls; -r skips cat when
# no lines were produced (AGG_JSON is still truncated by the redirection).
find "$LINES_DIR" -maxdepth 1 -name '*.ndjson' -print0 2>/dev/null \
  | sort -z | xargs -0 -r cat > "$AGG_JSON" || true
echo "[PIPELINE] All done. NDJSON -> $AGG_JSON"