#!/usr/bin/env python3
# Author: Armit
# Create Time: 2023/01/05 

# process raw PDB data
#   PDB file SMCRA layout: Structure -> Model -> Chain -> Residue -> Atom

import math
from datetime import datetime
from os import cpu_count
from pathlib import Path
from multiprocessing import Process, Manager
import ctypes
import gzip
from time import time
from traceback import print_exc
from typing import List, Union

import numpy as np
from Bio.PDB import PDBParser

from data import *

# root folder of the raw gzipped NMR PDB files; walk_for_pdb() expects
# one level of sub-directories under it, each holding *.gz files
PDB_ROOT = Path(RDATA_PATH) / 'NMR_PDB'
# number of parallel worker processes (one slice of files per worker)
N_WORKER = cpu_count()


def walk_for_pdb(root:Union[Path, str, None]=None):
  ''' Yield every gzipped PDB file (*.gz) found one directory level below `root`.

  `root` defaults to the module-level PDB_ROOT; it is a parameter so the walker
  can be pointed at any directory (e.g. in tests). Plain files directly under
  `root` and non-.gz files inside sub-directories are skipped.
  '''
  root = PDB_ROOT if root is None else Path(root)
  for folder in root.iterdir():
    if folder.is_file(): continue       # only descend into sub-directories
    for file in folder.iterdir():
      if file.suffix != '.gz': continue
      yield file


def worker(tasks:List[Path], n_structure, n_model, sequences:List[str], locations:List[List[Vector3d]]):
  ''' Parse each gzipped PDB file in `tasks` and push results into shared lists.

  For every chain of every model, appends the 1-letter residue sequence to
  `sequences` and the per-residue center coordinates to `locations` (index i of
  both lists describes the same chain). `n_structure` / `n_model` are
  multiprocessing Value proxies counting parsed structures and usable models;
  `sequences` / `locations` are Manager list proxies shared across processes.
  Unreadable files are logged and skipped (best-effort).
  '''
  parser = PDBParser()

  for file in tasks:
    try:
      with gzip.open(file, 'rt', encoding='utf-8') as fh:
        # 'xxxx.pdb.gz' -> stem 'xxxx.pdb' -> strip the inner '.pdb' suffix
        pdb_id = file.stem[:-len('.pdb')]
        structure = parser.get_structure(pdb_id, fh)
    except Exception:   # narrowed from bare `except:` so Ctrl-C can still stop the worker
      print_exc()
      print(f'<< error ignore file {file}')
      continue

    for model in structure.get_models():
      flag = False
      for chain in model.get_chains():    # NOTE: biopython silently drops broken chains
        residues = list(chain.get_residues())
        try:
          # std 3-letter name to 1-letter name; raises KeyError on non-AA residues
          seq = ''.join([NAME_TO_LABEL[r.resname] for r in residues])
          # take all atoms' coord mean as central location for the residue
          # FIXME: later find better definition of 'residue center'
          locs = [tuple(np.stack([a.coord for a in r.get_atoms()], axis=0).mean(axis=0)) for r in residues]
        except KeyError:
          continue      # ignore conformations with non-AA residue
        # append only after BOTH values were built, so the two shared lists
        # can never go out of sync if one of the expressions above raises
        sequences.append(seq)
        locations.append(locs)
        flag = True
      if flag: n_model.value += 1   # count the model only if it yielded >=1 usable chain
    n_structure.value += 1


def preprocess():
  ''' Parse all NMR PDB files in parallel worker processes, log dataset
  statistics to PDB_STAT_FILE (echoed to stdout), then persist the extracted
  (sequence, residue-location) pairs via save_pdb_data(). '''
  # perfcount
  ts = time()

  # `with` guarantees the stat file is closed even when a later step raises
  with open(PDB_STAT_FILE, 'w', encoding='utf-8') as logfp:
    def log(s: Union[str, list]):
      # write one line to the stat file and echo it to console
      logfp.write(str(s))
      logfp.write('\n')
      logfp.flush()
      print(s)

    log(f'>> now ts: {datetime.now()!s}')

    # multiprocess tasks & workers: split the file list into N_WORKER contiguous slices
    tasks: List[Path] = list(walk_for_pdb())
    n_tasks_each = math.ceil(len(tasks) / N_WORKER)
    log(f'>> found {len(tasks)} pdb files')
    log(f'      assign {n_tasks_each} tasks for each thread')

    manager = Manager()
    # Manager proxies shared with the worker processes (ValueProxy / ListProxy)
    n_structure = manager.Value(ctypes.c_int, 0)
    n_model     = manager.Value(ctypes.c_int, 0)
    sequences   = manager.list()    # one 1-letter sequence per usable chain
    locations   = manager.list()    # parallel list of per-residue center coords
    procs = [
      Process(target=worker,
              args=(tasks[i*n_tasks_each:(i+1)*n_tasks_each], n_structure, n_model, sequences, locations),
              daemon=True)
        for i in range(N_WORKER)
    ]
    for proc in procs: proc.start()
    for proc in procs: proc.join()

    log(f'>> found {n_structure.value} structures')
    log(f'>> found {n_model.value} models')
    # explicit check instead of `assert`, which is stripped under `python -O`
    if len(sequences) != len(locations):
      raise RuntimeError(f'sequences/locations length mismatch: {len(sequences)} != {len(locations)}')
    # convert proxy objects to python native BEFORE tearing the manager down:
    # the proxies are unusable once the manager server process exits
    sequences, locations = list(sequences), list(locations)
    manager.shutdown()
    del procs, manager, tasks

    # seq len stats (guarded: numpy min/max on an empty array would raise)
    log(f'>> found {len(sequences)} chains (conformations)')
    if sequences:
      lengths = np.asarray([len(c) for c in sequences])
      log(f'      min len: {lengths.min()}')
      log(f'      max len: {lengths.max()}')
      log(f'      avg len: {lengths.mean()}')
      log(f'      std len: {lengths.std()}')

    # write data
    sz = save_pdb_data(CLASS_LABELS, sequences, locations)
    log(f'>> file size: {sz:.3f} MB')

    # perfcount
    log(f'>> done in {time() - ts:.3f}s')


# script entry point: run the full preprocessing pipeline
if __name__ == '__main__':
  preprocess()
