# -*- coding: utf-8 -*-
"""
:mod:`nubio.io.pdb`
===================

Provides read/write support for the pdb file format.

"""

import re
from collections import defaultdict
from tempfile import NamedTemporaryFile

from nubox.shim import izip

from nubio.core.atom import Model
from nubio.data.alphabet import AA3_TO_AA



# Field names, in column order, for the metadata parsed from each "ATOM  " /
# "HETATM" record; used as the keys of the per-atom ``meta['pdb']`` dict.
PDB_FIELDS = ('at_type', 'ser_num', 'at_id', 'alt_loc', 'res_name', 'chain_id',
              'res_id', 'res_ic', 'occupancy', 'bfactor', 'element', 'charge')
# printf-style template for a full coordinate ("ATOM  "/"HETATM") record.
PDB_COORDS = "%s%5i %-4s%c%3s %c%4i%c   %8.3f%8.3f%8.3f%6.2f%6.2f          %2s%2s\n"
# printf-style template for a "TER   " (chain terminator) record.
PDB_TER = "%s%5i %-4s%c%3s %c%4i%c\n"

def _parse_atom_line(line):
    """
    (internal) Parses a single PDBv3 coordinate line. Returns a tuple of
    ``(coords, meta)`` where ``coords`` is an ``(x, y, z)`` tuple of floats
    and ``meta`` is ``{'pdb': {field: value}}`` keyed by ``PDB_FIELDS``,
    valid for ``AtomData`` instances.

    Arguments:

        - line(``str``) A "ATOM  " or "HETATM" line.

    """
    coords = (float(line[30:38]),       # x
              float(line[38:46]),       # y
              float(line[46:54]))       # z
    fields = (line[0:6],                # at_type: "ATOM  " or "HETATM"
              int(line[6:11]),          # ser_num, an int per PDB spec
              line[12:16],              # at_id, e.g. " N B "
              line[16:17],              # alt_loc, whitespace always kept
              line[17:20],              # res_name, non-standard is 4 chars
              line[21:22],              # chain_id
              int(line[22:26].strip()), # res_id, pdb requirement int
              line[26:27],              # res_ic, insertion code
              float(line[54:60]),       # occupancy
              float(line[60:66]),       # bfactor
              line[76:78],              # element
              line[78:80])              # charge
    return (coords, {'pdb': dict(izip(PDB_FIELDS, fields))})

def _pdb_header_parser(lines):
    """
    (internal) Parses a PDBv3 header in the form of a sequence of lines. Returns
    a header ``dict``.
    
    """
    header = {}
    seqres = defaultdict(list)
    for line in lines:
        if line.startswith('SEQRES'):
            chain_id = line[11]
            try:
                seqres[chain_id].extend([AA3_TO_AA[aa3.capitalize()] \
                                        for aa3 in line[19:71].split()])
            except KeyError:
                # take it raw.
                seqres[chain_id].extend(line[19:71].split())
        elif line.startswith('HEADER'):
            header['ID'] = line[62:66]
    header['SEQRES'] = dict(seqres)
    return header

def pdb_parser(fh):
    """
    Parses a PDB file and for each model yields a tuple of ``(atom_coordinates,
    atom_metadatas, model_metadata)``. Coordinates are tuples of floats. The
    data associated with each "ATOM" or "HETATM" line in the PDB file is stored
    as ``atom_metadata['pdb']``. Model metadata is a dictionary with PDB header
    data under the 'pdb' key in ``model_metadata``.

    Arguments:

      - fh(``file``) A file-like object open for reading.

    """
    # group the alternation explicitly: in '^HETATM|ATOM|MODEL' the anchor
    # binds only to the first branch (harmless with .match, but ambiguous).
    match_coords = re.compile('^(HETATM|ATOM|MODEL)')  # start of coordinates

    lines = fh.readlines()
    c_offset = None
    for i, line in enumerate(lines):
        if match_coords.match(line):
            c_offset = i
            break
    # test against None explicitly: c_offset == 0 is a valid (empty) header,
    # while c_offset is None means the whole file is header.
    raw_header = lines[:c_offset] if c_offset is not None else lines
    parsed_header = _pdb_header_parser(raw_header)
    if c_offset is None:
        return  # no coordinate records, nothing to yield

    model_atoms = []
    model_atoms_meta = []
    model_id = 0
    for line in lines[c_offset:]:
        record_type = line[0:6]
        # a model starts at "MODEL ", or at the first coordinate record of a
        # file without MODEL records (include HETATM here so a ligand-only
        # file does not leave model_meta unbound at the yield below).
        if record_type == 'MODEL ' or \
          (record_type in ('ATOM  ', 'HETATM') and not model_id):
            model_id += 1
            model_meta = {'pdb':{'mod_id':model_id,
                                 'header':parsed_header,
                                 'raw_header':raw_header}}
        if record_type == 'ATOM  ' or record_type == 'HETATM':
            atom_coords, atom_metadata = _parse_atom_line(line)
            model_atoms.append(atom_coords)
            model_atoms_meta.append(atom_metadata)
        elif record_type in ('ENDMDL', 'END   ') and model_atoms:
            yield (model_atoms, model_atoms_meta, model_meta)
            model_atoms = []
            model_atoms_meta = []
    # tolerate truncated files missing the trailing ENDMDL/END record:
    # flush the last model instead of silently dropping it.
    if model_atoms:
        yield (model_atoms, model_atoms_meta, model_meta)

def pdb_writer(fh, vector):
    """
    Writes a PDB file given the contents of a sequence of atom coordinates,
    atom metadata dictionaries and a model metadata dictionary. The "vector"
    can be either a tuple of ``(atom_coordinates, atom_metadatas,
    model_metadata)`` or an instance of ``Model``.

    Arguments:

      - fh(``file``) An open for writing file handle or object with ``write``
        and ``writelines`` methods.
      - vector(sequence or ``Model`` instance) This should be either a sequence
        of atom coordinates, metadata and model metadata or a
        ``nubio.core.atom.Model`` instance.

    """
    if isinstance(vector, Model):
        model_atoms = vector.iter_item()
        model_atoms_meta = [kwargs['meta'] for kwargs in vector.childrenkwargs]
        model_meta = vector.meta
    else:
        model_atoms, model_atoms_meta, model_meta = vector

    old_meta = (None,) * 12  # fields of the previously written atom
    fh.writelines(model_meta['pdb'].get('raw_header', ()))
    # write first MODEL
    fh.write('MODEL        %s\n' % model_meta['pdb'].get('mod_id', ' '))
    for coords, meta in izip(model_atoms, model_atoms_meta):
        meta = tuple(meta['pdb'][k] for k in PDB_FIELDS)
        # chain change between two amino-acid records -> terminate the
        # previous chain (index 5 is chain_id, 4 is res_name, 1 is ser_num).
        if old_meta[5] and (old_meta[5] != meta[5]) and \
          (old_meta[4].capitalize() in AA3_TO_AA):
            fh.write(PDB_TER % (('TER   ',) + (old_meta[1] + 1,) +
                                ('   ', ' ') + old_meta[4:8]))
        fh.write(PDB_COORDS % (meta[:8] + tuple(coords) + meta[8:]))
        old_meta = meta
    # write last TER; old_meta[5] guards against an empty model, which
    # previously raised NameError on an unbound loop variable here.
    if old_meta[5] and old_meta[4].capitalize() in AA3_TO_AA:
        fh.write(PDB_TER % (('TER   ',) + (old_meta[1] + 1,) +
                            ('   ', ' ') + old_meta[4:8]))
    # write ENDMDL and last END
    fh.write('ENDMDL\n')
    fh.write('END   \n')

def pdb_tmp(vector):
    """
    Writes a macromolecular model to a temporary PDB file. Returns a file
    handle open for reading and writing. If the handle is closed the temporary
    file will be (at some point) deleted from the file system.

    Arguments

      - vector(``Model`` instance or sequence) see: ``pdb_writer``

    """
    # open in text mode: the default 'w+b' would make pdb_writer's str
    # writes fail with TypeError on Python 3.
    tmp_file = NamedTemporaryFile(mode='w+')
    pdb_writer(tmp_file, vector)
    tmp_file.flush()
    return tmp_file

#def clean_pdb_model(model, hetatm=True, ic=True, al=True, dups=True, gaps=True):
#    """
#    A non-sophisticated PDB model clean-up. Removes HETATM i.e. ligands and 
#    waters ("hetatm"), atoms with alternate locations ("al"), residues with 
#    insertion codes ("ic"). Returns the clean model i.e. ``Model`` and 
#    ``ModelData`` instances (copies).
#    
#    Arguments:
#    
#        - model (``sequence``) A sequence of ``Model``, ``ModelData`` and 
#          ``dict`` instances as returned by ``pdb_parser``.
#        - hetatm (``bool``) If ``True`` removes HETATM records 
#        - ic (``bool``) If ``True`` removes residues with != 'A' or ' '
#        - al (``bool``) If ``True`` removes atoms with alternate location != ' '
#        - gaps (``bool``) If ``True`` raises ``ValueError`` on chains with
#          missing residues.
#    """
#    # remove HETATOMs keep MSE
#    coords, data, modelmeta = model
#    if hetatm:
#        atom = ((data['at_type'] == 'ATOM  ') | (data['res_name'] == 'MSE'))
#    # no IC
#    if ic:
#        noic = (data['res_ic'] == ' ')
#    # no AL
#    if al:
#        noal = ((data['alt_loc'] == ' ') | (data['alt_loc'] == 'A'))
#    # no gaps
#    trace_data = data[data['at_id'] == ' CA ']
#    chain_ids = set(trace_data['chain_id'])
#    for chain_id in chain_ids:
#        chain_data = trace_data[trace_data['chain_id'] == chain_id]
#        res_ids = set(chain_data['res_id'])
#        if not (len(res_ids) - 1) == (chain_data['res_id'][-1] -
#                                      chain_data['res_id'][0]):
#            msg = 'Chain %s:%s has missing residues.'
#            if gaps:
#                raise ValueError(msg % (modelmeta['header']['ID'], chain_id))
#    # make selection this is a copy
#    valid_coords = coords[atom & noic & noal]
#    valid_data = data[atom & noic & noal]
#    # no duplicates
#    valid_trace_data = valid_data[valid_data['at_id'] == ' CA ']
#    valid_chain_ids = set(valid_trace_data['chain_id'])
#    for chain_id in valid_chain_ids:
#        valid_chain_data = valid_trace_data[valid_trace_data['chain_id'] == chain_id]
#        if len(valid_chain_data['res_id']) != len(list(set(valid_chain_data['res_id']))):
#            if dups:
#                raise ValueError('Chain has still non-unique residue ids.')
#    return (valid_coords, valid_data, modelmeta)
