import os, sys

basedir = r"C:\Users\Sandesh\Desktop\TS\Studies\GSE34248"          # base directory holding the GEO platform input file to annotate

def annot(basedir, inputfile):
    """Create the annotation file for one GEO platform input file.

    Pre:  basedir is the directory containing inputfile.
          inputfile is named '<GSEid>_...' (the GEO series id precedes the
          first underscore).
    Post: Writes the extracted annotation rows to '<inputfile>_a' in the
          same directory.
    """
    GSEid = inputfile.split('_')[0]
    if not isValidGEOid(GSEid):
        # Warn but keep going: original behaviour is best-effort.
        print("Not a valid GEO id .. check needed !!! %s %s" % (GSEid, inputfile))

    # 'with' guarantees both handles are closed even if lineprocess raises,
    # fixing the leak in the original (handles stayed open on error).
    with open(os.path.join(basedir, inputfile)) as f, \
         open(os.path.join(basedir, inputfile + '_a'), 'w') as f1:
        lineprocess(f, f1, GSEid)
    
def isValidGEOid(GEOID):
    """Return True iff GEOID is a valid GEO series id: 'GSE' followed only by digits.

    Fixes the original check, which split on 'GSE' and therefore accepted
    malformed ids such as 'GSE12GSE34' (only the text between the first two
    'GSE' occurrences was tested for digits).
    """
    return GEOID[:3] == 'GSE' and GEOID[3:].isdigit()

def lineprocess(f, f1, GEOID):
    """Copy the wanted annotation columns from f to f1, one row per line.

    Pre:  f is the open input file; f1 is the open output file; GEOID is the
          series id prepended to every output row.
    Post: For every data line before the '!platform_table_end' marker, writes
          GEOID plus the fields named in validfields, tab-separated.

    Rewritten as a single loop: the original for+while double iteration
    called f.next() unconditionally — even after the end marker — so a file
    ending at (or missing) '!platform_table_end' leaked a StopIteration to
    the caller, and empty lines raised IndexError on line[0].
    """
    validfields = ['ID', 'Gene Symbol', 'ENTREZ_GENE_ID', 'Species Scientific Name']
    colidx = {}

    for line in f:
        line = stripline(line)
        if line == "!platform_table_end":
            break                                   # nothing after the marker is processed
        # Skip blank lines and metadata/comment lines ('!', '^', '#').
        if not line or line[0] in ('!', '^', '#'):
            continue
        columns = line.split('\t')
        if isHeader(columns):
            colidx = ProcessHeader(columns, validfields)
        else:
            linedata = processDataline(columns, colidx, validfields)
            f1.write("\t".join([GEOID] + linedata))
            f1.write("\n")
            

            
def stripline(line):
    """Return line with one trailing newline sequence ('\\r\\n', '\\n' or '\\r') removed.

    Pre:  line is a line read from a file.
    Post: Exactly one end-of-line sequence is stripped; other characters are
          untouched. Unlike the original, an empty string is returned as-is
          instead of raising IndexError on line[-1].
    """
    if line.endswith('\r\n'):
        return line[:-2]
    if line.endswith(('\n', '\r')):
        return line[:-1]
    return line
        
def isHeader(line):
    """Return True when the tab-split line is the platform-table header row.

    Pre:  line is a non-empty list produced by splitting a data line on tabs.
    Post: The header row is recognised by its first column being the literal
          string 'ID'.
    """
    return line[0] == "ID"
            
def ProcessHeader(line, validlist):
    """Map each wanted column name to its position in the header row.

    Pre:  line is the tab-split header row; validlist names the columns to
          locate.
    Post: Returns {column name: index} for every name in validlist; if any
          name is missing from the header, prints a warning and returns {}.
    """
    if set(validlist).issubset(line):
        return dict((item, line.index(item)) for item in validlist)
    # Py2/Py3-compatible single-argument print (original used a Py2 print
    # statement, which is a syntax error under Python 3).
    print("Validlist not in HeaderColumn .. check needed\n%s" % (line,))
    return {}
    
    
def processDataline(fieldlist, colpositions, validlist):
    """Extract the wanted column values from one tab-split data row.

    Pre:  fieldlist is the tab-split data row; colpositions maps column
          name -> index (as built by ProcessHeader); validlist gives the
          output order.
    Post: Returns the selected field values in validlist order; columns whose
          index lies beyond the row are reported and skipped.
    """
    rv = []
    for item in validlist:
        idx = colpositions[item]
        if idx < len(fieldlist):
            rv.append(fieldlist[idx])
        else:
            # Fixed NameError: the original error branch referenced an
            # undefined name 'column'; also made the print Py3-compatible.
            print("Field length is smaller than col position %s %s %s"
                  % (item, idx, fieldlist))
    return rv
    
    
def write2fone(afnn, aid1, gs1, egi1, ssn1, f1):
    """Write the five field values to the open output file handle f1.

    Pre:  afnn, aid1, gs1, egi1, ssn1 are string field values; f1 is an open
          writable file object.
    Post: The five values are written to f1 in order, with no separators
          between them (exactly as passed in).
    """
    for value in (afnn, aid1, gs1, egi1, ssn1):
        f1.write(value)
    

#1. Remove non-essential numerical columns from the later part of the file
#2. QC checks
#3. Extract data from db without modification in code
#4. Replace field names
