#!/usr/bin/env python3
# Licensing : see licensing directory

import misc
import re

# Now we are in the right folder. We have the file list, and we know in which
# order we have to proceed. Let's go a little further and load all those files
# into memory as tokens... Then we will be ready to perform some syntax checks.
def phase3 (filestree):
    """Tokenize every source file referenced in filestree.

    filestree maps the group names 'Sub', 'Package' and 'Program' to dicts of
    {name: [path, ...]}. For each entry a fresh token list is appended, then
    the file at index 0 is read and tokenized with gentok(). Development is
    still ongoing; currently the three groups are processed the same way.

    Returns whatever misc.fend(filestree) returns. Exits the process with
    status 1 if any source file cannot be read.
    """
    print ("\nPhase #3: tokenizing, Step #1")

    counter = 1    # We'll count the passes

    # The three groups used to be three copy-pasted loops; they now share one
    # helper. NOTE(review): Sub entries store their tokens at index 2 while
    # Package/Program entries use index 1, as in the original code — confirm
    # the per-group list layouts really differ.
    counter = _tokenize_group(filestree['Sub'], 2, counter)
    counter = _tokenize_group(filestree['Package'], 1, counter)
    counter = _tokenize_group(filestree['Program'], 1, counter)

    return misc.fend(filestree)


def _tokenize_group(group, tok_index, counter):
    """Tokenize every file of one group dict in place.

    group: dict of {name: [path, ...]}; a new token list is appended to each
    entry and filled at tok_index.
    tok_index: index of the token list inside each entry after the append.
    counter: running pass number; the updated value is returned.

    Exits with status 1 when a file cannot be opened or read.
    """
    for name in group:
        print ("\tPass #"+str(counter)+': '+name)
        # All will be stored as a list of lists after the name and the file
        print ("\t\tPreparing the dataset")

        group[name].append([])

        print ("\t\tGenerating tokens:")

        # OSError covers missing files and permission problems; a bare
        # except here would also hide programming errors.
        try:
            with open(group[name][0], 'r') as fd:
                group[name][tok_index].extend(gentok(fd))
        except OSError:
            # Report the file of THIS group (the old code always printed
            # the 'Sub' entry here, which was a copy-paste bug).
            print ("\t\tError: can't read "+group[name][0]+". Exiting")
            exit (1)
        else:
            print ("\t\tTokens successfully generated")
            counter += 1
    return counter
    
def gentok (fd):
    """Tokenize every line of the open text file fd.

    Returns a list containing one token list per significant line. Empty
    lines and lines holding only a '~' comment are skipped. Leading space
    indentation is preserved as a single first token (needed later for
    syntax verification), doc-comment lines (delimited by '`') are kept as
    one whole-line token, and everything else is split on whitespace.
    Each produced token list is also printed for tracing.
    """
    ttree = []
    dlines = []  # indices of lines that belong to a still-open doc comment
    slines = []  # indices for multiline-string handling (feature disabled)
    c = 0        # zero-based index of the current physical line
    for line in fd:
        # We don't want lines only made of comments or empty ones.
        if (not re.match(r'^\s*~', line) and not re.match(r'^\s*$', line)):
            splitedline = []
            # Keep the indent as a single token.
            # NOTE(review): only ' ' characters are counted, so a tab
            # indent produces an empty indent token — confirm intended.
            if (re.match(r'^\s+', line)):
                tmp = 0
                for ct in line:
                    if (ct != ' '): break
                    tmp += 1
                splitedline.append(' ' * tmp)
            # The indent is recorded; strip it from the working line.
            line = line.strip()

            # Drop any trailing '~' comment, then rstrip what is left.
            # split('~', 1) keeps only the code part even when the comment
            # itself contains more '~' characters (the previous two-value
            # unpack of re.split crashed on such lines).
            if ('~' in line):
                line = line.split('~', 1)[0]
            line = line.rstrip()

            # A doc comment opened on the previous line keeps consuming
            # whole lines until a closing '`' is seen.
            if (c - 1 in dlines):
                splitedline.append(line)
                if (not re.search(r'`', line)):
                    dlines.append(c)
                line = ''
            # A line starting with '`' opens a doc comment: keep it whole.
            if (re.match('^`', line)):
                splitedline.append(line)
                dlines.append(c)
                line = ''

            # Whatever is left is plain code: split on whitespace.
            splitedline.extend(line.split())
            line = ''

            # Gestion of multiline strings disabled for the moment: I need
            # to take time looking for the best way to do it.
            # lstr(line, splitedline, slines, c)

            # We simply print the splited line
            print ("\t\t\t",splitedline)

            # And we append it to the files tree
            ttree.append(splitedline)
        # We increment the counter for the next line
        c += 1
    return ttree

def lstr(line, splitedline, slines, c):
    """Experimental tokenizer branch for '\"\"\"' multiline strings.

    Currently disabled in gentok(). Mutates its arguments in the simple
    branches and only prints its intermediate results in the triple-quote
    branch.

    line: the already comment-stripped source line.
    splitedline: the token list being built for this line (mutated).
    slines: indices of lines that are inside an open multiline string
        (mutated).
    c: zero-based index of the current line.
    """
    rs = re.compile(r'"{3}')
    rss = re.compile(r'"{3}.*"{3}')

    # No triple quote and no open string: plain whitespace split.
    if (not rs.search(line) and c - 1 not in slines):
        splitedline.extend(line.split())
        return

    # No triple quote but the previous line left a string open: the whole
    # line is literal string content, kept verbatim.
    elif (not rs.search(line) and c - 1 in slines):
        slines.append(c)
        splitedline.append(line)
        return

    # The line contains at least one triple quote.
    elif (rs.search(line)):
        ml = rs.findall(line)        # every '"""' occurrence
        nl = []
        tt = ''
        match = rss.findall(line)    # complete '"""..."""' spans
        tmpline = rss.split(line)    # code fragments around those spans
        print (tmpline)

        # An odd quote count toggles the open/closed state of the string.
        if ((not len(ml) % 2 and c - 1 not in slines) or (len(ml) % 2 and c - 1 in slines)):
            slines.append(c)
            print ("Slines")
            tt = re.findall(r'"{3}.*$', tmpline[0])
            tmpline = re.split(r'"{3}.*$', tmpline[0])
            print (tt, ':', tmpline)

        if (match):
            print (match)
            # Fixed: the first pattern was the typo '"{3]' (matched the
            # literal text '"{3]'), so lines starting with '"""' always
            # fell into the else arm.
            if ((re.match(r'^"{3}', line) and c - 1 not in slines) or (not re.match(r'^"{3}', line) and c - 1 in slines)):
                # String span first, then the code fragment that follows.
                # Fixed: iterate with a separate index instead of
                # clobbering the line counter c, which the parity checks
                # below still read.
                i = 0
                for m in match:
                    nl.append(m)
                    nl.extend(tmpline[i].split())
                    i += 1
                if ((not len(ml) % 2 and c - 1 not in slines) or (len(ml) % 2 and c - 1 in slines)):
                    nl.append(tt)
            else:
                # Code fragment first, then the string span.
                i = 0
                for m in match:
                    nl.extend(tmpline[i].split())
                    nl.append(m)
                    i += 1
                if ((not len(ml) % 2 and c - 1 not in slines) or (len(ml) % 2 and c - 1 in slines)):
                    nl.append(tt)
        else:
            nl = tmpline[0].split()
            if ((not len(ml) % 2 and c - 1 not in slines) or (len(ml) % 2 and c - 1 in slines)):
                nl.append(tt)

        print (nl)
