#!/usr/bin/env python

import os
import stemmer as stm
import scipy
import utils
from Categories import Categories

from scipy import sparse

#-----------------------------------------------------------------------------#
#                         Parametres externes :                               #
#-----------------------------------------------------------------------------#
 
# Characters / tokens treated as noise and replaced by spaces before
# tokenization.  (Duplicate '?' and '!' entries removed — str.replace
# already removes every occurrence on the first pass.)
NOISE = ['\n', ',', '.', ';', ':', '(', ')', '[', ']', '"', '?', '&', '$',
         '}', '{', '!', '+', '~', '=', '*', '\\', '/', '--', '\'']
PATH = '../../Part1'
MIN_WORDS_PER_FILE = 10   # files with fewer distinct words are discarded
MIN_FREQ_WORD = 2         # words rarer than this are pruned from the bag
MAX_FREQ_WORD = 10000     # words more frequent than this are pruned
MIN_FREQ_CAT = 0
MIN_VAR = 0.0005
#-----------------------------------------------------------------------------#
           
def tokenize(slice):
    """
    Lower-case the text, replace every NOISE character/token with a
    space and return the list of resulting words.

    Fix: the original split on a single space, so consecutive
    separators produced empty-string tokens that were later stemmed
    and counted as words; splitting on runs of whitespace drops them.
    """
    slice = slice.lower()
    for noise in NOISE:
        slice = slice.replace(noise, ' ')
    # split() with no argument collapses runs of whitespace and never
    # yields empty tokens.
    return slice.split()


def readStopWords():
    """
    Read the stopword file and return the words as a set.

    The file is expected at ../stopwords.csv, one quoted word per line;
    the quotes are stripped.  Note: the result may contain an empty
    string if the file ends with a newline (behaviour preserved from
    the original).

    Fix: the file is now closed even if read() raises (with-statement).
    """
    with open('../stopwords.csv', 'r') as f:
        content = f.read()
    return set(content.replace('"', '').split('\n'))
        
def readCategory(a):
    """
    Extract the category name from the 'NSF Program : ' line of an
    abstract file.

    The value starts 24 characters after the marker position (the
    14-character marker itself plus what is presumably a 10-character
    numeric program-code column — TODO confirm against the corpus
    format) and runs to the end of that line.
    """
    marker = 'NSF Program : '
    start = a.find(marker)
    # End of the line containing the marker, as an absolute index.
    lineEnd = start + a[start:].find('\n')
    return a[start + 24:lineEnd]


def readWords(stopwords, a, p):
    """
    Count the stemmed words of the abstract section of a file.

    stopwords -- set of words to skip (compared before stemming)
    a         -- full text of an abstract file; words are taken from
                 everything after the "Abstract    :" header line
    p         -- a Porter stemmer exposing stem(word, start, end)

    Returns a dict mapping stemmed word -> occurrence count.

    Fixes: has_key() (Python-2-only) replaced by dict.get, __len__()
    calls replaced by len(), marker expression computed once.
    """
    marker = '\nAbstract    :\n'
    tokens = tokenize(a[a.find(marker) + len(marker):])
    ws = {}
    for token in tokens:
        if token not in stopwords:
            w = p.stem(token, 0, len(token) - 1)
            ws[w] = ws.get(w, 0) + 1
    return ws


def readFileData(path, filename):
    """
    Return the whole content of path/filename as a string.

    Fix: the file is now closed even if read() raises (with-statement).
    """
    with open(path + '/' + filename, 'r') as f:
        return f.read()


def readFiles(path, folders, allwords, categories, stopwords, files):
    """
    Walk the corpus tree under path/<folder>/<subdir>/ and populate the
    Words (allwords), Categories (categories) and Files (files)
    dictionaries from every abstract file found.

    Files with fewer than MIN_WORDS_PER_FILE distinct words are
    skipped; 'links.html' index files are ignored at every level.
    At the end the word bag is pruned of too-rare / too-frequent words.

    Fixes: list.count(i) > 0 membership test replaced by 'in',
    __len__() replaced by len(), nesting flattened with guard clauses,
    dead commented-out code removed.
    """
    p = stm.PorterStemmer()
    for i in os.listdir(path):
        # Only descend into the requested top-level folders.
        if i not in folders:
            continue
        utils.printTime('Lecture du dossier' + str(i))
        newPath1 = path + '/' + i
        for j in os.listdir(newPath1):
            if j == 'links.html':
                continue
            newPath2 = newPath1 + '/' + j
            for k in os.listdir(newPath2):
                if k == 'links.html':
                    continue
                fileData = readFileData(newPath2, k)
                ws = readWords(stopwords, fileData, p)
                # Skip nearly-empty abstracts.
                if len(ws) > MIN_WORDS_PER_FILE:
                    c = readCategory(fileData)
                    allwords.addWords(ws)
                    categories.add(c)
                    files.add(k, ws, c)
    # Drop words too rare or too frequent to be informative.
    allwords.prune(MIN_FREQ_WORD, MAX_FREQ_WORD)
    

def wordsToMatrix(allwords, words, nbFile, matrix):
    """
    Fill one row of the data matrix with a file's word frequencies.

    allwords -- Words dictionary; allwords.bag maps word -> list whose
                first element is the word's column index
    words    -- dict word -> frequency for one file
    nbFile   -- row index of the file in the matrix
    matrix   -- sparse matrix being filled; columns 0 and 1 hold the
                file id and category id, hence the +2 column offset

    Fix: has_key() (Python-2-only) replaced by 'in'; a single
    items() iteration replaces the key-then-lookup pattern.
    """
    for w, freq in words.items():
        if w in allwords.bag:
            matrix[nbFile, allwords.bag[w][0] + 2] = freq


def loadMatrix(path, allwords, categories, files):
    """
    Build the sparse matrix holding all the data, one row per file:

    |file id|category id|freq of word 1|...|freq of word n|

    where n is the number of words retained across all files read.
    (The 'path' parameter is unused; kept for interface compatibility.)

    Fixes: __len__() calls replaced by len(); the files.bag[f] record
    is looked up once per file instead of four times.
    """
    matrix = sparse.lil_matrix((len(files.bag), len(allwords.bag) + 2),
                               dtype='int32')
    for f in files.bag:
        # Each record is [row index, word-frequency dict, category key].
        row, words, cat = files.bag[f][0], files.bag[f][1], files.bag[f][2]
        matrix[row, 0] = row
        matrix[row, 1] = categories.bag[cat][0]
        wordsToMatrix(allwords, words, row, matrix)
    return matrix


def rebuildFeaturesMatrix(featuresMatrix, matrix2, clusters):
    """
    Build a reduced matrix from featuresMatrix: column 0 receives the
    cluster id of each row's category, and the remaining columns are
    the featuresMatrix columns of the words kept in
    matrix2.retraceWords.  Returns the result as a CSR matrix.

    NOTE(review): uses dict.has_key(), which only exists in Python 2.
    """
    # One column per retained word, plus one for the cluster id.
    reMatrix = sparse.lil_matrix((featuresMatrix.shape[0], matrix2.retraceWords.__len__() + 1))
    cat2clus = scipy.zeros(matrix2.categories.__len__())
    colsPos = scipy.zeros(matrix2.retraceWords.__len__())
    
    # Invert clusters.positions: for each cluster i and each category
    # index j it contains, record category -> cluster.
    for i in clusters.positions:
        for j in clusters.positions[i]:
            cat2clus[j] = i
        
    # Column 1 of featuresMatrix holds the category id of the row;
    # map it to its cluster id in column 0 of the new matrix.
    for i in range(featuresMatrix.shape[0]):
        reMatrix[i, 0] = cat2clus[featuresMatrix[i, 1]]
        
    # For every retained word, record its source column (+2 skips the
    # file-id and category-id columns of featuresMatrix).
    for i in range(matrix2.words.__len__()):
        if matrix2.retraceWords.has_key(i):
            colsPos[matrix2.retraceWords[i]] = i + 2
    
    # NOTE(review): colsPos is a float array (scipy.zeros default) used
    # as a fancy column index — presumably intended to be integer;
    # confirm scipy accepts this, otherwise cast with astype(int).
    reMatrix[:, 1:] = featuresMatrix[:, colsPos]
            
    return reMatrix.tocsr()


def getWords(folder, file):
    """
    Read a single abstract file and return its (category, word counts).

    Uses the module-level stopword set; word counts are produced by the
    same stemming pipeline as the bulk corpus reader.
    """
    stemmer = stm.PorterStemmer()
    data = readFileData(folder, file)
    category = readCategory(data)
    counts = readWords(stopwords, data, stemmer)
    return category, counts


stopwords = readStopWords()
