'''
Created on 2011-01-06

Main module of the project.
Invocation parameters:
   docsPath - path to the directory containing the documents

@author: Michal Kolodziejski
'''
import sys
import os
import math
import nltk
import numpy
import gc
import re

from BeautifulSoup import BeautifulSoup
from genericpath import isdir
from classes.document import Document
from classes.word import Word
from nltk.tokenize.treebank import TreebankWordTokenizer
from nltk.stem.porter import PorterStemmer
from classes.metaparser import MetaParser
from nltk.util import clean_html
from numpy import mean
from numpy.core.fromnumeric import std, amin, amax, var
from nltk import cluster
from nltk.cluster.util import euclidean_distance
from classes.group import Group
from os import makedirs
from shutil import copyfile, rmtree
from nltk.tag import pos_tag



# Module-level shared state, populated as the pipeline runs.
documents=[]    # list of Document objects, one per input file
words={}        # word string -> Word object (corpus-wide statistics)
wordsIdfs={}    # word string -> idf value, filled in by cleanWords()
docsPath=""     # input directory path taken from sys.argv[1]
#wordsSortedArray = []
groups={}       # cluster id -> Group object, filled in by clustering()
vectors = []    # per-document tf-idf vectors (numpy arrays) for clustering

# clustering parameters
nGroups = 20                        # desired number of clusters
nMinDocumentsNumberForWords = 10    # a kept word must appear in more than this many documents
nMaxDocumentsWithWord = 1.0/80.0    # ...and in less than this fraction of all documents (see cleanWords)




''' Validate the command-line arguments and remember the documents directory. '''
def checkArgs():
    global docsPath

    # exactly one argument (the documents directory) is required
    argCount = len(sys.argv)
    if argCount != 2:
        print "[ERROR] Prosze podac sciezke do katalogu zawierajacego dokumenty!\n"
        sys.exit(-1)

    docsPath = sys.argv[1]
    print "Sciezka do katalogu zawierajacego dokumenty: ", docsPath, "\n"

    # the argument must point at an existing directory
    if not isdir(docsPath):
        print "[ERROR] Podana sciezka nie jest katalogiem!\n"
        sys.exit(-1)



''' Funkcja wczytujaca wszystkie pliki '''
def readDocuments():
    global stopwords
    global docsPath
    global documents
    
    #inicjalizacja stopwords
    stopwords = nltk.corpus.stopwords.words('english')
    otherWords = ["]", "[", ")", "(", ";", ",", "'s", "\""]
    stopwords += otherWords
    
    dirFiles = os.listdir(docsPath)
    for filename in dirFiles:
        if not isdir(docsPath+filename):
            documents.append(Document(docsPath+filename))
    print "Ilosc dokumentow: ", len(documents), "\n"
    

    ''' Wczytujemy kazdy dokument '''
    
    print "Reading documents..."
    
    i=0
    for document in documents:
        i+=1
        if i % 2000 == 0:
            print "At: "+str(i)
        #print "At document numer: "+str(i)
        readSingleDocument(document)
    
    print "finished"
    print


''' Read and index a single document.

Parses title/metadata, strips HTML, tokenizes, stems and filters the
words, then updates both the per-document word counts and the global
word statistics.

Fix: the file handle is now closed as soon as the content is read
(previously f.close() sat at the very end and was skipped whenever
parsing/tokenizing raised, leaking the descriptor). '''
def readSingleDocument(document):
    global words

    f = open(document.getPath())
    try:
        content = f.read()
    finally:
        f.close()

    # parse title and metadata (may replace the Document instance)
    document = parseDocument(content, document)

    # strip html tags
    content = clean_html(content)

    # tokenize
    tokenizer = TreebankWordTokenizer()
    fileWords = tokenizer.tokenize(content)

    # boost the weight of metadata/title words by repeating them 5 times
    for _ in range(5):
        fileWords += document.getMetadataWords()
        fileWords += document.getTitleWords()

    # drop stop-words
    noStops = removeStopwords(fileWords)

    # some parts of speech could also be removed here (disabled)
    # noStops = removePOS(noStops)

    stemmer = PorterStemmer()

    for word in noStops:
        # stemming
        word = stemmer.stem_word(word)

        # skip numbers and dates
        if word.startswith(('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')):
            continue

        # drop a trailing dot, but only when it is the sole dot in the word
        if re.search(r"^[^.]+\.$", word):
            word = word[:-1]

        # skip single characters
        if len(word) == 1:
            continue

        # lower-case
        word = word.lower()

        # record the word in the document (remember whether it was already there)
        docContainsWord = document.containsWord(word)
        document.addWord(word)

        # update the global word statistics
        if words.has_key(word):
            aWord = words[word]
        else:
            aWord = Word(word)

        aWord.incrementCount()
        # first occurrence in this document - bump the document counter
        if not docContainsWord:
            aWord.incrementDocumentsContainingCount()
        words[word] = aWord
    
    

def removeStopwords(wordsToCheck):
    # Return the input words with every stop-word removed
    # (comparison is case-insensitive, via the global stop-word list).
    global stopwords
    kept = []
    for token in wordsToCheck:
        if token.lower() in stopwords:
            continue
        kept.append(token)
    return kept


#''' part-of-speech tagging, removing unneeded word classes (disabled) '''
#def removePOS(words):
#    words_tag = nltk.pos_tag(words)
#    
#    for word_tag in words_tag:
#        if word_tag[1].startswith("V") \
#            or word_tag[1] == "CD" \
#            or word_tag[1] == "HVN" \
#            or word_tag[1] == "MD":
#            
#            words.remove(word_tag[0])
#    return words


def parseDocument(content, document):
    
    # poprawienie blednnych tagow htmlowych
    soup = BeautifulSoup(content)
    content = soup.prettify()
    
    try:
        parser = MetaParser(content, document)
        document = parser.getChangedDocument()
        
    except:
        print "blad parsowania"
    
    tw = removeStopwords(document.getTitleWords())
    mw = removeStopwords(document.getMetadataWords())
    document.setTitleWords(tw)
    document.setMetadataWords(mw)
        
    return document



#usuwa najmniej istotne slowa
def cleanWords():
    global words
    global documents
    global nMinDocumentsNumberForWords
    global nMaxDocumentsWithWord
    
    print "Filtering words..."
    
    N = len(documents)
    #oblicza idfy slow
    #przydadza sie przy obliczaniu tfidf
    for w in words:
        aWord = words[w]
        wordsIdfs[w] = math.log10(N/aWord.getDocumentsContainingCount())
    
    #sortuje slowa w kolekcji idfow od najwyzszego
    items = wordsIdfs.items()
    items.sort(reverseSortfuncValues)
    
    logToCheck = math.log10(1.0/nMaxDocumentsWithWord)
    
    print "ALL WORDS: " + str(len(words))
    
    # kolekcja par slowo-idf
    # wybieramy te, ktore maja idf > log(2) - czyli jest w mniej niz polowie dokumentow
    # i wybieramy te, ktore sa w wiecej niz 2 dokumentach
    for tuple in items:
        w = tuple[0]
        aWord = words[w]
        if tuple[1] > logToCheck and aWord.getDocumentsContainingCount() > nMinDocumentsNumberForWords:
            print "IDF: "+str(tuple[1])+", docs: "+str(aWord.getDocumentsContainingCount())+", word: "+w
            continue
        else:
            words.pop(w)
            
    print "FILTERED WORDS: " + str(len(words))

    # set words indexes in an array
    i = 0
    for w in words:
        aWord = words[w]
        aWord.setIndex(i)
        i+=1
        
    print "finished"
    print



#wylicza tfidf - istotnosc slow w dokumentach
#zapisuje w odpowiendim wektorze dla slow w dokumencie
def computeTfidfs():
    global words
    global documents
    global wordsIdfs
    global vectors
    
    print "Computing tfidfs..."
    
    i = 0
    N = len(words)
    
    for doc in documents:
        i+=1
        if i % 2000 == 0:
            print "At: "+str(i)
        
        tfidfs = [0 for d in range(0,N)]
        for word in doc.getWords():
            if not words.has_key(word):
                continue
            aWord = words[word]
            counter = doc.getWordCounter(aWord.getWord())
            tfidfs[aWord.getIndex()] = (1+math.log10(counter))*wordsIdfs[word]
        
        #wektor tfidfs do grupowania
        vectors.append(numpy.array(tfidfs))
    
    del wordsIdfs
    del words
    gc.collect()
    
    print "finished"
    print
    

def clustering():
    
    global documents
    global groups
    global nGroups
    global vectors
    
    print "Clustering..."
    
    #vectors = [numpy.array(doc.getTfidfVector()) for doc in documents]
    #clusterer = cluster.KMeansClusterer(5, euclidean_distance, repeats=10, normalise=True)
    
    if len(documents) < nGroups:
        nGroups = len(documents)/3
    
#    clusterer = cluster.GAAClusterer(nGroups)
    clusterer = cluster.KMeansClusterer(nGroups, euclidean_distance, repeats=1, normalise=False)
    clusters = clusterer.cluster(vectors, True)
    
    #przypisanie do grup indeksow dokumentow
    i = 0
    for doc in documents:
        #print doc.getPath() + ": " + str(clusters[i])
        if groups.has_key(clusters[i]):
            aGroup = groups[clusters[i]]
        else:
            aGroup = Group()
            groups[clusters[i]] = aGroup
        aGroup.addDocument(i)
        i += 1
    
    print "finished"
    print


# nadaje nazwy grupom
def setGroupTitles():
    global groups
    global documents
    global wordsSortedArray
    
    print "Naming groups..."
   
    for g in groups:
        wrds = []
        aGroup = groups[g]
        wordsCounter = {}
        
        # zliczanie najistotniejszych slow z kazdego dokumentu grupy
        for index in aGroup.getDocuments():
            doc = documents[index]
            
            # words - wszystkie istotne slowa do wziecia pod uwage
            wrds += doc.getTitleWords() + doc.getMetadataWords()
        
        # zliczanie wystapienia slow istotnych
        for w in wrds:
            if not wordsCounter.has_key(w):
                wordsCounter[w] = 0
            wordsCounter[w] += 1
        
        # posortowanie po ilosci wystapien
        items = wordsCounter.items();
        items.sort(reverseSortfuncValues)
        
        i = 0
        while i < 2 and len(items) > i:
            aGroup.addNameWord(items[i][0])
            i += 1
            
        print "group: " + aGroup.getName(), "    size: " + str(aGroup.getSize())
        
    print "finisehd"
    print
        

def reverseSortfuncValues(x, y):
    '''Comparison function ordering (key, value) pairs by value, descending.

    Returns -1/0/1 exactly like the Python 2 builtin cmp(y[1], x[1]),
    but is written with comparison arithmetic so the helper also works
    under Python 3 (e.g. via functools.cmp_to_key).
    '''
    return (y[1] > x[1]) - (y[1] < x[1])


def copyFiles():
    global docsPath
    global groups
    
    print "Copying files..."
    if isdir("groups"):
        rmtree("groups")
    makedirs("groups")
    i=0
    for group in groups.values():
        i+=1
        groupdir = "groups\\"+ str(i) + "_" + group.getName()
        makedirs(groupdir)
        documents_index = group.getDocuments()
        for index in documents_index:
            filename = documents[index].getPath().rsplit("\\", 1)[1]
            copyfile(documents[index].getPath(), groupdir+"\\"+filename)
            

def writeWords():    
    
    global words
    sortedArray = []
    
    
    for k, v in words.iteritems():
        if   v.getCount() <= 10:
            continue
        if v.getCount() < 100:
            sortedArray.append("00" + str(v.getCount())+" w: " + k)
        elif v.getCount() < 1000:
            sortedArray.append("0" + str(v.getCount())+" w: " + k)
        else:
            sortedArray.append(str(v.getCount())+" w: " + k)
#        print "word: " + k + " c: " + str(v.getCount())

    sortedArray.sort()
    sortedArray.reverse()
    for s in sortedArray:
        print s
    
    for word in words:
        aWord=words[word]
        print "Word: "+aWord.getWord()+",        count: "+str(aWord.getCount())+",        docs: "+str(aWord.getDocumentsContainingCount())



def printStats():
    wordCountVec = []                   # wektor ilosci wystapien slow we wszystkich dokumentach
    wordDocumentsContainingCount = []   # wektor ilosci dokumentow, w ktorych wystepuje slowo
    
    for word in words.values():
        wordCountVec.append(word.getCount())
        wordDocumentsContainingCount.append(word.getDocumentsContainingCount())
    
    wordsNum = len(words)
    print ""
    print "Ilosc slow: "+str(wordsNum)
    print ""
    
    # Srednia ilosc wystapien slow (w ogole)
    meanWordCount = mean(wordCountVec)
    stdWordCount = std(wordCountVec)
    varWordCount = var(wordCountVec)
    minWordCount = amin(wordCountVec)
    maxWordCount = amax(wordCountVec)
    print "Srednia ilosc wystapien slowa: "+str(meanWordCount)
    print "Standardowe odchylenie: "+str(stdWordCount)
    print "Wariancja: "+str(varWordCount)
    print "Minimum: "+str(minWordCount)
    print "Maximum: "+str(maxWordCount)
    
    print ""
    # Srednia ilosc dokumentow, w ktorych wystepuje slowo
    meanWordDocumentsContaining = mean(wordDocumentsContainingCount)
    stdWordDocumentsContaining = std(wordDocumentsContainingCount)
    varWordDocumentsContaining = var(wordDocumentsContainingCount)
    minWordDocumentsContaining = min(wordDocumentsContainingCount)
    maxWordDocumentsContaining = max(wordDocumentsContainingCount)
    print "Srednia ilosc dokumentow, w ktorych wystepuje pojedyncze slowo: "+str(meanWordDocumentsContaining)
    print "Standardowe odchylenie: "+str(stdWordDocumentsContaining)
    print "Wariancja: "+str(varWordDocumentsContaining)
    print "Minimum: "+str(minWordDocumentsContaining)
    print "Maximum: "+str(maxWordDocumentsContaining)
    
    oneOccurenceCount=0;
    lessThanFiveOccurencesCount=0
    for count in wordDocumentsContainingCount:
        if count == 1:
            oneOccurenceCount+=1
        if count < 5:
            lessThanFiveOccurencesCount+=1
    print ""
    print "Ilosc slow pojawiajacych sie tylko w 1 dokumencie: "+str(oneOccurenceCount)
    print "Ilosc slow pojawiajacych sie w mniej niz 5 dok.: "+str(lessThanFiveOccurencesCount)
    
    


''' -------------------------------------------------- '''
if __name__ == '__main__':
    # Pipeline: validate arguments, read & index the documents, filter
    # the word set, build tf-idf vectors, cluster them, name the
    # resulting groups, and copy the files into per-group directories.
    checkArgs()
    readDocuments()
    
    #printStats()
    
    cleanWords()
    
    computeTfidfs()
    clustering()
    setGroupTitles()
    copyFiles()
    print "finished"
