import collections
import csv
import re

import pandas as pd

import nltk
from nltk import *



# Contraction-expansion ("simple form transform") helpers follow.




# Patterns to find the 's following pronouns; re.I makes matching case-insensitive.
from nltk.corpus import stopwords

pat_is = re.compile("(it|he|she|that|this|there|here)(\'s)", re.I)
pat_s = re.compile("(?<=[a-zA-Z])\'s")
pat_s2 = re.compile("(?<=s)\'s?")
pat_not = re.compile("(?<=[a-zA-Z])n\'t")
# to find the abbreviation of would
pat_would = re.compile("(?<=[a-zA-Z])\'d")
# to find the abbreviation of will
pat_will = re.compile("(?<=[a-zA-Z])\'ll")
# to find the abbreviation of am
pat_am = re.compile("(?<=[I|i])\'m")
# to find the abbreviation of are
pat_are = re.compile("(?<=[a-zA-Z])\'re")
# to find the abbreviation of have
pat_ve = re.compile("(?<=[a-zA-Z])\'ve")
#wordnet
lmtzr = WordNetLemmatizer()


# Process the lyrics file.
def get_words():
    inf = pd.read_csv('E:/@python/Lyrics/smithsLyrics.csv')
    text = ''.join(inf['songLyrics'])
    pat_letter = re.compile(r'[^a-zA-Z \']+')
    new_text = pat_letter.sub(' ', text.strip().lower())

    new_text=replace_abbr(new_text)
    ts = nltk.word_tokenize(new_text)
    tag = nltk.pos_tag(ts)



    for i in range(0,len(tag)-1):
        pos=get_wordnet_pos(tag[i][1])
        if pos!=None:
             ts[i]=lmtzr.lemmatize(ts[i],pos)
        else:
            continue

    english_stopwords = stopwords.words('english')
    j = 0
    ts_new=[]
    for i in ts:
        a=True

        for j in range(0,len(english_stopwords)):

            if english_stopwords[j] == i:

                a=False

                break

        if a:
            ts_new.append(i)





    res = collections.Counter(ts_new)


    return res









# Replace abbreviations (contractions) with their expanded forms.
def replace_abbr(text):
    new_text=text

    new_text = pat_is.sub(r"\1 is", new_text)
    new_text = pat_s.sub("", new_text)
    new_text = pat_s2.sub("", new_text)
    new_text = pat_not.sub(" not", new_text)
    new_text = pat_would.sub(" would", new_text)
    new_text = pat_will.sub(" will", new_text)
    new_text = pat_am.sub(" am", new_text)
    new_text = pat_are.sub(" are", new_text)
    new_text = pat_ve.sub(" have", new_text)
    new_text = new_text.replace('\'', ' ')
    ts=nltk.word_tokenize(text)
    return new_text







# Vocabulary restore: map a Treebank POS tag to a WordNet POS for lemmatization.
def get_wordnet_pos(treebank_tag):

    if treebank_tag.startswith('V'):
        return "v"
    elif treebank_tag=='NNS':
        return "n"
    elif treebank_tag.startswith('N'):
        return "v"


def createDictCSV(fileName="liam.csv", dataDict={}):
    dataDict=get_words()

    with open(fileName, "wb") as csvFile:
        csvWriter = csv.writer(csvFile)
        for k, v in dataDict.iteritems():
            csvWriter.writerow([k, v])
        csvFile.close()

def printI(filename):
    csv_reader = csv.reader(open(filename))
    for row in csv_reader:
        print(row[0]+";"+row[1])














if __name__ == '__main__':

    createDictCSV()
    printI("liam.csv")

