#!/usr/bin/env python
# coding=utf-8
# __author__ = 'Yunchao Ling'

import re
from simhash import Simhash, SimhashIndex
import nltk

def get_features(s):
    """Return overlapping 3-character shingles of *s* for Simhash.

    The text is lowercased and stripped of non-word characters first, so
    the shingles depend only on the letters/digits of the input.  Strings
    shorter than the shingle width yield a single (short) shingle.
    """
    width = 3
    cleaned = re.sub(r'[^\w]+', '', s.lower())
    n_grams = max(len(cleaned) - width + 1, 1)
    return [cleaned[start:start + width] for start in range(n_grams)]

def remove_stopwords(words, stopwords):
    """Return the items of *words* not contained in *stopwords*, order kept."""
    return [word for word in words if word not in stopwords]

def tokenize_stopwords_stem(line, stopwords):
    """Tokenize *line* with the tweet tokenizer, Snowball-stem each token,
    and drop tokens that appear in *stopwords*.

    Fix: the original built a fresh SnowballStemmer and TweetTokenizer on
    every call, although this function runs once per corpus line.  Both are
    stateless for our purposes, so create them once and cache them as
    function attributes.
    """
    if not hasattr(tokenize_stopwords_stem, "_snowball"):
        tokenize_stopwords_stem._snowball = nltk.stem.SnowballStemmer('english')
        tokenize_stopwords_stem._tokenizer = nltk.tokenize.TweetTokenizer()
    tokens = tokenize_stopwords_stem._tokenizer.tokenize(line)
    stems_snowball = [tokenize_stopwords_stem._snowball.stem(t) for t in tokens]
    return remove_stopwords(stems_snowball, stopwords)

def merge_token(tokenSet):
    """Concatenate every token in *tokenSet* into a single string.

    Fix: the original used ``result += token`` in a loop, which is
    quadratic in the total length on interpreters without the in-place
    concatenation optimization; ``str.join`` is a single linear pass.
    """
    return "".join(tokenSet)

def get_features2(line, stopwords):
    """Tokenize and stem *line*, drop *stopwords*, then shingle the
    concatenation of the surviving stems (see ``get_features``)."""
    stems = tokenize_stopwords_stem(line, stopwords)
    return get_features(merge_token(stems))


def merge_sets(ht, datasetHead, datasetList):
    """Fold the member lists of every id in *datasetList* into the list of
    *datasetHead*, removing the merged ids from *ht*.

    Membership is deduplicated through a dict used as an ordered set (the
    same mechanism the original used), so each member id appears once in
    the resulting ``ht[datasetHead]`` list.  Mutates and returns *ht*.

    Fixes: the original shadowed the builtin ``set`` with a loop variable
    and copied the dedup-dict keys with a manual append loop.
    """
    seen = {}
    for member in ht[datasetHead]:
        seen[member] = 0
    for dataset in datasetList:
        for member in ht[dataset]:
            seen[member] = 0
        ht.pop(dataset)
    ht[datasetHead] = list(seen.keys())
    return ht

def check_sets(ht, datasetHead):
    """Transitively merge into ``ht[datasetHead]`` every dataset whose id
    appears in its member list, repeating until the list is closed
    (no member other than the head still owns its own entry in *ht*).

    Mutates *ht* in place via ``merge_sets``.

    Fix: ``ht.has_key(item)`` is Python-2-only (removed in Python 3) and
    unidiomatic; use the ``in`` operator instead.
    """
    tryagain = True
    while tryagain:
        tryagain = False
        merge_list = []
        for item in ht[datasetHead]:
            if item in ht and item != datasetHead:
                tryagain = True
                merge_list.append(item)
        merge_sets(ht, datasetHead, merge_list)

if __name__ == "__main__":
    stopwords = nltk.corpus.stopwords.words('english')

    # Maximum Hamming distance for the Simhash near-duplicate index.
    k_value = 5

    # Read the corpus, one document per line, keyed by 1-based line number.
    # Fix: use a context manager instead of manual open/close.
    data = {}
    with open("D:/data/shanghai_org.txt", "r") as infile:
        for count, line in enumerate(infile, start=1):
            data[count] = line.rstrip()

    objs = [(str(k), Simhash(get_features2(v, stopwords))) for k, v in data.items()]
    index = SimhashIndex(objs, k=k_value)
    # print(...) works identically on Python 2 (single expression) and 3.
    print(index.bucket_size())

    # For every document id, collect the ids of its near-duplicates
    # (the query document itself is included by get_near_dups).
    ht = {}
    for key in data.keys():
        ss = Simhash(get_features2(data[key], stopwords))
        ht[int(key)] = [int(item) for item in index.get_near_dups(ss)]

    # Transitively merge overlapping near-duplicate sets.  check_sets pops
    # merged ids out of ht, so iterate over a snapshot of the keys and
    # skip ids that were already absorbed into an earlier cluster.
    # Fixes: ht.has_key() is Python-2-only; iterating ht.keys() directly
    # while mutating ht would raise RuntimeError on Python 3.
    for key in list(ht.keys()):
        if key in ht:
            check_sets(ht, key)

    # Write one .stat line per cluster head and one .tsv line per member.
    # Fix: context managers guarantee close/flush; the per-write flush()
    # calls were redundant and slow.
    with open("D:/data/shanghai_classification_new_k" + str(k_value) + ".stat", "w") as outfile1, \
         open("D:/data/shanghai_classification_new_k" + str(k_value) + ".tsv", "w") as outfile2:
        for k, v in ht.items():
            outfile1.write(str(k) + "\t" + str(len(v)) + "\t" + str(v) + "\n")
            for item3 in v:
                outfile2.write(str(item3) + "\t" + data[item3] + "\t" + str(k) + "\n")



