#!/usr/bin/env python
# -*- coding: utf-8 -*-


# data conversion
# from
# <Label><tab><sentence>
# to
# <Label><tab><feature vector>
# the output can be inputted to svm training indirectly

import jieba.posseg as pseg
# POS-tag initials whose tokens are discarded by jieba_fenci
# (t=time, q=quantifier, p=preposition, u=auxiliary, e=interjection,
#  y=modal, o=onomatopoeia, w=punctuation, m=numeral).
# A set gives O(1) membership tests; only `in` lookups are performed on it.
flag_list = {'t', 'q', 'p', 'u', 'e', 'y', 'o', 'w', 'm'}


def jieba_fenci(all_the_text):
    """Tokenize *all_the_text* with jieba POS segmentation; return kept words.

    A token is kept when it is longer than one character, its POS-tag
    initial is not in flag_list, and its first character is a CJK
    ideograph (U+4E00..U+9FA5).
    """
    kept = []
    for w in pseg.cut(all_the_text):
        flag = w.flag
        word = w.word
        # BUG FIX: the lower bound was written as u'/u4e00' -- a literal
        # six-character string, not the ideograph u'\u4e00' -- so the CJK
        # range test was effectively broken.
        if (len(word) > 1 and len(flag) > 0
                and flag[0] not in flag_list
                and u'\u4e00' <= word[0] <= u'\u9fa5'):
            kept.append(word)
    return kept

# Maximum number of distinct N-gram features kept (dimension of the
# resulting SVM feature space).
DIM_OF_FEATURES = 260000
# Minimum length (in characters) for a 1-gram token to become a feature.
MIN_LEN_OF_FEATURE = 1


def get_label_and_sentence(line):
    """Split a "<label><sep><sentence>" line at the first tab or space.

    Returns (label, sentence). If the line contains no separator the whole
    line is returned as the label with an empty sentence.

    BUG FIX: the original raised NameError on an empty line (loop variable
    never bound) and silently dropped the last character when no separator
    was present.
    """
    for i, ch in enumerate(line):
        if ch == u'\t' or ch == u' ':
            return line[:i], line[i + 1:]
    return line, u''


def is_chinese(uchar):
    """Return True when the single character *uchar* is a basic CJK ideograph."""
    code_point = ord(uchar)
    return 0x4e00 <= code_point <= 0x9fa5


def filter(sentense):
    """Keep only CJK characters, collapsing each run of spaces to one space.

    Note: a non-Chinese, non-space character does not reset the
    "space already emitted" state (preserved from the original logic).

    BUG FIX: replaced the Python-2-only ``unicode(" ")`` call with the
    literal ``u' '`` (identical value, portable).
    """
    result = []
    have_space = False
    for ch in sentense:
        if is_chinese(ch):
            result.append(ch)
            have_space = False
        elif ch == u' ' and not have_space:
            result.append(ch)
            have_space = True
    return ''.join(result)


def get_features(sentense):
    """Return the deduplicated 1-gram and 2-gram features of a sentence.

    The sentence is tokenized by jieba_fenci; a 2-gram is the concatenation
    of two adjacent tokens, a 1-gram is any token whose length is at least
    MIN_LEN_OF_FEATURE.
    """
    features = []
    ss = jieba_fenci(sentense)
    # 2-gram over every pair of adjacent tokens.
    # BUG FIX: the loop previously stopped at len(ss)-1 and silently
    # dropped the final bigram (ss[-2] + ss[-1]).
    for i in range(1, len(ss)):
        features.append(ss[i - 1] + ss[i])
    # 1-gram
    for token in ss:
        if len(token) >= MIN_LEN_OF_FEATURE:
            features.append(token)
    # Deduplicate; ordering is irrelevant to the downstream id assignment,
    # which re-ranks features by corpus frequency.
    return list(set(features))

def extract_feature_by_dim(datefile, _DIM_OF_FEATURES=DIM_OF_FEATURES):
    """Build the feature -> (id, count) mapping from a training file.

    Reads "<label><sep><sentence>" lines from *datefile*, counts every
    N-gram feature over the whole corpus, and keeps the _DIM_OF_FEATURES
    most frequent features.

    Returns a dict mapping feature -> (id, count), where id is the 0-based
    rank of the feature by descending corpus frequency.
    """
    counts = {}
    # 'with' guarantees the handle is closed even if a line fails to decode
    # (the original leaked the file object on any exception).
    with open(datefile) as f:
        for line in f:
            line = line.decode("utf-8")
            label, sentense = get_label_and_sentence(line)
            for feature in get_features(sentense):
                counts[feature] = counts.get(feature, 0) + 1

    # Sort by descending count. The stable sort with key/reverse reproduces
    # the original cmp-based ordering exactly (ties keep prior order) and,
    # unlike sorted(..., cmp=...), also runs on Python 3.
    rank = sorted(counts.items(), key=lambda item: item[1], reverse=True)

    keys = {}
    for feature_id in range(0, min(_DIM_OF_FEATURES, len(rank))):
        feature, count = rank[feature_id]
        # keys[N-gram] = (id, count-of-N-gram)
        keys[feature] = (feature_id, count)
    return keys


def print_the_format_data_as_svm_style(datefile):
    """Print every line of *datefile* as an svm-light style sample.

    Output, one sample per line: "<label> <id>:1.0 <id>:1.0 ...", where the
    ids are the 1-based frequency ranks assigned by extract_feature_by_dim.
    Features outside the kept dimension are dropped.
    """
    keys = extract_feature_by_dim(datefile, DIM_OF_FEATURES)
    # 'with' closes the file even if decoding or tokenization raises.
    with open(datefile) as f:
        for line in f:
            line = line.decode("utf-8")
            label, sentense = get_label_and_sentence(line)

            ids = []
            for feature in get_features(sentense):
                feature_id, count = keys.get(feature, (-1, 0))
                if feature_id != -1:
                    ids.append(feature_id)
            # Each kept feature has a unique id, so a plain ascending sort
            # matches the original cmp-based ordering (and works on Py3).
            ids.sort()
            # BUG FIX: the original's print(label,) / print(x,) statements
            # printed tuple reprs under Python 2, corrupting the output;
            # assemble the whole sample line and print it once.
            print(" ".join([label] + ["%d:1.0" % (i + 1) for i in ids]))

if __name__ == "__main__":
    # Convert the training corpus into "label id:1.0 ..." lines on stdout,
    # suitable as (indirect) input for SVM training.
    print_the_format_data_as_svm_style("../dataset/svm/corpus.1.txt.train")