# coding:utf-8
import pandas as pd
import xlwt
import re
import thulac
import pickle
import operator
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer



def clean_data(inf, outf):
    """Strip HTML markup and leading article markers from the 'fulltext' column.

    Reads the Excel file *inf*, cleans every row's ``fulltext`` cell and
    writes the frame (all other columns unchanged, last column replaced by
    the cleaned text) to the Excel file *outf*.
    """
    # Raw strings so backslash escapes reach the regex engine untouched.
    re_cdata = re.compile(r'//<!\[CDATA\[[^>]*//\]\]>', re.I)                 # CDATA sections
    re_script = re.compile(r'<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I)  # <script> blocks
    re_style = re.compile(r'<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I)     # <style> blocks
    re_h = re.compile(r'</?\w+[^>]*>')        # generic HTML tags
    re_comment = re.compile(r'<!--[^>]*-->')  # HTML comments

    df = pd.read_excel(inf)
    full_txts = list(df['fulltext'])
    result = {}
    for i, txt in enumerate(full_txts):
        # NaN != NaN: pandas represents empty cells as NaN floats.
        if txt != txt:
            txt = ""
        # Remove literal markup fragments observed in the source data.
        txt = (txt.replace('<BR>', '').replace('<br>', '')
                  .replace('<A class=alink onmouseover=AJI(17010,0) href="javascript:SLC(17010,0)">', '')
                  .replace('　　', ''))
        txt = (txt.replace('<p>', '').replace('</p>', '').replace('<br/>', '')
                  .replace('<!--bdyh-->', '').replace('&nbsp;', ''))
        txt = re_cdata.sub('', txt)
        txt = re_script.sub('', txt)
        txt = re_style.sub('', txt)
        txt = re_h.sub('', txt)
        txt = re_comment.sub('', txt)
        txt = txt.strip()
        # Prefer cutting at the closing anchor tag; otherwise cut at a short
        # leading article marker ('…条').
        start = txt.find('</A>')
        if start != -1:
            txt = txt[start + 4:].strip()
        else:
            start = txt.find('条')
            # Also true when '条' is absent (find() == -1), in which case the
            # slice txt[0:] keeps the whole text.
            if start < 7:
                txt = txt[start + 1:].strip()
        # Drop short leading '…款' / '…之' markers as well.
        if 0 <= txt.find('款') < 4:
            txt = txt[txt.find('款') + 2:].strip()
        if 0 <= txt.find('之') < 3:
            txt = txt[txt.find('之') + 2:].strip()
        txt = txt.replace('</A>', '')
        # A stray punctuation char can survive the prefix removal above.
        if txt != '' and (txt[0] == '）' or txt[0] == '、'):
            txt = txt[1:]
        result[i] = txt.strip()
        print(i, txt)

    # Rebuild the rows: every column copied as-is except the last, which is
    # replaced by the cleaned text.
    output = []
    for i in range(df.shape[0]):
        row = [df.iloc[i, j] for j in range(df.shape[1] - 1)]
        row.append(result[i])
        output.append(row)
        print(i, 'concating...')
    out_df = pd.DataFrame(output, columns=list(df.columns.values))
    out_df.to_excel(outf, 'Sheet1', index=False)

def clean_law(inf, outf):
    """Join the wrapped lines of a statute text file into one line per article.

    Reads *inf* and writes *outf*, where:
      * a line consisting of a short article header ending in '条' is glued
        to the following line with a space,
      * a line ending in '：' or '；', or whose next line does not start a
        new article ('第'), is glued to the following line directly,
      * any other line is terminated with a newline.

    Fixes over the previous version: the last input line is no longer
    dropped, blank lines no longer raise IndexError, and the header check
    uses the stripped line consistently (the old code indexed the raw line
    with a position computed on the stripped line).
    """
    with open(inf, encoding='utf-8') as f:
        lines = f.readlines()
    with open(outf, 'w', encoding='utf-8') as out:
        for i, raw in enumerate(lines):
            line = raw.strip()
            if not line:
                # Skip blank lines: indexing line[-1] below would fail.
                continue
            pos = line.find('条')
            if 0 < pos < 10 and pos == len(line) - 1:
                # Article header alone on its line ("第X条"): join with the
                # body that follows using a space.
                out.write(line + ' ')
            elif (line[-1] == '：' or line[-1] == '；'
                  or (i + 1 < len(lines)
                      and lines[i + 1].strip()
                      and lines[i + 1].strip()[0] != '第')):
                # Continuation: the article text runs on into the next line.
                out.write(line)
            else:
                out.write(line + '\n')
            print(i)

def cut_words(inf, outf):
    """Segment the text in column 12 of the Excel file *inf* with THULAC.

    Writes one line per row to *outf* as ``<col0 id>\t<segmented text>``.
    Full-width punctuation is removed after segmentation; empty (NaN) cells
    produce an empty text field.
    """
    thu = thulac.thulac(seg_only=True, user_dict='./datasets/user_dict.txt')
    df = pd.read_excel(inf)
    punctuation = """〈〉.《！？｡＂。，＃＄"％＆＇（）＊＋－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘'‛“”„‟…‧﹏"""
    # Compile once; the same pattern is applied to every row.
    re_punctuation = re.compile("[{}]+".format(punctuation))
    # Context manager so the output file is closed even if thu.cut raises
    # (the old code leaked the handle on error).
    with open(outf, 'w', encoding='utf-8') as out:
        for i in range(df.shape[0]):
            cell = df.iloc[i, 12]
            if cell != cell:  # NaN check: NaN is the only value != itself
                txt = ''
            else:
                txt = thu.cut(cell, text=True)
                txt = re_punctuation.sub('', txt)
            out.write(str(df.iloc[i, 0]) + '\t' + txt + '\n')
            print(i)

def cut_law(inf, outf):
    """Segment statute articles from *inf* with THULAC.

    Input lines look like ``第X条    <article text>`` (four-space separated).
    Each such line is written to *outf* as ``第X条\t<segmented text>`` with
    full-width punctuation removed; all other lines are skipped.
    """
    thu = thulac.thulac(seg_only=True, user_dict='./datasets/user_dict.txt')
    punctuation = """〈〉.《！？｡＂。，＃＄"％＆＇（）＊＋－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘'‛“”„‟…‧﹏"""
    re_punctuation = re.compile("[{}]+".format(punctuation))
    # Both files managed by 'with' so they are closed on any exit path.
    with open(outf, 'w', encoding='utf-8') as out, \
         open(inf, encoding='utf-8') as f:
        for i, line in enumerate(f):
            parts = line.split('    ')  # split once, not three times
            head = parts[0].strip()
            # Guard 'head' being empty: the old head[-1] crashed on blank lines.
            if head and head[-1] == '条':
                txt = thu.cut(parts[1], text=True)
                txt = re_punctuation.sub('', txt)
                out.write(head + '\t' + txt.strip() + '\n')
                print(i)
            else:
                print('pass')


def caculate_tfidf(inf, outf):
    """Compute per-document TF-IDF weights and pickle them to *outf*.

    *inf* holds one document per line as ``<id>\t<tokens>``; lines without a
    tab still contribute an (empty) document so indices stay aligned with
    line numbers.  For every document a list of (word, weight) pairs sorted
    by ascending weight is built; the list of all such lists is pickled.
    """
    document = []
    with open(inf, encoding='utf-8') as f:
        for line in f:
            parts = line.strip().split('\t')
            document.append(parts[1] if len(parts) > 1 else '')
    vectorizer = CountVectorizer()
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(vectorizer.fit_transform(document))
    # NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
    # favour of get_feature_names_out() — confirm the installed version.
    words = vectorizer.get_feature_names()
    weight = tfidf.toarray()
    result = []
    for i in range(len(document)):
        print('第', i, '条原文本:', document[i])
        m = {}
        for j in range(len(words)):
            if weight[i][j] != 0:
                print(words[j], weight[i][j])
                m[words[j]] = weight[i][j]
        # Sort this document's (word, weight) pairs by ascending weight.
        m = sorted(m.items(), key=operator.itemgetter(1))
        result.append(m)
    # Open the pickle output with 'with': the old code never closed it.
    with open(outf, 'wb') as out:
        pickle.dump(result, out)

def create_pvdm_corpus(data_inf, law_inf, outf):
    """Merge case texts and statute texts into one labelled corpus file.

    Both inputs hold ``<id>\t<text>`` lines.  Their text fields are
    concatenated (cases first, then laws) and written to *outf* as
    ``__label__<index> <text>`` lines, one per document.
    """
    def _texts(path):
        # Second tab-separated field of every line, whitespace-trimmed.
        with open(path, encoding='utf-8') as handle:
            return [row.split('\t')[1].strip() for row in handle]

    corpus = _texts(data_inf) + _texts(law_inf)
    with open(outf, 'w', encoding='utf-8') as out:
        for idx, text in enumerate(corpus):
            out.write('__label__' + str(idx) + ' ' + text + '\n')
            out.flush()
            print(idx)

def create_corpus_by_xls(inf, outf):
    """Build a fastText-style labelled corpus from an Excel law table.

    For every row of *inf*: label from column 0, text from column 2.  The
    text is stripped of carriage-return artifacts and bracketed headings,
    segmented with THULAC, cleared of full-width punctuation, and written to
    *outf* as ``__label__<id> <tokens>``.
    """
    thu = thulac.thulac(seg_only=True)
    df = pd.read_excel(inf)
    punctuation = """〈〉.《！？｡＂。，＃＄"％＆＇（）＊＋－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘'‛“”„‟…‧﹏"""
    re_punctuation = re.compile("[{}]+".format(punctuation))
    # 'with' guarantees the output file is closed; the old code also carried
    # dead debug statements (i == 785 breakpoint stub) that are removed here.
    with open(outf, 'w', encoding='utf-8') as out:
        for i in range(df.shape[0]):
            txt = (df.iloc[i, 2].strip()
                   .replace('_x000D_', '').replace('\n', '').replace('\r', ''))
            if txt.find('：') != -1:
                # NOTE(review): this deletes every occurrence of the single
                # character that follows the first '：', not the prefix up to
                # it — looks suspicious but is kept as-is; confirm intent.
                txt = txt.replace(txt[txt.find('：') + 1], '')
            if txt.find('】') != -1:
                # Drop the bracketed heading ("【…】") before the body text.
                txt = txt[txt.find('】') + 1:]
            txt = thu.cut(txt, text=True)
            txt = re_punctuation.sub('', txt)
            out.write('__label__' + str(df.iloc[i, 0]).strip() + ' ' + txt + '\n')
            print(i)





if __name__ == '__main__':
    # Pipeline steps — run one at a time by uncommenting:
    #clean_data('./datasets/刑事_司法解释.xlsx','./datasets/clean_data_exlaination.xlsx')
    #clean_law('./datasets/xingshisusong_law.txt','./datasets/clean_xingshisusong_law.txt')
    #cut_words('./datasets/clean_data_exlaination.xlsx','./datasets/cut_data.txt')
    #cut_law('./datasets/clean_xing_law.txt','./datasets/cut_xing_law.txt')
    #caculate_tfidf('./datasets/cut_data.txt','./datasets/data_tfidf.pkl')
    #caculate_tfidf('./datasets/cut_xing_law.txt', './datasets/xing_law_tfidf.pkl')
    #create_pvdm_corpus('./datasets/cut_data.txt','./datasets/cut_xing_law.txt','./datasets/pvdm_corpus.txt')
    #create_corpus_by_xls('./datasets/law.xlsx','./datasets/corpus_law.txt')

    # Dump only the text field of the segmented data to a plain file.
    # encoding='utf-8' is now given on the input too: the data is Chinese
    # text and the platform default codec may fail to decode it.
    with open('./datasets/cut_data.txt', encoding='utf-8') as f, \
         open('./datasets/test.txt', 'w', encoding='utf-8') as out:
        for i, line in enumerate(f):
            out.write(line.split('\t')[1].strip() + '\n')
            print(i)
