#encoding: utf-8

import re
import os
import sys
import pickle
import jieba
import torch
import time
from datetime import timedelta
import chardet
import sklearn
from tqdm import tqdm
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
sys.path.append("../")
from config import *

class Sougou:
    '''
    Split the SogouCS.reduced corpus into one text file per article,
    grouped into per-category sub-directories of output_dir.
    '''
    def __init__(self, raw_dir, output_dir):
        self.raw_dir = raw_dir          # directory holding the raw dump files
        self.output_dir = output_dir    # root for the per-category output
        # Files directly inside raw_dir (first level of the walk only).
        self.file = next(os.walk(self.raw_dir))[2]

    def split_language(self):
        '''Extract (url, title, body) triples from every raw file and save
        each body under output_dir/<category>/<title>.txt.'''
        os.makedirs(self.output_dir, exist_ok=True)

        for file in self.file:
            # Raw dumps are UTF-8 encoded; close the handle promptly.
            with open(self.raw_dir + file, 'rb') as fh:
                text = fh.read().decode("utf-8")
            # Match url / title / body of every record.
            content = re.findall(
                '<url>(.*?)</url>.*?<contenttitle>(.*?)</contenttitle>.*?<content>(.*?)</content>',
                text, re.S)

            # Save each article body under its category directory.
            for url_title, content_title, news_text in content:
                # Category is the sohu sub-domain of the article url.
                # BUG FIX: the original pattern used an unescaped '.' and
                # indexed [0] unconditionally, raising IndexError on urls
                # that do not match.
                match = re.findall(r'http://(.*?)\.sohu\.com', url_title)
                if not match:
                    continue
                title = match[0]
                # Keep only articles with a category, a title and a
                # non-trivial body (> 30 chars).
                if len(title) > 0 and len(content_title) > 0 and len(news_text) > 30:
                    print('【{}】【{}】【{}】'.format(file, title, content_title))
                    # Target directory for this category.
                    save_config = self.output_dir + title
                    os.makedirs(save_config, exist_ok=True)
                    # Save the article body.
                    with open('{}/{}.txt'.format(save_config, content_title),
                              'w', encoding='utf-8') as f:
                        f.write(news_text)


'''
处理流程：
1. 遍历每个文件夹中的子文件夹，获取内容与标签
2. 遍历子文件夹，读取文件内容
3. 对文件内容进行分词，去除停用词处理
4. 将词转换为id
'''
class SougouDataset:
    """Builds a text-classification dataset from the SogouCS corpus.

    Pipeline (see module comment above): readAllFile / readAllFile_CS
    -> split_remove -> make_vocab -> make_dataset -> getDataset.
    Intermediate results are pickled into ``processed_dir``.
    """

    def __init__(self, sougouCS_dir):
        self.sougouCS_dir = sougouCS_dir
        self.name = 'Sougo'
        self.trimmed = False  # whether the vocabulary was filtered by word frequency
        # Output location for the intermediate pickles, shared by all steps.
        global processed_dir
        if "reduce" in sougouCS_dir:
            processed_dir = base_dir / "output/cs_processed"

        # Special tokens come first so their ids match the config constants.
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS", UNK_token: 'UNK'}
        self.word2index = {word: idx for idx, word in self.index2word.items()}
        self.word2count = {word: 1 for word in self.word2index}
        self.num_words = 4  # counts PAD, SOS, EOS and UNK
        self.stop_words = get_stop_words(baidu_stopword)

    def readAllFile_CS(self):
        """Read every category file (one "<label>\\t<text>" record per line)
        and pickle the collected [label, text] pairs."""
        categories = os.listdir(self.sougouCS_dir)
        all_seq = []
        for category in categories:
            cat_file = os.path.join(self.sougouCS_dir, category)
            for line in _read_file(cat_file):
                cate, content = _unpack_line(line)
                if len(content) == 0:
                    continue
                # Strip a Sogou-specific private-use character, then
                # normalize full-width characters to half-width.
                content = content.replace("\ue40c", '')
                content = ''.join(strQ2B(char) for char in content)
                all_seq.append([cate, content])
        # NOTE(review): this variant writes to datasetDir while every other
        # step reads from processed_dir -- confirm that is intended.
        pkl_save(all_seq, datasetDir / "all_seq&lable.pkl")
        print("{} seq".format(len(all_seq)))

    def readAllFile(self):
        """Walk every '<prefix>-<category>' sub-directory, read each article
        file, and pickle the collected [label, content] pairs."""
        sub_dirs = os.listdir(self.sougouCS_dir)
        all_seq = []
        # Keep only directories whose category part has a known label.
        sub_dirs = [d for d in sub_dirs if d.split('-')[1] in label_dict.keys()]
        print("=" * 10)
        print("\n")
        print(sub_dirs, len(sub_dirs))
        for dir in sub_dirs:
            lable = label_dict[dir.split('-')[1]]  # numeric label for this category
            sub_dir_path = os.path.join(self.sougouCS_dir, dir)
            for i, file in enumerate(os.listdir(sub_dir_path)):
                sub_file_path = os.path.join(sub_dir_path, file)
                if i % 500 == 0:  # progress trace
                    print("-" * 7, i)
                    print(sub_file_path)
                # BUG FIX: the original printed the first file's content and
                # called sys.exit(0), aborting the whole pipeline; it also
                # leaked the handle on that exit path.
                with open(sub_file_path, 'r', encoding='utf-8') as temp_f:
                    content = temp_f.read()
                all_seq.append([lable, content])
        pkl_save(all_seq, processed_dir / "all_seq&lable.pkl")
        print("{} seq".format(len(all_seq)))

    def split_remove(self):
        """Segment every sentence with jieba and drop stop words; stores the
        result as split_all_sentence.pkl."""
        src = processed_dir / "all_seq&lable.pkl"
        if not os.path.exists(src):
            print("对应文件不存在")
            sys.exit(1)
        print(src)
        self.all_seq = pkl_load(src)
        print("数据记录：", len(self.all_seq))
        for seq in self.all_seq:
            # Drop literal \uXXXX escape sequences left in the raw text
            # (repr() exposes them as 6-char substrings).
            content = re.sub(r'\\u.{4}', '', seq[1].__repr__())
            tokens = jieba.cut(content, cut_all=False)
            kept = [word for word in tokens if word not in self.stop_words]
            seq[1] = ' '.join(kept)
        pkl_save(data=self.all_seq, pkl_path=processed_dir / 'split_all_sentence.pkl')

    def make_vocab(self, min_count=1):
        """Build word2index/index2word from the segmented corpus and pickle
        both tables.

        min_count: minimum frequency kept when self.trimmed is True.
        """
        src = processed_dir / "split_all_sentence.pkl"
        if not os.path.exists(src):
            print("分词&去除停用词后的文件不存在")
            sys.exit(2)
        self.all_seq_split = pkl_load(src)

        for seq in self.all_seq_split:
            # BUG FIX: records produced by readAllFile* have two fields, but
            # the original unpacked three (label, content, _) and would raise
            # ValueError; index access works for either record shape.
            content = seq[1]
            for word in content.split(" "):
                self.addWord(word)
        print("共有{}个词".format(self.num_words - 3))

        if self.trimmed:
            self.trim_word(min_count)
            print("过滤后的词数：{}".format(self.num_words - 3))

        pkl_save(data=self.word2index, pkl_path=processed_dir / "word2index.pkl")
        pkl_save(data=self.index2word, pkl_path=processed_dir / 'index2word.pkl')

    def addWord(self, word):
        """Add one token to the vocabulary, or bump its frequency."""
        if word not in self.word2index:
            self.word2index[word] = self.num_words   # assign the next free id
            self.word2count[word] = 1                # first occurrence
            self.index2word[self.num_words] = word   # reverse mapping
            self.num_words += 1
        else:
            self.word2count[word] += 1               # seen before: count it

    def trim_word(self, min_count):
        """Drop words whose frequency is below min_count and rebuild the
        vocabulary tables."""
        if self.trimmed:
            return
        self.trimmed = True
        keep_words = [w for w, c in self.word2count.items() if c >= min_count]
        # Ratio of kept words to the full vocabulary.
        print('keep_words {} / {} = {:.4f}'.format(
            len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
        ))
        # Reinitialize the tables. BUG FIX: keep all four special tokens (the
        # original dropped UNK and left word2index empty, so UNK_token would
        # alias a real word after trimming).
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS", UNK_token: 'UNK'}
        self.word2index = {w: i for i, w in self.index2word.items()}
        self.word2count = {w: 1 for w in self.word2index}
        self.num_words = 4
        for word in keep_words:
            self.addWord(word)

    def make_dataset(self, pad_size=100):
        """Convert each segmented sentence into a fixed-length id sequence.

        pad_size: target token length; shorter lines are padded with PAD,
        longer ones truncated. Unknown words fall back to UNK_token.
        """
        src = processed_dir / "split_all_sentence.pkl"
        if not os.path.exists(src):
            print("分词&去除停用词后的文件不存在")
            sys.exit(3)
        contents = []
        self.all_seq_split = pkl_load(src)
        # Reload the vocabulary when only the special tokens are present.
        if len(self.word2index) < 6:
            self.word2index = pkl_load(processed_dir / "word2index.pkl")
            print("读取词表成功")

        for seq in self.all_seq_split:
            # BUG FIX: index access instead of a 3-way unpack (see make_vocab).
            label, content = seq[0], seq[1]
            tokens = content.split(" ")
            if len(tokens) < pad_size:
                tokens = tokens + [PAD] * (pad_size - len(tokens))
            else:
                tokens = tokens[:pad_size]
            # word -> id, defaulting to UNK for out-of-vocabulary tokens
            words_line = [self.word2index.get(word, UNK_token) for word in tokens]
            contents.append((label, np.array(words_line)))

        pkl_save(contents, pkl_path=processed_dir / "all_seq_vectors.pkl")
        print("保存成功，共{}条数据".format(len(contents)))

    def getDataset(self):
        """Load the vectorized corpus and return
        (train, dev, test) splits plus both vocabulary tables.

        Returns None (after printing an error) when the vectors pickle is
        missing.
        """
        if not os.path.exists(processed_dir / "all_seq_vectors.pkl"):
            print("ERROR：all_seq_vectors.pkl 不存在")
            return
        self.word2index = pkl_load(processed_dir / 'word2index.pkl')
        self.index2word = pkl_load(processed_dir / 'index2word.pkl')
        contents = pkl_load(processed_dir / "all_seq_vectors.pkl")
        df_dataset = pd.DataFrame(contents)
        print(df_dataset.info())
        print(df_dataset.head(2))
        label = df_dataset[0].values
        datset = df_dataset[1].values
        print(type(label), type(datset))
        print('查看标签', set(label))
        # 80/10/10 split: 20% held out, then halved into dev and test.
        train_X, testDev_X, train_y, testDev_Y = train_test_split(
            datset, label, test_size=0.2, random_state=0)
        dev_X, test_X, dev_Y, test_Y = train_test_split(
            testDev_X, testDev_Y, test_size=0.5, random_state=0)
        return (train_X, train_y), (dev_X, dev_Y), (test_X, test_Y), self.word2index, self.index2word

def split(raw_path, output_path):
    """Split a huge corpus file on '</doc>' into chunks of 5000 documents.

    raw_path: source file (UTF-8).
    output_path: prefix for the numbered output files ('<prefix><n>.txt').
    """
    end = '</doc>'
    with open(raw_path, 'r', encoding='utf-8') as fh:
        paraList = fh.read().split(end)  # slice the text on the closing tag

    fileWriter = open('{}0.txt'.format(output_path), 'a', encoding='utf8')
    try:
        for paraIndex, para in enumerate(paraList):
            fileWriter.write(para)
            # Re-append the delimiter removed by split(), except after the
            # final fragment. BUG FIX: the original compared against
            # len(paraList), which an index never equals, so the tag was
            # wrongly appended after the last fragment too.
            if paraIndex != len(paraList) - 1:
                fileWriter.write(end)
            if (paraIndex + 1) % 5000 == 0:
                # 5000 slices per output file: rotate to the next one.
                fileWriter.close()
                # BUG FIX: integer division so the filename is '1.txt',
                # not '1.0.txt' as float division produced.
                fileWriter = open(output_path + str((paraIndex + 1) // 5000) + '.txt',
                                  'a', encoding='utf8')
    finally:
        fileWriter.close()
    print('finished')
def _unpack_line(line):
    category, content = line.strip('\n').split('\t')
    return category, content
def strQ2B(ustring):
    """Convert full-width characters in a string to their half-width forms."""
    converted = []
    for ch in ustring:
        code = ord(ch)
        if code == 12288:
            # Full-width space maps directly to an ASCII space.
            code = 32
        elif 65281 <= code <= 65374:
            # Other full-width characters are a fixed offset from ASCII.
            code -= 65248
        converted.append(chr(code))
    return ''.join(converted)
class DatasetIterater(object):
    """Mini-batch iterator over a (features, labels) pair of parallel
    sequences.

    batches: two aligned sequences (X, y) of equal length.
    batch_size: number of samples per batch.
    device: torch device the tensors are moved to.
    """

    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches[0]) // batch_size
        # residue: True when a final, smaller batch remains.
        # BUG FIX: the original tested len % n_batches, which is wrong
        # (e.g. 6 samples / batch 4) and raises ZeroDivisionError when the
        # dataset is smaller than one batch.
        self.residue = len(batches[0]) % batch_size != 0
        self.index = 0
        self.device = device

    def _slice(self, start, end):
        # BUG FIX: slice the two parallel sequences, not the (X, y) tuple
        # itself as the original did (which returned the whole dataset for
        # the first batch and empty tuples afterwards).
        return self.batches[0][start:end], self.batches[1][start:end]

    def _to_tensor(self, datas):
        """Convert one (X, y) slice to LongTensors on the target device."""
        x = torch.LongTensor([a for a in datas[0]]).to(self.device)
        y = torch.LongTensor([a for a in datas[1]]).to(self.device)
        return x, y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # Final short batch.
            start = self.index * self.batch_size
            batch = self._slice(start, len(self.batches[0]))
            self.index += 1
            return self._to_tensor(batch)
        elif self.index >= self.n_batches:
            # Exhausted: reset so the iterator can be reused.
            self.index = 0
            raise StopIteration
        else:
            start = self.index * self.batch_size
            batch = self._slice(start, start + self.batch_size)
            self.index += 1
            return self._to_tensor(batch)

    def __iter__(self):
        return self

    def __len__(self):
        # The short final batch counts as one extra batch.
        return self.n_batches + 1 if self.residue else self.n_batches
def _read_file(txt_file):
    """读取txt文件"""
    lines = open(txt_file, 'r',encoding='utf-8').readlines()
    # try: 
    #     lines = open(txt_file, 'r',encoding='utf-8').readlines()
    # except Exception as e:
    #     print(e)
    #     print("当前文件")
    #     print(txt_file)
    #     sys.exit(0)
    return lines
def build_iterator(dataset, config):
    """Wrap `dataset` in a DatasetIterater driven by the config's
    batch_size and device."""
    # Renamed away from `iter`, which shadowed the builtin.
    return DatasetIterater(dataset, config.batch_size, config.device)
def get_time_dif(start_time):
    """Return the wall-clock time elapsed since start_time, rounded to
    whole seconds, as a timedelta."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))

def pkl_save(data, pkl_path):
    """Serialize `data` to `pkl_path` with pickle."""
    with open(pkl_path, 'wb') as handle:
        pickle.dump(data, handle)
def pkl_load(f_path):
    """Deserialize and return the object stored at `f_path`."""
    with open(f_path, 'rb') as handle:
        data = pickle.load(handle)
    return data
def get_stop_words(f_path):
    """Load a stop-word list (one word per line, UTF-8) as a set."""
    # BUG FIX: split on any newline convention with splitlines() (the
    # original split only on '\r\n' and broke on Unix-newline files) and
    # close the handle via a context manager.
    with open(f_path, 'rb') as f:
        words = f.read().decode('utf-8').splitlines()
    return set(words)
if __name__ == '__main__':
    # Script entry point: drives the SougouDataset preprocessing pipeline.
    # Paths below are machine-specific — adjust before running.
    raw_dataPath = r"D:\迅雷下载\Compressed\news_tensite_xml.dat"
    output_path = "../output/files/"

    # TODO the steps below are runnable

    # sg = Sougou(raw_dir=r"/home/stu/LRR/cw-caps-net-classifer/utils/files/",output_dir="./output/sougouCS/")
    # sg.split_language()

    # TODO the statement below failed when tested on Dec 3
    # split(raw_dataPath,output_path)

    sougoCS_recude="/home/stu/Documents/dataset/sougo/sougoCS_reduce_cat"
    # TODO full pipeline for building the dataset
    sougou_dator = SougouDataset(sougouCS_dir=sougoCS_recude)
    # sougou_dator.readAllFile_CS()
    
    # sougou_dator.readAllFile() # read all raw files
    sougou_dator.split_remove() # tokenize and remove stop words
    sougou_dator.make_vocab() # build the word2index vocabulary
    sougou_dator.make_dataset() # build the id-vector dataset
    sougou_dator.getDataset()


    # # Read the tokenized, stop-word-filtered pickle and inspect it
    # all_data = pkl_load(processed_dir / 'split_all_sentence.pkl')
    # print(type(all_data),len(all_data))
    # for i,data in enumerate(all_data):
    #     if i == 2:
    #         break
    #     # with open('sogoucs_finish.txt')
    #     print(data[0],''.join(data[1].split(' ')))
    pass


