"""数据预处理"""
import codecs
import os
from pathlib import Path
import pickle
from gensim.models import KeyedVectors
from tqdm import tqdm_notebook, tqdm
from collections import defaultdict
import re
import jieba
import pyodbc
import numpy as np
import pandas as pd
import copy
# from word2vec import save_vocab
from data.verse.word2vec import save_vocab, save_w2v_model
from utils import WordEmbedding, add_pad_unk
from utils.util import ensure_dir


def read_accdb():
    """Read the raw poem corpus from the Access (.accdb) database.

    Returns a list of (author, content) rows drawn from three tables
    (Complete Tang Poems, Complete Song Poems, and the Tang supplement),
    ordered by author within each table — sift() relies on that grouping.

    NOTE(review): the database path is hard-coded to one machine; adjust
    `file_path` before running elsewhere.
    """
    # Absolute path of the Access database file.
    file_path = r"D:\XMU\1-4.大四\大四下\毕业设计\语料库\5in1唐宋诗词汇总.accdb"
    # (table name, content column) pairs — each table stores its poem text
    # in a differently named column.
    sources = [
        ("全唐诗jt", "verse_zl"),            # Complete Tang Poems
        ("全宋诗jt", "verse_zlk_BDJZ"),      # Complete Song Poems
        ("全唐诗补编_简_ok", "verse_zlk"),    # Tang supplement (simplified)
    ]
    conn = pyodbc.connect(u'Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=' + file_path)
    data = []
    try:
        cursor = conn.cursor()
        try:
            for table, column in sources:
                # Skip rows with an empty author; order by author so each
                # author's poems are consecutive.
                cursor.execute(
                    "select [author],[{}] from [{}] where [author]<>'' order by [author]".format(column, table))
                data.extend(cursor.fetchall())
        finally:
            cursor.close()
    finally:
        # Close the connection even if a query fails (the original leaked it).
        conn.close()

    # Build the per-author txt dataset from the previously exported files.
    prepare_data()
    return data


def save_txt(data):
    """Split raw (author, content) rows into per-author txt files.

    A poem is "abnormal" when its text contains the placeholder character
    '□' or any ASCII letter — both indicate corrupted / incomplete source
    data.  Normal and abnormal poems are appended to separate directories,
    one file per author, one poem per line.
    """
    normal_dir = 'raw_data/normal_content_by_author'
    abnormal_dir = 'raw_data/abnormal_content_by_author'
    ensure_dir(normal_dir)
    ensure_dir(abnormal_dir)
    # One compiled pattern replaces the original 26-iteration letter scan;
    # hoisted out of the loop.
    bad_char = re.compile(r'[A-Za-z□]')
    for verse in data:
        if len(verse[0]) < 2:  # malformed row: author name too short
            continue
        target_dir = abnormal_dir if bad_char.search(verse[1]) else normal_dir
        with codecs.open('{}/{}.txt'.format(target_dir, verse[0]), 'a', 'utf-8') as f:
            f.write(verse[1].replace('\n', '') + '\n')


# Collect poems that contain malformed characters (currently: Li Bai only).
def sift_wrong_data(data):
    """Write every Li Bai poem containing '□' or an ASCII letter to
    raw_data/content_by_author/李白.txt (one poem per line, overwriting).

    Unlike the original implementation this does not mutate the rows in
    `data` while scanning them.
    """
    ensure_dir('raw_data/content_by_author/')
    # Matches any marker of a corrupted poem.
    bad_char = re.compile(r'[A-Za-z□]')
    libai_wrong_content = []
    for verse in data:
        if verse[0] != '李白':
            continue
        content = verse[1].replace('\n', '')
        if bad_char.search(content):
            libai_wrong_content.append(content)
    # Dump the collected poems for manual fixing.
    with codecs.open('raw_data/content_by_author/李白.txt', 'w', 'utf-8') as f:
        f.write('\n'.join(libai_wrong_content))
    print('success!')

def prepare_data():
    """Build raw_data/merged_content_by_author from the per-author "normal"
    files, keeping only authors with at least 1000 poems.

    Li Bai is handled first by prepare_data_for_libai(), which also pads his
    corpus to 1000 poems and creates the merged output directory.
    """
    prepare_data_for_libai()
    dirname = "raw_data/normal_content_by_author"
    to_dirname = "raw_data/merged_content_by_author"
    # os.listdir never yields '.' / '..', so no special-casing is needed.
    for filename in os.listdir(dirname):
        with codecs.open("{}/{}".format(dirname, filename), 'r', 'utf-8') as f:
            poems = f.read().split('\n')
            poems.pop()  # drop the empty element after the trailing newline
        if len(poems) < 1000:
            continue  # author has too few poems for the dataset
        with codecs.open("{}/{}".format(to_dirname, filename), 'w', 'utf-8') as f:
            f.write('\n'.join(poems))

def prepare_data_for_libai():
    """Merge Li Bai's fixed-abnormal and normal poems into one file and, if
    he has fewer than 1000 poems, pad to 1000 by duplicating random ones.

    Output: raw_data/merged_content_by_author/李白.txt (one poem per line).
    """
    # Read both source files; each ends with a newline, so drop the empty
    # trailing element produced by split.
    with codecs.open('raw_data/fixed_abnormal_content_by_author/李白.txt', 'r', 'utf-8') as f:
        poems = f.read().split('\n')
        poems.pop()
    with codecs.open('raw_data/normal_content_by_author/李白.txt', 'r', 'utf-8') as f:
        normal_poems = f.read().split('\n')
        normal_poems.pop()
    poems.extend(normal_poems)

    # Pad to 1000 by sampling (with replacement) from the existing poems.
    # max(0, ...) guards the >=1000 case, where the original code passed a
    # negative size to np.random.randint and crashed with ValueError.
    lack = max(0, 1000 - len(poems))
    if lack:
        np.random.seed(None)  # explicit reseed from OS entropy
        for idx in np.random.randint(0, len(poems), lack):
            poems.append(poems[idx])

    ensure_dir("raw_data/merged_content_by_author/")
    with codecs.open('raw_data/merged_content_by_author/李白.txt', 'w', 'utf-8') as f:
        f.write('\n'.join(poems))


# Filter the corpus, keeping authors with at least min_num clean poems.
def sift(data, min_num):  # a kept author must have >= min_num poems
    """Group clean poems by author and keep authors with >= min_num poems.

    `data` must list each author's rows consecutively (the DB query orders
    by author).  A poem is discarded when the author name is shorter than 2
    characters or the text contains '□' or any ASCII letter.  Each kept
    group is appended to raw_data/content_by_author/<author>.txt and the
    result is returned as [[author, [poem, ...]], ...].

    Fixes the original implementation, which duplicated the first poem of
    every author after the first (and sometimes the final poem), and which
    crashed on an empty `data`.
    """
    bad_char = re.compile(r'[A-Za-z□]')  # markers of corrupted text
    all_verse = []
    current_author = None
    current_poems = []

    def _flush():
        # Emit the finished author group if it is large enough.
        if current_author is not None and len(current_poems) >= min_num:
            all_verse.append([current_author, current_poems])
            with codecs.open('raw_data/content_by_author/{}.txt'.format(current_author), 'a', 'utf-8') as f:
                f.write('\n'.join(current_poems))

    for verse in data:
        if len(verse[0]) < 2 or bad_char.search(verse[1]):
            continue  # malformed author name or corrupted poem text
        content = verse[1].replace('\n', '')
        if verse[0] != current_author:
            _flush()  # previous author's poems are complete
            current_author = verse[0]
            current_poems = []
        current_poems.append(content)
    _flush()  # don't forget the final author

    return all_verse


# Tokenize a corpus file line-by-line, file to file.
def word_cut_f2f(inputfile: str, outputfile: str, method: str):
    """Tokenize `inputfile` (one poem per line) into `outputfile`.

    method:
        'jieba'  - jieba word segmentation, tokens separated by spaces;
        'segtag' - shell out to the external segtag2017 tool
                   (NOTE(review): hard-coded Windows path);
        'char'   - split each line into single characters.

    Output lines are joined with ' \\n' (the trailing space is kept for the
    downstream word2vec corpus reader).
    """
    print('word_cut_f2f {} start!'.format(method))
    if method == 'jieba':
        cut_lines = []
        with codecs.open(inputfile, 'r', 'utf-8') as in_file:
            for line in in_file:
                # One poem per line; punctuation survives as its own token.
                cut_lines.append(' '.join(jieba.lcut(line.replace('\n', ''))))
        with codecs.open(outputfile, 'w', 'utf-8') as out_file:
            out_file.write(' \n'.join(cut_lines))
    elif method == 'segtag':
        # Paths are converted to backslashes for the Windows executable.
        output = os.system(r'D:\XMU\1-4.大四\大四下\毕业设计\分词工具\segtag2017\segtag2017\segtag\segtag64.exe '
                           r'-g -s -v -r '
                           r'{} {}'.format(inputfile.replace('/', '\\'), outputfile.replace('/', '\\')))
        print('segtag分词输出：{}'.format(output))
    elif method == 'char':
        with codecs.open(inputfile, 'r', 'utf-8') as in_file:
            # A str is already an iterable of characters — no list() needed.
            cut_lines = [' '.join(line) for line in in_file.read().split('\n')]
        with codecs.open(outputfile, 'w', 'utf-8') as out_file:
            out_file.write(' \n'.join(cut_lines))
    else:
        print('error:不支持method={}！'.format(method))
    print('word_cut_f2f {} finished!'.format(method))


# Train word2vec models over the whole corpus and export their vocabularies.
def train_wv_model(corpus=None):
    """Train three word2vec models (jieba / segtag / char tokenizations) on
    the full corpus and dump each model's vocabulary vectors to a .word file.

    corpus: list of (author, content) rows.  Defaults to the module-global
    `data` loaded in __main__ — the original implementation depended on
    that global implicitly, which raised NameError when imported; the
    parameter makes the dependency explicit while staying call-compatible.
    """
    if corpus is None:
        corpus = data  # backward-compatible fallback to the script global
    # min_num=1 keeps every author: the embedding corpus uses all poems
    # (the original comment claiming a 10-poem minimum was wrong).
    all_verse = sift(corpus, 1)
    all_content = []
    for verse in all_verse:
        if verse[0] == '李白':
            print('李白作诗：{}'.format(len(verse[1])))
        all_content.extend(verse[1])
    # One poem per line; the context manager closes the file on any error.
    with codecs.open('../word_embedding/all_content.txt', 'w', 'utf-8') as f:
        f.write('\n'.join(all_content))

    # Tokenize with all three methods, then train one model per method and
    # export its vocabulary.  (size, window) were tuned per tokenization.
    for method, size, window in (('jieba', 96, 2), ('segtag', 86, 3), ('char', 76, 4)):
        corpus_dir = '../word_embedding/content_wordcut_{}'.format(method)
        ensure_dir(corpus_dir)
        word_cut_f2f(inputfile=r'..\word_embedding\all_content.txt',
                     outputfile='{}/all_content_wordcut.txt'.format(corpus_dir),
                     method=method)
        ensure_dir('../word_embedding/model')
        model_file = "../word_embedding/model/all_verse_{}.zh_word2vec".format(method)
        save_w2v_model(corpus_dir=corpus_dir,
                       modelfile=model_file,
                       size=size, window=window, min_count=10)
        save_vocab(model_file=model_file,
                   vocab_file='../word_embedding/verse.{}.word'.format(method))


# Build the raw_data dataset: label map, filtered poems, tokenized variants.
def _regroup_cut_content(cut_file: str, sift_verse, saved_num: int):
    """Re-split a tokenized corpus file (one poem per line) back into the
    per-author layout of `sift_verse` (saved_num consecutive lines each).
    Returns a deep copy; `sift_verse` itself is untouched."""
    regrouped = copy.deepcopy(sift_verse)
    with codecs.open(cut_file, 'r', 'utf-8') as f:
        lines = f.read().split('\n')
    start = 0
    for verse in regrouped:
        verse[1] = lines[start:start + saved_num]
        start += saved_num
    return regrouped


def gen_raw_data(saved_num: int, corpus=None):
    """Generate the raw_data dataset files.

    Produces: label_map_id.npy (author -> label id), sift_verse.npy (the
    filtered corpus) and sift_verse_{jieba,segtag,char}.npy (tokenized
    variants, same per-author layout).

    saved_num: minimum — and exactly kept — number of poems per author.
    corpus: (author, content) rows; defaults to the module-global `data`
            loaded in __main__ (the original relied on that global).
    """
    if corpus is None:
        corpus = data  # backward-compatible fallback to the script global
    ensure_dir('raw_data/')
    sift_verse = sift(corpus, saved_num)  # authors with >= saved_num poems

    # Persist the author -> label-id mapping.
    label_map_id = {verse[0]: i for i, verse in enumerate(sift_verse)}
    print('选出作者{}个, 作者名单：{}'.format(len(label_map_id), label_map_id.keys()))
    np.save('raw_data/label_map_id.npy', label_map_id)
    # label_map_id = np.load('label_map_id.npy', allow_pickle=True).item()

    # Keep exactly saved_num poems per author and flatten everything into
    # one txt so the external tokenizers can process it in a single pass.
    sift_content = []
    for verse in sift_verse:
        verse[1] = verse[1][:saved_num]
        sift_content.extend(verse[1])
    with codecs.open('../word_embedding/sift_content.txt', 'w', 'utf-8') as f:
        f.write('\n'.join(sift_content))

    # Save the cleaned corpus, then tokenize it with each method and regroup
    # the tokenized lines back per author (saved_num lines each).
    np.save('raw_data/sift_verse.npy', sift_verse)
    for method in ('jieba', 'segtag', 'char'):
        out_dir = '../word_embedding/content_wordcut_{}'.format(method)
        ensure_dir(out_dir)
        cut_file = '{}/sift_content_wordcut.txt'.format(out_dir)
        word_cut_f2f(inputfile='../word_embedding/sift_content.txt',
                     outputfile=cut_file, method=method)
        np.save('raw_data/sift_verse_{}.npy'.format(method),
                _regroup_cut_content(cut_file, sift_verse, saved_num))


def make_word_embedding(input_file: str, vocab_file: str, output_file: str):
    """Build and pickle a WordEmbedding cache covering every token of the
    tokenized dataset that has a vector in the word2vec vocabulary.

    input_file: .npy of [[author, [tokenized poem, ...]], ...].
    vocab_file: word2vec text-format vectors.
    output_file: pickle destination for the WordEmbedding object.
    """
    wv = KeyedVectors.load_word2vec_format(vocab_file, binary=False, encoding='utf-8', unicode_errors='ignore')
    in_data = np.load(input_file, allow_pickle=True)  # tokenized dataset

    # Collect every distinct space-separated token in the dataset.
    word_set = set()
    for class_data in in_data:
        for content in class_data[1]:  # class_data[1]: one author's poems
            word_set.update(content.split(' '))

    stoi = defaultdict(int)
    itos = defaultdict(str)
    vectors = []
    add_pad_unk(stoi, itos, vectors, wv)
    # sorted() makes index assignment deterministic — iterating a raw set
    # gave a different word<->id mapping on every run, so cached embeddings
    # were not reproducible.
    for word in sorted(word_set):
        if word in wv.vocab:
            stoi[word] = len(stoi)
            itos[len(itos)] = word
            vectors.append(wv.get_vector(word))
    word_embedding = WordEmbedding(stoi=stoi, itos=itos, vectors=vectors)

    # Context manager guarantees the cache file is closed even on error.
    with Path(output_file).open('wb') as word_embedding_cache:
        pickle.dump(word_embedding, word_embedding_cache)


if __name__ == '__main__':
    # Load every poem from the Access database (also triggers prepare_data()).
    data = read_accdb()

    # Train word2vec models over the full corpus and save vocabulary vectors.
    # train_wv_model()
    #
    # Build the raw_data dataset (pre- and post-tokenization variants).
    # gen_raw_data(saved_num=1000)
    #
    # Build word/char embedding caches for the filtered dataset.
    # ensure_dir('../word_embedding/.cache/')
    # make_word_embedding(input_file='raw_data/sift_verse_jieba.npy',
    #                     vocab_file='../word_embedding/verse.jieba.word',
    #                     output_file='../word_embedding/.cache/verse_word_embedding.jieba.pkl')
    # make_word_embedding(input_file='raw_data/sift_verse_segtag.npy',
    #                     vocab_file='../word_embedding/verse.segtag.word',
    #                     output_file='../word_embedding/.cache/verse_word_embedding.segtag.pkl')
    # make_word_embedding(input_file='raw_data/sift_verse_char.npy',
    #                     vocab_file='../word_embedding/verse.char.word',
    #                     output_file='../word_embedding/.cache/verse_char_embedding.pkl')
