import jieba
import jieba.analyse
import jieba.posseg
from sklearn.model_selection import KFold

import our_global
import persistence
from preprocess.base_preprocess import BasePreprocess
import os
import re
from functools import reduce
from xml.dom.minidom import parse
import matplotlib.pyplot as plt
import numpy as np

import warnings

warnings.filterwarnings('ignore')  # Suppress noisy library warnings globally (sklearn/gensim chatter).

from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
from sklearn.decomposition import PCA, TruncatedSVD


class YyzPreprocess(BasePreprocess):
    """Preprocess the "yyz" corpus into (train, test) feature/label splits.

    Pipeline (each expensive stage is cached via ``persistence_model`` and
    reloaded on later runs):
      1. Parse the XML corpus, split paragraphs into clause pairs; a pair
         whose second clause starts with an enumeration prefix "N、" is
         labeled 1 (segmentation boundary), otherwise 0.
      2. Segment + POS-tag each clause with jieba, keeping the first three
         word/flag pairs (padded with "none").
      3. Look up pretrained embeddings for every token (averaging character
         vectors for out-of-vocabulary tokens).
      4. PCA-reduce and flatten each pair into a 1-D feature vector.
      5. KFold(5)-split; only the FIRST fold is returned.
    """

    # Cache store for the "yyz" namespace; get() raises when a key is
    # missing, which the try/except blocks below use as "cache cold".
    persistence_model = persistence.Persistence("yyz")

    @staticmethod
    def _first_fold(x_list_pool, y_list):
        """Split with 5-fold KFold and return only the first fold.

        Returns:
            (train_x, train_y, test_x, test_y) as numpy arrays.
        """
        kf = KFold(n_splits=5)
        for train, test in kf.split(x_list_pool):
            x_arr = np.array(x_list_pool)
            y_arr = np.array(y_list)
            print("%s-%s" % (train, test))
            train_x = x_arr[train]
            test_x = x_arr[test]
            train_y = y_arr[train]
            test_y = y_arr[test]

            print(len(train_x), len(train_y))
            print(len(test_x), len(test_y))

            # Deliberately returns inside the loop: callers only consume
            # one train/test split.
            return train_x, train_y, test_x, test_y

    def get_pca_xy(self):
        """Build PCA-reduced embedding features and labels.

        Returns:
            (train_x, train_y, test_x, test_y) for the first KFold split.
        """
        x_list = []
        y_list = []
        try:
            # Stage 1/2 (cached): segmented clause pairs, labels, vocabulary.
            x_list_deal = self.persistence_model.get("x_list_deal")
            y_list = self.persistence_model.get("y_list")
            words = self.persistence_model.get("words")
        except Exception:
            corpus_names = self.read_corpus_name()
            print(corpus_names)
            for c in corpus_names:
                file_name = f"{our_global.work_place}/{c}"
                # Parse the XML corpus document with minidom.
                dom_tree = parse(file_name)
                collections = dom_tree.documentElement
                # NOTE(review): the corpus tag really is the misspelled
                # "pragraph" — do not "fix" it here.
                paras = collections.getElementsByTagName("pragraph")
                for para in paras:
                    content = para.getAttribute("content")
                    # Split on Chinese clause-level punctuation.
                    content_s = re.split(r'[，。！？；]', content)
                    content_s = list(filter(lambda s: s != "", content_s))

                    print(content_s)
                    for ptr in range(len(content_s) - 1):
                        current_sent = content_s[ptr]
                        next_sent = content_s[ptr + 1]
                        print(current_sent)
                        print(next_sent)
                        # Add the adjacent clause pair, stripping the "N、"
                        # enumeration prefixes from both clauses.
                        x_fst = re.sub(r'\d*、', "", current_sent)
                        x_scd = re.sub(r'\d*、', "", next_sent)
                        x_list.append(f"{x_fst}\t{x_scd}")
                        # If the second clause starts with "N、", the two
                        # clauses straddle a split point -> label 1.
                        if re.findall(r'^\d、', next_sent):
                            y_list.append(1)
                        else:
                            y_list.append(0)
            x_list_deal = []
            for x in x_list:
                print(x)
                x_spl = x.split()
                x_st = x_spl[0]
                x_sec = x_spl[1]

                def get_deal_out(sent):
                    # Segment + POS-tag the clause, pad with "none" tokens,
                    # then keep exactly the first six '/'-separated fields
                    # (three word/flag pairs).
                    sent_seg = jieba.posseg.cut(sent.strip())
                    out = ""
                    for s in sent_seg:
                        out += "{}/{}/".format(s.word, s.flag)
                    out += "none/none/none/none/none/none"
                    out = re.findall(r'.*?/.*?/.*?/.*?/.*?/.*?/', out)
                    return out[0]

                out = get_deal_out(x_st)
                out += get_deal_out(x_sec)

                print(out)
                x_list_deal.append(out)
            print(x_list_deal)
            self.persistence_model.save("x_list_deal", x_list_deal)
            self.persistence_model.save("y_list", y_list)
            # Vocabulary: every whole token AND every single character.
            words = []
            for x in x_list_deal:
                print(x)
                for w in x.split('/'):
                    for ch in w:
                        if ch not in words:
                            words.append(ch)
                    if w not in words:
                        words.append(w)
            print(words)
            self.persistence_model.save("words", words)

        try:
            # Stage 3 (cached): pretrained vectors restricted to vocabulary.
            embeddings_index = self.persistence_model.get("embeddings_index")
        except Exception:
            w2v_model = f"{our_global.root}/model/sgns.target.word-character.char1-2.dynwin5.thr10.neg5.dim300.iter5"
            embeddings_index = {}
            # FIX: context manager so the file is closed even on error.
            with open(w2v_model, 'r', encoding='utf-8') as f:
                for line in f:
                    values = line.split()
                    word = values[0]
                    if word in words:
                        embeddings_index[word] = np.asarray(values[1:], dtype='float32')
            self.persistence_model.save("embeddings_index", embeddings_index)
            print(embeddings_index)

        try:
            # Stage 4 (cached): per-pair lists of embedding vectors.
            x_list_embed = self.persistence_model.get("x_list_embed")
        except Exception:
            # FIX: this list was never initialized on the cache-cold path,
            # so the append below raised NameError.
            x_list_embed = []
            for x in x_list_deal:
                print(x)
                x_s = list(filter(lambda s: s != "", x.split('/')))
                print(x_s)
                ele_x = []
                for w in x_s:
                    if w in embeddings_index:
                        ele_x.append(embeddings_index[w])
                    else:
                        # Out-of-vocabulary token: average its characters'
                        # vectors. FIX: the old code divided by len(w) on
                        # every loop iteration and mutated the cached vector
                        # in place (aliasing into embeddings_index); compute
                        # the mean on a fresh array instead.
                        embed = np.mean([embeddings_index[ch] for ch in w],
                                        axis=0)
                        ele_x.append(embed)
                x_list_embed.append(ele_x)
            print(x_list_embed)
            self.persistence_model.save("x_list_embed", x_list_embed)

        try:
            # Stage 5 (cached): PCA-reduced, flattened feature vectors.
            x_list_pool = self.persistence_model.get("x_list_pool")
        except Exception:
            def pca(weight, n=100):
                # Reduce one pair's embedding matrix to n components.
                return PCA(n_components=n).fit_transform(weight)

            x_list_embed_pca = [pca(x, 12) for x in x_list_embed]

            # Flatten each (tokens x 12) matrix into a 1-D feature vector.
            x_list_pool = []
            for x in x_list_embed_pca:
                x_list_pool.append(np.array(x).reshape(1, -1)[0])

            print(x_list_pool[0])
            self.persistence_model.save("x_list_pool", x_list_pool)

        return self._first_fold(x_list_pool, y_list)

    def get_xy(self):
        """Build features from cached raw embeddings (no PCA) and labels.

        Requires a warm cache: "x_list_embed" and "y_list" must already be
        persisted (e.g. by a prior get_pca_xy run).

        Returns:
            (train_x, train_y, test_x, test_y) for the first KFold split.
        """
        x_list_embed = self.persistence_model.get("x_list_embed")
        y_list = self.persistence_model.get("y_list")
        x_list_pool = []
        for x in x_list_embed:
            # Flatten each pair's embedding matrix into a 1-D vector.
            x_list_pool.append(np.array(x).reshape(1, -1)[0])
        return self._first_fold(x_list_pool, y_list)


if __name__ == '__main__':
    # Script entry point: run the full PCA preprocessing pipeline once.
    preprocessor = YyzPreprocess()
    preprocessor.get_pca_xy()
