import configparser
import numpy as np
import pandas as pd
import community_vos as community
import networkx as nx
from pyecharts.charts import Graph
from pyecharts import options as opts
from pyecharts.globals import CurrentConfig
from collections import defaultdict
from sklearn import preprocessing


CurrentConfig.ONLINE_HOST = "assets/"  # pyecharts loads its JS assets from this local path instead of a CDN
# Read parameters from conf.ini (UTF-8, section [step2]); the file must sit next to the script
config = configparser.ConfigParser()
config.read('./conf.ini',encoding='utf-8')
step2_config = config['step2']

RANDOM_SEED = int(step2_config['RANDOM_SEED'])          # seed for the random initial layout
DECAY_STEPS = int(step2_config['DECAY_STEPS'])          # step-size decay interval (iterations)
inputPath = step2_config['input_path']                  # ';'-separated keyword records (txt)
nodes_num = int(step2_config['nodes_num'])              # number of high-frequency words to keep
edge_num = int(step2_config['edge_num'])                # maximum number of edges to draw
MAX_ITERATIONS = int(step2_config['MAX_ITERATIONS'])    # layout training iterations
INITIAL_STEP_SIZE = float(step2_config['INITIAL_STEP_SIZE'])      # initial learning rate for the layout
STEP_SIZE_REDUCTION = float(step2_config['STEP_SIZE_REDUCTION'])  # multiplicative learning-rate decay factor
STEP_SIZE_CONVERGENCE = float(step2_config['STEP_SIZE_CONVERGENCE'])  # NOTE(review): read here but never passed to train_each — confirm intent
output_graph_path = step2_config['output_graph_path']   # rendered HTML graph
output_json_Path = step2_config['output_json_Path']     # graph data as JSON
output_xlsx_path = step2_config['output_xlsx_path']     # co-occurrence matrix as Excel
output_cluster_path = step2_config['output_cluster_path']  # word -> cluster id tsv


def read_matrix(path):
    """Load a tab-separated co-occurrence matrix and drop all-zero rows/columns.

    The file has no header; column 0 holds the word labels and becomes the
    index, so the data columns are labelled 1..n by pandas.

    :param path: path of the tab-separated matrix file
    :return: (matrix, label) where matrix is the filtered DataFrame and
             label maps row position -> word
    """
    matrix = pd.read_csv(path, sep='\t', header=None, index_col=0)
    drop_rows = []
    drop_cols = []
    for pos, index in enumerate(matrix.index):
        if sum(matrix.loc[index]) == 0:
            drop_rows.append(index)
            # BUG FIX: the original dropped column *label* `pos`, but with
            # index_col=0 the data columns are labelled 1..n, so label `pos`
            # is either missing (KeyError for pos=0) or the wrong column.
            # The column matching row position `pos` is matrix.columns[pos].
            drop_cols.append(matrix.columns[pos])
    matrix.drop(drop_rows, inplace=True)
    matrix.drop(drop_cols, axis=1, inplace=True)
    label = {i: word for i, word in enumerate(matrix.index)}
    return matrix, label


def read_txt(path):
    """Read keyword records: one record per line, keywords separated by ';'.

    Trailing ';' separators are stripped first so they do not produce empty
    keywords at the end of a record.

    :param path: path of the UTF-8 text file
    :return: list of records, each a list of keyword strings
    """
    # FIX: the original left the file handle open; `with` guarantees closing.
    with open(path, 'r', encoding='utf-8') as file:
        return [line.strip().strip(';').split(';') for line in file]


def get_matrix(lines, nodes_num):
    '''
    将列表形式的txt转化为高频词共现矩阵
    :param path:
    :param nodes_num:高频词个数
    :return:
    '''
    dict_word_freq = defaultdict(int)
    totoal_num = len(lines)  # 总记录数
    # print('一共有{}条记录'.format(totoal_num))
    for line in lines:
        for l in line:
            if l:
                dict_word_freq[l] += 1
    # print('一共有{}个词'.format(len(dict_word_freq)))
    sorted_dict = sorted(dict_word_freq.items(), key=lambda item: item[1], reverse=True)
    # print('词频最低为', sorted_dict[-1])
    # print('词频最高为', sorted_dict[0])
    freq = sorted_dict[nodes_num - 1][1] if len(dict_word_freq) >= nodes_num else sorted_dict[-1][1]  # 获取第500个字符的频次
    word_list = [item[0] for item in sorted_dict if item[1] >= freq]
    freqs = [item[1] for item in sorted_dict if item[1] >= freq]
    # print('选择词频大于等于{}的词，共有{}个词'.format(freq, len(word_list)))

    dict_k_w = defaultdict(int)
    matrix = pd.DataFrame(np.zeros((len(word_list), len(word_list))), columns=word_list, index=word_list)
    for line in lines:
        line = list(set(line) & set(word_list))
        for i in range(0, len(line) - 1):
            for j in range(i + 1, len(line)):
                dict_k_w[frozenset([line[i], line[j]])] += 1

    for key, value in dict_k_w.items():
        key = list(key)
        matrix.loc[key[0], key[1]] = value
        matrix.loc[key[1], key[0]] = value

    matrix = matrix.loc[(matrix.sum(axis=1) != 0), (matrix.sum(axis=0) != 0)]  # 去除和其他所有点都没有共现关系的点
    series_a = matrix.sum(axis=1)
    for index in matrix.index:
        for col in matrix.columns:
            matrix.loc[index, col] = matrix.loc[index, col] / (series_a[index] * series_a[col])
    labels = dict((index, item) for index, item in enumerate(matrix.index))
    return matrix, labels, freqs


def keep_norm(init_data, k=1):
    """Rescale coordinates so the summed pairwise Euclidean distance equals k.

    :param init_data: (n, 2) ndarray of point coordinates
    :param k: target value for the sum of pairwise distances
    :return: the rescaled coordinate array
    """
    pair_distance_sum = 0.0
    n = len(init_data)
    for a in range(n):
        for b in range(a + 1, n):  # self-pairs contribute exactly 0, so skip them
            pair_distance_sum += np.linalg.norm(init_data[a] - init_data[b])
    return init_data / (pair_distance_sum / k)


def init(m):
    """Create the seeded random 2-D starting layout for m nodes.

    The coordinates are normalised so the summed pairwise distance equals the
    number of point pairs, i.e. the mean pairwise distance is 1.

    :param m: number of nodes
    :return: (m, 2) ndarray of coordinates
    """
    np.random.seed(RANDOM_SEED)
    points = np.random.rand(m, 2)
    pair_count = m * (m - 1) / 2.0
    return keep_norm(points, pair_count)


def train_each(init_data, matrix, times, INITIAL_STEP_SIZE=1, STEP_SIZE_REDUCTION=0.75, DECAY_STEPS=1,
               STEP_SIZE_CONVERGENCE=0.0001):
    """Iteratively pull each node toward the weighted centroid of its neighbours.

    :param init_data: (n, 2) ndarray of coordinates, updated pass by pass
    :param matrix: (n, n) co-occurrence weight DataFrame (row j weights node j's neighbours)
    :param times: maximum number of training passes
    :param INITIAL_STEP_SIZE: starting learning rate
    :param STEP_SIZE_REDUCTION: multiplicative decay applied every DECAY_STEPS passes
    :param DECAY_STEPS: decay interval in passes
    :param STEP_SIZE_CONVERGENCE: learning-rate floor; training stops once the
        decayed rate reaches it (default matches the original clamp of 0.0001)
    :return: the normalised coordinate array
    """
    for k in range(times):
        decayed = INITIAL_STEP_SIZE * STEP_SIZE_REDUCTION ** (k / DECAY_STEPS)
        rate = max(decayed, STEP_SIZE_CONVERGENCE)
        for j in range(len(init_data)):
            # weighted average of all points, weights = row j of matrix
            target = np.dot(matrix.iloc[j], init_data) / np.sum(matrix.iloc[j])
            init_data[j] = init_data[j] - rate * (init_data[j] - target)
        init_data = keep_norm(init_data, init_data.shape[0] * (init_data.shape[0] - 1) / 2.0)
        # BUG FIX: the original compared `rate == 0.001` while clamping the
        # rate at 0.0001, so the early exit never fired and the (configured
        # but unused) convergence threshold was ignored. Stop once the rate
        # has decayed to the convergence floor.
        if decayed <= STEP_SIZE_CONVERGENCE:
            return init_data
    return init_data


def get_graph_data(matrix, coordinate, label, freqs, edge_num=500):
    """Convert the matrix and layout into pyecharts node/link/category dicts.

    Node category comes from community detection (community_vos.best_partition);
    node symbol size is driven by z-scored word frequency and label font size
    by min-max scaled frequency. Only the edge_num strongest links are kept.

    :param matrix: normalised co-occurrence DataFrame
    :param coordinate: (n, 2) array of node positions
    :param label: dict mapping row position -> word
    :param freqs: word frequencies aligned with label
    :param edge_num: maximum number of links to return
    :return: (nodes, links, categories)
    """
    norm_freqs = preprocessing.scale(freqs)          # z-scores -> symbol size
    size_freqs = preprocessing.minmax_scale(freqs)   # 0-1 range -> label font size

    matrix = matrix / 10.0
    graph = nx.Graph()
    links = []
    n = matrix.shape[0]
    for row in range(n - 1):
        for col in range(row + 1, n):
            weight = matrix.iloc[row, col]
            if weight:
                graph.add_edge(label[row], label[col], weight=weight)
                links.append({"source": label[row], "target": label[col], "value": weight})

    clusters = community.best_partition(graph, lamda=0.001)

    nodes = []
    for idx, word in label.items():
        nodes.append({
            "x": coordinate[idx][0],
            "y": coordinate[idx][1],
            "id": word,
            "name": word,
            "category": clusters[word],
            "symbolSize": 15 + 10 * round(norm_freqs[idx], 2),
            "label": {
                "fontSize": 10 + 10 * round(size_freqs[idx], 2)
            }
        })
    links = sorted(links, key=lambda item: item['value'], reverse=True)[:edge_num]
    categories = [{"name": str(c)} for c in set(clusters.values())]
    return nodes, links, categories


def draw(nodes, links, categories, outputPath):
    """Render the co-word network to an HTML file with pyecharts Graph.

    :param nodes: node dicts with precomputed x/y coordinates
    :param links: edge dicts (source/target/value)
    :param categories: category dicts for node colouring
    :param outputPath: path of the HTML file to write
    """
    chart = Graph(init_opts=opts.InitOpts(width="auto", height="1000px"))
    chart.add(
        series_name="",
        nodes=nodes,
        categories=categories,
        links=links,
        layout="none",  # keep the trained x/y coordinates instead of a force layout
        is_roam=True,
        is_focusnode=True,
        label_opts=opts.LabelOpts(is_show=True, position="inside"),
        linestyle_opts=opts.LineStyleOpts(width=0.5, curve=0.3, opacity=0.7),
    )
    chart.set_global_opts(title_opts=opts.TitleOpts(title="主题分布"))
    chart.render(outputPath)


def data2json(nodes, links, categories, outputPath):
    """Save nodes/links/categories to a pretty-printed UTF-8 JSON file.

    :param nodes: node dicts
    :param links: edge dicts
    :param categories: category dicts
    :param outputPath: path of the JSON file to write
    """
    import json
    payload = {"nodes": nodes, "links": links, "categories": categories}
    with open(outputPath, 'w', encoding='utf-8') as out:
        json.dump(payload, out, indent=4, ensure_ascii=False)


def json2html(inputPath, outputPath):
    """Re-render a previously saved graph JSON (see data2json) to HTML.

    :param inputPath: path of the JSON file holding nodes/links/categories
    :param outputPath: path of the HTML file to write
    """
    import json
    # FIX: the original called open(...).read() without closing the handle;
    # `with` + json.load guarantees the file is closed.
    with open(inputPath, 'r', encoding='utf-8') as f:
        contents = json.load(f)
    draw(contents['nodes'], contents['links'], contents['categories'], outputPath)


def data2heatmapJson(inputPath, outputPath, nodes_num=300, edge_num=500, MAX_ITERATIONS=100, INITIAL_STEP_SIZE=1,
                     STEP_SIZE_REDUCTION=0.75):
    """Run the full pipeline and export nodes/edges in a heat-map JSON shape.

    NOTE(review): each node's color/cluster is drawn at random from 0-3 rather
    than from the detected communities — looks like placeholder demo data;
    confirm before relying on the cluster field.

    :param inputPath: keyword-records txt input
    :param outputPath: JSON file to write
    :param nodes_num: number of high-frequency words to keep
    :param edge_num: maximum number of edges kept
    :param MAX_ITERATIONS: layout training iterations
    :param INITIAL_STEP_SIZE: initial learning rate
    :param STEP_SIZE_REDUCTION: learning-rate decay factor
    """
    import json
    import random
    records = read_txt(inputPath)
    matrix, label, freq = get_matrix(records, nodes_num)
    coordinate = train_each(init(matrix.shape[0]), matrix, MAX_ITERATIONS, INITIAL_STEP_SIZE, STEP_SIZE_REDUCTION)
    nodes, links, categories = get_graph_data(matrix, coordinate, label, freq, edge_num)

    colors = ['yellow', 'blue', 'red', 'black']
    new_nodes = []
    for idx, node in enumerate(nodes):
        cluster = random.randint(0, 3)
        new_nodes.append({"id": idx, "label": node['name'], "x": node["x"], "y": node["y"],
                          "size": node['symbolSize'], "color": colors[cluster], "cluster": cluster})

    # edge size is the link weight relative to the weakest kept link
    links_min = min(link['value'] for link in links)
    new_links = [{"sourceID": link["source"], "targetID": link['target'], "size": link['value'] / links_min}
                 for link in links]

    sourceData = [{"clusterNum": 4, "nodes": new_nodes, "edges": new_links}]

    with open(outputPath, 'w', encoding='utf-8') as f:
        f.write(json.dumps(sourceData, ensure_ascii=False, indent=4))


def main(inputPath, output_graph_path,output_json_Path,output_xlsx_path,output_cluster_path, nodes_num=300, edge_num=500, MAX_ITERATIONS=100, INITIAL_STEP_SIZE=1,
         STEP_SIZE_REDUCTION=0.75):
    """Run the whole co-word pipeline: read records, build the matrix, train
    the layout, render the graph, and persist matrix/JSON/cluster outputs.

    :param inputPath: keyword-records txt input
    :param output_graph_path: HTML graph output path
    :param output_json_Path: graph-data JSON output path
    :param output_xlsx_path: co-occurrence matrix Excel output path
    :param output_cluster_path: word -> cluster tsv output path
    :param nodes_num: number of high-frequency words to keep
    :param edge_num: maximum number of edges drawn
    :param MAX_ITERATIONS: layout training iterations
    :param INITIAL_STEP_SIZE: initial learning rate
    :param STEP_SIZE_REDUCTION: learning-rate decay factor
    """
    print("开始主进程")

    # Read the raw keyword records
    print(f"从 {inputPath} 读取文本数据")
    lines = read_txt(inputPath)
    print("文本数据读取完成")

    # Build the normalised co-occurrence matrix
    print("生成共现矩阵")
    matrix, label, freq = get_matrix(lines, nodes_num)
    print("共现矩阵生成完成")

    # Save the matrix to Excel
    print(f"保存矩阵到 {output_xlsx_path}")
    matrix.to_excel(output_xlsx_path)
    print("矩阵已保存到 Excel")

    # Seeded random starting coordinates
    print("初始化数据")
    init_data = init(matrix.shape[0])
    print("数据初始化完成")

    # Train the layout coordinates
    print(f"开始训练模型，最大迭代次数为 {MAX_ITERATIONS}")
    coordinate = train_each(init_data, matrix, MAX_ITERATIONS, INITIAL_STEP_SIZE, STEP_SIZE_REDUCTION)
    print("模型训练完成")

    # Build pyecharts graph data (nodes carry community assignments)
    print("生成图数据")
    nodes, links, categories = get_graph_data(matrix, coordinate, label, freq, edge_num)
    print("图数据生成完成")

    # Render the interactive HTML graph
    print("绘制图形")
    draw(nodes, links, categories, output_graph_path)
    print("图绘制完成")

    # Persist the graph data as JSON
    print(f"保存数据到 {output_json_Path}")
    data2json(nodes,links,categories,output_json_Path)
    print("数据已保存到 JSON")

    # Save word -> cluster assignments.
    # FIX: the original opened this file without ever closing it, so the last
    # buffered writes could be lost; `with` guarantees flush + close.
    print(f"保存聚类数据到 {output_cluster_path}")
    with open(output_cluster_path,'w',encoding='utf-8') as cluster_file:
        for node in nodes:
            cluster_file.write(f'{node["name"]}\t{node["category"]}\n')
    print("聚类数据已保存")

    print("主进程完成")


if __name__ == '__main__':
    # Run the full pipeline with the parameters loaded from conf.ini at import time
    main(inputPath, output_graph_path,output_json_Path,output_xlsx_path,output_cluster_path, nodes_num,edge_num, MAX_ITERATIONS, INITIAL_STEP_SIZE, STEP_SIZE_REDUCTION)

