import json
from math import log

import numpy as np
import pandas as pd
from collections import defaultdict, Counter
import networkx as nx
from mpmath import ln


def load_titles(file_path):
    """Load teacher title information from a CSV file.

    The CSV must contain 'Name' and 'Professional Titles' columns.
    Returns a dict mapping teacher name -> professional title
    (on duplicate names, the last row wins).
    """
    frame = pd.read_csv(file_path)
    names = frame['Name']
    ranks = frame['Professional Titles']
    return {name: rank for name, rank in zip(names, ranks)}


def load_papers_from_csv(file_path):
    """Read paper data from a CSV file.

    The CSV must contain 'Title' and 'Authors' columns.
    Returns a list of dicts of the form {"title": ..., "authors": ...}.
    """
    papers = []
    for record in pd.read_csv(file_path).to_dict(orient='records'):
        papers.append({"title": record['Title'], "authors": record['Authors']})
    return papers


def categorize_title(title):
    """Map a teacher's professional title to one of three categories.

    Exact matches on associate-professor/associate-researcher titles and
    professor/researcher titles get their own categories; everything else
    (including unknown titles) falls into the junior-staff category.
    """
    if title in ('副教授', '副研究员'):
        return '副教授/副研究员'
    if title in ('教授', '研究员'):
        return '教授/研究员'
    return '助理研究员/讲师/实验（工程）师'


# Load teacher title information
titles_file_path = '../result_CNKI/result/teachers.csv'
titles = load_titles(titles_file_path)

# Load paper data from every source CSV into one list
papers = []
file_paths = [
    '../result_sci/result/006删除了非工程中心的老师.csv',
    '../result_CNKI/result/003删除了非工程中心的老师.csv',
    '../result_CNKI_ZL/filtered_output.csv'
]
for file_path in file_paths:
    papers.extend(load_papers_from_csv(file_path))

# Parse teacher data: per-author paper counts and pairwise co-authorships
teachers = set()
collaborations = defaultdict(Counter)
teacher_counts = Counter()

for paper in papers:
    # Missing author cells are read as float NaN; str() turns them into 'nan'
    paper['authors'] = str(paper['authors'])
    if paper['authors'] == 'nan':
        continue
    authors = [author.strip() for author in paper['authors'].split(';')]
    teachers.update(authors)
    for author in authors:
        teacher_counts[author] += 1
    # Increment both ordered directions so collaborations[a][b] is symmetric
    for author1 in authors:
        for author2 in authors:
            if author1 != author2:
                collaborations[author1][author2] += 1

# Build the collaboration graph
G = nx.Graph()

# Add nodes, keeping a name -> node-id index so that edge insertion below
# is an O(1) dict lookup instead of a linear scan over all nodes per author
name_to_id = {}
for idx, teacher in enumerate(teachers):
    title = titles.get(teacher, "未知")
    category = categorize_title(title)
    # Base size 10 plus one per paper, so symbolSize is always >= 11
    symbol_size = 10 + teacher_counts[teacher]
    G.add_node(idx, name=teacher, symbolSize=symbol_size, category=category)
    name_to_id[teacher] = idx

# Add edges. Each undirected pair is visited twice (once per direction) with
# the same symmetric weight; networkx simply overwrites the identical edge.
for author, partners in collaborations.items():
    source_id = name_to_id[author]
    for partner, count in partners.items():
        G.add_edge(source_id, name_to_id[partner], weight=count)

# Lay the graph out with spring_layout, pulled together by edge weight
pos = nx.spring_layout(G, weight='weight', seed=1, k=0.2)

# Store the layout coordinates on the nodes.
# NOTE(review): an earlier pass also added random jitter scaled by the
# maximum symbolSize here, but it was immediately overwritten by the raw
# layout positions below, so the dead jitter code was removed — the
# emitted coordinates are unchanged.
for node, data in G.nodes(data=True):
    data['x'] = pos[node][0]
    data['y'] = pos[node][1]

# Build the node and link payloads (ECharts graph format)
nodes = [{
    "id": node,
    "name": data['name'],
    # symbolSize >= 11 (see above), so log() is always defined
    "symbolSize": log(data['symbolSize']),
    "x": data['x'],
    "y": data['y'],
    "value": teacher_counts[data['name']],
    "category": data['category']
} for node, data in G.nodes(data=True)]

links = [{
    "source": u,
    "target": v,
    "value": d['weight']
} for u, v, d in G.edges(data=True)]

# Assemble the JSON-ready structure
categories = [{'name': '教授/研究员'}, {'name': '副教授/副研究员'}, {'name': '助理研究员/讲师/实验（工程）师'}]
graph_data = {"nodes": nodes, "links": links, "categories": categories}

# Write the graph data to a JSON file
with open('teachers.json', 'w', encoding='utf-8') as file:
    json.dump(graph_data, file, ensure_ascii=False, indent=4)