import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
plt.rcParams['font.sans-serif']=['SimHei'] # use the SimHei font so Chinese keyword labels render instead of boxes
# plt.rcParams['axes.unicode_minus']=False   # enable if minus signs render incorrectly under SimHei; set manually as needed


def read_excel(src_path):
    """Load award titles from the workbook, grouped by session year and region.

    Reads one sheet per session; each sheet has one column per region whose
    cells are award titles (non-string cells, e.g. NaN padding, are dropped).

    Args:
        src_path: path to the source Excel workbook.

    Returns:
        Nested dict {year_prefix: {region: [title, ...]}} where year_prefix
        is the first three characters of the sheet name.
    """
    sheet_names = ['第七届（2014）', '第八届（2018）', '第九届（2022)']
    area_names = ['东部', '中部', '西部', '东北']
    all_titles = {}
    for sheet in sheet_names:
        frame = pd.read_excel(src_path, sheet)
        all_titles[sheet[:3]] = {
            area: [cell for cell in frame[area].to_list() if isinstance(cell, str)]
            for area in area_names
        }
    return all_titles


def count_words_freqs(word_list, content_list, relevance_thre=20):
    """Count normalized keyword and keyword-pair frequencies over contents.

    A keyword "occurs" in a content string when it appears as a substring.
    Frequencies are counts divided by len(content_list).

    Args:
        word_list: keywords to search for (substring match).
        content_list: strings (e.g. award titles) to scan.
        relevance_thre: pair entries with a normalized frequency below this
            value are dropped from the returned relevance dict.

    Returns:
        Tuple (word_freqs, relevance_freqs). word_freqs maps each keyword to
        its frequency; relevance_freqs maps 'w1_w2' (in word_list order) to
        the pair's co-occurrence frequency, filtered by relevance_thre.
    """
    content_num = len(content_list)

    # Accumulate integer counts first and divide once at the end: fewer
    # float operations and better precision than adding 1/content_num per hit.
    word_counts = {word: 0 for word in word_list}
    pair_counts = {}
    for i, first in enumerate(word_list):
        for second in word_list[i + 1:]:
            pair_counts[first + '_' + second] = 0

    for content in content_list:
        # Keywords appearing in this content, kept in word_list order so the
        # pair keys below match the initialized 'first_second' key order.
        contained = [word for word in word_list if word in content]
        for i, first in enumerate(contained):
            word_counts[first] += 1
            for second in contained[i + 1:]:
                pair_counts[first + '_' + second] += 1

    # Normalize by the number of contents. With an empty content_list all
    # counts stay 0 (no division), matching the original behavior.
    word_freqs = dict(word_counts)
    relevance_freqs = dict(pair_counts)
    if content_num:
        word_freqs = {w: c / content_num for w, c in word_counts.items()}
        relevance_freqs = {k: c / content_num for k, c in pair_counts.items()}

    # Drop weakly related pairs (keep entries at or above the threshold).
    relevance_freqs = {k: f for k, f in relevance_freqs.items() if f >= relevance_thre}

    # Report the largest single-keyword frequency, floored at 0.01 as before.
    max_freq = max([0.01, *word_freqs.values()])
    print('Max frequency:', max_freq)
    return word_freqs, relevance_freqs


def plot_relevance1(img_path: str, all_keywords: list, keywords_freqs: dict, relevance_freqs: dict, max_freq=1):
    """Draw a hub-and-ring keyword relevance graph and save it to img_path.

    The first keyword sits at the center; the remaining keywords are spread
    evenly on a unit circle around it. Point size scales with the keyword's
    frequency; line width scales with the pair's relevance frequency.

    Args:
        img_path: output image file path.
        all_keywords: keywords in the order used to build 'w1_w2' pair keys.
        keywords_freqs: keyword -> frequency (as from count_words_freqs).
        relevance_freqs: 'w1_w2' -> frequency; missing pairs draw no line.
        max_freq: frequency that maps to the maximum line width.
    """
    word_num = len(all_keywords)
    # Guard against division by zero when there is only one keyword
    # (the ring loop below is empty in that case anyway).
    ring_count = max(word_num - 1, 1)
    word_points = np.zeros((word_num, 2), dtype=float)
    radius = 1
    center = 1
    shift_degree = np.pi / 2  # start the ring at 12 o'clock
    word_points[0, :] = [center, center]
    for i in range(word_num - 1):
        angle = (i / ring_count) * 2 * np.pi + shift_degree
        word_points[i + 1, 0] = np.cos(angle) * radius + center
        word_points[i + 1, 1] = np.sin(angle) * radius + center

    # Point colors are evenly spaced positions in the color map.
    point_colors = np.flip(np.arange(word_num) / ring_count)
    color_map = 'rainbow'
    # Point area is proportional to the keyword frequency.
    point_sizes = [keywords_freqs[word] * 300 for word in all_keywords]

    fig, ax = plt.subplots(figsize=(8, 6))
    # Plot keyword points one at a time so each gets its own label/annotation.
    for i in range(word_num):
        # Wrap the scalar color value in a list so matplotlib maps it through
        # the colormap instead of warning about a single numeric color.
        ax.scatter(word_points[i, 0], word_points[i, 1], s=point_sizes[i], c=[point_colors[i]],
                   cmap=color_map, vmin=0, vmax=1, zorder=2, label=all_keywords[i])
        ax.annotate(all_keywords[i], (word_points[i, 0] + 0.05, word_points[i, 1] - 0.02), fontsize=12)
    # Draw a gray line (under the points, zorder=1) for each related pair.
    for i in range(word_num):
        for j in range(i + 1, word_num):
            relevance_key = all_keywords[i] + '_' + all_keywords[j]
            ij_freq = relevance_freqs.get(relevance_key, 0)
            if ij_freq > 0:
                line_width = ij_freq / max_freq * 12
                x_ij = [word_points[i, 0], word_points[j, 0]]
                y_ij = [word_points[i, 1], word_points[j, 1]]
                ax.plot(x_ij, y_ij, color='gray', linewidth=line_width, markersize=0, zorder=1)

    ax.set_xlim([-0.1, 2.1])
    ax.set_ylim([-0.1, 2.1])
    ax.set_aspect('equal', 'box')
    ax.set_axis_off()
    fig.tight_layout()
    fig.savefig(img_path)
    plt.close(fig)  # close this figure explicitly so repeated calls don't leak memory


def plot_relevance2(img_path: str, all_keywords: list, keywords_freqs: dict, relevance_freqs: dict, max_freq=1):
    """Draw a triangular heat-map of keyword relevance and save it to img_path.

    X axis lists keywords in order; Y axis lists them reversed, so the filled
    cells form the upper-left triangle. Diagonal cells (same keyword on both
    axes) are colored by the keyword's own frequency; off-diagonal cells by
    the pair's relevance frequency scaled by max_freq. Cells with no entry in
    relevance_freqs stay white.

    Args:
        img_path: output image file path.
        all_keywords: keywords in the order used to build 'w1_w2' pair keys.
        keywords_freqs: keyword -> frequency (as from count_words_freqs).
        relevance_freqs: 'w1_w2' -> frequency.
        max_freq: frequency that maps to the darkest color.
    """
    word_num = len(all_keywords)
    bin_width = 2
    # Build a 5-step discrete colormap from the darker 5 shades of 'Purples'.
    cmap0 = plt.colormaps['Purples']
    color_list = []
    for i in range(5):
        color_list.append(cmap0((i+1) / 5))
    cmap = LinearSegmentedColormap.from_list("mycmap", color_list).resampled(5)

    x_labels = all_keywords.copy()
    y_labels = all_keywords.copy()
    y_labels.reverse()

    # Decide cell colors from frequencies. relevance_mesh starts all-white
    # (ones); only the triangle below is overwritten. Shape is (rows=y, cols=x, RGB).
    relevance_mesh = np.ones((word_num, word_num, 3), dtype=float)
    for i in range(word_num):
        word_x = x_labels[i]
        # j only runs over the rows where y_labels[j] comes at or after
        # word_x in all_keywords (y is reversed), so word_x + '_' + word_y
        # always matches the 'earlier_later' key order of relevance_freqs.
        for j in range(word_num - i):
            word_y = y_labels[j]
            word_xy = word_x + '_' + word_y
            if word_x == word_y:
                # Diagonal: color by the keyword's own frequency (drop alpha).
                relevance_mesh[j, i] = np.array(cmap(keywords_freqs[word_x]))[:3]
            elif word_xy in relevance_freqs:
                relevance_mesh[j, i] = np.array(cmap(relevance_freqs[word_xy] / max_freq))[:3]

    # Wrap tick labels longer than two characters onto two lines.
    for i in range(word_num):
        if len(x_labels[i]) > 2:
            x_labels[i] = x_labels[i][:2] + '\n' + x_labels[i][2:]

    fig, ax = plt.subplots(figsize=(8, 6))
    # Center each tick label in its bin_width-wide cell.
    label_positions = (np.arange(len(all_keywords)) + 0.5) * bin_width
    ax.set_xticks(label_positions, labels=x_labels)
    ax.set_yticks(label_positions, labels=y_labels)

    x_mesh = np.arange(word_num + 1) * bin_width
    y_mesh = np.arange(word_num + 1) * bin_width
    ax.pcolormesh(x_mesh, y_mesh, relevance_mesh)
    # The mesh carries explicit RGB values, so attach a ScalarMappable for the colorbar.
    plt.colorbar(plt.cm.ScalarMappable(norm=Normalize(0, 1), cmap=cmap), ax=ax, label="Frequency")

    ax.set_aspect('equal', 'box')
    fig.tight_layout()
    fig.savefig(img_path)
    plt.close()


def main():
    """Read award titles from the workbook and save one relevance plot per year/region."""
    src_path = './input/副本九届职教成果奖汇总.xlsx'
    dst_dir = './output/lines'
    # dst_dir = './output/squares'
    all_keywords = ['实践', '培养', '高职', '专业', '创新', '模式', '人才培养', '探索', '体系',
                    '研究', '改革', '建设', '协同', '融合']

    # makedirs creates missing parent directories ('./output') too; os.mkdir
    # would raise FileNotFoundError when the parent does not exist yet.
    os.makedirs(dst_dir, exist_ok=True)

    all_titles = read_excel(src_path)
    for year_name, year_titles in all_titles.items():
        for area_name, area_titles in year_titles.items():
            img_name = year_name + '_' + area_name
            print(img_name)
            img_path = os.path.join(dst_dir, img_name + '.png')
            keywords_freqs, relevance_freqs = count_words_freqs(all_keywords, area_titles, relevance_thre=0)
            plot_relevance1(img_path, all_keywords, keywords_freqs, relevance_freqs, max_freq=1)
            # plot_relevance2(img_path, all_keywords, keywords_freqs, relevance_freqs, max_freq=1)


if __name__ == '__main__':
    main()
