'''
Version: 0.0.1
Author: ider
Date: 2021-07-06 15:09:22
LastEditors: ider
LastEditTime: 2021-07-07 00:42:11
Description: 
'''
import csv
import json
from typing import Collection
from logic import miniworld
import json
import pdb
import numpy as np
import pymongo

# Root directory of the Wikipedia-dump-derived data snapshots.
fa_path = "/home/ni/data/wiki"

# Subjects removed from the analysis: 'Medicine', 'Economics', 'Linguistics', 'Geography'.
Subjects = ['Biology', 'Chemistry', 'Computer science', 'Engineering disciplines', 'Environmental science', 'Geology',
            'Materials science', 'Mathematics', 'Philosophy', 'Physics', 'Political science', 'Psychology', 'Sociology', ]

def get_all_linksout(file_path: str) -> dict:
    """Read a tab-separated out-link file into an adjacency dict.

    Each line holds a page id and a JSON-encoded list of link-target ids.
    Lines whose target list is empty are skipped.

    Returns a dict mapping int page id -> set of target ids.
    """
    links_by_page = {}
    with open(file_path, "rt") as fh:
        for record in csv.reader(fh, delimiter='\t'):
            targets = json.loads(record[1])
            if targets:
                links_by_page[int(record[0])] = set(targets)
    return links_by_page


def get_all_linksout_filter(file_path: str, rangeset: set) -> dict:
    """Read a tab-separated out-link file, keeping only links inside *rangeset*.

    Each line holds a page id and a JSON-encoded list of link-target ids.
    A line contributes an entry only when its page id is in *rangeset* and
    at least one of its targets is also in *rangeset*.

    Returns a dict mapping int page id -> set of in-range target ids.
    """
    ret_dict = {}
    with open(file_path, "rt") as f:
        reader = csv.reader(f, delimiter='\t')
        for row in reader:
            key = int(row[0])
            # Skip out-of-range sources before paying for the JSON parse.
            if key not in rangeset:
                continue
            page_list = json.loads(row[1])
            if not page_list:
                continue
            # Set intersection replaces the original manual filter loop.
            filtered = rangeset.intersection(page_list)
            if filtered:
                ret_dict[key] = filtered
    return ret_dict


def get_subject_page_ids(file_path: str) -> set:
    """Read one integer page id per line and return them as a set."""
    with open(file_path, "rt") as fh:
        return {int(line) for line in fh}


def get_subject_page_ids_filter(file_path: str, rangeset: set) -> tuple:
    """Read integer page ids (one per line), keeping only those in *rangeset*.

    Returns a ``(filtered_ids, total_line_count)`` tuple, where
    ``total_line_count`` is the number of lines in the file before
    filtering.  (The original annotation said ``-> set`` but the function
    has always returned a tuple.)
    """
    ret_set = set()
    all_count = 0
    with open(file_path, "rt") as f:
        for row in f:
            all_count += 1
            key = int(row)
            if key in rangeset:
                ret_set.add(key)
    return ret_set, all_count

  
def my_logic():

    """
    Compute small-world metrics for every subject's internal link network,
    one yearly snapshot at a time (2007-2021), and dump all results to
    /tmp/data.json in a single write.

    For each (year, subject) the appended record is
    [avg_path_length, clustering_coefficient, num_graph_vertices,
     num_subject_pages].
    """
    ret_data = {}
    for year in range(2007, 2022):
        # year = 2020
        # path =f"/tmp{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new/all_linksout.txt"
        # Full out-link adjacency (page id -> set of target ids) for this year.
        path = f"{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new/all_linksout.txt"
        all_links_out_dict = get_all_linksout(path)

        for subject in Subjects:
            ret_data.setdefault(subject, [])

            # path = f"/tmp{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new/arts_{subject}.txt"
            path = f"{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new/arts_{subject}.txt"
            subject_ids_set = get_subject_page_ids(path)

            # Map each subject page id to a dense 0..n-1 graph node index
            # (and keep the reverse mapping).
            node_subject_id_map = {}
            subject_node_id_map = {}
            i = 0 
            for subject_id in subject_ids_set:
                subject_node_id_map[subject_id] = i
                node_subject_id_map[i] = subject_id
                i += 1

            # Build the edge list restricted to links between pages of this subject.
            nc  = 0
            edges = []
            for subject_id in subject_ids_set:
                node_id = subject_node_id_map[subject_id]

                if subject_id not in all_links_out_dict:
                    nc += 1  # subject pages with no out-links at all
                    continue

                for target_id in all_links_out_dict[subject_id]:
                    if target_id in subject_ids_set:
                        target_node_id = subject_node_id_map[target_id]
                        edges.append([node_id, target_node_id])
            # print("subject_ids_set", len(subject_ids_set),nc)
            # print(len(edges), edges[0:2])
            mw = miniworld.Graph(edges,isDirected=True)

            avg_path = mw.get_avg_path()
            cc = mw.get_clustering_coefficient()
            nv = mw.get_num_vertices()
            ret_data[subject].append([avg_path, cc, nv, len(subject_ids_set)])

            print(subject, avg_path, cc, nv,  len(subject_ids_set))

    with open("/tmp/data.json",'wt')as f:
        json.dump(ret_data, f)


def my_logic_level(typePath="linksin_lv2_node_v5_newDB_new_noLiterature"):
    """
    Compute small-world metrics for every subject's internal link network,
    one quarterly snapshot at a time (2007 Q1 - 2021 Q1), and dump all
    results to /tmp/data_{typePath}.json in a single write.

    :param typePath: snapshot sub-directory the per-subject article-id
                     files are read from.

    NOTE(review): the all_linksout.txt path below hardcodes
    "linksin_lv2_node_v5_newDB_new_noLiterature" instead of using
    {typePath}; confirm this is intentional for non-default typePath.
    """
    ret_data = {}
    for year in range(2007, 2022):
        for month in [3,6,9,12]:
            # Data ends at 2021 Q1.
            if year == 2021 and month >3:
                continue
            # year = 2020
            # path =f"/tmp{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new/all_linksout.txt"
            # Full out-link adjacency (page id -> set of target ids) for this snapshot.
            path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv2_node_v5_newDB_new_noLiterature/all_linksout.txt"
            all_links_out_dict = get_all_linksout(path)

            for subject in Subjects:
                ret_data.setdefault(subject, [])

                # path = f"/tmp{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new_noLiterature/arts_{subject}.txt"
                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/{typePath}/arts_{subject}.txt"
                subject_ids_set = get_subject_page_ids(path)

                # Map each subject page id to a dense 0..n-1 graph node index.
                node_subject_id_map = {}
                subject_node_id_map = {}
                i = 0 
                for subject_id in subject_ids_set:
                    subject_node_id_map[subject_id] = i
                    node_subject_id_map[i] = subject_id
                    i += 1

                # Build the edge list restricted to links between pages of this subject.
                nc  = 0
                edges = []
                for subject_id in subject_ids_set:
                    node_id = subject_node_id_map[subject_id]

                    if subject_id not in all_links_out_dict:
                        nc += 1  # subject pages with no out-links at all
                        continue

                    for target_id in all_links_out_dict[subject_id]:
                        if target_id in subject_ids_set:
                            target_node_id = subject_node_id_map[target_id]
                            edges.append([node_id, target_node_id])
                # print("subject_ids_set", len(subject_ids_set),nc)
                # print(len(edges), edges[0:2])
                mw = miniworld.Graph(edges,isDirected=True)

                avg_path = mw.get_avg_path()
                cc = mw.get_clustering_coefficient()
                nv = mw.get_num_vertices()
                ret_data[subject].append([avg_path, cc, nv, len(subject_ids_set)])

                print(subject, avg_path, cc, nv,  len(subject_ids_set))

    with open(f"/tmp/data_{typePath}.json",'wt')as f:
        json.dump(ret_data, f)


def xueshu_small_world(typePath="linksin_lv2_node_v5_newDB_new_noLiterature"):
    """
    Small-world computation inside the academic ("xueshu") circle.

    For each quarterly snapshot this builds one directed graph over the
    union of all subjects' page ids, records its average shortest-path
    length under key "xueshu", then reuses the graph's all-pairs shortest
    paths to derive each subject's internal average distance.  All results
    are dumped to /tmp/data_xueshu_{typePath}.json.

    NOTE(review): the all_linksout.txt path hardcodes
    "linksin_lv2_node_v5_newDB_new_noLiterature" rather than {typePath};
    confirm intentional.
    """
    ret_data = {}
    for year in range(2007, 2022):
        for month in [3,6,9,12]:
            # Data ends at 2021 Q1.
            if year == 2021 and month >3:
                continue
            # year = 2020
            # path =f"/tmp{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new/all_linksout.txt"
            path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv2_node_v5_newDB_new_noLiterature/all_linksout.txt"
            all_links_out_dict = get_all_linksout(path)
            # Union of every subject's page ids for this snapshot.
            all_subject_ids_set = set()
            for subject in Subjects:
                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/{typePath}/arts_{subject}.txt"
                subject_ids_set = get_subject_page_ids(path)
                all_subject_ids_set.update(subject_ids_set)

            # Map page ids to dense 0..n-1 graph node indices.
            node_subject_id_map = {}
            subject_node_id_map = {}
            for i,subject_id in enumerate(all_subject_ids_set):
                subject_node_id_map[subject_id] = i
                node_subject_id_map[i] = subject_id

            # Build the re-indexed edge list (links within the academic set).

            edges = []
            nc = 0
            for subject_id in all_subject_ids_set:
                node_id = subject_node_id_map[subject_id]

                if subject_id not in all_links_out_dict:
                    nc += 1  # pages with no out-links at all
                    continue

                for target_id in all_links_out_dict[subject_id]:
                    if target_id in all_subject_ids_set:
                        target_node_id = subject_node_id_map[target_id]
                        edges.append([node_id, target_node_id])

            mw = miniworld.Graph(edges,isDirected=True)
            ave_distance = mw.get_avg_path()
            ret_data.setdefault("xueshu", [])
            ret_data['xueshu'].append([ave_distance, mw.component_graph.num_vertices()])

            # All-pairs shortest paths, reused for the per-subject averages below.
            all_sp = mw.all_sp
            graph_size = mw.graph.num_vertices()

            print("xueshu",len(all_links_out_dict),len(all_subject_ids_set),year,month,len(edges),ave_distance)

            for subject in Subjects:

                # pdb.set_trace()
                ret_data.setdefault(subject, [])

            #     # path = f"/tmp{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new_noLiterature/arts_{subject}.txt"
                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/{typePath}/arts_{subject}.txt"
                subject_ids_set = get_subject_page_ids(path)

                # Boolean node mask marking this subject's pages.
                # NOTE(review): the bare except below also swallows unexpected
                # errors; KeyError/IndexError are the anticipated ones.
                index_array = np.full(graph_size,False,np.bool_)
                for ids in subject_ids_set:
                    try:
                        index_array[subject_node_id_map[ids]] = True
                    except:
                        print("index out range")
                subject_size = len(subject_ids_set)
                ret = 0
                for i in range(graph_size):
                    if index_array[i]:
                        # Distance row of node i; sum only the subject's columns.
                        vvvv = all_sp[i].get_array()
                        if len(vvvv) > 0:
                            ret = np.sum([ret,np.sum(vvvv * index_array)])
                        else:
                            # Node without distance data: drop it from the average.
                            subject_size -= 1
                # NOTE(review): raises ZeroDivisionError when subject_size <= 1.
                ave_distance = float(ret/(subject_size**2-subject_size))
                ret_data[subject].append([ave_distance, subject_size])
                print(subject,len(all_links_out_dict),subject_size,year,month,len(edges),ave_distance)

    with open(f"/tmp/data_xueshu_{typePath}.json",'wt')as f:
        json.dump(ret_data, f)


def xueshu_small_world_disruption(lv="linksin_lv2_node_v5_newDB_xueshu_new_noLiterature"):
    """
    Small-world computation inside the academic circle, plus the
    distribution of shortest-path lengths.

    NOTE(review): this function is DEAD CODE — an identical
    xueshu_small_world_disruption is defined again later in this module and
    shadows this one.  Additionally, the default `lv` is a full directory
    name while the paths below interpolate it as "linksin_lv{lv}_...", so
    calling with the default produces a malformed path; sibling functions
    use lv="2".
    """
    ret_data = {}
    for year in range(2007, 2022):
        for month in [3,6,9,12]:
            # Data ends at 2021 Q1.
            if year == 2021 and month >3:
                continue
            # year = 2020
            # Union of every subject's page ids for this snapshot.
            full_subject_ids_set = set()
            for subject in Subjects:
                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature/arts_{subject}.txt"
                subject_ids_set = get_subject_page_ids(path)
                full_subject_ids_set.update(subject_ids_set)

            path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_new_noLiterature/all_linksout.txt"
            all_links_out_dict = get_all_linksout_filter(path, full_subject_ids_set)


            # Keep only pages with at least one out- or in-link inside the
            # academic circle.
            exists_links_set = set()
            for key, item in all_links_out_dict.items():
                exists_links_set.add(key)
                exists_links_set.update(item)

            all_subject_ids_set = set()
            for subject_id in full_subject_ids_set:
                if subject_id in exists_links_set:
                    all_subject_ids_set.add(subject_id)

            # Map page ids to dense 0..n-1 graph node indices.
            node_subject_id_map = {}
            subject_node_id_map = {}
            for i,subject_id in enumerate(all_subject_ids_set):
                subject_node_id_map[subject_id] = i
                node_subject_id_map[i] = subject_id

            # Build the re-indexed edge list.
            edges = []

            for subject_id in all_subject_ids_set:
                node_id = subject_node_id_map[subject_id]

                if subject_id not in all_links_out_dict:
                    continue

                for target_id in all_links_out_dict[subject_id]:
                    if target_id in all_subject_ids_set:
                        target_node_id = subject_node_id_map[target_id]
                        edges.append([node_id, target_node_id])

            mw = miniworld.Graph(edges,isDirected=True)
            ave_distance = mw.get_avg_path()
            # Histogram of all pairwise shortest-path lengths.
            vvvv = mw.all_sp.get_2d_array(np.arange(mw.graph.num_vertices()))
            unique, counts = np.unique(vvvv, return_counts=True)
            print(unique,counts)

            ret_data.setdefault("xueshu", [])
            ret_data['xueshu'].append([ave_distance, mw.component_graph.num_vertices(), len(full_subject_ids_set),list(unique),list(counts)])

            all_sp = mw.all_sp
            graph_size = mw.graph.num_vertices()

            print("xueshu",mw.component_graph.num_vertices(),len(full_subject_ids_set),year,month,len(edges),ave_distance)

            for subject in Subjects:

                # pdb.set_trace()
                ret_data.setdefault(subject, [])

            #     # path = f"/tmp{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new_noLiterature/arts_{subject}.txt"
                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature/arts_{subject}.txt"
                # subject_ids_set = get_subject_page_ids(path)
                subject_ids_set,all_subject_count = get_subject_page_ids_filter(path,exists_links_set)

                # Boolean node mask marking this subject's pages.
                # NOTE(review): bare except below also swallows unexpected errors.
                index_array = np.full(graph_size,False,np.bool_)
                for ids in subject_ids_set:
                    try:
                        index_array[subject_node_id_map[ids]] = True
                    except:
                        print("index out range")
                subject_size = len(subject_ids_set)

                # Per-node distance rows (for the histogram) and per-node sums.
                np_array_cache = []
                np_sum_cache = []
                for i in range(graph_size):
                    if index_array[i]:
                        vvvv = all_sp[i].get_array()
                        if len(vvvv) > 0:
                            np_array_cache.append(vvvv)
                            np_sum_cache.append(np.sum(vvvv * index_array))
                        else:
                            # Node without distance data: drop it from the average.
                            subject_size -= 1
                # NOTE(review): raises ZeroDivisionError when subject_size <= 1.
                ave_distance = float(np.sum(np_sum_cache)/(subject_size**2-subject_size))

                unique, counts = np.unique(np_array_cache, return_counts=True)
                print(unique,counts)
                ret_data[subject].append([ave_distance, subject_size, all_subject_count,list(unique),list(counts)])
                print(subject,subject_size,all_subject_count,year,month,len(edges),ave_distance)

    with open(f"/tmp/data_xueshu_linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature.json",'wt')as f:
        json.dump(ret_data, f)


def xueshu_small_world_disruption(lv="2"):
    """
    Small-world computation inside the academic ("xueshu") circle, plus the
    distribution of shortest-path lengths.

    For each quarterly snapshot (2007 Q1 - 2021 Q1) this builds the directed
    link graph over the union of all subjects' pages, records the overall
    average distance and path-length histogram under key "xueshu", then
    reuses the all-pairs shortest paths to measure each subject's internal
    average distance and histogram.  Results are dumped to
    /tmp/data_xueshu_linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature.json.

    :param lv: category depth level inserted into the snapshot directory
               names, e.g. "2" -> "linksin_lv2_...".  (Fixed: the previous
               default was a full directory name, which produced a
               malformed path when interpolated as "lv{lv}"; siblings such
               as xueshu_small_world_recon use "2".)
    """
    ret_data = {}
    for year in range(2007, 2022):
        for month in [3,6,9,12]:
            # Data ends at 2021 Q1.
            if year == 2021 and month >3:
                continue
            # Union of every subject's page ids for this snapshot.
            full_subject_ids_set = set()
            for subject in Subjects:
                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature/arts_{subject}.txt"
                subject_ids_set = get_subject_page_ids(path)
                full_subject_ids_set.update(subject_ids_set)

            # Out-link adjacency restricted to the academic page set.
            path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_new_noLiterature/all_linksout.txt"
            all_links_out_dict = get_all_linksout_filter(path, full_subject_ids_set)

            # Keep only pages with at least one out- or in-link inside the
            # academic circle.
            exists_links_set = set()
            for key, item in all_links_out_dict.items():
                exists_links_set.add(key)
                exists_links_set.update(item)

            all_subject_ids_set = full_subject_ids_set & exists_links_set

            # Map page ids to dense 0..n-1 graph node indices (both ways).
            node_subject_id_map = {}
            subject_node_id_map = {}
            for i, subject_id in enumerate(all_subject_ids_set):
                subject_node_id_map[subject_id] = i
                node_subject_id_map[i] = subject_id

            # Build the re-indexed edge list.
            edges = []
            for subject_id in all_subject_ids_set:
                node_id = subject_node_id_map[subject_id]
                for target_id in all_links_out_dict.get(subject_id, ()):
                    if target_id in all_subject_ids_set:
                        edges.append([node_id, subject_node_id_map[target_id]])

            mw = miniworld.Graph(edges, isDirected=True)
            ave_distance = mw.get_avg_path()
            # Histogram of all pairwise shortest-path lengths.
            vvvv = mw.all_sp.get_2d_array(np.arange(mw.graph.num_vertices()))
            unique, counts = np.unique(vvvv, return_counts=True)
            print(unique, counts)

            ret_data.setdefault("xueshu", [])
            ret_data['xueshu'].append([ave_distance, mw.component_graph.num_vertices(), len(full_subject_ids_set), list(unique), list(counts)])

            all_sp = mw.all_sp
            graph_size = mw.graph.num_vertices()

            print("xueshu", mw.component_graph.num_vertices(), len(full_subject_ids_set), year, month, len(edges), ave_distance)

            for subject in Subjects:
                ret_data.setdefault(subject, [])

                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature/arts_{subject}.txt"
                subject_ids_set, all_subject_count = get_subject_page_ids_filter(path, exists_links_set)

                # Boolean node mask marking this subject's pages.
                index_array = np.full(graph_size, False, np.bool_)
                for ids in subject_ids_set:
                    try:
                        index_array[subject_node_id_map[ids]] = True
                    except (KeyError, IndexError):
                        # Page id not in the connected graph / index past the
                        # graph's vertex range.  (Was a bare except.)
                        print("index out range")
                subject_size = len(subject_ids_set)

                # Per-node distance rows (for the histogram) and per-node sums.
                np_array_cache = []
                np_sum_cache = []
                for i in range(graph_size):
                    if index_array[i]:
                        vvvv = all_sp[i].get_array()
                        if len(vvvv) > 0:
                            np_array_cache.append(vvvv)
                            np_sum_cache.append(np.sum(vvvv * index_array))
                        else:
                            # Node without distance data: drop it from the average.
                            subject_size -= 1
                # Guard against subjects with fewer than two counted pages,
                # which previously raised ZeroDivisionError.
                denominator = subject_size**2 - subject_size
                ave_distance = float(np.sum(np_sum_cache)/denominator) if denominator else 0.0

                unique, counts = np.unique(np_array_cache, return_counts=True)
                print(unique, counts)
                ret_data[subject].append([ave_distance, subject_size, all_subject_count, list(unique), list(counts)])
                print(subject, subject_size, all_subject_count, year, month, len(edges), ave_distance)

    with open(f"/tmp/data_xueshu_linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature.json", 'wt') as f:
        json.dump(ret_data, f)



def xueshu_small_world_recon(lv="2",startYear=2004):
    """
    Small-world computation inside the academic circle, with the
    distribution of shortest-path lengths; results are written to MongoDB
    (one document per snapshot under "xueshu_{year}_{month}" and one per
    subject under "{subject}_{year}_{month}").  Snapshots already present
    in the collection are skipped, so the job is resumable.

    :param lv: category depth level inserted into directory names
               ("2" -> "linksin_lv2_...").
    :param startYear: first year to process (data starts 2004 Q3).

    NOTE(review): the local name Collection shadows typing.Collection
    imported at module level.
    """
    Collection = pymongo.MongoClient("192.168.1.222").graph[f"wikipeida_direct_xueshu_lv{lv}_v20211110"]
    # ret_data = {}
    for year in range(startYear, 2022):
        for month in [3,6,9,12]:

            # Data starts at 2004 Q3 and ends at 2021 Q1.
            if year == 2004 and month <= 6:
                continue

            if year == 2021 and month >3:
                continue
            # Skip snapshots already computed (resume support).
            doc = Collection.find_one({"_id":f"xueshu_{year}_{month}"})
            if doc:
                continue
            # year = 2020
            # Union of every subject's page ids for this snapshot.
            full_subject_ids_set = set()
            for subject in Subjects:
                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature/arts_{subject}.txt"
                subject_ids_set = get_subject_page_ids(path)
                full_subject_ids_set.update(subject_ids_set)

            path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_new_noLiterature/all_linksout.txt"
            all_links_out_dict = get_all_linksout_filter(path, full_subject_ids_set)


            # Keep only pages with at least one out- or in-link inside the
            # academic circle.
            exists_links_set = set()
            for key, item in all_links_out_dict.items():
                exists_links_set.add(key)
                exists_links_set.update(item)

            all_subject_ids_set = set()
            for subject_id in full_subject_ids_set:
                if subject_id in exists_links_set:
                    all_subject_ids_set.add(subject_id)

            # Map page ids to dense 0..n-1 graph node indices.
            node_subject_id_map = {}
            subject_node_id_map = {}
            for i,subject_id in enumerate(all_subject_ids_set):
                subject_node_id_map[subject_id] = i
                node_subject_id_map[i] = subject_id

            # Build the re-indexed edge list.
            edges = []

            for subject_id in all_subject_ids_set:
                node_id = subject_node_id_map[subject_id]

                if subject_id not in all_links_out_dict:
                    continue

                for target_id in all_links_out_dict[subject_id]:
                    if target_id in all_subject_ids_set:
                        target_node_id = subject_node_id_map[target_id]
                        edges.append([node_id, target_node_id])

            mw = miniworld.GraphEX(edges,isDirected=True)
            ave_distance, unique, counts = mw.get_avg_path_and_unique()
            print(unique,counts)

            graph_size = mw.graph.num_vertices()
            all_sp = mw.all_sp

            # ret_data.setdefault("xueshu", [])
            # ret_data['xueshu'].append([ave_distance, graph_size, len(full_subject_ids_set),mw.get_num_edges(),list(unique),list(counts)])

            # Whole-circle record for this snapshot.
            Collection.insert_one({
                "_id": f"xueshu_{year}_{month}",
                "ave_distance":ave_distance,
                "graph_size": graph_size,
                "subject_size":len(full_subject_ids_set),
                "edge_size":mw.get_num_edges(),
                "unique":[int(item) for item in unique],
                "count":[int(item) for item in counts]
            })

            print("xueshu",graph_size,len(full_subject_ids_set),year,month,mw.get_num_edges(),ave_distance)

            for subject in Subjects:

                # pdb.set_trace()
                # ret_data.setdefault(subject, [])

            #     # path = f"/tmp{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new_noLiterature/arts_{subject}.txt"
                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature/arts_{subject}.txt"
                # subject_ids_set = get_subject_page_ids(path)
                subject_ids_set,all_subject_count = get_subject_page_ids_filter(path,exists_links_set)

                # Boolean node mask marking this subject's pages.
                # NOTE(review): bare except below also swallows unexpected
                # errors; KeyError/IndexError are the anticipated ones.  The
                # sibling xueshu_small_world_connect also guards against an
                # empty subject_ids_set here; this function does not.
                index_array = np.full(graph_size,False,np.bool_)
                for ids in subject_ids_set:
                    try:
                        index_array[subject_node_id_map[ids]] = True
                    except:
                        print("index out range")
                subject_size = len(subject_ids_set)
                subject_size_end = subject_size
                # per-node distance rows, kept for the histogram
                np_array_cache = []
                # per-node sum of distances to this subject's nodes
                np_sum_cache = []
                # per-node count of reachable subject nodes
                path_size = []
                for i in range(graph_size):
                    if index_array[i]:
                        # shortest-distance row for node i (ndarray)
                        vvvv = all_sp[i].get_array()
                        # restrict to this subject's columns
                        tmpv = vvvv[index_array]
                        np_array_cache.append(tmpv)
                        # 255 is can't reach
                        vv = tmpv[tmpv!=255]

                        path_size.append(np.count_nonzero(vv))

                        ss = np.sum(vv)
                        if ss == 0 :
                            # node reaches no other subject node
                            subject_size_end -= 1
                        np_sum_cache.append(ss)

                unique, counts = np.unique(np_array_cache, return_counts=True)
                # NOTE(review): if no paths exist np.sum(path_size) is 0 and
                # this division yields nan/inf with a numpy warning.
                ave_distance = float(np.sum(np_sum_cache)/np.sum(path_size))

                print(unique,counts)
                # all_subject_count: all articles of the subject
                # subject_size: subject articles connected within the academic circle
                # subject_size_end: additionally excluding articles unreachable within the subject
                # ret_data[subject].append([ave_distance, subject_size_end, subject_size,list(unique),list(counts)])
                Collection.insert_one({
                    "_id": f"{subject}_{year}_{month}",
                    "ave_distance":ave_distance,
                    "graph_size": subject_size_end,
                    "subject_size":subject_size,
                    # "edge_size":0,
                    "unique":[int(item) for item in unique],
                    "count":[int(item) for item in counts]
                })
                print(subject,subject_size_end,subject_size,year,month,len(edges),ave_distance)

    # with open(f"/tmp/data_xueshu_linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature.json",'wt')as f:
    #     json.dump(ret_data, f)


def xueshu_small_world_connect(lv="2",startYear=2004):
    """
    Small-world computation inside the academic circle (connectivity
    variant), with the distribution of shortest-path lengths; results are
    written to the "_connect_" MongoDB collection, one document per
    snapshot under "xueshu_{year}_{month}" and one per subject.  Snapshots
    already present are skipped, so the job is resumable.

    :param lv: category depth level inserted into directory names
               ("2" -> "linksin_lv2_...").
    :param startYear: first year to process (data starts 2004 Q3).

    NOTE(review): this variant builds miniworld.Graph but calls
    get_avg_path_and_unique(), which the recon variant obtains from
    miniworld.GraphEX — confirm Graph also provides that method.
    The local name Collection shadows typing.Collection.
    """
    Collection = pymongo.MongoClient("192.168.1.222").graph[f"wikipeida_direct_xueshu_lv{lv}_connect_v20211110"]
    # ret_data = {}
    for year in range(startYear, 2022):
        for month in [3,6,9,12]:

            # Data starts at 2004 Q3 and ends at 2021 Q1.
            if year == 2004 and month <= 6:
                continue

            if year == 2021 and month >3:
                continue
            # Skip snapshots already computed (resume support).
            doc = Collection.find_one({"_id":f"xueshu_{year}_{month}"})
            if doc:
                continue
            # year = 2020
            # Union of every subject's page ids for this snapshot.
            full_subject_ids_set = set()
            for subject in Subjects:
                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature/arts_{subject}.txt"
                subject_ids_set = get_subject_page_ids(path)
                full_subject_ids_set.update(subject_ids_set)

            path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_new_noLiterature/all_linksout.txt"
            all_links_out_dict = get_all_linksout_filter(path, full_subject_ids_set)


            # Keep only pages with at least one out- or in-link inside the
            # academic circle.
            exists_links_set = set()
            for key, item in all_links_out_dict.items():
                exists_links_set.add(key)
                exists_links_set.update(item)

            all_subject_ids_set = set()
            for subject_id in full_subject_ids_set:
                if subject_id in exists_links_set:
                    all_subject_ids_set.add(subject_id)

            # Map page ids to dense 0..n-1 graph node indices.
            node_subject_id_map = {}
            subject_node_id_map = {}
            for i,subject_id in enumerate(all_subject_ids_set):
                subject_node_id_map[subject_id] = i
                node_subject_id_map[i] = subject_id

            # Build the re-indexed edge list.
            edges = []

            for subject_id in all_subject_ids_set:
                node_id = subject_node_id_map[subject_id]

                if subject_id not in all_links_out_dict:
                    continue

                for target_id in all_links_out_dict[subject_id]:
                    if target_id in all_subject_ids_set:
                        target_node_id = subject_node_id_map[target_id]
                        edges.append([node_id, target_node_id])

            mw = miniworld.Graph(edges,isDirected=True)
            ave_distance, unique, counts = mw.get_avg_path_and_unique()
            print(unique,counts)

            graph_size = mw.graph.num_vertices()
            all_sp = mw.all_sp

            # ret_data.setdefault("xueshu", [])
            # ret_data['xueshu'].append([ave_distance, graph_size, len(full_subject_ids_set),mw.get_num_edges(),list(unique),list(counts)])

            # Whole-circle record for this snapshot.
            Collection.insert_one({
                "_id": f"xueshu_{year}_{month}",
                "ave_distance":ave_distance,
                "graph_size": graph_size,
                "subject_size":len(full_subject_ids_set),
                "edge_size":mw.get_num_edges(),
                "unique":[int(item) for item in unique],
                "count":[int(item) for item in counts]
            })

            print("xueshu",graph_size,len(full_subject_ids_set),year,month,mw.get_num_edges(),ave_distance)

            for subject in Subjects:

                # pdb.set_trace()
                # ret_data.setdefault(subject, [])

            #     # path = f"/tmp{fa_path}/wdd/{year}/articleByCats/linksin_lv2_node_v5_newDB_new_noLiterature/arts_{subject}.txt"
                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature/arts_{subject}.txt"
                # subject_ids_set = get_subject_page_ids(path)
                subject_ids_set,all_subject_count = get_subject_page_ids_filter(path,exists_links_set)

                # Boolean node mask marking this subject's pages.
                # NOTE(review): bare except below also swallows unexpected
                # errors; KeyError/IndexError are the anticipated ones.
                index_array = np.full(graph_size,False,np.bool_)
                for ids in subject_ids_set:
                    try:
                        index_array[subject_node_id_map[ids]] = True
                    except:
                        print("index out range")
                subject_size = len(subject_ids_set)
                # Skip subjects with no connected pages in this snapshot.
                if not subject_size:
                    continue
                subject_size_end = subject_size
                # per-node distance rows, kept for the histogram
                np_array_cache = []
                # per-node sum of distances to this subject's nodes
                np_sum_cache = []
                # per-node count of reachable subject nodes
                path_size = []
                for i in range(graph_size):
                    if index_array[i]:
                        # shortest-distance row for node i (ndarray)
                        vvvv = all_sp[i].get_array()
                        # restrict to this subject's columns
                        tmpv = vvvv[index_array]
                        np_array_cache.append(tmpv)
                        # 255 is can't reach
                        vv = tmpv[tmpv!=255]

                        path_size.append(np.count_nonzero(vv))

                        ss = np.sum(vv)
                        if ss == 0 :
                            # node reaches no other subject node
                            subject_size_end -= 1
                        np_sum_cache.append(ss)

                unique, counts = np.unique(np_array_cache, return_counts=True)
                # NOTE(review): if no paths exist np.sum(path_size) is 0 and
                # this division yields nan/inf with a numpy warning.
                ave_distance = float(np.sum(np_sum_cache)/np.sum(path_size))

                print(unique,counts)
                # all_subject_count: all articles of the subject
                # subject_size: subject articles connected within the academic circle
                # subject_size_end: additionally excluding articles unreachable within the subject
                # ret_data[subject].append([ave_distance, subject_size_end, subject_size,list(unique),list(counts)])
                Collection.insert_one({
                    "_id": f"{subject}_{year}_{month}",
                    "ave_distance":ave_distance,
                    "graph_size": subject_size_end,
                    "subject_size":subject_size,
                    # "edge_size":0,
                    "unique":[int(item) for item in unique],
                    "count":[int(item) for item in counts]
                })
                print(subject,subject_size_end,subject_size,year,month,len(edges),ave_distance)

    # with open(f"/tmp/data_xueshu_linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature.json",'wt')as f:
    #     json.dump(ret_data, f)


def subject_small_world_recon(lv="2", startYear=2004):
    """
    Small-world computation inside the academic subgraph: average
    shortest-path distance and its distribution, per subject.

    For every quarter-end snapshot (months 3/6/9/12) from ``startYear``
    through 2021-03, and for every subject in ``Subjects``: load the
    outgoing-link table, keep only subject pages that participate in at
    least one in-subject link, remap page ids to dense node ids, build a
    directed graph with ``miniworld.GraphEX`` and persist the distance
    statistics to MongoDB.

    Args:
        lv: category depth level, used in file paths and the collection name.
        startYear: first snapshot year to process (inclusive).

    Side effects:
        Inserts one document per (subject, year, month) into the
        ``wikipeida_direct_subject_lv{lv}`` collection and prints progress.
    """
    # Renamed local from `Collection` so it no longer shadows the
    # typing.Collection import at module level.
    # NOTE: "wikipeida" spelling kept — it is the existing collection name.
    collection = pymongo.MongoClient("192.168.1.222").graph[f"wikipeida_direct_subject_lv{lv}"]
    for year in range(startYear, 2022):
        for month in [3, 6, 9, 12]:
            # Snapshots only exist from 2004-09 through 2021-03.
            if year == 2021 and month > 3:
                continue
            if year == 2004 and month <= 6:
                continue

            path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_new_noLiterature/all_linksout.txt"
            all_links_out_dict = get_all_linksout(path)

            for subject in Subjects:
                # Skip snapshots that were already computed (resumable runs).
                if collection.find_one({"_id": f"{subject}_{year}_{month}"}):
                    continue

                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature/arts_{subject}.txt"
                full_subject_ids_set = get_subject_page_ids(path)

                # Drop isolated pages: keep only ids that appear as source or
                # target of at least one in-subject link.
                subject_ids_set = set()
                for subject_id in full_subject_ids_set:
                    if subject_id in all_links_out_dict:
                        for target_id in all_links_out_dict[subject_id]:
                            if target_id in full_subject_ids_set:
                                subject_ids_set.update([subject_id, target_id])

                # Map wiki page ids to dense node ids for the graph library.
                # (The unused reverse map node->subject_id was removed.)
                subject_node_id_map = {
                    subject_id: i for i, subject_id in enumerate(subject_ids_set)
                }

                # Build the remapped edge list restricted to the subject.
                edges = []
                for subject_id in subject_ids_set:
                    if subject_id not in all_links_out_dict:
                        continue
                    node_id = subject_node_id_map[subject_id]
                    for target_id in all_links_out_dict[subject_id]:
                        if target_id in subject_ids_set:
                            edges.append([node_id, subject_node_id_map[target_id]])

                # Guard against an empty graph, consistent with
                # subject_small_world_connect below.
                if not edges:
                    continue

                mw = miniworld.GraphEX(edges, isDirected=True)
                ave_distance, unique, counts = mw.get_avg_path_and_unique()
                print(unique, counts)

                graph_size = mw.get_num_vertices()

                collection.insert_one({
                    "_id": f"{subject}_{year}_{month}",
                    "ave_distance": ave_distance,
                    "graph_size": graph_size,
                    "subject_size": len(full_subject_ids_set),
                    "edge_size": mw.get_num_edges(),
                    "unique": [int(item) for item in unique],
                    "count": [int(item) for item in counts]
                })

                print(subject, graph_size, len(full_subject_ids_set),
                      year, month, mw.get_num_edges(), ave_distance)


def subject_small_world_connect(lv="2", startYear=2004):
    """
    Small-world computation inside the academic subgraph, restricted to the
    strongly connected component: average shortest-path distance and its
    distribution, per subject.

    Same pipeline as ``subject_small_world_recon`` but uses
    ``miniworld.Graph`` (strongly-connected variant) and writes to the
    ``..._connect`` collection.

    Args:
        lv: category depth level, used in file paths and the collection name.
        startYear: first snapshot year to process (inclusive).

    Side effects:
        Inserts one document per (subject, year, month) into the
        ``wikipeida_direct_subject_lv{lv}_connect`` collection and prints
        progress.
    """
    # Renamed local from `Collection` so it no longer shadows the
    # typing.Collection import at module level.
    # NOTE: "wikipeida" spelling kept — it is the existing collection name.
    collection = pymongo.MongoClient("192.168.1.222").graph[f"wikipeida_direct_subject_lv{lv}_connect"]
    for year in range(startYear, 2022):
        for month in [3, 6, 9, 12]:
            # Snapshots only exist from 2004-09 through 2021-03.
            if year == 2021 and month > 3:
                continue
            if year == 2004 and month <= 6:
                continue

            path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_new_noLiterature/all_linksout.txt"
            all_links_out_dict = get_all_linksout(path)

            for subject in Subjects:
                # Skip snapshots that were already computed (resumable runs).
                if collection.find_one({"_id": f"{subject}_{year}_{month}"}):
                    continue

                path = f"{fa_path}/wdd/{year}_{month}/articleByCats/linksin_lv{lv}_node_v5_newDB_xueshu_new_noLiterature/arts_{subject}.txt"
                full_subject_ids_set = get_subject_page_ids(path)

                # Drop isolated pages: keep only ids that appear as source or
                # target of at least one in-subject link.
                subject_ids_set = set()
                for subject_id in full_subject_ids_set:
                    if subject_id in all_links_out_dict:
                        for target_id in all_links_out_dict[subject_id]:
                            if target_id in full_subject_ids_set:
                                subject_ids_set.update([subject_id, target_id])

                # Map wiki page ids to dense node ids for the graph library.
                # (The unused reverse map node->subject_id was removed.)
                subject_node_id_map = {
                    subject_id: i for i, subject_id in enumerate(subject_ids_set)
                }

                # Build the remapped edge list restricted to the subject.
                edges = []
                for subject_id in subject_ids_set:
                    if subject_id not in all_links_out_dict:
                        continue
                    node_id = subject_node_id_map[subject_id]
                    for target_id in all_links_out_dict[subject_id]:
                        if target_id in subject_ids_set:
                            edges.append([node_id, subject_node_id_map[target_id]])

                # An empty graph has no paths — nothing to record.
                if not edges:
                    continue

                mw = miniworld.Graph(edges, isDirected=True)
                ave_distance, unique, counts = mw.get_avg_path_and_unique()
                print(unique, counts)

                graph_size = mw.get_num_vertices()

                collection.insert_one({
                    "_id": f"{subject}_{year}_{month}",
                    "ave_distance": ave_distance,
                    "graph_size": graph_size,
                    "subject_size": len(full_subject_ids_set),
                    "edge_size": mw.get_num_edges(),
                    "unique": [int(item) for item in unique],
                    "count": [int(item) for item in counts]
                })

                print(subject, graph_size, len(full_subject_ids_set),
                      year, month, mw.get_num_edges(), ave_distance)
