'''
@Version: 0.0.2
@Author: ider
@Date: 2019-12-27 11:29:07
@LastEditors  : ider
@LastEditTime : 2019-12-28 23:55:01
@Description: 随机计算各年的子类的无向图的小世界，取节点数 50 间隔进行计算,顺序为 google 距离
    0.0.2: 更换点的逻辑，一旦有跳跃点过50，取跳跃后的点，不再取前一个点，减小运算量
'''


import os
import csv
import json
import pymongo
import graph_tool.all as gt
import numpy as np

import logging
import logging.config
from config import MONGO_URL
from DataGet import WikiLinkYear

# Share a single MongoClient (one connection pool) for both collections
# instead of opening two independent clients.
_mongo_client = pymongo.MongoClient(MONGO_URL)
# Results collection: one document per (category, year, vertex-count) sample.
Table = _mongo_client.small_world.rand_category_undirect_limit
# Schedule/progress collection: marks (category, year) pairs already finished.
Table_Schedult = _mongo_client.small_world.schedule_rand_category_undirect_limit

# Years to process: 2007-2020 inclusive, skipping 2010 and 2012
# (filtered in one pass instead of building then deleting by index).
years = [y for y in range(2007, 2021) if y not in (2010, 2012)]
# NOTE(review): this file never configures logging, so the INFO messages below
# were being dropped (root logger defaults to WARNING). Configure a basic
# handler only if nothing else (e.g. the imported config module) already did.
if not logging.getLogger().handlers:
    logging.basicConfig(level=logging.INFO)


def _record_component_metrics(glc, graph, cate_name, year):
    """Measure the largest component *glc* and upsert the result into Mongo.

    Stores, keyed by category name ('n'), year ('y') and vertex count ('nv'):
      'ad' - average shortest-path distance over all ordered vertex pairs,
      'cc' - mean local clustering coefficient,
      'ne' - edge count of the component.

    *graph* is the full (not-yet-fully-shrunk) graph, used only for logging
    the "component size / full size" ratio.
    """
    num_vertices = glc.num_vertices()
    num_edges = glc.num_edges()
    # Sum of all pairwise shortest distances divided by n^2 - n.
    # NOTE(review): n >= 2 holds because components come from real edges;
    # a self-loop-only component (n == 1) would divide by zero — assumed
    # not to occur in this data.
    all_sp = gt.shortest_distance(glc, directed=False)
    avg_path = float(np.sum([np.sum(i.get_array()) for i in all_sp])
                     / (num_vertices ** 2 - num_vertices))
    clust = gt.local_clustering(glc)
    clustering_coefficient = float(gt.vertex_average(glc, clust)[0])
    Table.update_one({'n': cate_name, 'y': year, 'nv': num_vertices},
                     {'$set': {'ad': avg_path, 'cc': clustering_coefficient, 'ne': num_edges}},
                     upsert=True)
    logging.info(f'{year},{cate_name},{num_vertices}/{graph.num_vertices()},{avg_path},{clustering_coefficient}')


for year in years:
    logging.info(year)
    wly = WikiLinkYear(year)
    # Per-year article ids of each top-level category:
    # *_dict maps category -> list of ids, *_set maps category -> set of ids.
    categorys_ids_dict, categorys_ids_set = wly.get_article_ids()

    # Drop categories already processed for this year (resume support via the
    # schedule collection).
    done_keys = {key for key in categorys_ids_dict
                 if Table_Schedult.find_one({'n': key, 'y': year})}
    for key in done_keys:
        del categorys_ids_dict[key]
        del categorys_ids_set[key]

    if not categorys_ids_dict:
        continue

    # Load every edge from file and bucket it per category; an edge is kept
    # only when both endpoints belong to the same category.
    category_edges_dict = {}
    for src, dst in wly.iterate_edges():
        for cat, cat_id_set in categorys_ids_set.items():
            if src in cat_id_set and dst in cat_id_set:
                category_edges_dict.setdefault(cat, []).append([src, dst])

    for cate_name, edges in category_edges_dict.items():
        page_id_set = categorys_ids_set[cate_name]
        page_id_list = categorys_ids_dict[cate_name].copy()

        # Shuffle so vertices are later removed in random order.
        np.random.shuffle(page_id_list)
        # Map page ids onto contiguous node ids 0..n-1 (in shuffled order).
        page_id_to_node_id_dict = {page_id: i
                                   for i, page_id in enumerate(page_id_list)}

        # Translate edges to node ids.
        map_edges = [[page_id_to_node_id_dict[a], page_id_to_node_id_dict[b]]
                     for a, b in edges
                     if a in page_id_set and b in page_id_set]

        # Build the undirected graph.
        graph = gt.Graph(directed=False)
        graph.add_vertex(len(page_id_list))
        graph.add_edge_list(map_edges)

        # Repeatedly remove vertices and measure the largest connected
        # component roughly once per 50-vertex bucket.
        first_run = True
        last_vertices_div = 0  # last measured num_vertices // 50 bucket
        while True:
            glc = gt.extract_largest_component(graph)
            num_vertices = glc.num_vertices()

            if first_run:
                # Always measure the initial component.
                first_run = False
                if num_vertices % 50 == 0:
                    last_vertices_div = num_vertices // 50 - 1
                else:
                    last_vertices_div = num_vertices // 50
                _record_component_metrics(glc, graph, cate_name, year)
            elif num_vertices % 50 == 0 and last_vertices_div != num_vertices // 50 - 1:
                # Landed exactly on a multiple of 50 not yet measured.
                last_vertices_div = num_vertices // 50 - 1
                _record_component_metrics(glc, graph, cate_name, year)
            elif num_vertices // 50 < last_vertices_div:
                # Shrank past a 50-vertex boundary without hitting it exactly:
                # per v0.0.2 (module docstring) measure the graph *after* the
                # jump instead of backtracking, to save computation.
                last_vertices_div = num_vertices // 50
                _record_component_metrics(glc, graph, cate_name, year)

            # Stop once the component is down to 50 vertices or fewer.
            if num_vertices <= 50:
                break

            # Remove one vertex from the full graph. Node ids were assigned in
            # shuffled-list order and we pop from the end, so the removed
            # vertex is always the highest-index remaining one — graph-tool's
            # index renumbering on remove_vertex therefore never invalidates
            # page_id_to_node_id_dict.
            graph.remove_vertex(page_id_to_node_id_dict[page_id_list.pop()])

        # Mark this (category, year) as done so reruns skip it.
        Table_Schedult.insert_one({'n': cate_name, 'y': year})
        
            