'''
@Version: 0.0.1
@Author: ider
@Date: 2019-12-27 11:26:03
@LastEditors  : ider
@LastEditTime : 2020-02-05 00:35:08
@Description: 读取文件中的 edges， pageid，category
'''
import gzip
import csv
import json
import os
import math
from collections import OrderedDict
import logging
import sys
from config import Edges_Path, Page_Subject_Path, Subject_Rank_Path

# Edge rows store a whole JSON list of link targets in a single TSV field,
# which can exceed csv's default field-size cap (131072 bytes) and raise
# `csv.Error: field larger than field limit` — lift the cap to the platform max.
csv.field_size_limit(sys.maxsize)

class WikiLinkYear:
    """Load the edges, page ids and category rankings stored on disk for one
    Wikipedia snapshot year.

    The on-disk layout (rooted at ``Page_Subject_Path``, ``Subject_Rank_Path``
    and ``Edges_Path`` from ``config``) is one directory per year containing
    gzipped tab-separated files.
    """

    def __init__(self, year):
        """
        :param year: snapshot year whose data directories will be read.
        """
        self.year = year

    def get_article_ids(self, level, method='google'):
        """Return ``(categorys_ids_dict, categorys_ids_set_dict)`` for the
        given *level* (2 or 3) using the ranking *method* (``'google'`` or
        ``'category'``).

        :raises ValueError: for any other ``(level, method)`` combination.
            (``ValueError`` subclasses ``Exception``, so callers that caught
            the old generic ``Exception`` still work.)
        """
        dispatch = {
            (2, 'google'): self._get_article_ids_google_level2,
            (3, 'google'): self._get_article_ids_google_level3,
            (2, 'category'): self._get_article_ids_category_level2,
            (3, 'category'): self._get_article_ids_category_level3,
        }
        try:
            handler = dispatch[(level, method)]
        except KeyError:
            raise ValueError(
                f'unexpected argument: {level},{method}') from None
        return handler()

    # ------------------------------------------------------------------
    # low-level gzipped-TSV readers (shared by every loader below)
    # ------------------------------------------------------------------

    @staticmethod
    def _iter_tsv_rows(path):
        """Yield the rows of the gzipped tab-separated file at *path*.

        Files are opened as UTF-8 with undecodable bytes ignored — the same
        mode the original code used for the distance files; the id/edge files
        were previously read with the locale default encoding, which was
        fragile on non-UTF-8 locales.
        """
        with gzip.open(path, 'rt', encoding='utf-8', errors='ignore') as f:
            yield from csv.reader(f, delimiter='\t')

    @classmethod
    def _load_tsv_dict(cls, path, cast):
        """Read ``key<TAB>value`` rows from *path* into ``{key: cast(value)}``.

        Unparseable rows (missing column, bad number) are logged with the
        actual file path and skipped instead of aborting the load.
        """
        result = {}
        for row in cls._iter_tsv_rows(path):
            try:
                result[row[0]] = cast(row[1])
            except Exception:
                logging.warning('row 无法解析 %s:%s', path, row)
        return result

    def _ranked_page_ids(self, ids_path, distance_path, file_name):
        """Return the page ids listed in ``ids_path/file_name`` sorted by
        ascending Google distance from ``distance_path/file_name``.

        Pages with no recorded distance sort last via the sentinel 999.
        """
        name_to_id = self._load_tsv_dict(
            os.path.join(ids_path, file_name), int)
        name_to_score = self._load_tsv_dict(
            os.path.join(distance_path, file_name), float)
        pairs = [(page_id, name_to_score.get(page_name, 999))
                 for page_name, page_id in name_to_id.items()]
        pairs.sort(key=lambda item: item[1])
        return [page_id for page_id, _ in pairs]

    # ------------------------------------------------------------------
    # 'google' method: rank purely by Google distance
    # ------------------------------------------------------------------

    def _get_article_ids_google(self, ids_dirname, distance_dirname):
        """Shared implementation of both ``google`` levels (the two levels
        only differ in the directory names passed in).

        :return: ``(categorys_ids_dict, categorys_ids_set_dict)`` — per
            category, a page-id list sorted by ascending Google distance and
            the matching set for O(1) membership tests.
        """
        categorys_ids_dict = {}
        categorys_ids_set_dict = {}
        year_path = os.path.join(Page_Subject_Path, str(self.year))
        ids_path = os.path.join(year_path, ids_dirname)
        distance_path = os.path.join(year_path, distance_dirname)
        for file_name in os.listdir(ids_path):
            category_name = file_name.replace('.txt.gz', '')
            ids = self._ranked_page_ids(ids_path, distance_path, file_name)
            categorys_ids_dict[category_name] = ids
            categorys_ids_set_dict[category_name] = set(ids)
        return categorys_ids_dict, categorys_ids_set_dict

    def _get_article_ids_google_level2(self):
        """Level-2 article ids, sorted by Google distance (ascending)."""
        return self._get_article_ids_google('article_ids', 'article_distance1')

    def _get_article_ids_google_level3(self):
        """Level-3 article ids, sorted by Google distance (ascending)."""
        return self._get_article_ids_google(
            'article_ids_lv3', 'article_distance_lv3')

    # ------------------------------------------------------------------
    # 'category' method: rank by category score first, Google distance second
    # ------------------------------------------------------------------

    def _get_article_ids_category(self, ids_dirname, distance_dirname):
        """Shared implementation of both ``category`` levels: pages are
        grouped by their subject's category-rank score (ascending), and
        within one score group ordered by Google distance.

        :return: ``(categorys_ids_dict, categorys_ids_set_dict)``.
        """
        categorys_ids_dict = {}
        categorys_ids_set_dict = {}
        year_path = os.path.join(Page_Subject_Path, str(self.year))
        ids_path = os.path.join(year_path, ids_dirname)
        distance_path = os.path.join(year_path, distance_dirname)

        # Subject -> page-id set, shared by every category of this year.
        rank_path = os.path.join(Subject_Rank_Path, str(self.year))
        child_articles_file = os.path.join(rank_path, 'child_articles.txt.gz')
        subject_pageids_dict = {}
        for row in self._iter_tsv_rows(child_articles_file):
            page_list = json.loads(row[1])
            if page_list:
                subject_pageids_dict[row[0]] = set(page_list)

        for file_name in os.listdir(rank_path):
            if file_name == 'child_articles.txt.gz':
                continue
            category_name = file_name.replace('.txt.gz', '').replace('s1_', '')

            # Category-rank file: smaller score ranks earlier.
            rank_file = os.path.join(rank_path, file_name)
            subject_scores = []
            for row in self._iter_tsv_rows(rank_file):
                try:
                    subject_scores.append((row[0], float(row[1])))
                except Exception:
                    # Bug fix: the original logged a path under the distance
                    # directory here — log the file actually being read.
                    logging.warning('row 无法解析 %s:%s', rank_file, row)
            subject_scores.sort(key=lambda item: item[1])

            # Google-distance rank per page id (unknown pages rank last).
            ranked_ids = self._ranked_page_ids(
                ids_path, distance_path, f'{category_name}.txt.gz')
            id_rank = {page_id: i for i, page_id in enumerate(ranked_ids)}

            # Merge subjects sharing the same score; NaN scores pool under
            # the key 'nan'. Insertion order preserves ascending score.
            subject_rank_dict = OrderedDict()
            for subject_name, score in subject_scores:
                key = 'nan' if math.isnan(score) else score
                subject_rank_dict.setdefault(key, set()).add(subject_name)

            # Expand each score group to its page ids, ordered inside the
            # group by Google-distance rank; setdefault keeps only the first
            # (best-ranked) occurrence of a page id.
            page_id_rank_summary = OrderedDict()
            for subject_name_set in subject_rank_dict.values():
                page_ids_set = set()
                for subject_name in subject_name_set:
                    page_ids_set.update(
                        subject_pageids_dict.get(subject_name, []))
                for page_id in sorted(page_ids_set,
                                      key=lambda x: id_rank.get(x, 999)):
                    page_id_rank_summary.setdefault(page_id, None)

            categorys_ids_dict[category_name] = list(page_id_rank_summary)
            categorys_ids_set_dict[category_name] = set(page_id_rank_summary)
        return categorys_ids_dict, categorys_ids_set_dict

    def _get_article_ids_category_level2(self):
        """Level-2 ids grouped by category score, then by Google distance."""
        return self._get_article_ids_category(
            'article_ids', 'article_distance1')

    def _get_article_ids_category_level3(self):
        """Level-3 ids grouped by category score, then by Google distance."""
        return self._get_article_ids_category(
            'article_ids_lv3', 'article_distance_lv3')

    # ------------------------------------------------------------------
    # graph construction
    # ------------------------------------------------------------------

    def iterate_edges(self):
        """Yield every directed edge ``(source_page_id, target_page_id)`` for
        this year, read from the gzipped edge files.

        Each row is ``page_id<TAB>json-list-of-target-ids``; rows without a
        link column are skipped.
        """
        current_edge_path = os.path.join(Edges_Path, str(self.year))
        for file_name in os.listdir(current_edge_path):
            edge_file = os.path.join(current_edge_path, file_name)
            for row in self._iter_tsv_rows(edge_file):
                if len(row) <= 1:
                    continue
                source = int(row[0])
                for target in json.loads(row[1]):
                    yield source, target

    def mapping_graph(self, categorys_ids_dict, categorys_ids_set_dict):
        """Assign a dense node id to every page id in the graph.

        Page ids that belong to a category set are numbered first, so their
        node ids form a contiguous prefix (also collected in
        ``self.node_id_in_subject_set``); every remaining endpoint seen while
        iterating the edges is numbered afterwards.

        :param categorys_ids_dict: as returned by :meth:`get_article_ids`.
        :param categorys_ids_set_dict: as returned by :meth:`get_article_ids`.

        Side effects: sets ``self.page_id_count``,
        ``self.page_id_node_id_dict``, ``self.node_id_in_subject_set``,
        ``self.categorys_ids_dict``, ``self.categorys_ids_set_dict`` and
        ``self.node_id_page_id_dict``.
        """
        self.page_id_count = 0
        self.page_id_node_id_dict = {}
        self.node_id_in_subject_set = set()

        self.categorys_ids_dict = categorys_ids_dict
        self.categorys_ids_set_dict = categorys_ids_set_dict

        # Subject pages first.
        for id_set in self.categorys_ids_set_dict.values():
            for page_id in id_set:
                if page_id not in self.page_id_node_id_dict:
                    self.page_id_node_id_dict[page_id] = self.page_id_count
                    self.node_id_in_subject_set.add(self.page_id_count)
                    self.page_id_count += 1

        # Then every other endpoint appearing in the edge files.
        for source, target in self.iterate_edges():
            for page_id in (source, target):
                if page_id not in self.page_id_node_id_dict:
                    self.page_id_node_id_dict[page_id] = self.page_id_count
                    self.page_id_count += 1

        # Reverse lookup: node id -> page id.
        self.node_id_page_id_dict = {
            node_id: page_id
            for page_id, node_id in self.page_id_node_id_dict.items()}
        