import copy
import json
import time

from parsel import Selector

from apps.crawler_platform.core_platform.core_sql import CoreSqlValue
from apps.crawler_platform.core_platform.g_model import DealModel, CallBackModel, DealInsertModel, EtlDealModel, \
    NssdListModel

# Public API: the two NSSD list-page callbacks registered with the crawler platform.
__all__ = [
    "nssd_list_callback",
    "nssd_list_etl_callback",
]


def nssd_list_callback(callmodel: CallBackModel[NssdListModel]) -> DealModel:
    """Parse an NSSD journal list page and queue follow-up list tasks.

    From the "1_1" response HTML this extracts:

    * extra year entries (``<year>01`` page indexes) when the current task's
      ``page_index`` is the first issue of a year, and
    * every issue link in the ``numlist`` block, yielding one new task row
      per (``page_index``, ``list_rawid``) pair,

    and appends them as insert rows on ``result.befor_dicts``.

    :param callmodel: callback context carrying the fetched pages and the
        originating SQL task row.
    :return: a :class:`DealModel` whose ``befor_dicts.insert`` holds the new
        list-task rows (left empty when no "1_1" page was fetched).
    """
    result = DealModel()
    para_dicts = callmodel.para_dicts
    task_info = callmodel.redis_all.parse_dict["1_1"].task_info
    # NOTE(review): info_dicts is built but never read below — kept because
    # the parse_dict["1_1"] lookup above may be an intentional sanity check;
    # confirm before removing.
    info_dicts = {"task_name": callmodel.sql_model.task_name,
                  "task_tag": callmodel.sql_model.task_tag,
                  "task_tag_next": task_info.task_tag_next}
    if "1_1" in para_dicts["data"]:
        html = callmodel.para_dicts['data']['1_1']['html']
        res = Selector(text=html)
        # Strip bookkeeping columns so the remaining dict can be re-inserted
        # as a fresh task row (pop without default: a missing key is a bug).
        sql_dict = callmodel.sql_model.dict()
        for key in ("id", "update_time", "create_time", "null_dicts",
                    "err_msg", "other_dicts", "state", "failcount"):
            sql_dict.pop(key)
        di_model_bef = DealInsertModel()
        di_model_bef.insert_pre = CoreSqlValue.insert_ig_it
        if sql_dict['page_index'].endswith('01'):
            # First issue of a year: also queue the later years listed on
            # the page.
            current_year = int(sql_dict['page_index'][:4])
            year_list = res.xpath('//div[@id="qkyearslist"]/ul/li/a/@title').extract()
            for year in year_list:
                try:
                    year_num = int(year)
                except ValueError:
                    # Non-numeric title (decorative entry) — skip it.
                    continue
                if year_num > current_year:
                    # BUG FIX: copy per appended row. The original reused a
                    # single dict created before the loop, so every queued
                    # row ended up sharing one object and all carried the
                    # last matching year's page_index.
                    new_dict = copy.deepcopy(sql_dict)
                    new_dict["page_index"] = f'{year}01'
                    di_model_bef.lists.append(new_dict)
        # One new task per issue link: .../<list_rawid>/<page_index>/...
        href_list = res.xpath('//div[@id="numlist"]/ul/li/a/@href').extract()
        for href in href_list:
            new_dict = copy.deepcopy(sql_dict)
            new_dict["page_index"] = href.split('/')[-2]
            new_dict["list_rawid"] = href.split('/')[-3]
            di_model_bef.lists.append(new_dict)

        result.befor_dicts.insert.append(di_model_bef)

    return result


def nssd_list_etl_callback(callmodel: CallBackModel[NssdListModel]) -> EtlDealModel:
    """Extract article records from an NSSD issue page.

    Parses the ``t_list`` result table in the "1_1" HTML and emits one
    ``{'table': 'nssd_latest', 'data': {...}}`` record per article row,
    skipping the header row and the "暂无文献" (no documents) placeholder.

    :param callmodel: callback context; ``sql_model.list_json`` must contain
        the journal's ``gch``/``gch5`` codes, and ``page_index`` is assumed
        to be ``YYYYNN`` (year + issue number).
    :return: an :class:`EtlDealModel` with the extracted rows in
        ``save_data``.
    """
    result = EtlDealModel()
    save_data = []

    html = callmodel.para_dicts['data']['1_1']['html']
    list_json = json.loads(callmodel.sql_model.list_json)
    res = Selector(text=html)
    sql_dict = callmodel.sql_model.dict()
    # Hoisted loop invariants: one run gets one download date (even if the
    # loop straddles midnight), and page_index is split only once.
    down_date = time.strftime('%Y%m%d', time.localtime())
    page_index = str(sql_dict['page_index'])

    tr_list = res.xpath('//table[@class="t_list"]//tr')
    for tr in tr_list[1:]:  # tr_list[0] is the table header
        if '暂无文献' in tr.extract():
            continue
        id_info = tr.xpath('td[1]/a/@href').extract_first()
        _id = id_info.split('id=')[-1]
        data = {
            '_id': _id,
            'url': f'http://www.nssd.cn/articles/Article_Read.aspx?id={_id}',
            'gch5': list_json['gch5'],
            'gch': list_json['gch'],
            'title': tr.xpath('td[1]/a/@title').extract_first(),
            'author': tr.xpath('td[2]/text()').extract_first(),
            'year': page_index[:4],
            'issue': page_index[4:],
            'journal_rawid': sql_dict['list_rawid'],
            'down_date': down_date,
        }
        save_data.append({'table': 'nssd_latest', 'data': data})

    result.save_data = save_data
    return result
