#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   domain_info_original_again.py    
@Contact :   291622538@qq.com

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2020/10/13 10:33   fan        1.0         获取581643条域名源json信息
'''
import json
import os

import tldextract
from scrapy import Spider
from scrapy import Request
import csv

# # 项目根目录
# # 获取当前文件路径
# current_path = os.path.abspath(__file__)
# # 获取当前文件的父目录
# father_path = os.path.abspath(os.path.dirname(current_path) + os.path.sep + ".")
# print(father_path)
from website.items import DomainInfoOriginalItem, DomainInfoOriginalOverItem


# Build the suffix -> query-API mapping from the dns services file.
def get_services_dict(file_name):
    """Return a mapping from domain suffix (TLD) to its query API base URL.

    The JSON file is expected to hold a top-level ``"services"`` list whose
    entries are pairs ``[list_of_suffixes, list_of_url_fragments]``; the URL
    fragments are concatenated into one API base URL shared by every suffix
    in that entry.

    :param file_name: path to the JSON services file (e.g. ``"data/dns.json"``)
    :return: dict mapping suffix -> API base URL string
    :raises FileNotFoundError: if the file does not exist
    :raises json.JSONDecodeError: if the file is not valid JSON
    """
    # Pin the encoding so the JSON parses identically on every platform
    # (the platform default may not be UTF-8, e.g. on Windows).
    with open(file_name, 'r', encoding='utf-8') as load_f:
        load_dict = json.load(load_f)
    return {
        suffix: "".join(service[1])
        for service in load_dict["services"]
        for suffix in service[0]
    }


class DomainInfoOriginal(Spider):
    """Spider that fetches the raw domain-info JSON for university domains.

    Reads domain names from ``data/university_list.csv``, resolves each
    domain's suffix to a query API via ``data/dns.json``, and requests
    ``<api>domain/<domainName>``.  Successful responses are wrapped in
    ``DomainInfoOriginalItem``; failed requests (timeouts, errors) are
    recorded as ``DomainInfoOriginalOverItem`` so they can be retried later.
    """

    # Spider name used by `scrapy crawl domain_info_original`.
    name = "domain_info_original"

    custom_settings = {
        # Route items only through the pipeline that persists the raw info.
        'ITEM_PIPELINES': {'website.pipelines.SaveDomainInfoOriginalPipeline': 300},
    }

    def start_requests(self):
        """Yield one Request per university domain that has a known API."""
        services_dict = get_services_dict("data/dns.json")
        with open("data/university_list.csv", "r", encoding="utf-8") as f:
            for line in csv.reader(f):
                # Column 4 holds the domain name.
                # NOTE(review): confirm the CSV column layout against the data file.
                domain_name = line[4]
                val = tldextract.extract(domain_name)
                if val.suffix not in services_dict:
                    # No query API known for this suffix; log via the spider
                    # logger rather than print so it lands in Scrapy's log.
                    self.logger.warning("不存在接口: %s", domain_name)
                    continue
                url = services_dict[val.suffix] + "domain/" + domain_name
                # dont_filter=False: identical URLs are requested only once.
                # Scrapy errbacks receive a twisted Failure (not a Response);
                # the domain name is bound as a default argument to avoid the
                # late-binding-closure pitfall inside the loop.
                yield Request(
                    url,
                    callback=self.original_parse,
                    meta={"domainName": domain_name},
                    dont_filter=False,
                    errback=lambda failure, domain_name=domain_name:
                        self.save_overtime(failure, domain_name),
                )

    def original_parse(self, response, **kwargs):
        """Wrap the raw response body into a DomainInfoOriginalItem."""
        yield DomainInfoOriginalItem(
            domainName=response.meta["domainName"],
            domainInfo=response.text,
        )

    def save_overtime(self, failure, domainName):
        """Record a failed request's domain so it can be re-queried later.

        :param failure: the twisted Failure delivered by the Request errback
        :param domainName: the domain whose request failed
        """
        yield DomainInfoOriginalOverItem(domainName=domainName)
