#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File    :   xml_domain_info_test.py
@Contact :   291622538@qq.com

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2020/12/11 10:49   fan        1.0         None
"""
from abc import ABC
import csv
from xml.dom import minidom

import requests
from scrapy import Spider
from scrapy import Request

from scrapy.crawler import CrawlerProcess

from website.items import *


# def get_proxy():
#     return requests.get("http://192.168.37.22:5010/get/").json()


def get_domain_info(response, domain):
    """Parse an Alexa XML payload into a flat dict of domain metrics.

    Parameters
    ----------
    response : object
        Any object exposing a ``.text`` attribute containing the XML body
        (a Scrapy/requests response works).
    domain : str
        The domain name this response describes; echoed back in the result.

    Returns
    -------
    dict
        Keys: ``domain``, ``country_code``, ``country_name``,
        ``country_rank``, ``owner_name``, ``popularity_rank``,
        ``reach_rank``, ``rls_list``. Missing tags yield ``""``;
        ``rls_list`` is a flat ``[href, title, href, title, ...]`` list.
    """
    # Parse the XML payload into a DOM tree.
    root = minidom.parseString(response.text)

    def first_attr(tag, attr):
        # Return ATTR of the first TAG element, or "" when TAG is absent.
        # Hoists the repeated getElementsByTagName lookups the original
        # performed twice (or more) per attribute.
        nodes = root.getElementsByTagName(tag)
        return nodes[0].getAttribute(attr) if nodes else ""

    # Related-link HREFs are relative to the RLS element's PREFIX attribute.
    rls_prefix = first_attr('RLS', "PREFIX")
    rls_list = []
    for rl in root.getElementsByTagName('RL'):
        # Flat [href, title, ...] layout preserved for pipeline compatibility.
        rls_list.extend([rls_prefix + rl.getAttribute("HREF"),
                         rl.getAttribute("TITLE")])

    return {
        "domain": domain,
        "country_code": first_attr('COUNTRY', "CODE"),
        "country_name": first_attr('COUNTRY', "NAME"),
        "country_rank": first_attr('COUNTRY', "RANK"),
        "owner_name": first_attr('OWNER', "NAME"),
        # NOTE(review): POPULARITY exposes its rank via the TEXT attribute in
        # Alexa's schema (not RANK) — original behavior kept as-is.
        "popularity_rank": first_attr('POPULARITY', "TEXT"),
        "reach_rank": first_attr('REACH', "RANK"),
        "rls_list": rls_list,
    }


class XmlDomainInfo(Spider, ABC):
    """Spider that requests Alexa XML data for the top domains in top-1m.csv
    and hands the parsed metrics to SaveXmlDomainInfoPipeline."""

    # Spider name used by `scrapy crawl`.
    name = "xml_domain_info_spider"

    custom_settings = {
        'ITEM_PIPELINES': {'website.pipelines.SaveXmlDomainInfoPipeline': 300},
    }

    def start_requests(self):
        """Yield one Alexa data request for each of the first 200 CSV rows."""
        with open('./root_data/top-1m.csv', 'r', encoding='utf-8') as csv_file:
            for row_index, row in enumerate(csv.reader(csv_file)):
                if row_index >= 200:
                    break
                domain_name = row[1]
                url = "http://data.alexa.com/data?cli=2&dat=snbamz&url=%s" % domain_name
                # dont_filter=False: let Scrapy's duplicate filter drop any
                # repeated URL, so each domain is requested at most once.
                yield Request(
                    url,
                    callback=self.website_content_parse,
                    dont_filter=False,
                    # Bind domain_name as a default argument so each errback
                    # captures its own domain (avoids late-binding of the
                    # loop variable in the lambda).
                    errback=lambda failure, d=domain_name: self.save_overtime(failure, d),
                    cb_kwargs={"domain_name": domain_name},
                )

    def website_content_parse(self, response, **kwargs):
        """Parse one Alexa XML response into an item; on any parse failure,
        append the domain to the error log instead of crashing the crawl."""
        domain_name = kwargs["domain_name"]
        try:
            parsed = get_domain_info(response, domain_name)
            yield XmlDomainInfoItem(xml_domain_info=parsed)
        except Exception as err:
            print(err)
            with open('./data/xml_err_domain.txt', 'a', encoding='utf-8') as log:
                log.write(domain_name + '\n')

    def save_overtime(self, response, domain):
        """Errback: record a domain whose request failed/timed out."""
        print("请求超时的域名：", domain)
        with open('./data/xml_over_time_domain.txt', 'a', encoding='utf-8') as log:
            log.write(domain + '\n')


if __name__ == '__main__':
    # Allow running this spider standalone, without the `scrapy crawl` CLI.
    crawler_process = CrawlerProcess()
    crawler_process.crawl(XmlDomainInfo)
    crawler_process.start()
