#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File    :   stuff_gate_com.py    
@Contact :   291622538@qq.com

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2020/12/8 15:05   fan        1.0         None
"""
from abc import ABC
import csv
from scrapy import Spider
from scrapy import Request
from scrapy.crawler import CrawlerProcess
import re
from website.items import *
import requests


# def get_proxy():
#     return requests.get("http://192.168.37.22:5010/get/").json()


class StuffGateComContent(Spider, ABC):
    """Crawl stuffgate.com profile pages for domains listed in
    ./root_data/top-1m.csv and yield one StuffGateContentItem per domain.

    Failed/timed-out requests are appended to ./data/over_time_domain.txt;
    pages that raise during parsing are appended to ./data/err_domain.txt.
    """

    # Spider name used by `scrapy crawl`.
    name = "stuff_gate_com_content_spider"

    custom_settings = {
        'ITEM_PIPELINES': {'website.pipelines.SaveStuffGateContentPipeline': 300},
    }

    def start_requests(self):
        """Read ``rank,domain`` rows from the top-1m CSV and schedule one
        request per domain.

        NOTE(review): only the first 2 rows are processed (debug cutoff) —
        raise or remove the limit for a full crawl.
        """
        with open('./root_data/top-1m.csv', 'r', encoding='utf-8') as f:
            for num, line in enumerate(csv.reader(f)):
                if num == 2:  # debug cutoff: first two rows only
                    break

                domain_name = line[1]
                # BUG FIX: the URL was hard-coded to a single test domain
                # ("fifa55hot.net"), so every request fetched the same page.
                # Build it from the CSV row as originally intended.
                url = "http://stuffgate.com/%s" % domain_name
                # dont_filter=False: Scrapy's dupe filter drops repeated URLs
                # (each URL is requested at most once); set True to allow
                # re-requesting until success.
                yield Request(
                    url,
                    callback=self.website_content_parse,
                    dont_filter=False,
                    # Scrapy passes a Failure to the errback; bind the domain
                    # as a default so each lambda keeps its own value.
                    errback=lambda failure, domain=domain_name: self.save_overtime(
                        failure, domain),
                    cb_kwargs={"domain_name": domain_name},
                )

    def website_content_parse(self, response, **kwargs):
        """Parse one stuffgate.com profile page into a StuffGateContentItem.

        Each "card" <div> is dispatched on its <h4> heading: the site
        profile table, WHOIS text, DNS record table and the top-1M ranking
        table are collected into a single ``domain_info`` dict.

        :param response: the downloaded page
        :param kwargs: carries ``domain_name`` from ``cb_kwargs``
        """
        try:
            domain_info = dict()
            domain_info["url"] = response.url
            for card in response.xpath('//div[@class="card"]'):
                h4 = "".join(card.xpath('./h4/text()').extract())
                if h4 == "Site Profile":
                    # Two-column table; the first row keeps its value in a
                    # <span> inside the <th>, later rows in an <a> or a
                    # second <td>.
                    site_profile = dict()
                    for num, tr in enumerate(card.xpath('.//tr')):
                        if num == 0:
                            key = "".join(tr.xpath('./th/text()').extract()).replace(" ", "_")
                            value = "".join(tr.xpath('./th/span/text()').extract())
                        else:
                            key = tr.xpath('./td/text()').extract()[0].replace(" ", "_")
                            try:
                                # Prefer the link text (minus any query
                                # string); fall back to the second text cell.
                                if tr.xpath('./td/a/text()'):
                                    value = tr.xpath('./td/a/text()').extract()[0].split("?")[0]
                                else:
                                    value = tr.xpath('./td/text()').extract()[1]
                            except IndexError:
                                value = ""  # row has no value cell
                        site_profile[key] = value
                    domain_info["site_profile"] = site_profile
                elif h4 == "WHOIS Information":
                    whois_information = dict()
                    for tr in card.xpath('.//div[@id="whois_domain"]//tr'):
                        cells = tr.xpath('./td/text()').extract()
                        key = cells[0].replace(" ", "_")
                        # Multi-line WHOIS values are newline-joined.
                        whois_information[key] = "\n".join(cells[1:])
                    domain_info["whois_information"] = whois_information
                elif h4 == "DNS Record Analysis":
                    # Strip all tags from each cell, first turning <br>
                    # line breaks into ';' separators.
                    dns_record_analysis = list()
                    for num, tr in enumerate(card.xpath('.//tr')):
                        if num == 0:
                            continue  # header row
                        row = tr.xpath('./td').extract()
                        row = [re.sub(r'<[^>]*>', '', td.replace('<br>', ';')) for td in row]
                        dns_record_analysis.append(row)
                    domain_info["dns_record_analysis"] = dns_record_analysis
                elif 'in top 1000000 sites' in h4:
                    # Neighbouring-rankings table ("... in top 1000000 sites").
                    in_top_100 = list()
                    for num, tr in enumerate(card.xpath('.//tr')):
                        if num == 0:
                            continue  # header row
                        rank = tr.xpath('./td/text()').extract()[0]
                        website = tr.xpath('./td/a/text()').extract()[0] if tr.xpath('./td/a/text()') else \
                            tr.xpath('./td/text()').extract()[1]
                        in_top_100.append({"rank": rank, "website": website})
                    domain_info["in_top_100"] = in_top_100
            yield StuffGateContentItem(domain_info=domain_info)
        except Exception as e:
            # Best-effort: record the failed domain and keep crawling.
            print(e)
            with open('./data/err_domain.txt', 'a', encoding='utf-8') as f:
                f.write(kwargs["domain_name"] + '\n')

    def save_overtime(self, response, domain):
        """Errback: append a timed-out/failed domain to the overtime log.

        :param response: the Failure object passed by Scrapy's errback
            (parameter kept named ``response`` for backward compatibility)
        :param domain: domain name bound when the request was created
        """
        print("请求超时的域名：", domain)
        with open('./data/over_time_domain.txt', 'a', encoding='utf-8') as f:
            f.write(domain + '\n')


def regex_demo():
    """Extract the (id, value) attribute pair from a sample Baidu
    submit-button tag and return `findall`'s list of group tuples."""
    sample = '<input type="submit" id="su" value="百度一下" class="bg s_btn">'
    button_pattern = re.compile(r'<input type="submit" id="(.*?)" value="(.*?)" class="bg s_btn">')
    return button_pattern.findall(sample)


if __name__ == '__main__':
    # Crawler run kept disabled, as before:
    # process = CrawlerProcess()
    # process.crawl(StuffGateComContent)
    # process.start()
    print(regex_demo())
