import time
from urllib import parse
import scrapy
import re
from scrapy.http import request
from scrapy.utils.trackref import NoneType
from selenium import webdriver
import requests
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# from urllib.request import quote, unquote
from selenium.webdriver.common.keys import Keys
import pprint
from lxml import html
from urllib3.packages.six import b

class YqSpider(scrapy.Spider):
    """Scrape COVID-19 epidemic statistics from ncov.dxy.cn.

    The page is rendered by JavaScript, so a Selenium-driven Chrome
    instance is used to expand/click through the panels; the extracted
    figures are appended to CSV files under ``./ama/csvhandle/``.
    """

    name = 'yq'
    allowed_domains = ['ncov.dxy.cn']
    start_urls = ['https://ncov.dxy.cn/ncovh5/view/pneumonia?link=&share=&source=']

    def close(self, reason=None):
        """Shut down the Selenium browser when the spider closes.

        Scrapy passes a ``reason`` argument when it calls ``close``; the
        original ``close(self)`` signature would raise TypeError here.
        ``reason`` defaults to None so direct no-arg calls keep working.
        """
        self.browser.close()

    def start_requests(self):
        """Build a stealth-configured Chrome driver, then delegate to the
        default ``start_requests`` implementation."""
        opt = Options()
        opt.add_argument('--no-sandbox')
        opt.add_argument('--disable-gpu')
        opt.add_argument('--disable-dev-usage')
        # Chromedriver log level: INFO=0 WARNING=1 LOG_ERROR=2 LOG_FATAL=3
        opt.add_argument('log-level=3')
        opt.add_argument('disable-infobars')
        # Hide the "controlled by automated software" infobar and suppress
        # chromedriver console logging.
        opt.add_experimental_option(
            'excludeSwitches', ['enable-automation', 'enable-logging'])
        # FIX: the original mutated the shared DesiredCapabilities.CHROME
        # dict AFTER Options() had already copied it, so the setting never
        # reached the driver (and it leaked into every other Chrome
        # session).  Set the capability on this Options object instead.
        opt.set_capability("pageLoadStrategy", "none")
        # NOTE(review): the original also added "–disable-gpu" with an
        # en-dash — a typo Chrome ignores; dropped because the correct
        # "--disable-gpu" flag is already set above.
        # opt.add_argument('--headless')
        self.chrome_driver = 'C:/Users/dd/Desktop/chromedriver.exe'
        self.browser = webdriver.Chrome(self.chrome_driver, options=opt)
        # Mask navigator.webdriver so the site cannot detect automation.
        self.browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
            Object.defineProperty(navigator, 'webdriver', {
            get: () => undefined
            })
        """
        })
        self.browser.set_window_size(1920, 900)
        self.browser.set_page_load_timeout(30)
        return super().start_requests()

    def parse(self, response, **kwargs):
        """Expand the domestic statistics table in the live browser, then
        parse the rendered DOM instead of the Scrapy response."""
        # Click the "expand" control so every province row is rendered.
        self.browser.find_element_by_xpath(
            "/html/body/div/div/div[3]/div[16]/div[27]").click()
        time.sleep(0.2)  # give the page JS a moment to render the rows
        rendered = html.fromstring(self.browser.page_source)
        time.sleep(0.2)
        self.parseinfo(rendered)

    def parseinfo(self, response):
        """Append per-province and per-city figures to CSV, then click
        through to the domestic trend panel and parse it.

        :param response: lxml HTML element of the fully rendered page.
        """
        # Rows of the province table; the first two and last two child
        # divs are header/footer chrome, not data rows.
        rows = response.xpath(".//div[has-class('areaBox___Sl7gp')]/div")[2:-2]
        # Open the file once instead of re-opening it for every row.
        with open("./ama/csvhandle/yqdata.csv", "a", encoding='utf-8') as f:
            for row in rows:
                try:
                    item = {
                        "省份": row.xpath("./div[1]/p[1]/text()")[0],
                        "现有确诊": row.xpath("./div[1]/p[2]/text()")[0],
                        "累计确诊": row.xpath("./div[1]/p[3]/text()")[0],
                        "死亡": row.xpath("./div[1]/p[4]/text()")[0],
                        "治愈": row.xpath("./div[1]/p[5]/text()")[0],
                    }
                except IndexError:
                    # FIX: the original `break` abandoned every remaining
                    # province on the first malformed row; skip it instead
                    # (consistent with the city loop below).
                    continue
                print(item)
                f.write("%s,%s,%s,%s,%s" % (
                    item["省份"],
                    item["现有确诊"],
                    item["累计确诊"],
                    item["死亡"],
                    item["治愈"],
                ))
                f.write("\n")
        # Each expanded province row carries its city rows as extra child
        # divs after the first (province-level) one.
        city_rows = []
        for row in rows:
            city_rows.extend(row.xpath('./div')[1:])
        with open("./ama/csvhandle/yqdqdata.csv", "a", encoding='utf-8') as f:
            for row in city_rows:
                try:
                    item = {
                        "地区": row.xpath("./p[1]/span/text()")[0],
                        "现有确诊": row.xpath("./p[2]/text()")[0],
                        "累计确诊": row.xpath("./p[3]/text()")[0],
                        "死亡": row.xpath("./p[4]/text()")[0],
                        "治愈": row.xpath("./p[5]/text()")[0],
                    }
                except IndexError:
                    continue  # malformed city row — skip it
                f.write("%s,%s,%s,%s,%s" % (
                    item["地区"],
                    item["现有确诊"],
                    item["累计确诊"],
                    item["死亡"],
                    item["治愈"]
                ))
                f.write("\n")
        # Switch to the domestic trend panel and parse it.
        self.browser.find_element_by_xpath(
            "/html/body/div/div/div[3]/div[11]/div/div[2]").click()
        time.sleep(0.5)
        self.Gninfo(html.fromstring(self.browser.page_source))

    def _write_trend(self, response, path):
        """Append (confirmed, daily-change) pairs from a trend panel to
        ``path``.  Shared by :meth:`Gninfo` and :meth:`Gwinfo`, which were
        previously copy-paste duplicates.
        """
        entries = response.xpath(
            "/html/body/div/div/div[3]/div[12]/div/div[2]/ul/li")
        with open(path, "a", encoding='utf-8') as f:
            for li in entries:
                change = li.xpath("./div/div/b/em/text()")[0]
                confirmed = li.xpath("./strong/text()")[0]
                # Trailing comma kept to preserve the original CSV layout.
                f.write("%s,%s," % (confirmed, change))
                f.write("\n")

    def Gninfo(self, response):
        """Record the domestic (国内) trend figures, then click through to
        the overseas panel and parse it."""
        self._write_trend(response, "./ama/csvhandle/gndata.csv")
        self.browser.find_element_by_xpath(
            "/html/body/div/div/div[3]/div[18]/div/div[3]").click()
        time.sleep(0.5)
        self.Gwinfo(html.fromstring(self.browser.page_source))

    def Gwinfo(self, response):
        """Record the overseas (国外) trend figures."""
        self._write_trend(response, "./ama/csvhandle/gwdata.csv")