import time
from urllib import parse
import scrapy
import re
from scrapy.http import request
from scrapy.utils.trackref import NoneType
from selenium import webdriver
import requests
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# from urllib.request import quote, unquote
from selenium.webdriver.common.keys import Keys
import pprint

class QhinfoSpider(scrapy.Spider):
    """Prompt for an FMZ robot id and print that robot's summary statistics.

    Flow: ``start_requests`` configures a headless, automation-masked Chrome
    driver, ``parse`` asks the user for a robot number and requests
    ``https://www.fmz.com/robot/<id>``, and ``parseinfo`` extracts the
    summary tables and prints them to stdout.
    """

    name = 'qhinfo'
    allowed_domains = ['www.fmz.com']
    start_urls = ['https://www.fmz.com/']

    def closed(self, reason=None):
        """Release the Selenium driver when the spider finishes.

        The original overrode ``close(self)``, but Scrapy's base
        ``Spider.close`` is a staticmethod invoked as ``close(spider,
        reason)``; an instance method with the wrong arity raises TypeError
        at shutdown and leaks the browser.  ``closed(reason)`` is the
        documented hook, and ``quit()`` (unlike ``close()``) also
        terminates the chromedriver process, not just one window.

        :param reason: shutdown reason supplied by Scrapy (unused).
        """
        self.browser.quit()

    def start_requests(self):
        """Build the headless Chrome driver, then defer to the default
        ``start_requests`` for the initial crawl.

        :returns: the iterator produced by ``scrapy.Spider.start_requests``.
        """
        opt = Options()
        opt.add_argument('--no-sandbox')
        opt.add_argument('--disable-gpu')
        # /dev/shm is tiny in containers; the original passed the misspelled
        # "--disable-dev-usage", which Chrome silently ignores.
        opt.add_argument('--disable-dev-shm-usage')
        opt.add_argument('--headless')
        # Chromium log levels: INFO=0 WARNING=1 LOG_ERROR=2 LOG_FATAL=3.
        opt.add_argument('log-level=3')
        opt.add_argument('disable-infobars')
        # A second add_experimental_option('excludeSwitches', ...) call
        # REPLACES the first (it is a dict assignment), so both switches
        # must be supplied in a single list.  The original lost
        # 'enable-automation' this way.
        opt.add_experimental_option(
            'excludeSwitches', ['enable-automation', 'enable-logging'])
        # NOTE(review): the original built DesiredCapabilities.CHROME with
        # pageLoadStrategy="none" but never passed it to the driver (and
        # mutated the shared class-level dict as a side effect) — removed.
        # TODO: make the driver path configurable instead of hard-coding it.
        self.chrome_driver = 'C:/Users/dd/Desktop/chromedriver.exe'
        self.browser = webdriver.Chrome(self.chrome_driver, options=opt)
        # Mask navigator.webdriver so basic bot detection does not trip.
        self.browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
            Object.defineProperty(navigator, 'webdriver', {
            get: () => undefined
            })
        """
        })
        self.browser.set_window_size(1920, 900)
        self.browser.set_page_load_timeout(30)
        return super().start_requests()

    def parse(self, response):
        """Ask the user for a robot number and schedule its detail page.

        :param response: response for a start URL (only used as a trigger).
        :yields: a ``scrapy.Request`` for the robot page, handled by
            :meth:`parseinfo`.
        """
        info_code = input("请输入您的编号：")
        url = 'https://www.fmz.com/robot/' + str(info_code)
        yield scrapy.Request(
            url,
            dont_filter=True,  # start URL dupe filter must not drop it
            callback=self.parseinfo,
        )

    def parseinfo(self, response):
        """Extract the robot's summary tables and pretty-print them.

        Missing cells fall back to the literal string "None" so that the
        printout stays aligned.

        :param response: the robot detail page response.
        """
        divC = response.xpath('.//div[@id="summary-tbl"]/div/div')
        item = {}
        item["初始化时间"] = divC.xpath("./div[2]/p/text()").get("None")
        item["运行时间"] = divC.xpath("./div[3]/p/text()").get("None")
        item["预估做多爆仓"] = divC.xpath(
            "./div[4]/p/text()").get("None").replace('预估做多爆仓价:', "")
        item["预估做空爆仓"] = divC.xpath(
            "./div[5]/p/text()").get("None").replace('预估做空爆仓价:', "")

        # Account summary table: one label per sequential <td> column.
        account = divC.xpath(
            "./div[7]/div/div[@class='tab-content']//tbody/tr")
        account_labels = [
            "初始余额", "钱包余额", "保证金余额", "已用保证金", "保证金比率",
            "总收益", "当日收益", "平均日化", "预估月化", "预估年化", "循环延时",
        ]
        for col, label in enumerate(account_labels, start=1):
            item[label] = account.xpath(
                "./td[%d]/span/text()" % col).get("None")

        # Position table.  Column 6 (网格初始价格) is intentionally skipped,
        # matching the field that was commented out in the original.
        # Trailing spaces in the short labels keep the printout columns
        # aligned — do not strip them.
        position = divC.xpath(
            "./div[8]/div/div[@class='tab-content']//tbody/tr")
        position_fields = [
            (1, "币种    "), (2, "方向    "), (3, "数量    "),
            (4, "持仓价格"), (5, "预估回归收益"), (7, "现价    "),
            (8, "挂单买价"), (9, "挂单卖价"), (10, "未实现盈亏"),
            (11, "成交额"),
        ]
        for col, label in position_fields:
            item[label] = position.xpath(
                "./td[%d]/span/text()" % col).get("None")

        print("*" * 50)
        print("\n")
        for key, value in item.items():
            print("%s：\t\t%s" % (key, value))
        print("\n")
        print("*" * 50)


