from logging import exception
import time
from urllib import parse
import scrapy
import re
from scrapy.http import request
from scrapy.utils.trackref import NoneType
from selenium import webdriver
import requests
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# from urllib.request import quote, unquote
from selenium.webdriver.common.keys import Keys
class AlibbSpider(scrapy.Spider):
    """Spider that logs into 1688.com through a Selenium-driven Chrome session,
    then searches offers by a user-supplied keyword and scrapes the listing.

    NOTE(review): requests are issued through Scrapy while the login happens in
    the Selenium browser — presumably a downloader middleware bridges the two
    sessions (not visible in this file); verify before relying on it.
    """

    # Unused by the active code path; kept for the commented-out pagination logic.
    isFirstPage = True
    name = 'alibb'
    allowed_domains = ['re.1688.com']
    start_urls = ['https://login.1688.com/member/signin.htm']

    def start_requests(self):
        """Create the headless Chrome driver (with basic anti-bot-detection
        tweaks) before yielding the default start requests."""
        print("init---------------")
        opt = Options()
        opt.add_argument('--no-sandbox')
        opt.add_argument('--disable-gpu')
        # Hide the "Chrome is being controlled by automated software" banner.
        opt.add_experimental_option('excludeSwitches', ['enable-automation'])
        opt.add_argument("--disable-dev-usage")
        desired_capabilities = DesiredCapabilities.CHROME  # change the page-load strategy
        # "none": return control immediately instead of waiting for full load.
        desired_capabilities["pageLoadStrategy"] = "none"

        opt.add_argument('--headless')
        self.chrome_driver =  'C:/Users/dd/Desktop/chromedriver.exe'
        self.browser = webdriver.Chrome(self.chrome_driver,options=opt)
        # Mask navigator.webdriver so simple bot checks see a normal browser.
        self.browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
        "source": """
            Object.defineProperty(navigator, 'webdriver', {
            get: () => undefined
            })
        """
        })
        self.browser.set_window_size(1920,900)
        self.browser.set_page_load_timeout(30)
        return super().start_requests()

    def parseLogin(self, response):
        """Switch the login iframe to QR-code mode, wait for a manual login,
        then launch a keyword search requested from the console."""
        print("zhengl在处理登录！")
        # Click the icon that toggles the login form to QR-code mode.
        self.browser.find_element_by_xpath('//*[@id="login"]/div[1]/i').click()
        for i in range(10):
            print("注意：请在%d秒内登录" % (10-i))
            time.sleep(1)
        print("-" * 40)
        print("\n请在控制台输入您要搜索的关键字！\n")
        print("-" * 40)
        kw  = input("请输入您要搜索的关键字：")
        # BUG FIX: the original called the bare name `quote`, which is not
        # imported (only `from urllib import parse` is) and raised NameError.
        return scrapy.Request(
            "https://s.1688.com/selloffer/offer_search.htm?keywords=%s&n=y&netType=16&spm=a260k.dacugeneral.search.0&beginPage=1#sm-filt" % parse.quote(kw) ,
            dont_filter=True,
            callback=self.parseClas
        )

    def yanzhen(self, response):
        """Search-box handler (unfinished — not wired up as a callback).

        TODO(review): `.send` is a bare attribute access, not a call —
        presumably this was meant to be `.send_keys(kw)`; confirm the intent
        before connecting this method to a request.
        """
        kw  = input("请输入您要搜索的关键字：")
        self.browser.find_element_by_xpath('//*[@id="home-header-searchbox"]').send
        pass

    def parse(self, response):
        """Follow the login iframe embedded in the sign-in page."""
        # BUG FIX: the original used .get("None"), which yields the literal
        # string "None" when no iframe exists and produced an invalid Request;
        # use the None default and guard instead.
        url = response.xpath(".//iframe/@src").get()
        if url:
            yield scrapy.Request(
                url,
                callback=self.parseLogin,
                dont_filter=True
            )
        # print(self.browser.find_element_by_tag_name("iframe"))
        # self.browser.switch_to.frame(self.browser.find_element_by_tag_name("iframe"))
        # print(response.xpath('.//body').get("None"))
        # print("打印结束")
        # # self.browser.find_element_by_xpath('//*[@id="login"]/div[1]/i').click()
        # # self.browser.find_element_by_css_selector(".icon-qrcode:before").click()
        # return

    def close(self, reason=None):
        """Shut down the Selenium browser when the spider closes.

        BUG FIX: uses quit() instead of close() so the chromedriver process is
        terminated rather than leaking (close() only closes the window). The
        backward-compatible `reason` parameter matches the kwargs Scrapy sends
        with the spider_closed signal.
        """
        self.browser.quit()

    def parsec(self, response):
        """Yield one request per category link found in the layered list."""
        allA = response.xpath(".//ul[@class='layer-lst']//a")
        print("----一共捕获到有%d个页面-----" % (len(allA)))
        for itema in allA:
            item = {}
            try:
                # BUG FIX: the original pattern r"data-spm-anchor-id=(.*?)"
                # ended in a lazy group that always matched the empty string;
                # capture the quoted attribute value instead.
                item["data-spm-anchor-id"] = re.search(
                    r'data-spm-anchor-id="([^"]*)"', itema.get()).group(1)
            except Exception:
                # Best-effort: the attribute may be absent; leave the key unset.
                pass
            item["url"] = itema.xpath("./@href").get("None")
            item["classifyName"] = itema.xpath("./text()").get("None")
            yield scrapy.Request(
                item["url"],
                callback=self.parseClas,
                meta=item,
                dont_filter=True
            )
            # return

    def parseClas(self, response):
        """Scrape the offer-listing page; pauses for manual captcha handling."""
        # BUG FIX: response.xpath(...) returns a SelectorList, which is never
        # None, so the original warning fired on every page; check .get().
        if response.xpath('//*[@id="baxia-punish"]/div[2]/div/div[1]/div[2]/div').get() is not None:
            print("注意：当前页面有验证码拦截！")
        input("是否处理完！")
        allC = response.xpath(".//ul[@id='sm-offer-list']/div")
        print("当前页面元素个数%s" % str(len(allC)))
        for itemc in allC:
            # Skip entries that are not offer cards (no direct link).
            if itemc.xpath("./a/@href").get("None") == "None":
                continue
            item = {}
            # item["classify"] = response.meta.get("classifyName")
            callUrl = itemc.xpath("./div/div[@class='mojar-element-company']/a/@href").get("None")
            item["title"] = itemc.xpath("./div/div[@class='mojar-element-title']//div[@class='title']/text()").get("None")
            item["price"] = itemc.xpath("./div/div[@class='mojar-element-price']//div[@class='price']/text()").get("None")
            print("*" * 40)
            print(item)
            print(callUrl.startswith("http"))
            print("*" * 40)
            # yield scrapy.Request(
            #     callUrl,
            #     callback=self.parseDetail,
            #     meta = item,
            #     dont_filter=True
            # )
        # NOTE(review): debug pause — blocks the Twisted reactor for ~17 min;
        # remove once detail scraping is re-enabled.
        time.sleep(1000)
        # return

    # def parseDetail(self,response):
    #     self.isFirstPage = False
    #     try:
    #         allli = response.xpath('//*[@id="topnav"]/div/ul/li')
    #         for itenli in allli:
    #             if itenli.xpath("./a/text()").get("None").strip() == "联系方式":
    #                 # self.browser.find_element_by_xpath('//*[@id="topnav"]/div/ul/li[%d]' % (allli.index(itenli) + 1 )).click()
    #                 yield scrapy.Request(
    #                     itenli.xpath("./a/@href").get(""),
    #                     callback=self.handleContent,
    #                     meta=response.meta,
    #                     dont_filter=True
    #                 )
    #     except Exception as e:
    #         print(e)
    #         print("当前页面异常，没有联系方式，跳过！")

    # def handleContent(self,response):
    #     item = {}
    #     superM = response.xpath(".//div[@class='props-part']/div[1]")
    #     item["classify"] = response.meta.get("classify")
    #     item["title"] = response.meta.get("title")
    #     item["price"] = response.meta.get("price")
    #     item["dianName"] = superM.xpath("./div[@class='contact-info']/h4/text()").get("None").strip()
    #     item["lianxiR"]  = superM.xpath("./div[@class='contact-info']/dl/dd/a/text()").get("None").strip() + superM.xpath("./div[@class='contact-info']/dl/dd/text()").get("").replace("&nbsp;","").strip()
    #     allDl = superM.xpath("./div[@class='fd-line']/div[2]/dl")
    #     someExist = {"dir":False,"cal":False}
    #     for itemdl in allDl:
    #         if itemdl.xpath("./dt/text()").get("None").strip().replace("&nbsp;","") == "移动电话：":
    #             someExist["cal"] = True
    #             item["call"] = itemdl.xpath("./dd/text()").get("None").strip().replace("&nbsp;","")
    #         # if itemdl.xpath("./dt/text()").get("None").strip().replace("&nbsp;","") == "地址：":
    #         #     item["addR"] = itemdl.xpath("./dd/text()").get("None").strip().replace("&nbsp;","")
    #         #     someExist["dir"] = True
    #     if someExist["cal"] == False:
    #         item["call"]  = "None"
    #     item['addR']  = allDl.xpath(".//dd[@class='address']/text()").get('None').strip()
    #     print(item)
    #     yield item