import random
import time
from selenium import webdriver

import scrapy

import requests
from scrapy.spidermiddlewares.httperror import HttpError
from scrapy.utils.project import get_project_settings
from twisted.internet.error import DNSLookupError, TCPTimedOutError

from domainPro.items import ProItem

class SopcSpider(scrapy.Spider):
    """Rank-checking spider for so.com (360 search, PC version).

    For each task dict in ``data`` it opens the search results in a
    headless, proxied Firefox (Selenium), pages through the result list
    looking for an entry whose cite text contains the task's target URL,
    yields a ``ProItem`` carrying the rank (``zt=1``) or a not-found item
    (``zt=0``), clicks through on a hit, and tries to visit one common
    inner page before closing the browser.
    """
    name = 'sopc'
    # allowed_domains = ['www.xxx.com']
    # start_urls = ['http://www.baidu.com/']
    # Search URL template; {_q} is filled with the task keyword.
    so_pc ='https://www.so.com/s?q={_q}'
    # Task list: each entry is a dict with id / url / Wordone / page keys
    # (see start_requests); populated from the constructor argument.
    data ={}
    input_return_url =''
    input_upload_url= ''
    # HTTP endpoint returning {'data': {'proxy_list': [...]}} — see parse.
    proxy_url = ''
    # Server identifier copied into each yielded item's 'bh' field.
    server = ''
    page = ''
    is_snapshoot = ''
    # Task id -> page counter; initialised to 1 per task in start_requests.
    page_num ={}
    def __init__(self,name):
        # NOTE(review): despite the parameter name, `name` is a config
        # dict (keys: server, proxy_url, data, rw, page, is_snapshoot,
        # input_return_url, input_upload_url), not the spider name, and
        # scrapy.Spider.__init__ is never called — confirm both are
        # intentional.
        self.server = name['server']
        self.proxy_url = name['proxy_url']
        self.data = name['data']
        self.rw = name['rw']
        self.page = name['page']
        # Random per-run delay (seconds) reused between page interactions.
        self.rand_time = random.uniform(4, 6)
        self.is_snapshoot = name['is_snapshoot']
        self.input_return_url =name['input_return_url']
        self.input_upload_url =name['input_upload_url']

    def start_requests(self):
        """Emit one bootstrap request per task in ``data``.

        The request itself targets a fixed warm-up URL; the real search
        URL and all task parameters travel in ``meta`` and are consumed
        by :meth:`parse`. Errors are routed to :meth:`errback_httpbin`.
        """
        if len(self.data)>0:
            print('开始执行')
            for val in self.data:
                # Build the search URL for this task's keyword.
                new_url = self.so_pc.format(_q=val['Wordone'])
                self.page_num[val['id']] =1
                yield scrapy.Request(url='http://www.fenruiyun.com/', meta={'url': val['url'],'new_url':new_url,'proxy_url':self.proxy_url,'Wordone':val['Wordone'],'page':val['page'],'id': val['id']}, callback=self.parse,errback=self.errback_httpbin)

    def parse(self, response):
        """Drive a headless Firefox through the search result pages.

        ``meta['page']`` is a "start|end" pair; the spider visits
        (end - start + 1) result pages, matching ``meta['url']`` against
        each result's cite text. On a hit it yields a ProItem with zt=1
        and the result's on-page rank, clicks the title link, and tries
        one common inner-page link; if no hit was found it yields a
        ProItem with zt=0. The driver is always closed at the end.
        """
        id = response.meta['id']
        print(response.meta)
        # Browser configuration
        # NOTE(review): '1200X800' uses an upper-case X unlike the other
        # entries — Firefox may not parse that size; confirm.
        size = ["1200X800", "1920x1080", "1366x768", "2560x1440"]
        firefox_options = webdriver.FirefoxOptions()
        # Block images and stylesheets (preference value 2 = deny) to
        # speed up page loads.
        firefox_options.set_preference("permissions.default.image", 2)
        firefox_options.set_preference('permissions.default.stylesheet', 2)
        # Disable the browser cache entirely
        firefox_options.set_preference("network.http.use-cache", False)
        firefox_options.set_preference("browser.cache.memory.enable", False)
        firefox_options.set_preference("browser.cache.disk.enable", False)
        firefox_options.set_preference("browser.sessionhistory.max_total_viewers", 3)
        firefox_options.set_preference("network.dns.disableIPv6", True)
        firefox_options.set_preference("Content.notify.interval", 750000)
        firefox_options.set_preference("content.notify.backoffcount", 3)
        # Enable HTTP pipelining (direct and via proxy) for fewer
        # round-trips per page.
        firefox_options.set_preference("network.http.pipelining", True)
        firefox_options.set_preference("network.http.proxy.pipelining", True)
        firefox_options.set_preference("network.http.pipelining.maxrequests", 32)
        firefox_options.add_argument('window-size=' + random.choice(size))
        firefox_options.add_argument('--headless')
        firefox_options.add_argument('--disable-gpu')
        # Fetch a fresh proxy list from the configured endpoint and pick
        # one at random for this session.
        result = requests.get(response.meta['proxy_url']).json().get('data').get('proxy_list')
        #result = ['http-dynamic-S02.xiaoxiangdaili.com:10030']
        proxy = random.choice(result)
        print(proxy)
        # Route both HTTP and HTTPS traffic through the chosen proxy.
        firefox_capabilities = webdriver.DesiredCapabilities.FIREFOX
        firefox_capabilities['marionette'] = True
        firefox_capabilities['proxy'] = {'proxyType': 'MANUAL', 'httpProxy': proxy,
                                         'sslProxy': proxy}
        try:
            driver = webdriver.Firefox(capabilities=firefox_capabilities, options=firefox_options)
        except:
            # Browser failed to start; returning ends this (generator)
            # callback with no items.
            print('浏览器启动失败')
            return False

        # meta['page'] is "start|end": page = number of extra pages to
        # walk beyond the starting one.
        pages = response.meta['page'].split('|')
        page = int(pages[1]) - int(pages[0])
        # so.com paginates via the 'pn' query parameter.
        url_new = response.meta['new_url'] + '&pn=' + str(int(pages[0]))
        try:
            # Implicit wait before loading the first results page
            driver.implicitly_wait(5)
            driver.get(url_new)
        except:
            print('访问url失败')
            driver.quit()
            print('关闭浏览器')
            return False
        # Target URL fragment to look for in each result's cite text.
        key_seo = response.meta['url']
        rank = 0
        # pms tracks the current result-page number, starting at 'start'.
        pms = int(pages[0]);
        status = 0
        for i in range(page + 1):
            # Current page number
            print(str(pms) + "当前页数")
            time.sleep(self.rand_time)
            # Scroll to the bottom so lazily-rendered results appear.
            driver.execute_script("window.scrollTo(0,document.body.scrollHeight);")
            try:
                item = driver.find_elements_by_xpath('//*[@id="main"]/ul[@class="result"]/li[@class="res-list"]')
            except:
                print('未找到搜索结果')
                break
            for val in item:
                driver.implicitly_wait(self.rand_time)
                try:
                    # The cite element holds the result's displayed URL.
                    click_title = val.find_element_by_xpath('.//p[@class="g-linkinfo"]/cite')
                except:
                    print("未搜索到列表")
                    continue
                try:
                    # NOTE(review): the trailing space in 'res-title ' is
                    # required for an exact XPath class match — confirm it
                    # still matches the live markup.
                    click_a = val.find_element_by_xpath('.//h3[@class="res-title "]/a')
                except:
                    print("未搜索到标题")
                    continue
                if key_seo in click_title.text:
                    print("匹配到" + click_title.text)
                    # The result <li>'s id attribute is its on-page rank.
                    rank = val.get_attribute('id')
                    print('当前排名' + rank)
                    # NOTE: rebinding `item` here shadows the element list
                    # above; harmless only because both loops break next.
                    item = ProItem()
                    # current task
                    item['rw'] = self.rw

                    item['zt'] = 1
                    # page number where the match was found
                    item['pm'] = pms
                    # id
                    item['id'] = response.meta['id']
                    # server number
                    item['bh'] = self.server
                    status = 1
                    # Click through to the matched site before yielding.
                    click_a.click()
                    yield item
                    break
            if status == 1:
                break
            try:
                pms = pms + 1
                # Advance to the next results page.
                driver.find_element_by_xpath('//*[@id="snext"]').click()
            except:
                print('未找到下一页')
                # Close the browser
                try:
                    time.sleep(self.rand_time)
                    driver.quit()
                    print('关闭浏览器')
                except NameError:
                    print('无法关闭浏览器')
                break

        if rank == 0:
            # No match on any visited page: report a zero-rank item.
            item = ProItem()
            item['rw'] = self.rw
            # status: not found
            item['zt'] = 0
            # rank within page (0 = none)
            item['pm'] = 0
            # id
            item['id'] = response.meta['id']
            # server number
            item['bh'] = self.server
            yield item
        else:
            windows = driver.window_handles
            # Switch to the most recently opened window (the clicked-through site)
            driver.switch_to.window(windows[-1])
            driver.implicitly_wait(5)
            time.sleep(self.rand_time)
            # Try common Chinese site-navigation labels ("About us",
            # "Products", "News", "Contact", ...) and click the first
            # inner-page link that exists.
            keys = ['关于我们', '产品中心', '新闻资讯', '联系我们', '公司简介', '新闻中心', '成功案例', '客户案例', '在线询单']
            for val in keys:
                try:
                    a_link = driver.find_element_by_link_text(val)
                    a_link.click()
                    print("%s点击成功" % val)
                    break
                except:
                    print('%s未找到内页' % val)
        # Always attempt to shut the browser down, even after earlier
        # quit paths (quit on an already-closed driver raises, hence the
        # broad catch).
        try:
            driver.quit()
            print('关闭浏览器1')
        except BaseException:
            print('无法关闭浏览器')

    def errback_httpbin(self, failure):
        """Log request failures routed here by start_requests.

        Distinguishes HTTP errors (non-2xx responses), DNS failures and
        timeouts; everything is logged, nothing is retried here.
        """
        # log all failures
        self.logger.info(repr(failure))
        # in case you want to do something special for some errors,
        # you may need the failure's type:

        if failure.check(HttpError):
            # these exceptions come from HttpError spider middleware
            # you can get the non-200 response
            response = failure.value.response
            self.logger.info('HttpError错误 on %s', response.url)

        elif failure.check(DNSLookupError):
            # this is the original request
            request = failure.request
            self.logger.info('DNSLookupError错误 on %s', request.url)

        elif failure.check(TimeoutError, TCPTimedOutError):
            # NOTE(review): this is the builtin TimeoutError, not
            # twisted.internet.error.TimeoutError (which is never
            # imported) — Twisted timeout failures wrap the Twisted
            # class, so this branch may rely on TCPTimedOutError alone;
            # confirm and import the Twisted TimeoutError if intended.
            request = failure.request
            self.logger.info('TimeoutError错误 on %s', request.url)


