import requests
from tools.getUrls import getUrls
from tools.logout import savelog
from tools.plain_text import get_plain_text
from concurrent.futures import ThreadPoolExecutor, as_completed, wait
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from lxml import etree, html
from retrying import retry
from parseA import ParseData
from pyquery import PyQuery as pq
from selenium import webdriver
import charset_normalizer
import aiomultiprocess
import aiohttp,asyncio

# Suppress the InsecureRequestWarning that urllib3 emits on every request,
# since all requests in this module are made with verify=False (SSL skipped).
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


class SpiderWebsite:
    """Crawl a list of websites and persist the extracted data to MySQL.

    For each URL the spider fetches the page (via Selenium), strips
    scripts/styles, extracts meta description/keywords, social-media links,
    all hyperlinks, and phone/fax/email fields, accumulating results in
    ``self.datas``; failed fetches are logged and (optionally) collected in
    ``self.fail_data``. ``main()`` drives the whole run.
    """

    def __init__(self):
        # Result buffers are per-instance; the original class-level lists
        # were mutable class attributes shared by every instance.
        self.fail_data = []
        self.datas = []
        self.chrome_options = webdriver.ChromeOptions()
        # self.chrome_options.add_argument('--headless')  # enable for headless runs
        self.driver = webdriver.Chrome()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
        }

    # Retried up to 3 times on any exception (raised below for bad statuses).
    @retry(stop_max_attempt_number=3)
    def get_requests(self, url):
        """Fetch ``url`` with requests and hand the decoded body to get_response.

        Raises on any status outside 2xx/3xx so the ``@retry`` decorator
        re-attempts the request.
        """
        resp = requests.get(url, headers=self.headers, timeout=15, verify=False)
        # Any 2xx/3xx counts as success (replaces the original redundant
        # `== 200 or startswith("2") or startswith("3")` chain).
        if 200 <= resp.status_code < 400:
            print(f"{url}已成功请求 {resp.status_code}")
            # Detect the payload encoding; detection may return None, which
            # would make bytes.decode() raise TypeError — fall back to UTF-8.
            charset = charset_normalizer.detect(resp.content)['encoding'] or 'utf-8'
            self.get_response(resp.content.decode(charset), url)
        else:
            raise Exception('error request,retesting')

    def selenium_requests(self, url):
        """Load ``url`` in the shared Chrome driver and process its page source."""
        self.driver.get(url)
        print(self.driver.page_source)
        if self.driver.page_source:
            self.get_response(self.driver.page_source, url)
        else:
            print(f'{url}源码为空')

    def check_url(self, url):
        """Fetch one URL, logging (rather than propagating) any failure.

        Best-effort by design: a broken site must not abort the whole crawl.
        """
        try:
            # self.get_requests(url)  # requests-based path, kept for reference
            self.selenium_requests(url)
        except Exception as e:
            savelog(e, '../../logs/my.log', url)
            # self.fail_data.append((url, 0))

    def get_response(self, resp, url):
        """Parse a successfully fetched page and append a result row to self.datas.

        :param resp: raw HTML text of the page
        :param url:  the URL it was fetched from
        """
        print(f'Start processing {url} response')
        doc = pq(resp)
        doc.remove('script')
        doc.remove('style')
        # Round-trip through GBK with errors='ignore' to drop characters the
        # downstream (presumably GBK) storage cannot represent — TODO confirm.
        doc = str(bytes(str(doc), encoding='utf-8').decode('utf-8').encode('gbk', 'ignore').decode('gbk'))
        plain_text = get_plain_text(doc)
        emt = etree.HTML(str(doc))
        parser = ParseData()  # one parser instance instead of three throwaways
        # <meta> description / keywords fields
        desc_info, kw_info = parser.parse_dc_kw(emt)
        # Social-media links plus every hyperlink found on the page
        facebook_url, youtube_url, twitter_url, linkedin_url, instagram_url, hrefs = \
            parser.parse_href(resp, emt, url)
        # Phone / fax / email extracted from the cleaned document text
        tel, fax, email = parser.parse_tel_fax_email(str(doc))
        self.datas.append(
            (url, 1, str(plain_text), desc_info, kw_info, facebook_url, youtube_url,
             twitter_url, linkedin_url, instagram_url, str(hrefs), tel, fax, email))

    def main(self):
        """Read the URL list, crawl every site, then flush results to MySQL."""
        # Extract URLs from the input file and de-duplicate them.
        urls = getUrls('../../common/1.txt')
        url_li = list(set(urls))
        print(f'Test {len(url_li)} websites')
        try:
            # A single worker keeps the one shared Chrome driver safe;
            # raise max_workers only with per-thread drivers.
            with ThreadPoolExecutor(max_workers=1) as pe:
                tasks = [pe.submit(self.check_url, url) for url in url_li]
                wait(tasks)  # default return_when is ALL_COMPLETED
            ParseData().conn_mysql(self.datas)
            ParseData().fail_request_conn_mysql(self.fail_data)
        finally:
            # The original leaked the Chrome process; always shut it down.
            self.driver.quit()


if __name__ == '__main__':
    spider = SpiderWebsite()
    spider.main()
