# coding:utf-8
import aiohttp
import asyncio
import redis
import multiprocessing
import os, sys

import requests

sys.path.append('/root/qvenv')
import charset_normalizer
from tools.logout import save_log
from tools.plain_text import get_plain_text
from tools.Dingding import dingtalk_robot
from tools.get_tasks import GetTasks
from multiprocessing import Pool
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from lxml import etree
from retrying import retry
from website1000_parse import ParseData
from pyquery import PyQuery as pq
from selenium import webdriver
from configs.cfg import *
import time
import pymysql


# Suppress the InsecureRequestWarning emitted when SSL verification is skipped
# requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

class SpiderWebsite:
    """Asynchronous website crawler.

    URLs are taken from the Redis set ``urlSet``, fetched with aiohttp,
    parsed into structured fields (plain text, meta description/keywords,
    social links, tel/fax/email) and staged in the Redis list
    ``queue:datasSet``; crawl status is tracked in the MySQL table
    ``websiteaddresses`` (``is_crawl``: 1 = crawled OK, 2 = failed).
    """

    # Shared class-level connections: created once at import time and
    # reused by every instance (and re-pinged before use), matching the
    # original design.
    connect = pymysql.connect(host=mysql_host, user=mysql_name, password=mysql_pwd,
                              database='spiderdb', port=mysql_port, charset='utf8')
    cur = connect.cursor()
    pool = redis.ConnectionPool(host=redis_host, port=redis_port, password=redis_pwd,
                                decode_responses=True, db=11)
    db = redis.Redis(connection_pool=pool)
    pipeline = db.pipeline()

    def __init__(self):
        # Desktop-Chrome UA to reduce trivial bot blocking.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
        }

    async def get_requests(self, url, max_attempts=3):
        """Fetch *url*, decode the body with an auto-detected charset,
        hand it to ``handle_response`` and mark the row crawled
        (``is_crawl=1``) in MySQL.

        Retries up to *max_attempts* times; re-raises the last error so
        ``check_url`` can record the failure.

        Fix: the original decorated this coroutine with ``@retry`` from the
        ``retrying`` package, which does not await coroutines — exceptions
        raised while awaiting were invisible to it and no retry ever
        happened.  The explicit loop below restores the intended behaviour.
        """
        last_exc = None
        for _ in range(max_attempts):
            try:
                # Fresh connector per attempt; SSL verification disabled.
                conn = aiohttp.TCPConnector(ssl=False)
                # Hard cap of 60 s for the whole request.
                timeout = aiohttp.ClientTimeout(total=60)
                async with aiohttp.ClientSession(trust_env=True, connector=conn,
                                                 timeout=timeout) as client:
                    async with client.get(url, headers=self.headers) as resp:
                        if not str(resp.status).startswith("2"):
                            raise Exception('Not 2** request')
                        content = await resp.content.read()
                        # Auto-detect the encoding; detect() may return
                        # None, in which case fall back to utf-8.
                        charset = charset_normalizer.detect(content)['encoding'] or 'utf-8'
                        text = content.decode(charset)
                await self.handle_response(text, url)
                try:
                    self.connect.ping(reconnect=True)
                    base_url = url.replace("http://", "")
                    # Parameterized query: the original interpolated the URL
                    # into the SQL string, which is injection-prone and
                    # breaks on URLs containing a double quote.
                    self.cur.execute(
                        'UPDATE websiteaddresses SET is_crawl=1 WHERE website=%s',
                        (base_url,))
                    self.connect.commit()
                    print(url, ' Update Succeeded')
                except Exception as e:
                    print(e, '数据修改失败')
                return
            except Exception as exc:
                last_exc = exc
        raise last_exc

    async def check_url(self, url):
        """Run one crawl; on any failure push *url* onto the Redis list
        ``fUrlSet``, mark ``is_crawl=2`` in MySQL and log the error."""
        try:
            await self.get_requests(url)
        except Exception as e:
            # Queue the failed url for a later (e.g. selenium-based) pass.
            self.db.lpush('fUrlSet', url)
            self.connect.ping(reconnect=True)
            base_url = url.replace("http://", "")
            self.cur.execute(
                'UPDATE websiteaddresses SET is_crawl=2 WHERE website=%s',
                (base_url,))
            self.connect.commit()
            save_log(e, '../../logs/my.log', url)

    async def handle_response(self, resp, url):
        """Parse a successful response and stage the extracted record.

        Strips <script>/<style>, extracts plain text, description/keyword
        meta tags, social-media links, and tel/fax/email, then pushes one
        stringified tuple onto ``queue:datasSet``.  Once the queue holds
        1000 records they are drained in one pipeline round trip and bulk
        inserted into MySQL.
        """
        doc = pq(resp)
        doc.remove('script')
        doc.remove('style')
        # Round-trip through GBK with 'ignore' to drop characters that the
        # GBK-based downstream storage cannot represent.
        doc = str(bytes(str(doc), encoding='utf-8').decode('utf-8').encode('gbk', 'ignore').decode('gbk'))
        plain_text = get_plain_text(doc)
        emt = etree.HTML(str(doc))
        # Description / keywords meta fields.
        desc_info, kw_info = ParseData().parse_dc_kw(emt)
        # Social-media links plus every hyperlink found on the page.
        facebook_url, youtube_url, twitter_url, linkedin_url, instagram_url, hrefs = \
            ParseData().parse_href(str(doc), emt, url)
        # Contact details.
        tel, fax, email = ParseData().parse_tel_fax_email(str(doc))
        self.db.lpush('queue:datasSet',
                      str((url, 1, str(plain_text), desc_info, kw_info, facebook_url, youtube_url, twitter_url,
                           linkedin_url, instagram_url, str(hrefs), tel, fax, email)))

        if self.db.llen("queue:datasSet") >= 1000:
            # Batch the 1000 pops into a single pipeline round trip.
            for _ in range(1000):
                self.pipeline.brpop('queue:datasSet', timeout=20)
            result = self.pipeline.execute()
            # SECURITY NOTE(review): eval() executes arbitrary expressions.
            # The payloads are self-produced str(tuple) records, but
            # ast.literal_eval would be the safe drop-in if they only ever
            # contain literals — confirm and switch.
            datas = [eval(i[1]) for i in result]
            ParseData().conn_mysql(datas)

    def selenium_get_requests(self, url):
        """Fetch *url* with headless Chrome (fallback for aiohttp failures).

        Fix: the original never called ``driver.quit()``, leaking one
        browser process per call.
        """
        chrome_options = webdriver.ChromeOptions()
        chrome_options.add_argument('--headless')
        driver = webdriver.Chrome(options=chrome_options)
        try:
            driver.implicitly_wait(20)
            driver.get(url)
        finally:
            driver.quit()

    def handle_tasks(self, url):
        """Worker-process entry point: crawl one url in its own event loop."""
        print(f'pid : {os.getpid()}, ppid : {os.getppid()}')
        # asyncio.run() creates and closes a fresh loop per call — the
        # original's asyncio.get_event_loop() is deprecated for this use
        # and can hand a forked child the parent's stale loop.
        spider = SpiderWebsite()
        asyncio.run(spider.check_url(url))

    def put_tasks(self):
        """Producer loop: keep the Redis set ``urlSet`` topped up with
        uncrawled websites from MySQL.  Runs forever; prints 'Ending...'
        once the table is exhausted but keeps polling.
        """
        while True:
            print(f'当前队列的长度为{self.db.scard("urlSet")}')
            if self.db.scard("urlSet") <= 2000:
                time.sleep(2)
                # Single reconnecting ping (the original pinged twice) and
                # a fresh cursor per batch.
                self.connect.ping(reconnect=True)
                cur = self.connect.cursor()
                cur.execute('SELECT website FROM websiteaddresses '
                            'WHERE is_crawl IS NULL LIMIT 10000')
                put_datas = cur.fetchall()
                if put_datas:
                    for row in put_datas:
                        print(row[0])
                        # Note: per-item db.ping() removed — one useless
                        # Redis round trip per URL.
                        self.db.sadd('urlSet', row[0])
                else:
                    # Table exhausted; nothing left to enqueue this pass.
                    print('Ending...')
            time.sleep(1)

    def main(self):
        """Consumer: fan crawl tasks out to a process pool, stopping after
        the url set has been empty for 3 consecutive polls."""
        process_count = multiprocessing.cpu_count() * 2
        pool = Pool(process_count)
        empty_polls = 0
        while True:
            if self.db.scard('urlSet') > 0:
                url = 'http://' + self.db.spop('urlSet')
                pool.apply_async(self.handle_tasks, args=(url,))
                # Fix: the original never reset its counter, so 3 empty
                # polls *ever* (not consecutive) ended the run.
                empty_polls = 0
            else:
                empty_polls += 1
                if empty_polls >= 3:
                    break
                time.sleep(10)

        # Drain outstanding tasks, then shut the pool down.
        pool.close()
        pool.join()


if __name__ == '__main__':
    start_time = time.time()
    crawler = SpiderWebsite()
    # Producer mode: continuously feed uncrawled URLs into Redis.
    crawler.put_tasks()
    # Consumer mode (currently disabled) — run the pool and report errors
    # to the DingTalk robot:
    # try:
    #     crawler.main()
    #     print(f'A total of {time.time() - start_time} seconds')
    # except Exception as e:
    #     dingtalk_robot(e.args[0], ['0000'], False)