from urlpool import UrlPool
import aiohttp
import asyncio
from urllib import parse
import pymysql
from gne import GeneralNewsExtractor
import random
import warnings
import sys
from newsender import sender
from settings import *
from fake_useragent import UserAgent
import chardet  # charset detection, used to decode downloaded pages
from Pub_Parser import Parser
from Save_data import Base_save
from simulate import sim_requests
import traceback
import nest_asyncio
# Allow re-entrant use of the running event loop (the pyppeteer download path
# calls back into the loop from inside a coroutine).
nest_asyncio.apply()
warnings.filterwarnings("ignore")
# NOTE(review): `requests` is not imported above — along with `time`, `logging`,
# `urlparse`, `log_num`, `D_parser` and the config constants used below, it is
# presumably brought in by `from settings import *`; verify, otherwise this
# line raises NameError at import time.
requests.packages.urllib3.disable_warnings()

class NewSpider:
    """Asynchronous keyword news crawler.

    Seeds paginated search ("hub") urls into a :class:`UrlPool`, then loops:
    hub pages are downloaded and parsed for article links, article ("detail")
    pages are downloaded, parsed (site-specific parser or gne fallback) and
    persisted via ``Base_save``.  Configuration constants (``Timeout``,
    ``Retry`` …) come from ``settings`` via star import.
    """

    def __init__(self, kw, page, name=''):
        """
        :param kw: search keyword (must be ``str``); url-quoted for hub urls.
        :param page: number of result pages to crawl.
        :param name: optional explicit pool/storage name; defaults to ``kw``.
        """
        if not isinstance(kw, str):
            raise Exception('关键词错误')
        # Bug fix: the original assigned self.name = kw unconditionally,
        # clobbering an explicitly supplied `name` one line above.
        self.name = name if name else kw
        self.kw = parse.quote(kw)
        # NOTE(review): page+1 combined with range(1, page+1) in hub_urls
        # crawls one page more than requested — kept as original behavior.
        self.page = int(page) + 1
        self.start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        self.saveways = Save_Ways          # storage backend selector (settings)
        self.send_flag = Mails_enable      # mail a report on shutdown
        self.loop = asyncio.get_event_loop()
        self.session = aiohttp.ClientSession(loop=self.loop)
        self.urlpool = UrlPool(self.name)
        self.timeout = Timeout
        self.ua = UserAgent(verify_ssl=False)
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
        }
        self.params = ''
        self.retry = Retry                 # master switch for retrying failed urls
        self.proxy_flag = Proxy            # always route through a random proxy
        self.fails_url = {}                # url -> number of retries so far
        self.retry_times = Retry_Times - 1
        self.waitime = WaitTime            # hub refresh interval (seconds)
        self.semaphore = asyncio.Semaphore(Fetch_Num)  # caps concurrent downloads
        self.extractor = GeneralNewsExtractor()  # generic article extractor (gne)
        self.save_data = Base_save(self.saveways, self.name)
        self.csv_store = Csv_Store
        self.empty_time = False            # timestamp of first empty pop, or False
        self.count = Count_Num             # urls popped from the pool per batch
        self.fail_proxy = []               # proxies that failed to connect
        self._mail_sent = False            # prevents __del__ double-mailing
        self.logger = self.set_logger()
        self.logger.warning('Newspiders start')
        self.logger.warning(f'keywords: {kw}, pages: {page}')

    async def fetch(self, url, proxy=False, is_details=False, method='GET', verify=False, simulate=False, **kwargs):
        """Download *url* and return the decoded body, or ``False`` on failure.

        :param proxy: explicit proxy url, or falsy to pick per ``self.proxy_flag``.
        :param is_details: report the HTTP status back to the url pool.
        :param method: 'GET' or 'POST'.
        :param verify: passed through as aiohttp ``verify_ssl``.
        :param simulate: use the headless-browser downloader instead of aiohttp.
        """
        retry_proxy = False  # proxy (or False) for a follow-up attempt
        do_retry = False
        async with self.semaphore:
            if simulate:
                # Bug fix: the original called loop.run_until_complete() from
                # inside this coroutine (only worked thanks to nest_asyncio);
                # awaiting directly is equivalent and loop-safe.
                response = await sim_requests(url, **kwargs)
                if is_details:  # report status for detail pages
                    self.urlpool.set_status(url, response.status)
                if 200 <= response.status <= 300:
                    log_num["req_ok_num"] += 1
                    return response.text
                self.logger.warning(f'Pyppeteer Downloader fails({response.status}): {url}')
                log_num["fail_req_num"] += 1
                return False
            try:
                request = self.session.get if method == 'GET' else self.session.post
                if self.proxy_flag or proxy:
                    proxy = proxy if proxy else random.choice(proxy_list)
                    self.logger.warning(f'Use {proxy} download {url}')
                    response = await request(url, proxy=proxy, headers=self.headers,
                                             timeout=self.timeout, verify_ssl=verify, **kwargs)
                else:
                    response = await request(url, headers=self.headers,
                                             timeout=self.timeout, verify_ssl=verify, **kwargs)
                status_code = response.status
                if is_details:  # report status for detail pages
                    self.urlpool.set_status(url, status_code)
                if 200 <= status_code <= 300:
                    log_num["req_ok_num"] += 1
                    html_bytes = await response.read()
                    # Bug fix: chardet returns {'encoding': None} when detection
                    # fails, so dict.get's default never applied and decode(None)
                    # raised TypeError; `or` covers the None case.  errors='replace'
                    # keeps one misdetected page from killing the crawl.
                    encoding = chardet.detect(html_bytes).get('encoding') or 'utf-8'
                    return html_bytes.decode(encoding, errors='replace')
                if self.retry and self.fails_url.get(url, 0) <= self.retry_times:
                    self.logger.warning(f'Downloader fails({status_code}): {url}')
                    times = self.fails_url.get(url, 0)
                    # Bug fix: the 403 branch never counted retries, so a
                    # persistently banned url was retried forever.
                    self.fails_url[url] = times + 1
                    if status_code == 403:
                        self.logger.warning(f'Spider was banned from {parse.urlparse(url).netloc}')
                        retry_proxy = self.get_proxy()
                        self.logger.warning(f'Use {retry_proxy} retry {url}')
                        self.headers['User-Agent'] = self.ua.random  # rotate UA after a ban
                        self.logger.warning(f'Fetch Ua Change: {self.headers["User-Agent"]}')
                    else:
                        self.logger.warning(f'Retry Download {url} times {times}')
                    do_retry = True
                else:
                    self.logger.warning(f'Downloader fails({status_code}): {url}')
                    self.logger.warning(f'Drop url: {url}')
                    log_num["fail_req_num"] += 1
                    return False
            except aiohttp.client_exceptions.ClientProxyConnectionError:
                self.logger.warning(f'Proxy {proxy} cannot use')
                self.fail_proxy.append(proxy)
                self.fails_url[url] = self.fails_url.get(url, 0) + 1
                retry_proxy = self.get_proxy()
                self.headers['User-Agent'] = self.ua.random
                self.logger.warning(f'Fetch Ua Change: {self.headers["User-Agent"]}')
                do_retry = True
            except asyncio.TimeoutError:
                self.logger.warning(f'Download timeout {url}')
                log_num["fail_req_num"] += 1
                return False
            except Exception as e:
                self.logger.error(f'Fetch Error {e} {url}')
                log_num["fail_req_num"] += 1
                return False
        if do_retry:
            # Bug fixes vs original: retry AFTER the semaphore is released (the
            # original manually released inside `async with`, double-releasing
            # on exit, and could deadlock recursing on a held slot); RETURN the
            # retried result (originally discarded, so callers always got None);
            # forward is_details/method/verify (originally lost on retry).
            return await self.fetch(url, proxy=retry_proxy, is_details=is_details,
                                    method=method, verify=verify, **kwargs)

    def hub_urls(self):
        """Expand every hub-url template over the requested pages and seed the pool."""
        news_hub = []
        for pub in Pub_url_list:
            # Consistency fix: use parse.urlparse like the rest of the class
            # (bare `urlparse` relied on the settings star import).
            hub_host = parse.urlparse(pub).netloc
            offset = Offset_Hub.get(hub_host)
            if offset:
                # Some sites paginate by item offset rather than page number.
                pubs = [pub.format(keywords=self.kw, page=page * offset) for page in range(1, self.page + 1)]
            else:
                pubs = [pub.format(keywords=self.kw, page=page) for page in range(1, self.page + 1)]
            news_hub.extend(pubs)
        self.logger.info(f'add hubs :{news_hub}')
        self.urlpool.set_hubs(news_hub, self.waitime)

    async def run(self):
        """Main crawl loop: pop url batches and dispatch hub/detail downloaders.

        Exits via sys.exit once the pool has stayed empty past the refresh
        interval plus a 60 s grace period.
        """
        while True:
            urls = self.urlpool.pop(count=self.count)
            if not urls:
                # Remember when the pool first came back empty.
                self.empty_time = self.empty_time if self.empty_time else time.time()
                waiting_time = time.time() - self.empty_time
                if 5 < waiting_time < 10:
                    # Sleep until ~10 s before the hub urls refresh.
                    self.logger.warning('spider sleep now')
                    self.logger.info(
                        f'req_ok_num: {log_num["req_ok_num"]}, fail_req_num: {log_num["fail_req_num"]}, hub_ok_num: {log_num["hub_ok_num"]}, hub_error_num: {log_num["hub_error_num"]}, parser_num: {log_num["parser_num"]}, save_ok_num: {log_num["save_ok_num"]}, save_error_num: {log_num["save_error_num"]}, repate_num: {log_num["repate_num"]}, rubbish_num: {log_num["rubbish_num"]}')
                    await asyncio.sleep(self.waitime - 10)
                    self.logger.warning('spider wake up')
                elif waiting_time >= self.waitime + 60:
                    self.logger.warning(f'For {time.time() - self.empty_time} seconds cannot get urls')
                    sys.exit('urlpool is empty! spider closed!')
                else:
                    # Bug fix: a bare `continue` busy-spun the event loop
                    # (no await point) while waiting for the pool to refill.
                    await asyncio.sleep(1)
            else:
                self.empty_time = False  # pool refilled; reset the empty timer
                for url, is_hub in urls.items():
                    if is_hub:
                        await self.hub_downloader(url)
                        self.logger.info(f'get hub_url: {url}')
                    else:
                        await self.detail_downloader(url)
                        self.logger.info(f'get detail_url: {url}')

    async def hub_downloader(self, url):
        """Download one hub (search-result) page and feed extracted links to the pool."""
        self.logger.info(f'download hub url: {url}')
        try:
            host = parse.urlparse(url).netloc
            hub_response = await self.fetch(url, simulate=Hubs_Js)
            if not hub_response:
                return
            hub_url = Parser(host, hub_response, url).execute()
            if hub_url:
                self.urlpool.addmany(hub_url)
                log_num["hub_ok_num"] += 1
            else:
                # Parser returned nothing: no site parser registered for host.
                self.logger.error(f'{host} parser is not existence')
                log_num["hub_error_num"] += 1
        except Exception as e:
            traceback.print_exc()
            self.logger.error(f'Hub_Error: {e}')
            log_num["hub_error_num"] += 1

    async def detail_downloader(self, url):
        """Download one article page, parse it and persist the extracted data.

        Uses a site-specific ``D_parser`` method (looked up in ``Details_url``)
        when available, otherwise falls back to gne's generic extractor.
        """
        log_num["parser_num"] += 1
        try:
            detail_response = await self.fetch(url, is_details=True, simulate=Details_Js)
            if not detail_response:
                return
            # Consistency fix: parse.urlparse (bare name came from settings *).
            host = parse.urlparse(url).netloc
            if host in Details_url:
                if host in Details_url_ajax:
                    # Ajax-style pages: the parser receives/handles JSON.
                    data = getattr(D_parser(url=url, response=detail_response, Json=True), Details_url.get(host))()
                else:
                    data = getattr(D_parser(url=url, response=detail_response), Details_url.get(host))()
            else:
                data = self.extractor.extract(detail_response)
            self.save_data.execute(data=data, url=url)
            log_num['save_ok_num'] += 1
        except Exception as e:
            if isinstance(e, TypeError):
                self.logger.warning(f'Details url parse fails: {url}')
            elif isinstance(e, pymysql.err.IntegrityError):
                # Unique-key violation from the MySQL backend => duplicate item.
                self.logger.warning(f'Repeat title: {url}')
                log_num["repate_num"] += 1
            elif 'short' in str(e) or 'rubbish' in str(e):
                # Quality filters downstream raise with these markers.
                self.logger.error(f'Drop item {e} {url}')
                log_num["rubbish_num"] += 1
            else:
                self.logger.error(f'Detail_downloader error {e}')
            log_num["save_error_num"] += 1

    def set_logger(self):
        """Build a file+console logger named after the unquoted keyword.

        Side effect: sets ``self.file_name`` (the log path, later mailed).
        """
        logger = logging.getLogger(parse.unquote(self.kw))
        logger.setLevel(level=logging.INFO)
        self.file_name = f"log/{parse.unquote(self.kw)}-log-{time.strftime('%y-%m-%d-%H', time.localtime())}.txt"
        # Bug fix: guard against duplicate handlers (and duplicated log lines)
        # when a spider for the same keyword is created more than once.
        if not logger.handlers:
            formatter = logging.Formatter('<%(asctime)s - %(name)s> - %(levelname)s : %(message)s')
            handler = logging.FileHandler(self.file_name)
            handler.setLevel(Save_Log_Level)
            handler.setFormatter(formatter)
            console = logging.StreamHandler()
            console.setLevel(Console_Log_Level)
            console.setFormatter(formatter)
            logger.addHandler(handler)
            logger.addHandler(console)
        return logger

    def get_proxy(self):
        """Return a random proxy that has not failed yet; exit when all are dead."""
        proxy = random.choice(proxy_list)
        while proxy in self.fail_proxy:  # never hand out a known-dead proxy
            if len(self.fail_proxy) == len(proxy_list):
                self.logger.error('proxy is empty')
                sys.exit('proxy is empty')
            proxy = random.choice(proxy_list)
        return proxy

    def mains(self):
        """Entry point: seed hub urls, drive the crawl loop, mail a report on exit."""
        self.hub_urls()
        try:
            self.loop.run_until_complete(self.run())
        except BaseException as e:
            if isinstance(e, KeyboardInterrupt):
                self.logger.warning('stopped by yourself!')
                self.logger.warning('Closed by master')
                if self.send_flag:
                    sender(self.file_name, parse.unquote(self.kw), self.start_time, 'master closed').post()
                    self._mail_sent = True  # suppress the fallback mail in __del__
            else:
                self.logger.error(f'Error closed: {str(e)}')
                if self.send_flag:
                    sender(self.file_name, parse.unquote(self.kw), self.start_time, str(e)).post()
                    self._mail_sent = True
            # Dropping the pool triggers its own cleanup (__del__) before exit.
            del self.urlpool
        finally:
            # Bug fix: close the aiohttp session so the connector is not leaked
            # (nest_asyncio lets us re-enter the loop here).
            if not self.session.closed:
                self.loop.run_until_complete(self.session.close())

    def __del__(self):
        """Last-resort shutdown mail (e.g. sys.exit from get_proxy skips mains' handler)."""
        # Bug fixes: getattr guards keep __del__ from raising on partially
        # initialized instances, and _mail_sent prevents a second, misleading
        # 'proxy empty' mail after mains already reported the real reason.
        if getattr(self, 'send_flag', False) and not getattr(self, '_mail_sent', False):
            self._mail_sent = True
            sender(self.file_name, parse.unquote(self.kw), self.start_time, 'proxy empty').post()


def loacl_text(keywords, page):
    """Run a spider for *keywords* over *page* result pages (local test helper).

    NOTE(review): the name looks like a typo of "local_test"/"local_text"; it is
    kept for backward compatibility and aliased below under the intended spelling.
    """
    NewSpider(kw=keywords, page=page).mains()


# Correctly spelled, backward-compatible alias; prefer this in new code.
local_text = loacl_text


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='keyword news spider')
    # Bug fix: both flags were optional, so omitting them passed None into
    # NewSpider, which died with an opaque '关键词错误' exception instead of a
    # proper usage message; required=True fails fast with argparse's own error.
    parser.add_argument('-k', type=str, required=True, help='your search keywords')
    parser.add_argument('-p', type=int, required=True, help='your search page nums')
    arg = parser.parse_args()
    NewSpider(kw=arg.k, page=arg.p).mains()

