import json
import os.path
import re
import threading
import time
import traceback
import urllib.parse

import bs4
import requests
from fake_useragent import UserAgent
from tqdm import tqdm

from src.DataManager.ArticleManager import article_manager, cold_back_up
from src.EnvironmentVariables import BASE_PATH

# Crawler credentials/settings (paid-proxy tunnel, username, password).
with open(os.path.join(BASE_PATH, 'Crawler', 'config.json'), 'r') as f:
    CONFIG = json.load(f)
ua = UserAgent()  # rotating User-Agent generator for request headers
PARALLEL_SEARCH_NUM = 5  # max number of crawler threads running at once
COLD_BACKUP_FREQUENCY = 8100  # cold-backup the article DB every N Crawler creations
tar_urls = {
    # Free proxy listing (page number substituted for %d).
    'free_ip_proxies': 'https://www.kuaidaili.com/free/inha/%d/',
    # Eastmoney "guba" forum list pages, one URL template per board (%d = page number).
    'guba_menu': ['http://guba.eastmoney.com/list,cjpl,f_%d.html', 'http://guba.eastmoney.com/list,gssz,f_%d.html',
                  'http://guba.eastmoney.com/list,zssh000001,f_%d.html']
}


class ParserThread(threading.Thread):
    """Single worker thread that pulls finished Crawlers off a shared queue
    and runs their parsers.

    Bug fix vs. the original: the loop body was wrapped in
    ``try/except/finally: continue``.  In Python a ``continue`` inside a
    ``finally`` clause discards any in-flight ``return``, so the shutdown
    path (``end_flag`` set and queue drained) never actually returned and
    the thread busy-spun forever.  The loop is now structured without a
    ``finally`` so the ``return`` terminates the thread.  The condition
    lock is also released while parsing, so crawlers can notify meanwhile.
    """

    # FIFO queue of Crawler objects waiting to be parsed.
    crawler_list = []
    crawler_list_lock = threading.Lock()
    # Crawlers notify this condition once their HTTP request has finished.
    condition = threading.Condition()
    # Set via finish() to request shutdown after the queue is drained.
    end_flag = False
    end_lock = threading.Lock()

    def __init__(self):
        super().__init__()

    def run(self) -> None:
        while True:
            try:
                with ParserThread.condition:
                    # Sleep until there is work or shutdown was requested.
                    while (not ParserThread.end_flag) and len(ParserThread.crawler_list) == 0:
                        ParserThread.condition.wait()
                with ParserThread.end_lock:
                    if ParserThread.end_flag:
                        if len(ParserThread.crawler_list) == 0:
                            return
                        print("last ", len(ParserThread.crawler_list))
                with ParserThread.crawler_list_lock:
                    if len(ParserThread.crawler_list) == 0:
                        # Another pass already drained the queue; re-check state.
                        continue
                    target = ParserThread.crawler_list.pop(0)
                # target.finish is released by the crawler when its request is
                # done; acquiring it here waits for the response to be ready.
                with target.finish:
                    try:
                        target.parse()
                    except Exception as e:
                        print('^' * 10 + 'ERROR' + '^' * 10)
                        print(e)
                        print(traceback.format_exc())
            except Exception as e:
                print('^' * 10 + 'ERROR' + '^' * 10)
                print(e)
                print(traceback.format_exc())

    @classmethod
    def finish(cls):
        """Ask the worker to exit once crawler_list has been drained."""
        with cls.end_lock:
            cls.end_flag = True


class Parser:
    """Bundles a parse callback with the fetched response and the
    BeautifulSoup document built from it.

    Fixes vs. the original: the redundant ``len(self.kwargs) > 0`` branch is
    removed (unpacking an empty dict with ``**`` is a no-op), and the unused
    ``as _`` exception binding is dropped.
    """

    def __init__(self, parse_func, **kwargs):
        """
        :param parse_func: callable taking (soup, **kwargs); invoked by parse()
        :param kwargs: extra keyword arguments forwarded to parse_func
        """
        self.response = None
        self.parse_func = parse_func
        self.b_s = None  # BeautifulSoup document, set via set_bs()
        self.kwargs = kwargs

    def set_response(self, response):
        """Store the HTTP response and build the soup from its body text."""
        self.response = response
        self.set_bs(response.text)

    def set_bs(self, _text: str):
        """Parse raw HTML text into a BeautifulSoup document (lxml backend)."""
        self.b_s = bs4.BeautifulSoup(_text, features='lxml')

    def parse(self):
        """Run the callback on the current document; errors are printed, not raised."""
        try:
            self.parse_func(self.b_s, **self.kwargs)
        except Exception:
            print('^' * 10 + 'ERROR' + '^' * 10)
            print(traceback.format_exc())


class Crawler(threading.Thread):
    """Thread that fetches a single URL (optionally through a proxy) and
    hands the response to its Parser.

    Bug fix vs. the original: the constructor default ``syn=threading.Semaphore()``
    created ONE semaphore object shared by every default-constructed Crawler
    (the classic mutable-default-argument pitfall).  A ``None`` sentinel now
    creates a fresh semaphore per instance; callers that pass their own
    semaphore are unaffected.
    """

    # Caps the number of crawler threads performing requests at once.
    CRAWLER_THREAD_NUM_CTRL = threading.Semaphore(PARALLEL_SEARCH_NUM)
    called_times_lock = threading.Lock()
    called_times = 0
    # Paid proxy account data
    # tunnel domain:port
    TUNNEL = CONFIG['TUNNEL']
    # username / password credentials
    USERNAME = CONFIG['USERNAME']
    PASSWORD = CONFIG['PASSWORD']
    USE_PROXIES = True

    def __init__(self, path, parser: Parser, syn=None, proxies_type='pay'):
        """
        :param path: absolute URL to fetch
        :param parser: Parser that will receive the response
        :param syn: optional semaphore released when the request finishes;
                    a private one is created when omitted
        :param proxies_type: 'pay' for the paid tunnel, 'free' for the free pool
        """
        super().__init__()
        if syn is None:
            syn = threading.Semaphore()
        use_proxies = Crawler.USE_PROXIES
        with Crawler.called_times_lock:
            Crawler.called_times += 1
            # Periodically snapshot the article DB.
            if Crawler.called_times % COLD_BACKUP_FREQUENCY == 0:
                Crawler.cold_backup()
        self.path = path
        self.parser = parser
        self.syn = syn
        self.USE_PROXIES = use_proxies
        self._url = urllib.parse.urlparse(path)
        self.protocol = self._url.scheme
        self.proxies_type = proxies_type
        # Held until the HTTP request completes; ParserThread blocks on it.
        self.finish = threading.Lock()
        self.finish.acquire()

    def request(self):
        """Fetch self.path (retrying up to 4 times through a proxy when
        enabled) and feed the response to the parser.

        :raises RuntimeError: when every proxied attempt failed
        """
        headers = {'User-Agent': ua.random}
        response = None
        if self.USE_PROXIES:
            for _ in range(0, 4):
                if self.proxies_type == 'free':
                    # Free-pool lookup not implemented yet.
                    proxy = "func to get free ip TODO"
                    # proxy = proxies_ip_pool.get_random_proxies_ip(self.protocol)
                else:
                    # Paid tunnel: authenticated proxy URL for both schemes.
                    proxy = {
                        "http": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": Crawler.USERNAME,
                                                                        "pwd": Crawler.PASSWORD,
                                                                        "proxy": Crawler.TUNNEL},
                        "https": "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": Crawler.USERNAME,
                                                                         "pwd": Crawler.PASSWORD,
                                                                         "proxy": Crawler.TUNNEL}
                    }

                try:
                    response = requests.get(self.path, headers=headers, proxies=proxy, timeout=8)
                    break
                except requests.RequestException:
                    if self.proxies_type == 'free':
                        # proxies_ip_pool.del_ip(proxy[self.protocol])
                        pass
        else:
            print('Use True IP')
            response = requests.get(self.path, headers=headers)
        if response is None:
            raise RuntimeError("网络重试次数过多")
        self.parser.set_response(response)

    def parse(self):
        """Delegate parsing to the attached Parser."""
        self.parser.parse()

    def run(self) -> None:
        try:
            with Crawler.CRAWLER_THREAD_NUM_CTRL:
                self.request()
        except RuntimeError as e:
            print(e)
        except Exception:
            print(traceback.format_exc())
        finally:
            # Always wake waiters, even on failure, so nothing deadlocks.
            self.syn.release()
            self.finish.release()
            with ParserThread.condition:
                ParserThread.condition.notify_all()

    @classmethod
    def cold_backup(cls):
        """Quiesce all crawler slots, snapshot the DB, then release the slots."""
        for _ in range(0, PARALLEL_SEARCH_NUM):
            cls.CRAWLER_THREAD_NUM_CTRL.acquire()
        cold_back_up()
        for _ in range(0, PARALLEL_SEARCH_NUM):
            cls.CRAWLER_THREAD_NUM_CTRL.release()


class GubaMenuCrawler(threading.Thread):
    """Crawls eastmoney guba list ("menu") pages and records article metadata
    (title, link, author, post date) via article_manager.

    NOTE(review): all tracking state below lives on the CLASS, so running more
    than one GubaMenuCrawler at a time would corrupt the year/month rollover
    logic in parse_guba_menu, which also requires pages to be crawled strictly
    in order (newest first).
    """
    # First list page to crawl; overwritten by every __init__ call.
    begin_page = 1
    # Current year being attributed to posts (list pages only show month-day).
    year = 2022
    __year_lock = threading.Lock()
    # Month seen on the previously parsed row; 13 = "nothing seen yet".
    past_date = 13

    def __init__(self, total_start_page=1, total_end_page=6, _type=0):
        """
        :param total_start_page: first list page (sets the shared class attribute)
        :param total_end_page: crawl stops before this page number
        :param _type: index into tar_urls['guba_menu'] selecting the board
        """
        super().__init__()
        GubaMenuCrawler.begin_page = total_start_page
        self.total_end_page = total_end_page
        self.type = _type

    @classmethod
    def parse_guba_menu(cls, b_s, **kwargs):
        """
        Parse one guba list page: for every "normal post" row extract title,
        link, author and post date, then hand the record to article_manager.

        :param b_s: BeautifulSoup document of the list page
        :param kwargs: may carry 'link' (the page URL) for progress logging
        """
        with cls.__year_lock:

            link_pattern = re.compile(r'.*news.*')
            title_class_pattern = re.compile(r'l3 a3')
            author_class_pattern = re.compile(r'l4 a4')
            box_pattern = re.compile(r'^articleh .*normal_post.*$')
            post_date_pattern = re.compile(r'l5 a5')
            article_boxs = b_s.find_all(name='div', class_=box_pattern)
            for article_box in article_boxs:
                title_tag_box = article_box.find(class_=title_class_pattern)
                target_tag = title_tag_box.find(name='a', href=link_pattern)
                if target_tag is None:
                    # Row without a news link (ad/sticky) -> skip it.
                    continue
                author_box = article_box.find(class_=author_class_pattern)
                author = author_box.find(name='a').text
                title = target_tag.attrs['title']
                link = target_tag.attrs['href']

                # Derive the year: list pages only show "MM-DD", so we track
                # month rollovers across rows. The original author notes this
                # code is very ugly (an afterthought) and that, because of it,
                # pages must NOT be crawled out of order.
                post_date = article_box.find(class_=post_date_pattern).text
                post_date = "".join(post_date.split(" ")[0].split("-"))
                this_month = int(post_date[:2])
                if this_month != cls.past_date:
                    if this_month == 1 and cls.past_date == 12:
                        # Jan seen right after Dec: moved past the year
                        # boundary in the wrong direction -> stop this page.
                        return
                if this_month == 12 and cls.past_date == 1:
                    # Dec seen right after Jan: crawling backwards in time,
                    # so the posts now belong to the previous year.
                    if 'link' in kwargs.keys():
                        tmp = kwargs['link']
                    else:
                        tmp = ''
                    cls.year -= 1
                    print(threading.current_thread(), tmp, " 当前年份", cls.year)
                cls.past_date = this_month
                post_date = str(GubaMenuCrawler.year) + post_date
                # end of year derivation

                if re.match(r'//caifuhao.eastmoney.com/news/.*', link):
                    # caifuhao links are protocol-relative and embed the full
                    # YYYYMMDD date in the URL itself.
                    tar_link = 'https:' + link
                    time_pattern = re.compile(r'news/(\d{8})')
                    post_date = re.search(time_pattern, link)
                    post_date = post_date.groups()[0]
                else:
                    tar_link = r'http://guba.eastmoney.com' + link

                article_data = {'title': title, 'link': tar_link, 'post_date': post_date,
                                'author': author}

                article_manager.add_article(article_data)

    def run(self) -> None:
        for i in tqdm(range(GubaMenuCrawler.begin_page, self.total_end_page),
                      f'股吧{self.type}爬取：'):
            # Subsample pages to limit volume: below page 60000 keep every 5th
            # page, between 60000 and 70000 every 2nd, from 70000 on keep all.
            if i < 70000 and ((i < 60000 and i % 5 != 0) or (i > 60000 and i % 2 != 0)):
                continue
            crawler = Crawler(tar_urls['guba_menu'][self.type] % i,
                              Parser(GubaMenuCrawler.parse_guba_menu, link=tar_urls['guba_menu'][self.type] % i),
                              proxies_type='pay')
            with ParserThread.crawler_list_lock:
                ParserThread.crawler_list.append(crawler)
            # Throttle thread creation; the semaphore caps concurrent requests.
            with Crawler.CRAWLER_THREAD_NUM_CTRL:
                crawler.start()
            time.sleep(1 / PARALLEL_SEARCH_NUM)
        # All pages queued: tell the parser thread to drain and exit.
        ParserThread.finish()
        with ParserThread.condition:
            ParserThread.condition.notify_all()

        # ip_crawler.end()

        # ip_crawler.end()


class GubaArticleCrawler(threading.Thread):
    """Fetches full article bodies for links previously collected by
    GubaMenuCrawler and stores them through article_manager."""
    target_list = []

    def __init__(self, begin=1, number=10):
        """
        :param begin: row offset (by article_id order) to start from
        :param number: how many articles to fetch
        """
        super().__init__()
        self.begin = begin
        self.number = number
        pass

    @classmethod
    def caifuhao_parser(cls, b_s: bs4.BeautifulSoup, **kwargs):
        """Parse a caifuhao.eastmoney.com article page and store its body text.

        Deletes the article record when no body container is found (page
        removed/blocked). kwargs (article_id, link) are forwarded to
        article_manager.update.
        """
        article_body_box_pattern = re.compile(r'article-body')
        article_body_box = b_s.find(class_=article_body_box_pattern)
        if article_body_box is None:
            article_manager.del_article(article_id=kwargs['article_id'])
            return

        # Strip the trailing "additional content" widget and everything
        # after it so only the article text remains.
        last_tag_pattern = re.compile(r'(zw)?add[cC]ontent')
        last_tag = article_body_box.find(id=last_tag_pattern)
        if last_tag is not None:
            for del_tag in last_tag.find_next_siblings():
                del_tag.decompose()
            last_tag.decompose()
        article_body_text = article_body_box.get_text()

        article_manager.update(main_body=article_body_text, **kwargs)

    @classmethod
    def guba_news_parser(cls, b_s: bs4.BeautifulSoup, **kwargs):
        """Parse a guba.eastmoney.com article page: extract the body text and
        the full post timestamp, then store both via article_manager.

        Deletes the record when neither body container id is present.
        NOTE(review): assumes the 'zwfbtime' tag exists whenever a body was
        found — a missing time tag would raise here. TODO confirm.
        """
        # article_header_pattern = re.compile('zwHeader')
        # article_header_box = b_s.find(id=article_header_pattern)
        article_body_box_pattern = re.compile('zw_body')
        article_body_box = b_s.find(id=article_body_box_pattern)
        if article_body_box is None:
            # Older page layout uses a different container id.
            article_body_box_pattern = re.compile('zwconbody')
            article_body_box = b_s.find(id=article_body_box_pattern)
            if article_body_box is None:
                article_manager.del_article(article_id=kwargs['article_id'])
                return

        # Pull YYYY MM DD out of the publish-time tag (separators vary).
        time_pattern = re.compile(r'(\d{4})\D(\d{1,2})\D(\d{1,2})')
        time_tag_pattern = re.compile('zwfbtime')
        time_tag = b_s.find(class_=time_tag_pattern)
        time_list = time_pattern.search(time_tag.text).groups()
        post_date = ''.join(time_list)

        article_body_text = article_body_box.get_text()

        article_manager.update(post_date=post_date, main_body=article_body_text, **kwargs)

    def run(self) -> None:

        order = 'article_id'
        column_name = ['article_id']
        # Seed the cursor with the article_id at offset `begin`.
        past_id = article_manager.get_data(order_by=order, column_name=column_name, limit=f"{self.begin},1")[0][0]
        for _ in tqdm(range(self.begin, self.begin + self.number)):

            order = 'article_id'
            column_name = ['article_id', 'link']
            random_key = 4
            clause = f'article_id > {past_id} ' + f"AND mod(article_id,13)<{random_key} "  # too much data: only sample a subset
            tar_article = article_manager.get_data(order_by=order, column_name=column_name, clause=clause, limit=1)[0]
            tar_article = {'article_id': tar_article[0], 'link': tar_article[1]}
            link = tar_article['link']
            past_id = tar_article['article_id']
            # http links are guba news pages; https links are caifuhao pages.
            proto = urllib.parse.urlparse(link).scheme
            if proto == 'http':
                crawler = Crawler(path=link, parser=Parser(GubaArticleCrawler.guba_news_parser, **tar_article))
            else:
                crawler = Crawler(path=link, parser=Parser(GubaArticleCrawler.caifuhao_parser, **tar_article))
            with Crawler.CRAWLER_THREAD_NUM_CTRL:
                crawler.start()
            time.sleep(1 / PARALLEL_SEARCH_NUM)


def test(b_s: bs4.BeautifulSoup):
    """Debug helper: dump the plain text of a parsed document to stdout."""
    text = b_s.get_text()
    print(text)


if __name__ == '__main__':
    # Start the single parser worker, then crawl board-2 menu pages
    # 9870..28799 (GubaMenuCrawler signals the worker to stop when done).
    ParserThread().start()
    GubaMenuCrawler(9870, 28800, _type=2).start()
    # GubaMenuCrawler(73250, 73500, _type=2).start()
    # GubaArticleCrawler(0, 1).start()
    # article = GubaArticleCrawler(begin=10, number=2)
    # article.start()

    # with open('test.html', 'r', encoding='utf8') as f:
    #    a = Parser(GubaArticleCrawler.guba_news_parser)
    #    a.set_bs(f.read())
    #    a.parse()
