import logging
import os
import queue
import time
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

from xpinyin import Pinyin

from common import logger_config
from common.common_config import project_base_dir
from common.get_proxy import get_proxys
from common.place_reader import read_csv_and_date_to_queue
# 初始化日志控件
from history.History import History
from search.search import Search

# Configure file + console logging for the whole process (runs at import time).
logger_config.init_file_log()
logger_config.init_console_log(level=logging.DEBUG)

# Root directory under which all crawl results are written.
output_path = f'{project_base_dir}/output'

# Module-level singletons shared by every worker thread:
# `p` romanizes place names to pinyin; `search` resolves a place's
# history-page URL (see process_job).
p = Pinyin()
search = Search()


class CrawlerHistory:
    """Bundles the state needed for one crawl run.

    Builds the shared work queue from the place CSV, the thread pool the
    workers run on, and the History client they crawl with.
    """

    def __init__(self, place_file, begin_date, end_date, thread_max_workers, timeout) -> None:
        self.place_file = place_file
        self.begin_date = begin_date
        self.end_date = end_date
        # History client shared by every worker; constructed with the
        # proxy-fetching hook and the per-request timeout.
        self.history = History(get_proxy_func=get_proxys, timeout=timeout)
        # Thread pool onto which the __main__ block submits job() tasks.
        self.executor = ThreadPoolExecutor(max_workers=thread_max_workers, thread_name_prefix='crawler')
        # Shared queue of (place_name, dates) work items, plus its initial
        # size so workers can report progress.
        self.place_queue = read_csv_and_date_to_queue(begin_date, end_date, place_file_path=place_file)
        self.all_job_count = self.place_queue.qsize()


# Thread worker task: drain the shared place queue until it is empty.
def job(place_queue, all_job_count, history):
    """Worker loop run by every crawler thread.

    Pops (place_name, dates) jobs off ``place_queue`` and hands them to
    ``process_job``, threading the ``viewstate`` token returned by one job
    into the next request (looks like an ASP.NET __VIEWSTATE — confirm in
    History.find).

    Args:
        place_queue: queue.Queue of (place_name, dates) tuples shared by all workers.
        all_job_count: number of jobs originally enqueued, for progress logging.
        history: History client forwarded to process_job.
    """
    # The very first request carries no viewstate; process_job returns the
    # token to use for the following request.
    viewstate = ''
    while True:
        # get_nowait() instead of qsize()-then-get(): with many workers the
        # qsize() check races with other threads, and a blocking get() on a
        # queue emptied in between would hang this worker forever.
        try:
            task = place_queue.get_nowait()
        except queue.Empty:
            logging.info(f"任务已经被取完")
            return
        try:
            viewstate = process_job(task, viewstate, history)
        except Exception as e:
            # Keep the worker alive: log the failure and move on.
            logging.error(f'爬取异常{e}')
        finally:
            # Progress counter: jobs taken so far out of the original total.
            logging.info(f"{all_job_count - place_queue.qsize()}/{all_job_count} 已完成")


# Build the output directory path for a comma-separated place name.
def get_file_path(place_name, base_path=None):
    """Map a comma-separated place name to its output directory path.

    Segments are reversed so the broadest region becomes the top-level
    directory, e.g. 'city,state,country' -> '<base>/country/state/city'.
    Empty segments are skipped; non-empty segments keep their surrounding
    whitespace (strip() is only used for the emptiness check).

    Args:
        place_name: comma-separated place, most specific part first.
        base_path: root directory; defaults to the module-level output_path.

    Returns:
        The directory path string (nothing is created on disk here).
    """
    path = output_path if base_path is None else base_path
    segments = place_name.split(',')
    segments.reverse()
    for segment in segments:
        if segment.strip():
            path += f'/{segment}'
    return path


# Build the search keyword "city,country" from a comma-separated place name.
def get_search_qry(place_name):
    """Return '<city>,<country>'-style search keywords for a place name.

    ``city`` is the first non-empty comma-separated segment (kept unstripped,
    matching how the result was consumed before). The second keyword is the
    fourth segment (index 3) when present; for shorter names the last segment
    is used as a fallback instead of raising IndexError.
    """
    parts = place_name.split(',')
    # First segment with any non-whitespace content; '' when none qualifies.
    city = next((part for part in parts if part.strip()), '')
    region = parts[3] if len(parts) > 3 else parts[-1]
    return f'{city},{region}'


# The actual crawl work for one place: resolve its URL, then fetch each date.
def process_job(date_place_name_tuple, viewstate, history):
    """Resolve the history URL for one place and crawl each requested date.

    Per-date failures are logged and skipped so one bad date does not abort
    the whole place. Returns the viewstate token left over after the last
    request, so the caller can chain it into the next job.
    """
    place_name, dates = date_place_name_tuple
    # Romanized form of the place name, used for output paths and the query.
    romanized = p.get_pinyin(u"" + place_name, '').strip()
    avg_url, history_url = search.search(place_name, get_search_qry(romanized))
    for date in dates:
        try:
            viewstate = process_job_date(date, history, history_url, romanized, place_name, viewstate)
        except Exception as e:
            logging.error(f'爬取{place_name}-{date}异常{e}')
    return viewstate


def process_job_date(date, history, history_url, pinyin_place_name, place_name, viewstate):
    """Crawl one (place, date) pair and append the result to its CSV.

    Args:
        date: date string to crawl.
        history: History client used to fetch the data.
        history_url: per-place history page URL.
        pinyin_place_name: romanized place name, used to build the output path.
        place_name: original place name (used for logging only).
        viewstate: token carried between consecutive requests.

    Returns:
        The refreshed viewstate token from this request.
    """
    logging.info(f"爬取地点 {place_name.strip()}-{pinyin_place_name}-{date}")
    viewstate, df = process_day_job(date, history, history_url, viewstate)
    if df is not None:
        graph_path = get_file_path(pinyin_place_name)
        excel_path = f'{graph_path}/history.csv'
        # exist_ok=True: the old exists()-then-makedirs() pattern raised
        # FileExistsError when two worker threads raced on creating the
        # same directory tree.
        os.makedirs(graph_path, exist_ok=True)
        # Write the CSV header only when the file is first created.
        # NOTE(review): two threads could still both see the file missing and
        # each write a header; harmless duplication, but a lock would fix it.
        is_write_title = not Path(excel_path).exists()
        if is_write_title:
            logging.debug(f"不存在{excel_path}创建一个，并写入url信息")
        else:
            logging.debug(f"存在{excel_path}追加数据")
        df.to_csv(path_or_buf=excel_path, index=False, header=is_write_title, mode='a')
    return viewstate


# Fetch one day's data for a place via the History client.
def process_day_job(date, history, history_url, viewstate):
    """Look up a single date on ``history_url``.

    Returns a ``(viewstate, df)`` pair: the refreshed viewstate token to
    chain into the next request, and the scraped data frame. The URL that
    ``history.find`` also returns is deliberately discarded.
    """
    _url, next_viewstate, df = history.find(history_url, date, viewstate=viewstate)
    return next_viewstate, df


if __name__ == '__main__':
    # Number of worker threads (also the number of job() tasks submitted).
    thread_count = 256
    # Per-request timeout in seconds, forwarded to the History client.
    timeout = 60
    # Place-name CSV plus the date range to crawl (range semantics are
    # defined by read_csv_and_date_to_queue — confirm whether end is inclusive).
    crawler_history = CrawlerHistory(
        place_file=f"{project_base_dir}/place_file/place.csv",
        begin_date="2021-01-01",
        end_date="2021-02-01",
        thread_max_workers=thread_count,
        timeout=timeout)

    # One worker per pool thread; each drains the shared queue until empty.
    # The pool's non-daemon threads keep the process alive until all finish.
    for i in range(thread_count):
        crawler_history.executor.submit(job, crawler_history.place_queue,
                                        crawler_history.all_job_count, crawler_history.history)
