import logging
import os
import queue
import time
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

import pandas as pd
from openpyxl import Workbook
from xpinyin import Pinyin

# Logging initialisation helpers
from common import logger_config
from common.common_config import project_base_dir
from common.get_proxy import get_proxys
from common.place_reader import read_csv_to_queue
from graph.Graph import Graph
from search.search import Search

# Initialise logging once at import time: file handler plus INFO console handler.
logger_config.init_file_log()
logger_config.init_console_log(level=logging.INFO)

# Root directory under which all crawl results are written.
output_path = f'{project_base_dir}/output'
# Shared pinyin converter and search client, used by every worker thread.
p = Pinyin()
search = Search()


class CrawLerGraph:
    """Shared crawl state: the place-name queue, the worker thread pool and
    the Graph crawler instance handed to every worker."""

    def __init__(self, place_file_path, thread_max_workers, timeout) -> None:
        # Load the place-name CSV into a queue consumed by the workers.
        self.place_queue = read_csv_to_queue(place_file_path)
        # Total number of jobs, captured up front for progress reporting.
        self.all_job_count = self.place_queue.qsize()
        # Thread pool that runs the crawl workers.
        self.executor = ThreadPoolExecutor(max_workers=thread_max_workers, thread_name_prefix='crawler')
        # Crawler; proxies are refreshed via get_proxys on each request.
        self.g = Graph(get_proxy_func=get_proxys, timeout=timeout)


# 定义线程工作者任务
# Worker task: drain the place queue, crawling one place per iteration.
def job(place_queue, all_job_count, g):
    """Worker loop run by each pool thread.

    Pulls place names from ``place_queue`` until it is empty and hands each
    one to :func:`process_job`. Exceptions from a single place are logged and
    the loop continues with the next place.

    :param place_queue: queue.Queue of place-name strings, shared by workers
    :param all_job_count: total number of jobs, for progress logging
    :param g: Graph crawler instance passed through to process_job
    """
    while True:
        try:
            # get_nowait avoids the qsize()>0 / get() race: with several
            # workers, a blocking get() after a stale qsize() check could
            # hang forever on an emptied queue.
            place_name = place_queue.get_nowait()
        except queue.Empty:
            logging.info(f"任务已经被取完")
            break
        try:
            process_job(place_name, g)
        except Exception as e:
            # place_name is always bound here, so the error log is safe.
            logging.error(f'{place_name}抓取失败-：{e}')
        finally:
            logging.info(f"{all_job_count - place_queue.qsize()}/{all_job_count} 已完成")


# 根据place_name得到路径
# Map a comma-separated place name to its output directory path.
def get_file_path(place_name):
    """Return the output directory for *place_name*.

    The comma-separated parts are appended under ``output_path`` in reverse
    order; blank parts are skipped. NOTE(review): non-blank parts keep their
    original surrounding whitespace, matching the directories already created.
    """
    parts = place_name.split(',')
    segments = [part for part in reversed(parts) if part.strip()]
    return '/'.join([output_path] + segments)


# 根据place_name得到查询关键字
# Build the search keyword from a comma-separated place name.
def get_search_qry(place_name):
    """Return ``"<first non-blank part>,<fourth part>"`` of *place_name*.

    NOTE(review): this assumes the name always has at least four
    comma-separated parts; shorter input raises IndexError, which the
    worker loop logs and skips — confirm against the place CSV format.
    """
    parts = place_name.split(',')
    city = next((part for part in parts if part.strip()), '')
    return f'{city},{parts[3]}'


# 真正的爬取任务
# The actual crawl task for a single place.
def process_job(place_name, g):
    """Crawl one place and write its graph data to ``<dir>/graph.xlsx``.

    Resolves the place's graph URL via the shared search client, crawls the
    dataframes with *g*, and writes a 'url' sheet plus one sheet per crawled
    dataframe. Nothing is written when the crawl returns no dataframes.

    :param place_name: comma-separated place name from the queue
    :param g: Graph crawler; ``g.find(url)`` returns (url, [(title, df), ...])
    """
    logging.info(f"crawler place {place_name}")
    pinyin_place_name = p.get_pinyin(u"" + place_name, '').strip()
    # history_url is currently unused; only the averages page is crawled.
    avg_url, history_url = search.search(place_name, get_search_qry(pinyin_place_name))
    # Run the crawl.
    url, df_tuple_list = g.find(avg_url)
    graph_path = f'{get_file_path(pinyin_place_name)}'
    excel_path = f'{graph_path}/graph.xlsx'
    if len(df_tuple_list) > 0:
        # exist_ok avoids the exists()/makedirs() race when several worker
        # threads target the same directory at once.
        os.makedirs(graph_path, exist_ok=True)
        # The context manager saves and closes the writer even if a sheet
        # write raises (writer.save() was removed in modern pandas). Default
        # mode 'w' creates/overwrites the file, so no empty Workbook is
        # pre-created.
        with pd.ExcelWriter(excel_path, engine='openpyxl') as writer:
            # Write the crawled page URL to its own sheet.
            df = pd.DataFrame(columns=["url"], data=[url], index=[0])
            df.to_excel(writer, sheet_name='url', index=False)
            for title, sheet_df in df_tuple_list:
                # Sanitise the sheet name and respect Excel's length limit.
                title = title.replace(" ", '').replace(',', '')[0:30]
                sheet_df.to_excel(writer, sheet_name=title)


if __name__ == '__main__':
    # Number of worker threads to use.
    thread_count = 8
    # Per-request timeout in seconds.
    timeout = 20
    # Build the shared crawl state from the place-name file.
    ler_graph = CrawLerGraph(place_file_path=f"{project_base_dir}/place_file/place.csv",
                             thread_max_workers=thread_count, timeout=timeout)
    futures = [ler_graph.executor.submit(job, ler_graph.place_queue, ler_graph.all_job_count, ler_graph.g)
               for _ in range(thread_count)]
    # Wait for all workers and surface any unexpected exception instead of
    # dropping it silently, then release the pool's threads.
    for future in futures:
        future.result()
    ler_graph.executor.shutdown(wait=True)
