import logging
import multiprocessing
import time

from modules.html_parse.parse_main import ParseMain
from modules.request.request_main import request_main as req
from modules.storage.file_store import FileStore
from scrapy_main.base.base_scraper import BaseScraper
from scrapy_main.opera.scrapy_data_saver import ScrapyDataSaver
from scrapy_main.opera.scrapy_result import ScrapyResult
from utils.common import get_timestamp
from utils.format import format_url_by_template, convert_dict_keys
from utils.os_main import get_abs_file_path, create_directory

# Module-level side effect: configure the root logger once at import time.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


# Scrapes blog-style pages that do not require a login.
class BlogMiddle(BaseScraper):
    """Scraper for blog-type pages that need no authentication.

    Configuration may come from a JSON file, a params dict, or both; params
    are applied second, so they override values loaded from JSON. Subclasses
    are expected to implement ``scrape_page_detail`` and the HTML extraction
    hooks (``get_pagination``, ``get_page_list``, ``get_detail_page_content``).
    """

    def __init__(self, json_file_path=None, params=None):
        """Set up default state, load configuration, then build helpers.

        Args:
            json_file_path: Optional path to a JSON configuration file.
            params: Optional dict of configuration overrides.
        """
        # Configuration and runtime state; most of these are filled in by
        # init_by_json / init_by_params via setattr.
        self.script_name = None
        self.counter = None
        self.resulter = None
        self.database_config_param = None
        self.database_config_name = None
        self.search_keys = None
        self.paging_url = None
        self.search_url = None
        self.search_key = None
        self.headers = None
        self.proxy = None
        self.saver = None
        self.detail_href = None
        self.detail_url = None
        self.detail_paging_url = None
        self.target_dir = None
        self.save_path = None
        self.enable_new_only = None
        self.enable_save_database = None
        self.enable_multi_process = None
        self.current_page = 1
        self.page_count = 1
        self.page_index = 1

        if json_file_path:
            self.init_by_json(json_file_path)
        if params:
            self.init_by_params(params)

        self.init_saver()
        self.init_resulter()

    # Initialize instance attributes from a JSON configuration file.
    def init_by_json(self, file_path):
        """Read a JSON config file and copy each key/value onto the instance."""
        try:
            absolute_file_path = get_abs_file_path(file_path)
            file_store = FileStore(absolute_file_path)
            result = file_store.read()

            for key, value in result.items():
                # Mirror every config entry as an instance attribute.
                setattr(self, key, value)

            logging.info(f"Initialization from JSON file {file_path} successful.")
        except Exception as e:
            # Best-effort: log and continue with whatever defaults remain.
            logging.error(f"Failed to initialize object from JSON file {file_path}: {str(e)}")

    # Initialize instance attributes from a params dict.
    def init_by_params(self, params):
        """Copy each key/value from ``params`` onto the instance."""
        try:
            for key, value in params.items():
                setattr(self, key, value)

            logging.info("Initialization from params successful.")
        except Exception as e:
            logging.error(f"Failed to initialize object from params: {str(e)}")

    def init_saver(self):
        """Create the data saver; optionally wire up the database engine."""
        try:
            self.saver = ScrapyDataSaver()
            logging.info("Initialize saver successful.")
            # Only connect to the remote database when explicitly enabled.
            if self.enable_save_database:
                self.database_config_param = convert_dict_keys(self.database_config_param)

                self.saver.init_saver_engine(self.database_config_name, self.database_config_param)

        except Exception as e:
            logging.error(f"Failed to initialize saver: {str(e)}")

    def init_resulter(self):
        """Create the result collector, stamped with the current start time."""
        try:
            start_time = get_timestamp()
            self.resulter = ScrapyResult(self.script_name, start_time)
            logging.info("Initialize resulter successful.")

        except Exception as e:
            logging.error(f"Failed to initialize resulter: {str(e)}")

    # Scrape a listing page.
    def scrape_page_list(self, url):
        """Fetch one listing page, record its pagination, and return its items.

        Returns:
            Whatever ``get_page_list`` extracts from the parsed HTML.

        Raises:
            Exception: when the request fails or returns a non-200 status.
        """
        logging.info(f"请求链接：{url}")
        request_data = req.request('get', url, proxy=self.proxy, headers=self.headers, verify=True)
        if request_data and request_data.status_code == 200:
            parse_html = ParseMain(request_data.text)
            self.get_pagination(parse_html)

            return self.get_page_list(parse_html)

        # BUG FIX: the original read `request_data.code`, an attribute the
        # response object does not expose (the success branch above uses
        # `status_code`), and it also crashed with AttributeError when
        # request_data was None (which the `if` above explicitly allows).
        status_code = getattr(request_data, 'status_code', None)
        raise Exception("请求异常：", f"{url}, Error Code: {status_code}")

    # Scrape a detail page; subclasses must override.
    def scrape_page_detail(self, page, counter=None, search_key=None):
        raise NotImplementedError

    # Scrape a full result set: the search page plus every paginated page.
    def scrape_whole_page(self, search_key, counter=None):
        """Scrape the first page and all follow-up pages for ``search_key``."""
        self.search_key = search_key
        # URL templates are rendered against the instance's attribute dict.
        search_url = format_url_by_template(self.search_url, self.__dict__)
        self._scrape_page_list_and_details(search_url, search_key, counter)

        # page_count is updated as a side effect of get_pagination() inside the
        # first scrape_page_list call above.
        for i in range(2, self.page_count + 1):
            self.page_index = i
            page_url = format_url_by_template(self.paging_url, self.__dict__)
            self._scrape_page_list_and_details(page_url, search_key, counter)

    def _scrape_page_list_and_details(self, page_url, search_key, counter):
        """Scrape one listing page, then the detail page of each listed item."""
        page_list = self.scrape_page_list(page_url)
        if page_list:
            for page in page_list:
                self.scrape_page_detail(page, counter, search_key)

    # Entry point: scrape every configured search key.
    def start_scraper(self):
        """Run the scrape for all search keys and return a result summary.

        In multi-process mode each search key gets its own process and the
        shared counter accumulates the number of scraped items.
        """
        processes = []
        # Shared integer counter, safe to increment across processes.
        counter = multiprocessing.Value('i', 0)

        for search_key in self.search_keys:
            if not self.enable_multi_process:
                self.scrape_whole_page(search_key.get('value'))
            else:
                args = (search_key.get('value'), counter,)
                process = multiprocessing.Process(target=self.scrape_whole_page, args=args)
                processes.append(process)
                process.start()

        # Wait for all worker processes to finish.
        for process in processes:
            process.join()

        end_time = get_timestamp()
        self.resulter.set_consume_time_str(end_time)

        # NOTE(review): in single-process mode `counter` is never passed to
        # scrape_whole_page, so this copies 0 — confirm the intended source of
        # the count for that mode before relying on it.
        if not self.enable_save_database or self.enable_multi_process:
            self.resulter.count = counter.value

        result_info = self.resulter.get_result_info()
        return {
            'msg': '爬取完成',
            'code': 200,
            'data': result_info
        }

    # Scrape the content of one (possibly paginated) detail page.
    def scrapy_detail_page_content(self, url):
        """Fetch a detail page and return its extracted content ('' on failure)."""
        time.sleep(2)  # throttle detail requests
        request_detail = req.request('get', url, proxy=self.proxy, headers=self.headers, verify=True)
        # Consistency fix: use logging instead of print, matching the identical
        # message in scrape_page_list and the module's logging configuration.
        logging.info(f"请求链接：{url}")
        if request_detail:
            parse_html = ParseMain(request_detail.text)
            return self.get_detail_page_content(parse_html)
        else:
            return ''


