import copy
import logging

from modules.storage.file_store import FileStore
from modules.storage.mongodb_store import MongodbStore
from modules.storage.mysql_store import MysqlStore
from scrapy_main.config.aws_service import AwsService
from settings.enums import CSV_TYPE
from settings.enums import JSON_TYPE
from settings.enums import TXT_TYPE
from modules.request.request_main import request_main as req
from utils.os_main import get_abs_file_path, get_file_extension

# NOTE(review): basicConfig at import time configures the *root* logger as a side
# effect of importing this module; for a library module the convention is
# `logger = logging.getLogger(__name__)` and letting the application configure
# handlers. Left as-is to preserve current behavior.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


# SCRAPER_MAPPING = {
#     'mongodb': {'method': init_mongodb},
#     'mysql': {'method': init_mysql},
#  }

class ScrapyDataSaver:
    """Persist scraped results to a storage backend or to local files.

    Database/cloud backends (MongoDB, MySQL, AWS) must be set up via
    :meth:`init_saver_engine` before calling the engine-backed methods
    (``get_data``, ``add_data``, ...). The ``save_data_to_*`` file helpers
    work without any engine.
    """

    def init_saver_engine(self, database_config_name, params):
        """Create and store the backend connection on ``self.saver_engine``.

        :param database_config_name: one of ``'mongodb'``, ``'mysql'``,
            ``'aws'``; any other value leaves ``saver_engine`` as ``None``.
        :param params: dict of connection settings forwarded to the backend
            initializer (keys depend on the backend, see init_* methods).
        """
        self.saver_engine = None
        if database_config_name == 'mongodb':
            self.saver_engine = self.init_mongodb(params)
        elif database_config_name == 'mysql':
            self.saver_engine = self.init_mysql(params)
        elif database_config_name == 'aws':
            self.saver_engine = self.init_aws(params)

    # Initialize a MongoDB connection.
    # (comment moved above the decorator: a comment *between* @staticmethod
    # and `def` is legal but easy to misread)
    @staticmethod
    def init_mongodb(params):
        """Build and connect a MongodbStore from ``params``
        (host/port/database/account/password)."""
        mongodb_connect = MongodbStore(host=params['host'], port=params['port'], database=params['database'],
                                       account=params['account'], password=params['password'])
        mongodb_connect.build_connect()
        return mongodb_connect

    # Initialize a MySQL connection.
    @staticmethod
    def init_mysql(params):
        """Build and connect a MysqlStore from ``params``
        (host/port/database/account/password)."""
        mysql_connect = MysqlStore(host=params['host'], port=params['port'], database=params['database'],
                                   account=params['account'], password=params['password'])
        mysql_connect.build_connect()
        return mysql_connect

    # Initialize an AWS S3 service wrapper (no explicit connect step needed).
    @staticmethod
    def init_aws(params):
        """Build an AwsService from ``params`` (region/bucket_name)."""
        aws = AwsService(region=params['region'], bucket_name=params['bucket_name'])
        return aws

    # Query records from the active backend.
    def get_data(self, collection, conditions=None):
        """Return records from ``collection`` matching ``conditions``."""
        return self.saver_engine.search(collection, conditions)

    # Insert a record.
    def add_data(self, collection, data, id=None):
        """Insert ``data`` into ``collection``; ``id`` is an optional key.

        NOTE(review): the parameter name ``id`` shadows the builtin but is
        kept for backward compatibility with keyword callers.
        """
        return self.saver_engine.add(collection, data, id)

    # Upsert: insert when missing, update when present.
    def add_or_update_data(self, table_collection_index, data, condition=None):
        """Insert or update ``data`` in the given table/collection/index."""
        return self.saver_engine.add_or_update(table_collection_index, data, condition)

    # Save results to a CSV file.
    def save_data_to_csv(self, results, source_file_path):
        """Write one dict or a list of dicts to ``source_file_path`` as CSV.

        List-valued fields are flattened to joined strings first
        (see ``turn_content_list_to_str``).
        """
        result_file = FileStore(source_file_path, 'r', 'w')
        save_results = []
        if isinstance(results, dict):
            save_result = self.turn_content_list_to_str(results)
            save_results.append(save_result)
        elif isinstance(results, list):
            for result in results:
                save_result = self.turn_content_list_to_str(result)
                save_results.append(save_result)

        result_file.write(save_results)
        print(f'保存成功，文件路径：{source_file_path}')

    # Save results to a JSON file.
    def save_data_to_json(self, results, source_file_path):
        """Write one dict or a list of dicts to ``source_file_path`` as JSON,
        wrapped in a ``{'results': [...]}`` envelope."""
        result_file = FileStore(source_file_path, 'r', 'w')
        save_results = {
            'results': []
        }
        if isinstance(results, dict):
            save_result = self.turn_content_list_to_str(results)
            save_results['results'].append(save_result)
        elif isinstance(results, list):
            for result in results:
                save_result = self.turn_content_list_to_str(result)
                save_results['results'].append(save_result)

        result_file.write(save_results)
        print(f'保存成功，文件路径：{source_file_path}')

    # Save results to a TXT file (original comment wrongly said "json").
    def save_data_to_txt(self, results, source_file_path):
        """Append one dict or a list of dicts to ``source_file_path`` as text,
        writing a '=' divider line after each record."""
        result_file = FileStore(source_file_path, 'r', 'a')

        if isinstance(results, dict):
            save_result = self.turn_content_list_to_str(results)
            # Append a divider line between records.
            save_result['divide'] = f'{"=" * 60}\n'
            result_file.write(save_result)
        elif isinstance(results, list):
            for result in results:
                save_result = self.turn_content_list_to_str(result)
                # Append a divider line between records.
                save_result['divide'] = f'{"=" * 60}\n'
                result_file.write(save_result)
        print(f'保存成功，文件路径：{source_file_path}')

    # Flatten list-valued fields into joined strings.
    def turn_content_list_to_str(self, result):
        """Return a deep copy of ``result`` where every list-of-dicts value
        gains a companion ``<key>_values`` entry: all dict values from the
        list joined with ', '. Non-dict list items are skipped; the original
        list is kept alongside the new key.
        """
        save_result = copy.deepcopy(result)
        for key, value in result.items():
            if isinstance(value, list):
                # Join every value of every dict in the list into one string.
                detail_content = ', '.join(
                    str(val) for content in value if isinstance(content, dict) for val in content.values())
                save_result[key + '_values'] = detail_content
                # BUG FIX: removed stray debug print(detail_content) that
                # polluted stdout on every save.
        return save_result

    # Dispatch to the right save_data_to_* method by file extension.
    def save_data_to_file(self, results, source_file_path):
        """Save ``results`` to ``source_file_path``, choosing the format from
        the file extension (txt/csv/json); unknown extensions are ignored."""
        absolute_path = get_abs_file_path(source_file_path)
        extension = get_file_extension(source_file_path)
        if extension == TXT_TYPE:
            self.save_data_to_txt(results, absolute_path)
        elif extension == CSV_TYPE:
            self.save_data_to_csv(results, absolute_path)
        elif extension == JSON_TYPE:
            self.save_data_to_json(results, absolute_path)

    # Download streamed media to a local file.
    def save_to_media(self, url, file_path, headers=None):
        """Stream ``url`` via GET and write the response to ``file_path``.

        Errors are logged, not raised (best-effort download).
        """
        try:
            request_data = req.request("get", url, stream=True, verify=True, headers=headers)
            if request_data:
                # Write the downloaded media to disk.
                file = FileStore(file_path)
                file.write(request_data)
                print(f"下载成功：{url}，下载地址：{file_path}")
                # BUG FIX: success was logged at ERROR level; use INFO.
                logging.info(f"下载成功：{url}，下载地址：{file_path}")
        except Exception as e:
            logging.error(f"下载失败：{url}, 错误信息：{e}")

    def save_file_to_server(self, url, file_name, directory=None, headers=None):
        """Stream ``url`` via GET and upload the bytes to the active engine
        (e.g. S3) under ``file_name``/``directory``. Errors are logged."""
        try:
            request_data = req.request("get", url, stream=True, verify=True, headers=headers)
            if request_data:
                # Upload the raw byte stream through the configured engine.
                self.saver_engine.upload_byte_stream(file_name, request_data.content, directory)
        except Exception as e:
            logging.error(f"下载失败：{url}, 错误信息：{e}")

    def check_file_exists(self, file_path):
        """Ask the active engine whether ``file_path`` already exists."""
        return self.saver_engine.check_file_exists(file_path)


if __name__ == '__main__':
    save_name = 'list-10135'
    source_file_path = f'files/{save_name}.csv'
    scrapy_saver = ScrapyDataSaver()
    # BUG FIX: get_data_from_mongodb() does not exist on ScrapyDataSaver
    # (AttributeError at runtime); initialize the mongodb engine and use
    # get_data() instead.
    # TODO(review): fill in real connection parameters before running.
    scrapy_saver.init_saver_engine('mongodb', {
        'host': 'localhost',
        'port': 27017,
        'database': 'scrapy',
        'account': '',
        'password': '',
    })
    results = scrapy_saver.get_data(save_name)
    scrapy_saver.save_data_to_file(results, source_file_path)
