import os, time, logging, threading, json, codecs, sys, requests, queue, copy
from concurrent.futures import ThreadPoolExecutor
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.interval import IntervalTrigger
from psycopg2 import connect, OperationalError
from obs import ObsClient  # 假设使用华为云OBS SDK
from pprint import pprint

from utils import obs_wrapper
from utils import pg
from utils import outter_config
from utils import writer
from utils import if_complaints_exists
from utils import quality_preprocessing
from utils import purchase_year_analyze
from utils import cuzu_preprocessing

from llm import tool


# ==================== 文件处理 ====================
def fetch_data(file):
    """Download one JSONL file from OBS and return its UGC records.

    Each non-empty line of the object is parsed as JSON; only records whose
    ``mx_content_type`` equals ``'UGC'`` are kept. On success the file is
    marked as processed in the database. All errors are logged and an empty
    (or partial) list is returned so one bad file cannot stop the batch.

    :param file: OBS object key of the ``.jsonl`` file to fetch.
    :return: list of parsed UGC record dicts (possibly empty).
    """
    data = []
    try:
        # Download the object from OBS.
        response = obs_wrapper.get_object(
            bucket_name=outter_config.bucket_name,
            object_key=file
        )
        if response.status >= 300:
            logging.error(f"文件下载失败: {file}, 状态码: {response.status}")
            return data

        # Decode the body once and parse it line by line (JSONL format).
        content_str = str(response.body.buffer, "utf-8")
        for raw_line in content_str.split('\n'):
            raw_line = raw_line.strip()
            if not raw_line:
                continue
            try:
                record = json.loads(raw_line)
                # Keep only user-generated content records.
                if record.get('mx_content_type') == 'UGC':
                    data.append(record)
            except Exception as e:
                # A single malformed line is logged and skipped.
                logging.error(f"下载文件异常: {file}, 错误: {e}")

        # Mark the file as processed only after the whole body was handled.
        pg.mark_as_processed(file)
    except Exception as e:
        logging.error(f"下载文件异常: {file}, 错误: {e}")
    # NOTE: the original used `finally: return data`, which silently swallowed
    # every in-flight exception (even SystemExit); a plain return fixes that.
    return data


def _store(e_preprocessed, create_time, url):
    """Persist one processed record: write it locally, then POST it to *url*.

    :param e_preprocessed: fully-built result dict to store.
    :param create_time: post time of the record, used as the write date.
    :param url: endpoint of the local storage service.
    """
    payload = json.dumps(e_preprocessed, ensure_ascii=False)
    writer.write(payload, current_date=create_time)
    # timeout added so a hung service cannot block a worker thread forever;
    # a Timeout propagates to process_file's handler and is logged there.
    res = requests.post(url, data={'msg': payload}, timeout=30)
    if res.status_code == 200:
        logging.info('success data insert')
    else:
        logging.info('failure data insert')


def _relevant_sentence(e, model_info):
    """Return (sentence, vehicle): the sentences of the record's title+text
    that mention the target vehicle, and the vehicle name used for matching.

    The vehicle is taken from ``mx_hint_words`` when present, otherwise from
    the model name resolved from the record's fid.
    """
    mx_hint_words = e['mx_hint_words']
    vehicle = mx_hint_words if len(mx_hint_words) else model_info['ModelName']
    sentence = tool.sentence_split(
        customer_voice=e['mx_title'] + e['mx_text'],
        vehicle=vehicle
    )
    return sentence, vehicle


def _process_quality(e, mx_fid, model_info):
    """Quality-complaint pipeline for one record.

    Checks whether the record contains a complaint, tags it, resolves the
    purchase year, and stores the enriched result. Records without a
    complaint (or without tags) are silently dropped.
    """
    mx_title = e['mx_title']
    mx_text = e['mx_text']
    sentence, _vehicle = _relevant_sentence(e, model_info)

    if not if_complaints_exists(title_zh=mx_title, text_zh=mx_text):
        return
    complaint_tags = quality_preprocessing(title_zh=sentence, text_zh='')
    if not len(complaint_tags):
        return

    create_time = e['mx_posttime']
    e_preprocessed = {
        'FLAG': 'Quality',
        'create_time': create_time,
        'purchase_year': purchase_year_analyze(title_zh=mx_title, text_zh=mx_text),
        'info_category': 'maixun_crawled',
        'URL': outter_config.mxUrlSplit(url=e['mx_url'], if_comment=e['mx_is_comment']),
        'FLAG_function_group_problem': True,
        'OEM_src': model_info['OEM'],
        'ModelName_src': model_info['ModelName'],
        'vehicle_line_src': '',
        'OEM': model_info['OEM'],
        'ModelName': model_info['ModelName'],
        'vehicle_line': '',
        'url_unique': '',
        'title_zh': mx_title,
        'text_zh': mx_text,
        'title_en': tool.translate(text=mx_title),
        'text_en': tool.translate(text=mx_text),
        'websites': outter_config.mx_domain2websites(mx_domain=e['mx_domain']),
        'maixun_author_location': e['mx_author_location'],
        'maixun_domain': e['mx_domain'],
        'maixun_fid': mx_fid,
        'complaint_tags': complaint_tags,
    }
    _store(e_preprocessed, create_time,
           'http://127.0.0.1:8000/complaint/idmi_complaints')


def _process_cuzu(e, mx_fid, model_info):
    """CuZu pipeline for one record: tag the vehicle-relevant sentences and
    store the enriched result. Records without tags are silently dropped."""
    sentence, vehicle = _relevant_sentence(e, model_info)
    cuzu_tags = cuzu_preprocessing(title_zh=sentence, text_zh='', vehicle=vehicle)
    if not len(cuzu_tags):
        return

    create_time = e['mx_posttime']
    e_preprocessed = {
        'FLAG': 'CuZu',
        'maixun_fid': mx_fid,
        'create_time': create_time,
        'info_category': 'maixun_crawled',
        'URL': outter_config.mxUrlSplit(url=e['mx_url'], if_comment=e['mx_is_comment']),
        'forum_type_res': e['mx_forum_types_res'],
        'content_type': e['mx_content_type'],
        'hint_words': e['mx_hint_words'],
        'is_comment': e['mx_is_comment'],
        'target_vehicle_sentence': sentence,
        'target_vehicle_sentence_en': tool.translate(sentence),
        'maixun_title': e['mx_title'],
        'maixun_text': e['mx_text'],
        'website': outter_config.mx_domain2websites(mx_domain=e['mx_domain']),
        'maixun_author_location': e['mx_author_location'],
        'maixun_domain': e['mx_domain'],
        'maixun_cuzu_res': cuzu_tags
    }
    _store(e_preprocessed, create_time,
           'http://127.0.0.1:8000/maixun_cuzu/maixun_cuzu_storage')


def process_file(e):
    """Process a single crawled record dict.

    Dispatches on ``e['mx_res_type']``: ``'quality'`` runs the complaint
    pipeline, ``'CuZu'`` runs the CuZu pipeline; anything else is ignored.
    All exceptions (missing keys, LLM/tool failures, HTTP errors) are caught
    and logged so one bad record cannot stop the batch.

    :param e: record dict produced by :func:`fetch_data`.
    """
    try:
        mx_fid = outter_config.str2int(e['mx_fid'])
        # Resolve vehicle metadata (OEM, ModelName, ...) from the fid.
        model_info = outter_config.fid2ModelName(mx_fid)
        mx_res_type = e['mx_res_type']

        if mx_res_type == 'quality':
            _process_quality(e, mx_fid, model_info)
        elif mx_res_type == 'CuZu':
            _process_cuzu(e, mx_fid, model_info)
    except Exception as exc:
        # Renamed from `e`, which shadowed the record parameter.
        logging.error(f'文件处理异常 - {exc}')





# ==================== 任务调度 ====================
def acquire_lock(file):
    """Atomically create the lock file *file*.

    Uses ``os.open`` with ``O_CREAT | O_EXCL`` so the existence check and the
    creation are a single atomic step. The original check-then-create version
    was racy, and also granted the lock when an *unknown* existing path was
    passed; now any already-existing lock file means the lock is held.

    :param file: path of the lock file.
    :return: True when the lock was acquired, False otherwise.
    """
    try:
        # O_EXCL: fail with FileExistsError if the file already exists.
        fd = os.open(file, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        os.close(fd)
        return True
    except FileExistsError:
        if file == outter_config.fetch_lock_file:
            logging.warning("数据下载正在运行，跳过本次执行")
        elif file == outter_config.process_lock_file:
            logging.warning("数据处理任务正在运行，跳过本次执行")
        return False
    except Exception as e:
        if file == outter_config.fetch_lock_file:
            logging.error(f"数据下载任务lock获取锁失败--{e}")
        elif file == outter_config.process_lock_file:
            logging.error(f"数据处理任务lock获取锁失败--{e}")
        return False


def release_lock(file):
    """Release the task lock by removing its lock file.

    Removing directly and ignoring ``FileNotFoundError`` avoids the
    exists-then-remove race of the original version; releasing a lock that
    was never acquired is a no-op.

    :param file: path of the lock file.
    """
    try:
        os.remove(file)
    except FileNotFoundError:
        # Already released (or never acquired) — nothing to do.
        pass


def scan_new_files(fid_list, days):
    """Scan OBS for new files for every (fid, day) pair and register them.

    Guarded by the fetch lock so overlapping scheduler runs are skipped.
    Each discovered file is inserted (if new) into the files table together
    with the OEM / model name resolved from its fid.

    :param fid_list: forum ids to scan.
    :param days: day strings to scan for each fid.
    """
    if not acquire_lock(outter_config.fetch_lock_file):
        return
    try:
        logging.info("开始扫描新文件")
        for fid in fid_list:
            for day in days:
                for new_file in get_obs_files(fid, day):
                    # Read the metadata carried on the entry itself instead of
                    # overwriting `fid`/`day` — the original clobbered the loop
                    # variables here, corrupting later (fid, day) iterations.
                    file_fid = new_file['fid']
                    pg.check_and_insert_file(
                        filename=new_file['filename'],
                        fid=file_fid,
                        day=new_file['day'],
                        oem=outter_config.FID2INFO[file_fid]['OEM'],
                        modelname=outter_config.FID2INFO[file_fid]['ModelName'],
                    )
    except Exception as e:
        logging.error(f"扫描新文件失败: {e}")
    finally:
        release_lock(outter_config.fetch_lock_file)


def get_obs_files(fid, day):
    """List the ``.new.jsonl`` object keys under ``<prefix>/<fid>/<day>``.

    :param fid: forum id whose folder is scanned.
    :param day: day string naming the sub-folder.
    :return: list of dicts with ``filename``, ``fid`` and ``day`` keys.
    """
    prefix = os.path.join(outter_config.prefix_path, str(fid), day)
    # OBS keys always use forward slashes; normalize Windows separators.
    if sys.platform.startswith('win'):
        prefix = prefix.replace('\\', '/')
    keys = obs_wrapper.list_objects(
        bucket_name=outter_config.bucket_name,
        prefix=prefix,
        tail_filename='.new.jsonl'
    )
    return [{'filename': key, 'fid': fid, 'day': day} for key in keys]


def process_pending_files():
    """Download every unprocessed file and process all records concurrently.

    Guarded by the process lock so overlapping scheduler runs are skipped.
    Records from all files are gathered first, then fanned out to a thread
    pool; per-record failures are logged without aborting the batch.
    """
    if not acquire_lock(outter_config.process_lock_file):
        return

    try:
        logging.info("开始处理待处理文件")
        files_to_process = pg.get_unprocessed_files()

        fetch_start = time.time()
        data_list = []
        for file in files_to_process:
            data_list += fetch_data(file)
        logging.info(f'下载数据量 : {len(files_to_process)}, 耗时 : {time.time() - fetch_start}s')

        preprocess_start = time.time()
        with ThreadPoolExecutor(max_workers=outter_config.num_threads) as executor:
            # Submit process_file directly; the original's identity lambda
            # wrapper added nothing.
            futures = [executor.submit(process_file, element) for element in data_list]
            # Wait for all tasks, logging per-record failures individually.
            for future in futures:
                try:
                    future.result()
                except Exception as e:
                    logging.error(f"线程处理异常: {e}")
        logging.info(f'处理数据量 : {len(data_list)}, 耗时 : {time.time() - preprocess_start}s')
    except Exception as e:
        logging.error(f"处理文件任务失败: {e}")
    finally:
        release_lock(outter_config.process_lock_file)


# ==================== 主程序 ====================
def main():
    """Wire up the pipeline: build the scan targets, ensure the files table
    exists, and run the two scheduled jobs until interrupted."""
    # Targets to scan: every configured fid over April–June 2025.
    fid_list = outter_config.fid_list()
    days = []
    for month in (4, 5, 6):
        days += outter_config.generate_month_dates(2025, month)

    # Make sure the bookkeeping table exists before any job runs.
    pg.create_files_table()

    logging.info(f"fid_list: {fid_list}")
    logging.info(f"days: {days}")

    scheduler = BlockingScheduler()

    # Discover new OBS files every 4 hours.
    scheduler.add_job(
        scan_new_files,
        trigger=IntervalTrigger(hours=4),
        id='scan_new_files_job',
        name='扫描新文件任务',
        replace_existing=True,
        kwargs={'fid_list': fid_list, 'days': days}
    )

    # Drain the pending-files queue every 2 minutes.
    scheduler.add_job(
        process_pending_files,
        trigger=IntervalTrigger(minutes=2),
        id='process_files_job',
        name='处理文件任务',
        replace_existing=True
    )

    logging.info("调度器启动")
    try:
        scheduler.start()  # blocks until interrupted
    except (KeyboardInterrupt, SystemExit):
        logging.info("调度器已关闭")
    finally:
        # Always drop both locks so the next start is not spuriously skipped.
        release_lock(outter_config.fetch_lock_file)
        release_lock(outter_config.process_lock_file)


# Script entry point: start the scheduler loop.
if __name__ == "__main__":
    main()
