import codecs
import copy
import datetime
import json
import logging
import os
import random
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
from pprint import pprint

import emoji
import requests

from obs import ObsClient

from django.conf import settings
from django.views import View
from django.http import JsonResponse

from .utils import tool
from .utils import if_complaints_exists
from .utils import quality_analyze
from .utils import purchase_year_analyze
from .utils import translator
from .utils import quality_preprocessing
from .utils import cuzu_preprocessing
from .utils import makedirs
from .utils import makelogger
from .utils import maixun_info
from .utils import get_yesterday_list
from .utils import generate_month_dates
from .utils import writer


from complaint.views import StoreIdmiComplaint
from maixun_cuzu.views import MaiXunCuZuStorage


with codecs.open(
        filename='idmi_backend/data_configs/maixun_fid_20250717.json',
        mode='r', encoding='utf-8'
) as fr:
    # fid -> model-info mapping for the Maixun data source.
    # JSON object keys arrive as strings, so cast them to int up front.
    FID2INFO = {int(fid): info for fid, info in json.load(fp=fr).items()}


# Worker count for the preprocessing thread pool.  os.getenv returns a *string*
# whenever the variable is set in the environment, which would make
# ThreadPoolExecutor(max_workers=...) raise TypeError — coerce to int.
NUM_THREADS = int(os.getenv('NUM_THREADS', 10))
def fid2ModelName(fid):
    """Return the model-info dict registered for *fid*, or None if unknown."""
    return FID2INFO.get(fid)

def mx_domain2websites(mx_domain):
    """Map a Maixun domain to its configured website name.

    Falls back to the raw domain when it has no entry in
    settings.MAIXUN_DOMAIN_CONFIG.
    """
    return settings.MAIXUN_DOMAIN_CONFIG.get(mx_domain, mx_domain)





def clean_sentence(sentence):
    if len(sentence):
        sentence = copy.deepcopy(sentence)
        sentence = sentence. \
            replace('\r', ' '). \
            replace('\n', ' '). \
            replace('\t', ' '). \
            replace('"', ' '). \
            replace("'", ' '). \
            replace('“', ' '). \
            replace('”', ' '). \
            replace('{', ' '). \
            replace('}', ' '). \
            replace(']', ''). \
            replace('[', '')
        return sentence
    else:
        return ''


def _base_record(line, fid, res_type, forum_types_res, lable_result):
    """Build the mx_* field dict shared by the quality and CuZu record types.

    Extracted because the two branches of data_clean() previously duplicated
    these ~30 assignments verbatim.  Calls the module-level clean_sentence().
    """
    title = clean_sentence(line.get('title', ''))
    text = clean_sentence(line.get('text', ''))
    if text == title:
        # Drop the body when it merely repeats the title.
        text = ''
    return {
        'mx_forum_types_res': forum_types_res,
        'mx_fid': fid,
        'mx_res_type': res_type,
        'mx_id': line.get('id', ''),
        'mx_offset': line.get('offset', ''),
        'mx_BodyType': line.get('BodyType', ''),
        'mx_Brand': line.get('Brand', ''),
        'mx_BrandIndicator': line.get('BrandIndicator', ''),
        'mx_Fuel_Type': line.get('Fuel Type', ''),
        'mx_Group': line.get('Group', ''),
        'mx_ModelName': line.get('ModelName', ''),
        'mx_OEM': line.get('OEM', ''),
        'mx_ProdType': line.get('ProdType', ''),
        'mx_Segment': line.get('Segment', ''),
        'mx_target_vehicle_sentence': lable_result.get('target_vehicle_sentence', ''),
        'mx_title': title,
        'mx_text': text,
        'mx_author_location': line.get('author_location', ''),
        'mx_content_type': line.get('content_type', 'UGC'),
        'mx_domain': line.get('domain'),
        'mx_hint_words': ''.join(line.get('hint_words', [])),
        'mx_is_comment': line.get('is_comment', 0),
        # Fix: line.get('posttime')[:10] raised TypeError when posttime was
        # absent/None; guard with '' so the record degrades gracefully.
        'mx_posttime': (line.get('posttime') or '')[:10],
        'mx_url': line.get('url', ''),
        'cuzu_res': [],
    }


def data_clean(line, fid):
    """Classify one raw Maixun line as a quality or CuZu (rent/lease) record.

    Args:
        line: raw record dict parsed from a Maixun jsonl file.
        fid: Maixun source fid the record came from.

    Returns:
        A normalized dict with mx_res_type 'quality' or 'CuZu' (see
        _base_record), or None when the line matches neither category.
    """
    lable_failture = line.get('lable_failture', {})
    lable_result = line.get('lable_result', {})
    forum_types_res = lable_result.get('forum_types_res', [])
    lable_failture_forum_types_res = lable_failture.get('forum_types_res', [])

    # Quality takes precedence: anything tagged "车辆质量与可靠性" (vehicle
    # quality & reliability) in either label source becomes a quality record.
    if ('车辆质量与可靠性' in forum_types_res) or ('车辆质量与可靠性' in lable_failture_forum_types_res):
        return _base_record(line, fid, 'quality', forum_types_res, lable_result)

    # Otherwise keep rent/lease ("CuZu") hits, unless the post is tagged as
    # marketing content ("车辆营销内容") in either label source.
    cuzu_res = lable_result.get('cuzu_res', [])
    if isinstance(cuzu_res, list) and len(cuzu_res) \
            and ('车辆营销内容' not in forum_types_res) \
            and ('车辆营销内容' not in lable_failture_forum_types_res):
        res = _base_record(line, fid, 'CuZu', forum_types_res, lable_result)
        res['cuzu_res'] = cuzu_res  # analyzed further downstream; stored as-is for now
        return res
    return None


def download_obs_data(fid, day):
    '''
    Download one (fid, day) partition of Maixun data from OBS and append the
    cleaned UGC records to <FOLDER>/<day>/<fid>.jsonl.

    Only '.jsonl' objects under the prefix are read.  Each line is JSON-parsed,
    filtered to content_type == 'UGC', passed through data_clean(), and the
    surviving records are written out one JSON object per line.
    '''
    obs_client = ObsClient(
        access_key_id=maixun_info.ak,
        secret_access_key=maixun_info.sk,
        server=maixun_info.endpoint,
    )
    # Append mode: repeated runs for the same (fid, day) accumulate records.
    fa_write = codecs.open(
        filename=os.path.join(maixun_info.FOLDER, day, str(fid) + ".jsonl"),
        mode='a', encoding='utf-8'
    )
    prefix = os.path.join(maixun_info.prefix_path, str(fid), day)
    data_amount = 0
    try:
        # List every object matching the prefix, page by page (1000 keys/page).
        marker = None
        while True:
            try:
                resp = obs_client.listObjects(
                    maixun_info.bucket_name,
                    prefix=prefix,
                    marker=marker,
                    max_keys=1000
                )

                if resp.status < 300:

                    for content in resp.body.contents:
                        object_key = content.key
                        if object_key[-6:] != ".jsonl":
                            continue

                        # Fetch the object body into memory.
                        resp_get = obs_client.getObject(
                            maixun_info.bucket_name,
                            object_key,
                            loadStreamInMemory=True
                        )

                        if resp_get.status < 300:
                            # Decode and process the content line by line.
                            content_str = str(resp_get.body.buffer, "utf-8")
                            for line in content_str.split('\n'):
                                line = line.strip()
                                if not line:
                                    continue
                                try:
                                    line = json.loads(line)
                                    content_type = line.get('content_type')
                                    if content_type == 'UGC':
                                        line = data_clean(
                                            line=line,
                                            fid=fid
                                        )
                                        if isinstance(line, dict):
                                            fa_write.write(json.dumps(line, ensure_ascii=False))
                                            fa_write.write('\r\n')
                                            fa_write.flush()
                                            data_amount += 1
                                        else:
                                            # data_clean filtered this record out.
                                            ...
                                    else:
                                        # PGC data already judged by Maixun is skipped outright.
                                        ...
                                except json.JSONDecodeError:
                                    continue
                        else:
                            print(f'获取对象失败：{resp_get.errorCode}')

                    if not resp.body.is_truncated:
                        break
                    marker = resp.body.next_marker
                else:
                    print(f'列出对象失败：{resp.errorCode}')
                    break
            except Exception as e:
                # Log the full traceback, then re-raise so the caller sees it.
                err_msg = traceback.format_exc()
                print(f'处理失败：{err_msg}')
                raise

    finally:
        obs_client.close()
        fa_write.close()
        logging.info(f'DOWNLOAD --- totally fetch data_amount = {data_amount} pcs')


def fetch_maixun_data(fid_list, days):
    """Download Maixun OBS data for every (fid, day) combination.

    Args:
        fid_list: Maixun source fids to fetch.
        days: iterable of 'YYYY-MM-DD' day strings.
    """
    for day in days:
        # Create the per-day output folder once per day (not once per fid);
        # exist_ok=True replaces the racy exists()/makedirs() pair.
        os.makedirs(os.path.join(maixun_info.FOLDER, day), exist_ok=True)
        for fid in fid_list:
            logging.info('----------------------------')
            logging.info('DOWNLOAD {fid}  {day} '.format(
                fid=FID2INFO[fid], day=day
            ))
            download_obs_data(
                fid=fid,
                day=day,
            )
            logging.info('----------------------------')


def mxUrlSplit(url : str, if_comment : int) -> str :
    """Strip the '?mx_comment_id' suffix from a comment URL.

    Non-comment URLs, and comment URLs without the marker, pass through
    unchanged.
    """
    marker = '?mx_comment_id'
    if if_comment and marker in url:
        base, _sep, _rest = url.partition(marker)
        return base
    return url

def s_encode(s):
    """Best-effort text normalisation: force valid UTF-8 and spell out emoji.

    Emoji are converted to their ':name:' form via emoji.demojize.  Any
    failure (e.g. a non-string value) deliberately yields '' rather than
    raising.
    """
    try:
        normalised = s.encode('utf-8', 'replace').decode('utf-8')
        return emoji.demojize(normalised)
    except Exception:
        # Intentional best-effort fallback: never let bad input propagate.
        return ''



def preprocess(e):
    '''
    Route one cleaned Maixun record through the matching analysis pipeline.

    *e* is a dict produced by data_clean().  Depending on e['mx_res_type']:
      * 'quality' -> complaint/quality analysis; returns a dict with
        FLAG='Quality' shaped for the complaint storage view.
      * 'CuZu'    -> rent/lease analysis; returns a dict with FLAG='CuZu'
        shaped for the CuZu storage view.
    Returns None whenever the record is filtered out at any analysis stage.
    '''
    mx_fid = e['mx_fid']
    # Look up the static model info registered for this fid.
    # NOTE(review): fid2ModelName returns None for unknown fids, which would
    # make the `_[...]` subscripts below raise TypeError — confirm every
    # incoming fid exists in FID2INFO.
    _ = fid2ModelName(mx_fid)
    mx_res_type = e['mx_res_type']
    # NOTE(review): several of the locals unpacked below (mx_id, mx_offset,
    # mx_BodyType, ...) are never used afterwards.
    mx_id = e['mx_id']
    mx_offset = e['mx_offset']
    mx_BodyType = _['BodyType']
    mx_Brand = _['Brand']
    mx_BrandIndicator = _['BrandIndicator']
    mx_Fuel_Type = _['FuelType']
    mx_Group = _['Group']
    mx_ModelName = _['ModelName']
    mx_OEM = _['OEM']
    mx_ProdType = e['mx_ProdType']
    mx_Segment = _['Segment']

    # Normalize free-text fields (force valid UTF-8, demojize emoji).
    mx_target_vehicle_sentence = s_encode(e['mx_target_vehicle_sentence'])
    mx_title = s_encode(e['mx_title'])
    mx_text = s_encode(e['mx_text'])


    mx_author_location = s_encode(e['mx_author_location'])
    mx_content_type = e['mx_content_type']
    mx_domain = e['mx_domain']
    mx_hint_words = e['mx_hint_words']
    mx_is_comment = e['mx_is_comment']
    mx_posttime = e['mx_posttime']
    mx_url = e['mx_url']
    cuzu_res = e['cuzu_res']
    mx_forum_types_res = e['mx_forum_types_res']

    if mx_res_type == 'quality':
        customer_voice_uni = mx_title + mx_text
        # Prefer the crawler's hint words as the vehicle identifier; fall
        # back to the configured model name.
        if len(mx_hint_words) :
            vehicle = mx_hint_words
        else:
            vehicle = mx_ModelName

        # First extract the sentences of the original text that mention the
        # target vehicle.
        sentence = tool.sentence_split(
            customer_voice=customer_voice_uni,
            vehicle=vehicle
        )
        if_complaints_exists_res = if_complaints_exists(
            title_zh=mx_title,
            text_zh=mx_text
        )
        if if_complaints_exists_res :
            complaint_analysis_res = quality_preprocessing(
                title_zh=sentence,
                text_zh=''
            )

            if len(complaint_analysis_res):
                # Complaint tags were found — assemble the storage payload.
                purchase_year_res = purchase_year_analyze(
                    title_zh=mx_title, text_zh=mx_text
                )
                create_time = mx_posttime
                purchase_year = purchase_year_res
                info_category = 'maixun_crawled'
                URL = mxUrlSplit(url=mx_url, if_comment=mx_is_comment)
                FLAG_function_group_problem = True
                OEM_src = mx_OEM
                ModelName_src = mx_ModelName
                vehicle_line_src = ''
                OEM = mx_OEM
                ModelName = mx_ModelName
                vehicle_line = ''
                url_unique = ''
                title_zh = mx_title
                text_zh = mx_text
                translate_res = translator.translate_util_zh2en(
                    title=title_zh,
                    text=text_zh
                )
                title_en = translate_res['title_en']
                text_en = translate_res['text_en']
                websites = mx_domain2websites(mx_domain=mx_domain)
                maixun_author_location = mx_author_location
                maixun_domain = mx_domain
                maixun_fid = mx_fid

                complaint_tags = complaint_analysis_res
                return {
                    'FLAG' : 'Quality',
                    'create_time': create_time,
                    'purchase_year': purchase_year,
                    'info_category': info_category,
                    'URL': URL,
                    'FLAG_function_group_problem': FLAG_function_group_problem,
                    'OEM_src': OEM_src,
                    'ModelName_src': ModelName_src,
                    'vehicle_line_src': vehicle_line_src,
                    'OEM': OEM,
                    'ModelName': ModelName,
                    'vehicle_line': vehicle_line,
                    'url_unique': url_unique,
                    'title_zh': title_zh,
                    'text_zh': text_zh,
                    'title_en': title_en,
                    'text_en': text_en,
                    'websites': websites,
                    'maixun_author_location': maixun_author_location,
                    'maixun_domain': maixun_domain,
                    'maixun_fid': maixun_fid,
                    'complaint_tags': complaint_tags,
                }

            else:
                # No complaint tags extracted — drop the record.
                return None
        else:
            # Not recognised as a complaint — drop the record.
            return None
    elif mx_res_type == 'CuZu':
        customer_voice_uni = mx_title + mx_text
        if len(mx_hint_words):
            vehicle = mx_hint_words
        else:
            vehicle = mx_ModelName

        # First extract the sentences of the original text that mention the
        # target vehicle.
        vehicle_relevant_sentence = tool.sentence_split(
            customer_voice=customer_voice_uni,
            vehicle=vehicle
        )
        cuzu_tags = cuzu_preprocessing(
            title_zh=vehicle_relevant_sentence,
            text_zh='',
            vehicle=vehicle
        )

        if len(cuzu_tags):
            # CuZu tags were found — assemble the storage payload.
            maixun_fid = mx_fid
            create_time = mx_posttime
            info_category = 'maixun_crawled'
            URL = mxUrlSplit(url=mx_url, if_comment=mx_is_comment)
            forum_type_res = mx_forum_types_res
            content_type = mx_content_type
            hint_words = mx_hint_words
            is_comment = mx_is_comment
            target_vehicle_sentence = vehicle_relevant_sentence
            target_vehicle_sentence_en = translator.translate_util_zh2en(
                title=target_vehicle_sentence,
                text=''
            )['title_en']
            maixun_title = mx_title
            maixun_text = mx_text
            website = mx_domain2websites(mx_domain=mx_domain)
            maixun_author_location = mx_author_location
            maixun_domain = mx_domain

            maixun_cuzu_res = cuzu_tags

            return {
                'FLAG': 'CuZu',
                'maixun_fid': maixun_fid,
                'create_time': create_time,
                'info_category': info_category,
                'URL': URL,
                'forum_type_res': forum_type_res,
                'content_type': content_type,
                'hint_words': hint_words,
                'is_comment': is_comment,
                'target_vehicle_sentence': target_vehicle_sentence,
                'target_vehicle_sentence_en': target_vehicle_sentence_en,
                'maixun_title': maixun_title,
                'maixun_text': maixun_text,
                'website': website,
                'maixun_author_location': maixun_author_location,
                'maixun_domain': maixun_domain,
                'maixun_cuzu_res': maixun_cuzu_res
            }
        # NOTE(review): when cuzu_tags is empty this branch falls through and
        # the function implicitly returns None.

    else:
        logging.error('当前数据不在待处理范围内')
        logging.error(json.dumps(e, ensure_ascii=False))
        return None

from complaint.views import StoreIdmiComplaint
from maixun_cuzu.views import MaiXunCuZuStorage
from django.http import HttpRequest





def write_result(e):
    '''
    Persist one preprocessed record to PG via the in-process Django views.

    *e* is the payload returned by preprocess() (or None, which is a no-op).
    The record is first appended to the daily backup file via writer.write,
    then dispatched on e['FLAG'] to the matching storage view.  The two
    branches previously duplicated the request-building code verbatim; they
    now share a single dispatch path.
    '''
    if not e:
        return

    create_time = e.get('create_time', datetime.datetime.now().strftime("%Y-%m-%d"))
    payload = json.dumps(e, ensure_ascii=False)
    writer.write(payload, create_time)

    flag = e['FLAG']
    if flag == 'Quality':
        view = StoreIdmiComplaint()
    elif flag == 'CuZu':
        view = MaiXunCuZuStorage()
    else:
        logging.error('当前的数据无法写入数据库, {}'.format(payload))
        return

    # Call the storage view in-process instead of an HTTP round-trip
    # (the commented-out requests.post calls in the original did the same
    # thing over localhost).
    request = HttpRequest()
    request.method = 'POST'
    request.POST = {'msg': payload}
    res = view.post(request)
    if res.status_code == 200:
        logging.info('data insert into pg : {}'.format(payload))
    else:
        logging.error('data insert into pg error')


def remove_duplicate_data(data_list_duplicated : list) -> list:
    """De-duplicate Maixun records on the concatenation of mx_title + mx_text.

    The first occurrence of each (title + text) fingerprint wins; relative
    order of the survivors is preserved.
    """
    seen = set()
    unique = []
    for record in data_list_duplicated:
        fingerprint = record['mx_title'] + record['mx_text']
        if fingerprint in seen:
            continue
        seen.add(fingerprint)
        unique.append(record)
    return unique


# def data_preprocessing_single(fid, day):
#
#     filepath = os.path.join(maixun_info.FOLDER, day, str(fid) + ".jsonl")
#
#     if os.path.exists(filepath) and os.path.getsize(filepath) > 0:
#         logging.info('开始处理 {model}  {day}'.format(
#             model=FID2INFO[fid], day=day
#         ))
#     else:
#         logging.error('当前车型无数据 {model}  {day}'.format(
#             model=FID2INFO[fid], day=day
#         ))
#         return None
#     with codecs.open(filename=filepath, mode='r', encoding='utf-8') as fr:
#         data_list_duplicated = [json.loads(line) for line in fr.readlines()]
#         original_data_amount = len(data_list_duplicated)
#         logging.info('去重前 {model}  {day} 数据量 {duplicated_data_amount}'.format(
#             model=FID2INFO[fid], day=day, duplicated_data_amount=original_data_amount
#         ))

def data_preprocessing_batch(fid_list, days):
    """Load the downloaded jsonl files, de-duplicate per day, then run
    preprocess() + write_result() over every surviving record.

    At most 200 randomly-sampled records are kept per (fid, day) file to
    bound the workload.  Batches larger than 100 records fan out over a
    thread pool; smaller ones run sequentially.

    Fix: the original called random.shuffle without `random` ever being
    imported, raising NameError whenever a file held more than 200 records.
    """
    all_data_list = []
    for day in days:
        day_data_list = []
        for fid in fid_list:
            filepath = os.path.join(maixun_info.FOLDER, day, str(fid) + ".jsonl")
            # Skip missing or empty files.
            if not (os.path.exists(filepath) and os.path.getsize(filepath) > 0):
                continue
            with codecs.open(filename=filepath, mode='r', encoding='utf-8') as fr:
                records = [json.loads(line) for line in fr]
            if len(records) > 200:
                # Down-sample oversized files to a random 200-record subset.
                random.shuffle(records)
                day_data_list += records[:200]
            else:
                day_data_list += records

        # De-duplicate within the day before merging into the full batch.
        all_data_list += remove_duplicate_data(day_data_list)

    if len(all_data_list) > 100:
        # int() guards against NUM_THREADS arriving from the environment as
        # a string.
        with ThreadPoolExecutor(max_workers=int(NUM_THREADS)) as executor:
            futures = [
                executor.submit(
                    lambda e: write_result(preprocess(e)),  # process then store
                    element
                ) for element in all_data_list
            ]
            # Block until done; result() re-raises any worker exception.
            for future in futures:
                future.result()
    else:
        for element in all_data_list:
            write_result(preprocess(element))


def main(year=2025, month=4):
    '''
    Drive the whole pipeline: DOWNLOAD from OBS, then PREPROCESSING into PG.

    Args:
        year, month: the month whose daily partitions are processed.  The
            defaults reproduce the previously hard-coded April-2025 backfill,
            so existing callers of main() are unaffected.
    '''
    makedirs()
    makelogger()

    logging.info('===============================================')
    fid_list = list(FID2INFO.keys())
    # Was a bare debug print(fid_list); route through logging instead.
    logging.info('fid_list: {}'.format(fid_list))
    # Full-month backfill; switch to get_yesterday_list() for daily runs.
    days = list(generate_month_dates(year, month))
    logging.info('当前日期: {}, 获取 {} 的数据'.format(
        datetime.datetime.now().strftime('%Y-%m-%d'),
        str(days)
    ))
    fetch_maixun_data(fid_list=fid_list, days=days)
    logging.info('start to preprocess the dataset')
    data_preprocessing_batch(fid_list=fid_list, days=days)
    logging.info('===============================================')



class MaixunData(View):
    """Django view exposing the Maixun download + preprocessing pipeline."""
    def get(self, request):
        """Run main() synchronously and report success.

        NOTE(review): this kicks off a long-running batch job from a plain
        GET request with no auth or concurrency guard visible here — confirm
        the endpoint is protected and never invoked concurrently.
        """
        main()
        return JsonResponse({'status': 200, 'message': 'success'})





