import json
import os
import pickle
import pandas as pd
from datetime import datetime
from redis import Redis
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
import hashlib

def _build_redis(redis_cfg):
    """Create a Redis client from a config dict with keys
    ``host``, ``port``, ``db`` and ``password``.

    An empty/falsy password is normalized to ``None`` so the client
    connects without AUTH. ``decode_responses=False`` keeps values as
    raw bytes, which the pickle-based callers in this module require.
    """
    # Treat '' (common in config files) the same as "no password".
    password = redis_cfg['password'] if redis_cfg['password'] else None
    return Redis(
        host=redis_cfg['host'],
        port=redis_cfg['port'],
        db=redis_cfg['db'],
        password=password,
        decode_responses=False,
    )

def _allowed_extensions():
    return {'xlsx', 'xls'}

def _hash_text(text: str) -> str:
    return hashlib.md5(text.encode('utf-8')).hexdigest()

def parse_excel_and_store(file_path: str, job_id: str, redis_cfg: dict):
    """Parse every sheet of an Excel workbook into string-valued records
    and store the pickled list in Redis under ``parsed:<job_id>``.

    Job metadata (status, per-sheet row counts, timestamps) is kept as
    JSON under ``parsed:<job_id>:meta``. On any failure the meta key is
    set to ``failed`` and the exception is re-raised so the job queue
    can mark the job as errored.

    Parameters
    ----------
    file_path : path to the ``.xlsx``/``.xls`` file to parse.
    job_id : identifier used to namespace the Redis keys.
    redis_cfg : config dict accepted by ``_build_redis``.
    """
    r = _build_redis(redis_cfg)
    meta_key = f"parsed:{job_id}:meta"
    data_key = f"parsed:{job_id}"
    try:
        meta = {
            'status': 'running',
            'records_count': 0,
            'sheets': [],
            'error': None,
            'started_at': datetime.utcnow().isoformat(),
        }
        r.set(meta_key, json.dumps(meta).encode('utf-8'))

        all_data = []
        sheets = []
        # Open the workbook once and parse each sheet from the same handle.
        # (The original called pd.read_excel(file_path, ...) per sheet, which
        # re-opened and re-read the file for every sheet, and never closed
        # the ExcelFile handle.)
        with pd.ExcelFile(file_path) as xl:
            for sheet_name in xl.sheet_names:
                # dtype=str keeps cell values verbatim; fillna('') turns
                # missing cells into empty strings so records are uniform.
                df = xl.parse(sheet_name, dtype=str).fillna('')
                sheets.append({'name': sheet_name, 'rows': len(df)})
                # Keep field mapping consistent with existing logic
                # (could be extracted into a shared helper later).
                for _, row in df.iterrows():
                    all_data.append({col: str(row[col]) for col in df.columns})

        payload = pickle.dumps(all_data, protocol=pickle.HIGHEST_PROTOCOL)
        r.set(data_key, payload)
        meta.update({
            'status': 'finished',
            'records_count': len(all_data),
            'sheets': sheets,
            'finished_at': datetime.utcnow().isoformat(),
        })
        r.set(meta_key, json.dumps(meta).encode('utf-8'))
    except Exception as e:
        # Record the failure for pollers, then re-raise for the job queue.
        meta = {'status': 'failed', 'error': str(e), 'finished_at': datetime.utcnow().isoformat()}
        r.set(meta_key, json.dumps(meta).encode('utf-8'))
        raise

def ai_analysis_and_store(job_id: str, question: str, ai_cfg: dict, redis_cfg: dict, pool_cfg: dict):
    """Run an LLM analysis over previously parsed records and store the
    result as JSON in Redis under ``ai:<job_id>:<ai_job_id>``.

    Reads the pickled record list written by ``parse_excel_and_store``
    from ``parsed:<job_id>``, builds a prompt from a small sample, POSTs
    it to the configured chat-completion endpoint, and writes a result
    dict with ``status`` of ``running``/``finished``/``failed``.

    Parameters
    ----------
    job_id : parse-job identifier whose records are analyzed.
    question : user question; hashed to derive a fallback ai_job_id.
    ai_cfg : API settings (``api_url``, ``api_key``, ``model_name``,
        optional ``small_data_threshold``/``temperature_optimized``/
        ``max_tokens_batch``/``timeout``).
    redis_cfg : config dict accepted by ``_build_redis``.
    pool_cfg : HTTP pool settings (``pool_maxsize``, ``max_retries``).

    Raises
    ------
    Re-raises any unexpected exception after recording a ``failed``
    result, so the job queue can mark the job as errored.
    """
    r = _build_redis(redis_cfg)
    data_key = f"parsed:{job_id}"
    payload = r.get(data_key)
    # RQ sets RQ_JOB_ID inside a worker; fall back to a hash of the
    # question so the function also works when run standalone.
    ai_job_id = os.environ.get('RQ_JOB_ID') or _hash_text(question)
    result_key = f"ai:{job_id}:{ai_job_id}"

    result = {'job_id': job_id, 'ai_job_id': ai_job_id, 'status': 'running'}
    r.set(result_key, json.dumps(result).encode('utf-8'))

    try:
        if not payload:
            result.update({'status': 'failed', 'error': '解析数据不存在或未完成'})
            r.set(result_key, json.dumps(result).encode('utf-8'))
            return

        # NOTE(review): the payload is produced by parse_excel_and_store in
        # this same module, so it is trusted here — but pickle.loads executes
        # arbitrary code, so never point this worker at Redis data written by
        # untrusted parties.
        records = pickle.loads(payload)
        # A small sample keeps the prompt short and the response fast;
        # slicing already caps at len(records), so no min() is needed.
        samples = records[:ai_cfg.get('small_data_threshold', 10)]

        sample_records = json.dumps(samples, ensure_ascii=False)
        prompt = f"""请分析以下工作记录，按项目组分组输出结构化分析：

{sample_records}

请严格按照以下格式输出，不要添加任何其他内容：

项目组名称（真实名称）
1. 具体工作内容和成果，突出数据和效果
2. 具体工作内容和成果，突出数据和效果
3. 具体工作内容和成果，突出数据和效果
4. 具体工作内容和成果，突出数据和效果

项目组名称（真实名称）
1. 具体工作内容和成果，突出数据和效果
2. 具体工作内容和成果，突出数据和效果

要求：
- 标题必须是项目组真实名称，单独一行，不加任何标记符号或数字
- 用1、2、3、4标记具体工作项（每项一行）
- 突出工作成果、数据指标、效率提升等
- 不要添加总结、分析等额外内容
"""

        # Session as a context manager so pooled connections are released
        # (the original leaked the session on every call).
        with requests.Session() as session:
            adapter = HTTPAdapter(
                pool_maxsize=pool_cfg.get('pool_maxsize', 100),
                max_retries=Retry(
                    total=pool_cfg.get('max_retries', 3),
                    backoff_factor=0.5,
                    status_forcelist=[429, 500, 502, 503, 504],
                    # allowed_methods=False retries ALL verbs, including this
                    # POST — acceptable only if the AI endpoint tolerates
                    # duplicate requests; confirm idempotency.
                    allowed_methods=False
                )
            )
            session.mount('https://', adapter)
            session.mount('http://', adapter)

            headers = {
                "Authorization": f"Bearer {ai_cfg['api_key']}",
                "Content-Type": "application/json"
            }
            data = {
                "model": ai_cfg['model_name'],
                "messages": [{"role": "user", "content": prompt}],
                "temperature": ai_cfg.get('temperature_optimized', 0.3),
                # Bounded output tokens keep responses short and fast.
                "max_tokens": ai_cfg.get('max_tokens_batch', 1500)
            }
            resp = session.post(ai_cfg['api_url'], headers=headers, json=data,
                                timeout=ai_cfg.get('timeout', 60))

        if resp.status_code == 200:
            content = resp.json()['choices'][0]['message']['content']
            result.update({'status': 'finished', 'answer': content})
        else:
            # Truncate the body so the Redis value stays small.
            result.update({'status': 'failed', 'error': f"{resp.status_code} - {resp.text[:300]}"})
        r.set(result_key, json.dumps(result).encode('utf-8'))
    except Exception as e:
        result.update({'status': 'failed', 'error': str(e)})
        r.set(result_key, json.dumps(result).encode('utf-8'))
        raise