
import re
from tabnanny import check
from langchain import chains
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatTongyi
from langchain.output_parsers import PydanticOutputParser
from typing import Optional, Any
from dotenv import load_dotenv
from domain.check_domain import ComplianceCheckResult
from langchain_core.runnables import RunnablePassthrough
from langchain.prompts import StringPromptTemplate


class BaseChecker:
    """Base class for LLM-backed product compliance checkers.

    Wires a Tongyi chat model to a prompt (supplied by the subclass via
    ``getTemplate``) and a :class:`PydanticOutputParser` that yields a
    ``ComplianceCheckResult``. Subclasses MUST override ``getTemplate``;
    they MAY override ``getLLMName`` to select a different model.
    """

    def __init__(self):
        # Vision-capable Tongyi chat model; temperature=0 for deterministic,
        # non-streamed output (the whole response is parsed in one piece).
        self.llm = ChatTongyi(
            model=self.getLLMName(),
            temperature=0,
            streaming=False
        )
        # Parses the raw LLM text into a ComplianceCheckResult pydantic object.
        self.parser = PydanticOutputParser(pydantic_object=ComplianceCheckResult)

        # Build the compliance-analysis chain. It is a plain LCEL runnable,
        # so no SequentialChain wrapper is needed — use it directly.
        self.compliance_chain = self.create_compliance_chain()
        self.full_chain = self.compliance_chain

    def check_product(self, product_name: str, pics: Optional[list] = None) -> ComplianceCheckResult:
        """Run the full compliance chain for one product.

        Args:
            product_name: Product title/name to analyse.
            pics: Optional list of product picture descriptions/URLs.

        Returns:
            The parsed ``ComplianceCheckResult`` produced by the chain.
        """
        # BUG FIX: the prompt declares "pic_desc" as an input variable (see
        # _create_name_chain_lcel), but the original payload only carried
        # "product_pics" — formatting the prompt then failed with a KeyError.
        # Supply both keys: "pic_desc" feeds the template, "product_pics" is
        # kept so the passthrough output dict stays backward-compatible.
        payload = {
            "product_name": product_name,
            "product_pics": pics,
            "pic_desc": pics,
        }
        result = self.full_chain.invoke(payload)
        # The LCEL chain already emits a parsed ComplianceCheckResult under
        # "compliance_result"; no second parse step is needed.
        return result["compliance_result"]

    def get_risk_summary(self, check_result: ComplianceCheckResult) -> dict:
        """Condense a full check result into a small risk-summary dict."""
        return {
            "risk_level": check_result.risk_assessment.risk_level,
            "main_issues": {
                "prohibited_words": check_result.compliance_check.prohibited_words,
                "missing_elements": check_result.compliance_check.missing_elements
            },
            "suggestions": check_result.risk_assessment.suggestions,
            "legal_basis": check_result.risk_assessment.legal_basis
        }

    def create_compliance_chain(self) -> Any:
        """Assemble the compliance chain from its sub-chains.

        Currently only the name-analysis chain is used; additional sub-chains
        can be composed here later. ``RunnablePassthrough.assign`` wraps the
        output so the invoke() result is the input dict plus a
        "compliance_result" key — compatible with the old LLMChain format.
        """
        name_chain = self._create_name_chain_lcel()
        compliance_chain = RunnablePassthrough.assign(compliance_result=name_chain)
        return compliance_chain

    def _create_name_chain_lcel(self):
        """Build the LCEL pipeline: prompt | llm | parser.

        The prompt template (from ``getTemplate``) must reference the
        "product_name" and "pic_desc" input variables; format instructions
        for the pydantic parser are injected as a partial variable.
        """
        prompt = PromptTemplate(
            template=self.getTemplate(),
            input_variables=["product_name", "pic_desc"],
            partial_variables={"format_instructions": self.parser.get_format_instructions()}
        )
        chain = prompt | self.llm | self.parser
        return chain

    def getTemplate(self) -> str:
        """Return the prompt template string; subclasses must override.

        NOTE: the original signature was annotated ``-> StringPromptTemplate``
        and silently returned None, which surfaced as a confusing
        PromptTemplate validation error downstream. Fail fast instead.
        """
        raise NotImplementedError("Subclasses must implement getTemplate() and return a prompt template string")

    def getLLMName(self) -> str:
        """Model name passed to ChatTongyi; override to use another model."""
        return "qwen-vl-max"

    def buildResponse(self, product_name: str, product_pics: str, classify_name: str, check_result: ComplianceCheckResult, risk_summary: dict) -> dict:
        """Flatten the check result + risk summary into an API response dict."""
        return {
            'product_name': product_name,
            'product_pics': product_pics,
            'classify_name': classify_name,
            'semantic_analysis': {
                'category': check_result.semantic_analysis.category,
                'material': check_result.semantic_analysis.material,
                'function': check_result.semantic_analysis.function,
                'sensitive_words': check_result.semantic_analysis.sensitive_words,
                'abbreviations': check_result.semantic_analysis.abbreviations,
                'special': check_result.semantic_analysis.special
            },
            'compliance_check': {
                'hs_code': check_result.compliance_check.hs_code,
                'prohibited_words': check_result.compliance_check.prohibited_words,
                'missing_elements': check_result.compliance_check.missing_elements
            },
            'risk_assessment': risk_summary
        }