import json
import logging
import os
import time
from typing import Any, Dict, List, Optional

import requests

# Configure module-level logging; all classes/functions below log through `logger`.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class TextGenerator:
    """Client for the ModelScope inference API (Qwen3-Coder chat-completions)."""

    def __init__(self):
        """
        Initialize the ModelScope API text generator.

        Uses the Qwen3-Coder model via the OpenAI-compatible chat-completions
        endpoint. The API key is read from the ``MODELSCOPE_API_KEY``
        environment variable when set.
        """
        self.base_url = 'https://api-inference.modelscope.cn/v1'
        # SECURITY: the hard-coded key is kept only as a backward-compatible
        # fallback -- it should be rotated and supplied via MODELSCOPE_API_KEY.
        self.api_key = os.environ.get(
            'MODELSCOPE_API_KEY',
            'ms-ce902928-b199-4185-bd9e-46601335f66b'
        )
        self.model_id = 'Qwen/Qwen3-Coder-480B-A35B-Instruct'

        self.headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json',
        }

        logger.info("ModelScope API文本生成器初始化成功")

    def _error_response(self, error: str, prompt: str) -> Dict[str, Any]:
        """Build the standard failure payload returned by the generation methods."""
        return {
            "success": False,
            "error": error,
            "prompt": prompt,
            "generated_text": "",
            "full_text": "",
            "usage": {}
        }

    def generate_text(self, prompt: str, max_length: int = 200, min_length: int = 50,
                     temperature: float = 0.7, top_p: float = 0.9,
                     system_prompt: Optional[str] = None, stream: bool = False) -> Dict[str, Any]:
        """
        Generate text for a single prompt.

        Args:
            prompt: User prompt; must be non-empty after stripping.
            max_length: Maximum number of tokens to generate.
            min_length: Minimum number of tokens to generate.
            temperature: Sampling temperature, clamped to [0.0, 2.0].
            top_p: Nucleus-sampling probability, clamped to [0.0, 1.0].
            system_prompt: Optional system prompt; a default assistant prompt
                is used when omitted.
            stream: When True, delegates to the (simplified) streaming path.

        Returns:
            dict: On success, "success": True plus "generated_text",
            "full_text", "usage" and "model". On failure, "success": False
            plus an "error" message; no exception escapes this method.
        """
        try:
            if not prompt or not prompt.strip():
                raise ValueError("提示文本不能为空")

            # System message: caller-supplied, or a sensible default.
            if system_prompt:
                system_content = system_prompt
            else:
                system_content = "你是一个智能助手，能够生成高质量、连贯的文本内容。请根据用户的要求生成合适的文本，确保内容准确、流畅。"

            messages = [
                {"role": "system", "content": system_content},
                {"role": "user", "content": prompt.strip()}
            ]

            # Clamp the sampling parameters to their valid ranges.
            # NOTE(review): "min_tokens" is not part of the standard
            # OpenAI-compatible request schema -- kept for backward
            # compatibility; confirm the ModelScope endpoint accepts it.
            request_data = {
                "model": self.model_id,
                "messages": messages,
                "max_tokens": max_length,
                "min_tokens": min_length,
                "temperature": max(0.0, min(2.0, temperature)),
                "top_p": max(0.0, min(1.0, top_p)),
                "stream": stream
            }

            if stream:
                return self._generate_streaming(request_data)
            return self._generate_sync(request_data)

        except Exception as e:
            # Lazy %-formatting: the message is only built when actually logged.
            logger.error("文本生成失败: %s", e)
            return self._error_response(str(e), prompt)

    def _generate_sync(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
        """Perform a blocking chat-completions request and normalize the reply."""
        # The user prompt is always the last message in the request.
        user_prompt = request_data["messages"][-1]["content"]
        try:
            response = requests.post(
                f"{self.base_url}/chat/completions",
                headers=self.headers,
                json=request_data,
                timeout=120  # generation can be slow; allow a generous timeout
            )
            response.raise_for_status()
            result = response.json()

            # Guard against a missing/empty choices list (also covers None).
            choices = result.get("choices") or []
            if not choices:
                raise ValueError("API响应格式异常")

            generated_text = choices[0]["message"]["content"]
            full_text = user_prompt + generated_text

            return {
                "success": True,
                "prompt": user_prompt,
                "generated_text": generated_text.strip(),
                "full_text": full_text.strip(),
                "usage": result.get("usage", {}),
                "model": self.model_id
            }

        except requests.exceptions.Timeout:
            return self._error_response("文本生成超时，请稍后重试", user_prompt)
        except requests.exceptions.ConnectionError:
            return self._error_response("无法连接到文本生成服务，请检查网络连接", user_prompt)
        except Exception as e:
            return self._error_response(str(e), user_prompt)

    def _generate_streaming(self, request_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Start a streaming generation request.

        NOTE: simplified placeholder -- the streaming HTTP response is opened
        but its chunks are never consumed; a full implementation would iterate
        the server-sent events from the response.
        """
        try:
            response = requests.post(
                f"{self.base_url}/chat/completions",
                headers=self.headers,
                json=request_data,
                stream=True,
                timeout=120
            )
            response.raise_for_status()

            # Simplified handling: report that streaming started instead of
            # relaying the incremental chunks.
            return {
                "success": True,
                "stream": True,
                "message": "流式生成已启动",
                "note": "当前实现为简化版本，完整流式需要WebSocket支持"
            }

        except Exception as e:
            return {
                "success": False,
                "error": str(e),
                "stream": False
            }

    def generate_with_context(self, messages: List[Dict[str, str]],
                            max_length: int = 200, temperature: float = 0.7) -> Dict[str, Any]:
        """
        Generate text from a full conversation context.

        Args:
            messages: Conversation list of {"role": "user"/"assistant",
                "content": "..."} dicts, passed through unchanged.
            max_length: Maximum number of tokens to generate.
            temperature: Sampling temperature.

        Returns:
            dict: Generation result in the same shape as generate_text().
        """
        try:
            if not messages or not isinstance(messages, list):
                raise ValueError("消息格式错误")

            request_data = {
                "model": self.model_id,
                "messages": messages,
                "max_tokens": max_length,
                "temperature": temperature,
                "stream": False
            }

            return self._generate_sync(request_data)

        except Exception as e:
            return {
                "success": False,
                "error": str(e),
                "messages": messages,
                "generated_text": "",
                "usage": {}
            }

    def check_api_status(self) -> Dict[str, Any]:
        """Probe API availability with a tiny test generation (performs a real request)."""
        try:
            test_response = self.generate_text(
                prompt="测试",
                max_length=10,
                min_length=5
            )

            if test_response["success"]:
                return {
                    "available": True,
                    "message": "Qwen3-Coder模型可用",
                    "model": self.model_id
                }
            return {
                "available": False,
                "message": test_response.get("error", "模型不可用"),
                "model": self.model_id
            }

        except Exception as e:
            return {
                "available": False,
                "message": f"连接失败: {str(e)}",
                "model": self.model_id
            }

    def get_model_info(self) -> Dict[str, Any]:
        """Return static descriptive metadata about the configured model."""
        return {
            "model": self.model_id,
            "provider": "ModelScope",
            "type": "大语言模型",
            "features": [
                "文本生成",
                "对话理解",
                "代码生成",
                "创意写作",
                "知识问答"
            ],
            "languages": ["中文", "英文", "多语言"],
            "max_context_length": 32768
        }

# Module-level singleton instance (created at import time; __init__ performs
# no network I/O, only configuration).
text_generator = TextGenerator()

def generate_text_api(prompt: str, max_length: int = 200, min_length: int = 50, 
                     temperature: float = 0.7, system_prompt: str = None) -> Dict[str, Any]:
    """
    Flask-facing wrapper around the module-level TextGenerator instance.

    Args:
        prompt: Input prompt text.
        max_length: Maximum length of the generated text.
        min_length: Minimum length of the generated text.
        temperature: Sampling temperature.
        system_prompt: Optional system prompt.

    Returns:
        dict: Response containing the generation result.
    """
    options = {
        "max_length": max_length,
        "min_length": min_length,
        "temperature": temperature,
        "system_prompt": system_prompt,
    }
    return text_generator.generate_text(prompt=prompt, **options)

if __name__ == "__main__":
    # Command-line smoke test for the ModelScope API generator.
    print("🧪 测试ModelScope API文本生成...")

    # Check the remote API is reachable before running generations.
    status = text_generator.check_api_status()
    print(f"API状态: {status}")

    sample_prompts = (
        "今天天气很好，我打算去",
        "写一段关于人工智能发展的短文",
        "解释什么是区块链技术",
    )

    for sample in sample_prompts:
        print(f"\n📝 测试提示: {sample}")
        outcome = generate_text_api(sample, max_length=100, temperature=0.8)

        if not outcome["success"]:
            print(f"❌ 生成失败: {outcome['error']}")
        else:
            print(f"✅ 生成成功")
            print(f"📝 生成内容: {outcome['generated_text'][:100]}...")
            print(f"📊 使用统计: {outcome.get('usage', {})}")

        time.sleep(1)  # throttle to avoid hitting the API too quickly