import json
import requests
import os
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# 添加对GGUF格式模型的支持
from llama_cpp import Llama

class OllamaClient:
    """Text-generation client with two interchangeable backends.

    When ``model_path`` is supplied, a local model is loaded eagerly —
    GGUF files via llama-cpp-python, anything else as a
    PyTorch/transformers causal-LM checkpoint — and used for all
    generation.  Otherwise every call is forwarded to an Ollama HTTP
    service at ``base_url``.
    """

    def __init__(self, base_url='http://localhost:11435', model_path=None):
        """Initialize the client.

        Args:
            base_url: Base URL of the Ollama service; only used when no
                local model path is supplied.  NOTE(review): the default
                port 11435 differs from Ollama's usual 11434 — confirm
                this is intentional.
            model_path: Optional path to a local ``.gguf`` file or a
                model directory.  When set, the model is loaded here and
                load errors propagate from the constructor.
        """
        self.base_url = base_url
        self.model_path = model_path
        self.model = None        # Llama instance or transformers model
        self.tokenizer = None    # transformers tokenizer (PyTorch models only)
        self.is_gguf = False     # True once a GGUF model has been selected
        self.using_local_model = False

        # Debug trace of the chosen backend.
        print(f"OllamaClient初始化: model_path={model_path}")

        if self.model_path:
            self.using_local_model = True
            print("设置为使用本地模型")
            self._load_local_model()
        else:
            print("设置为使用Ollama服务")

    def _load_local_model(self):
        """Load the model at ``self.model_path``.

        Detects GGUF vs PyTorch format, delegates to the matching loader
        (each with a low-memory fallback), and raises an Exception with
        a diagnostic hint on failure.
        """
        try:
            print(f"尝试加载本地模型: {self.model_path}")
            if not os.path.exists(self.model_path):
                raise Exception(f"模型路径不存在: {self.model_path}")

            model_name = os.path.basename(self.model_path)
            print(f"模型名称: {model_name}")

            if self._detect_gguf():
                self._load_gguf_model()
            else:
                self._load_pytorch_model()
        except Exception as e:
            print(f"加载本地模型时出错: {str(e)}")
            # Attach the most likely root cause to the error message.
            error_msg = f"加载本地模型失败: {str(e)}"
            if 'CUDA out of memory' in str(e):
                error_msg += "\n\n可能原因: 显存不足。请尝试更小的模型或增加系统内存。"
            elif 'tokenizer' in str(e).lower():
                error_msg += "\n\n可能原因: 无法加载tokenizer。请检查模型目录结构是否正确。"
            raise Exception(error_msg)

    def _detect_gguf(self):
        """Return True when ``self.model_path`` is (or contains) a GGUF file.

        If the path is a directory holding ``.gguf`` files, rewrites
        ``self.model_path`` to the first such file.
        """
        if os.path.isdir(self.model_path):
            print(f"模型路径是目录: {self.model_path}")
            gguf_files = [name for name in os.listdir(self.model_path)
                          if name.lower().endswith('.gguf')]
            print(f"找到{len(gguf_files)}个GGUF文件")
            if gguf_files:
                self.model_path = os.path.join(self.model_path, gguf_files[0])
                print(f"选择GGUF文件: {self.model_path}")
                return True
            print(f"模型目录中没有GGUF文件，尝试作为PyTorch模型加载")
            return False
        if self.model_path.lower().endswith('.gguf'):
            print(f"模型是GGUF格式: {self.model_path}")
            return True
        return False

    def _load_gguf_model(self):
        """Load a GGUF model via llama-cpp-python.

        Retries once with an explicit small context / batch size if the
        default configuration fails (e.g. out of memory).
        """
        self.is_gguf = True
        print(f"开始加载GGUF模型: {self.model_path}")
        try:
            self.model = Llama(model_path=self.model_path)
            print(f"成功加载GGUF模型: {self.model_path}")
        except Exception as e:
            print(f"GGUF模型加载失败: {str(e)}")
            print("尝试使用低内存模式加载...")
            self.model = Llama(model_path=self.model_path, n_ctx=2048, n_batch=512)
            print(f"成功加载GGUF模型(低内存模式): {self.model_path}")

    def _load_pytorch_model(self):
        """Load a transformers causal-LM checkpoint.

        Retries once with ``low_cpu_mem_usage=True``; the retry also
        (re)loads the tokenizer — the original code skipped it, leaving
        ``self.tokenizer`` as None when the tokenizer was what failed.
        """
        print(f"开始加载PyTorch模型: {self.model_path}")
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
            print(f"成功加载tokenizer")
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_path,
                torch_dtype=torch.float16,
                device_map="auto",
            )
            print(f"成功加载PyTorch模型: {self.model_path}")
        except Exception as e:
            print(f"PyTorch模型加载失败: {str(e)}")
            print("尝试使用低内存模式加载...")
            # BUGFIX: ensure the tokenizer exists before the model retry.
            if self.tokenizer is None:
                self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_path,
                torch_dtype=torch.float16,
                device_map="auto",
                low_cpu_mem_usage=True,
            )
            print(f"成功加载PyTorch模型(低内存模式): {self.model_path}")

    def generate(self, model_name, prompt, stream=False, options=None):
        """Generate text for ``prompt``.

        Args:
            model_name: Ollama model name (ignored when a local model is
                loaded).
            prompt: Prompt string.
            stream: When True, returns a generator of text chunks;
                otherwise the full response string.
            options: Optional dict of sampling parameters.

        Returns:
            ``str`` when ``stream`` is False, a generator of ``str``
            otherwise.

        Raises:
            Exception: if the local model failed to load, or generation
                / the HTTP request fails.
        """
        print(f"generate方法调用: using_local_model={self.using_local_model}, model={self.model is not None}")

        if self.using_local_model:
            print("使用本地模型生成文本")
            if self.model:
                return self._generate_local(prompt, stream, options)
            raise Exception(f"本地模型加载失败，请检查模型路径: {self.model_path}")
        print("使用Ollama服务生成文本")
        return self._generate_ollama(model_name, prompt, stream, options)

    def _generate_local(self, prompt, stream=False, options=None):
        """Generate with the loaded local model.

        BUGFIX: the original function mixed ``yield`` with
        ``return value``, which made it a generator unconditionally, so
        ``stream=False`` callers received a generator object instead of
        the response string.  Streaming is now delegated to the
        dedicated generator ``_stream_local``.
        """
        generate_kwargs = {
            "max_tokens": 1024,
            "temperature": 0.7,
            "top_p": 0.95,
        }
        if options:
            generate_kwargs.update(options)
        # The stream flag is handled explicitly below; it must never be
        # forwarded as a sampling kwarg (transformers' generate rejects it).
        generate_kwargs.pop("stream", None)

        if stream:
            return self._stream_local(prompt, generate_kwargs)

        try:
            if self.is_gguf:
                output = self.model(prompt, stream=False, **generate_kwargs)
                choices = output.get('choices') or []
                # Completion results carry the generated text under 'text'.
                return choices[0].get('text', '').strip() if choices else ''
            return self._generate_pytorch(prompt, generate_kwargs)
        except Exception as e:
            raise Exception(f"本地模型生成失败: {str(e)}")

    def _stream_local(self, prompt, generate_kwargs):
        """Yield text chunks from the local model (streaming path)."""
        try:
            if self.is_gguf:
                for chunk in self.model(prompt, stream=True, **generate_kwargs):
                    choices = chunk.get('choices') or []
                    if not choices:
                        continue
                    # BUGFIX: completion stream chunks put the text under
                    # 'text'; 'delta'/'content' is the chat-completion
                    # shape, so the original loop yielded nothing.
                    text = choices[0].get('text')
                    if text is None:
                        text = choices[0].get('delta', {}).get('content', '')
                    if text:
                        yield text
            else:
                # transformers has no incremental API here: generate the
                # full response, then emulate streaming word by word.
                for word in self._generate_pytorch(prompt, generate_kwargs).split(' '):
                    yield word + ' '
        except Exception as e:
            raise Exception(f"本地模型生成失败: {str(e)}")

    def _generate_pytorch(self, prompt, generate_kwargs):
        """Run transformers generation and return the completion text
        with the echoed prompt prefix removed."""
        kwargs = dict(generate_kwargs)
        # BUGFIX: transformers budgets new tokens via max_new_tokens; the
        # old mapping to max_length counted the prompt against the limit
        # and could truncate long prompts to an empty completion.
        if 'max_tokens' in kwargs:
            kwargs['max_new_tokens'] = kwargs.pop('max_tokens')
        # temperature/top_p are silently ignored unless sampling is on.
        kwargs.setdefault('do_sample', True)
        # Move inputs to the model's device (device_map="auto" may have
        # placed the model on GPU while the tokenizer returns CPU tensors).
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        output = self.model.generate(**inputs, **kwargs)
        response = self.tokenizer.decode(output[0], skip_special_tokens=True)
        return response[len(prompt):].strip()

    def _generate_ollama(self, model_name, prompt, stream=False, options=None):
        """POST to the Ollama ``/api/generate`` endpoint and return the
        parsed response (string, or generator when streaming)."""
        endpoint = f"{self.base_url}/api/generate"
        payload = {
            'model': model_name,
            'prompt': prompt,
            'stream': stream,
        }
        if options:
            payload['options'] = options

        response = requests.post(endpoint, json=payload, stream=stream)
        if stream:
            return self._handle_stream_response(response)
        return self._handle_json_response(response)

    def _handle_stream_response(self, response):
        """Yield the 'response' field of each streamed JSON line."""
        if response.status_code != 200:
            raise Exception(f"请求失败{response.status_code}")
        for line in response.iter_lines():
            if line:
                decoded_line = json.loads(line.decode('utf-8'))
                yield decoded_line.get("response", "")

    def _handle_json_response(self, response):
        """Return the 'response' field of a non-streaming JSON reply."""
        if response.status_code == 200:
            return response.json().get("response", "")
        raise Exception(f"请求失败{response.status_code}")
    

if __name__ == '__main__':
    # Manual smoke test against a locally running Ollama service.
    client = OllamaClient()

    # Non-streaming usage, kept for reference:
    #   response = client.generate(
    #       model_name='qwen3-30b-a3b-2507-thinking:256k',
    #       prompt='天空为什么是蓝色？',
    #       options={'temperature': 0.7},
    #   )
    #   print(response)

    # Streaming usage: print each chunk as it arrives.
    for chunk in client.generate(
        model_name='qwen3-30b-a3b-2507-thinking:256k',
        prompt='天空为什么是蓝色？',
        options={'temperature': 0.4},
        stream=True,
    ):
        print(chunk, end='', flush=True)
