import json
import os
import ssl
import time
import traceback
import urllib.request
from datetime import datetime

import cv2
import numpy as np
import onnxruntime as ort
import requests
from flask import jsonify, request, render_template, session as flask_session
from scipy.spatial.distance import cosine
from skimage.feature import local_binary_pattern
from sklearn.feature_extraction.text import TfidfVectorizer
from werkzeug.utils import secure_filename

from . import search_bp
from utils.auth import login_required

# Module-level ONNX Runtime session for the CLIP model.
# Populated by init_model() below (which runs at import time).
ort_session = None

def download_model():
    """Download the CLIP ONNX model into ./models if not already present.

    Returns:
        str: Path to the validated model file.

    Raises:
        Exception: If the download fails, the downloaded file is truncated,
            or onnxruntime cannot load it.
    """
    model_dir = 'models'
    model_path = os.path.join(model_dir, 'clip.onnx')

    # exist_ok avoids the check-then-create race when multiple workers start.
    os.makedirs(model_dir, exist_ok=True)

    if not os.path.exists(model_path):
        print("正在下载 CLIP 模型...")

        # CLIP model hosted on a huggingface mirror.
        url = "https://hf-mirror.com/jmzzomg/clip-vit-base-patch32-onnx/resolve/main/model.onnx"

        try:
            print(f"尝试从 {url} 下载...")
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
                'Accept': '*/*',
                'Connection': 'keep-alive'
            }

            # SECURITY NOTE(review): verify=False disables TLS certificate
            # verification, so the downloaded model is not authenticated.
            # Kept to preserve behavior against the mirror; prefer verify=True.
            response = requests.get(url, headers=headers, stream=True, timeout=30, verify=False)
            response.raise_for_status()

            total_size = int(response.headers.get('content-length', 0))

            with open(model_path, 'wb') as f:
                if total_size == 0:
                    # No Content-Length header: write the whole body at once.
                    f.write(response.content)
                else:
                    # Stream in chunks and render a 50-char progress bar.
                    downloaded = 0
                    for data in response.iter_content(chunk_size=8192):
                        downloaded += len(data)
                        f.write(data)
                        done = int(50 * downloaded / total_size)
                        print(f"\r下载进度: [{'=' * done}{' ' * (50-done)}] {downloaded}/{total_size} bytes", end='')

            print("\n下载完成!")

            # Sanity check: a real CLIP model is far larger than 100 KB.
            if os.path.getsize(model_path) < 100000:
                print("下载的文件太小，可能不是完整的模型文件")
                os.remove(model_path)
                raise Exception("模型文件不完整")

            # Validate that onnxruntime can actually load the file.
            try:
                ort.InferenceSession(model_path)
                print("模型验证成功!")
            except Exception as e:
                print(f"模型验证失败: {str(e)}")
                os.remove(model_path)
                raise

        except Exception as e:
            print(f"模型下载或验证失败: {str(e)}")
            # Remove any partial file so the next attempt starts clean.
            if os.path.exists(model_path):
                os.remove(model_path)
            raise

    return model_path

# Model initialization
def init_model():
    """Download (if needed) and load the CLIP ONNX model into the
    module-level ``ort_session``. Re-raises on any failure."""
    global ort_session
    try:
        model_path = download_model()
        ort_session = ort.InferenceSession(model_path)
    except Exception as e:
        print(f"模型初始化失败: {str(e)}")
        raise

# NOTE(review): executed at import time — importing this module triggers a
# potentially slow model download/load, and the import fails if it errors.
init_model()

def extract_features(input_data, mode='image'):
    """Extract a CLIP embedding for an image or a text query.

    Args:
        input_data: For ``mode='image'``, either an image file path (str)
            or a BGR image array (as returned by ``cv2.imread``).
            For ``mode='text'``, the query string.
        mode: 'image' or 'text'.

    Returns:
        np.ndarray of shape (1, dim), L2-normalized float32 features,
        or None on any failure (bad input, inference error).
    """
    try:
        if mode == 'image':
            if isinstance(input_data, str):
                img = cv2.imread(input_data)
                if img is None:  # unreadable or missing file
                    return None
            else:
                img = input_data

            # Standard CLIP preprocessing: 224x224 RGB, normalized with
            # CLIP's published mean/std, then NCHW layout with a batch dim.
            img = cv2.resize(img, (224, 224))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = img.astype(np.float32) / 255.0
            mean = np.array([0.48145466, 0.4578275, 0.40821073])
            std = np.array([0.26862954, 0.26130258, 0.27577711])
            img = (img - mean) / std
            img = np.transpose(img, (2, 0, 1))
            img = np.expand_dims(img, axis=0)

            # The exported model expects both modalities; feed dummy text.
            inputs = {
                'input_ids': np.zeros((1, 77), dtype=np.int64),
                'pixel_values': img.astype(np.float32),
                'attention_mask': np.ones((1, 77), dtype=np.int64)
            }

            # Fixed output name for the image branch of the exported model.
            features = ort_session.run(['image_embeds'], inputs)[0]

        else:  # text mode
            if not isinstance(input_data, str):
                return None

            # NOTE(review): this is a whitespace-split placeholder, not the
            # real CLIP BPE tokenizer — the ids below are fabricated, so
            # text embeddings are only approximate. TODO: use a proper
            # CLIP tokenizer.
            raw_tokens = input_data.lower().split()[:75]  # leave room for CLS/SEP
            n_real = len(raw_tokens) + 2  # + [CLS] and [SEP] slots

            # Fabricated ids filling CLIP's fixed 77-token context window.
            input_ids = np.array([[1] + [i + 2 for i in range(75)] + [1]], dtype=np.int64)

            # Fix: mask out padding positions — the previous all-ones mask
            # made the model attend to [PAD] slots.
            attention_mask = np.array([[1] * n_real + [0] * (77 - n_real)], dtype=np.int64)

            inputs = {
                'input_ids': input_ids,
                'pixel_values': np.zeros((1, 3, 224, 224), dtype=np.float32),
                'attention_mask': attention_mask
            }

            # Fixed output name for the text branch of the exported model.
            features = ort_session.run(['text_embeds'], inputs)[0]

        # L2-normalize so downstream cosine similarity is a plain dot product.
        features = features / (np.linalg.norm(features, axis=-1, keepdims=True) + 1e-8)
        return features.astype(np.float32)

    except Exception as e:
        print(f"特征提取错误: {str(e)}")
        traceback.print_exc()
        return None

def compute_similarity(features1, features2, mode='text', max_distance=10.0):
    """Compute a similarity score in [0, 1] between two feature vectors.

    Args:
        features1, features2: Feature arrays, or None.
        mode: 'image' uses an L2-distance-based score; any other value
            uses cosine similarity remapped from [-1, 1] to [0, 1].
        max_distance: For 'image' mode, the L2 distance that maps to a
            score of 0 (generalized from the previous hard-coded 10.0;
            tune to the actual feature distribution).

    Returns:
        float similarity in [0, 1]; 0.0 if either input is None or on error.
    """
    try:
        if features1 is None or features2 is None:
            return 0.0

        if mode == 'image':
            # Map L2 distance linearly onto [0, 1]; distances at or beyond
            # max_distance clamp to 0.
            l2_distance = np.linalg.norm(features1 - features2)
            similarity = max(0, 1 - l2_distance / max_distance)
        else:
            # Cosine similarity on unit-normalized vectors (epsilon guards
            # against zero vectors), remapped from [-1, 1] to [0, 1].
            features1 = features1 / (np.linalg.norm(features1, axis=-1, keepdims=True) + 1e-8)
            features2 = features2 / (np.linalg.norm(features2, axis=-1, keepdims=True) + 1e-8)
            similarity = np.sum(features1 * features2)
            similarity = (similarity + 1) / 2

        # Clamp defensively before returning.
        similarity = np.clip(similarity, 0, 1)

        return float(similarity)
    except Exception as e:
        print(f"相似度计算错误: {str(e)}")
        traceback.print_exc()
        return 0.0

@search_bp.route('/')
@login_required
def index():
    """Render the multimodal search page for the logged-in user."""
    current_user = flask_session.get('username')
    return render_template('search/index.html', username=current_user)

@search_bp.route('/api/search', methods=['POST'])
@login_required
def search():
    """Run a multimodal search over documents stored in documents.json.

    Form fields:
        search_type: 'text2text', 'text2image' or 'image2image'.
        query: Query text (text2text / text2image).
        similarity_threshold: Percentage 0-100; results below it are dropped.
        image: Uploaded query image (image2image only).

    Returns:
        JSON {'status': 'success', 'results': [...]} sorted by descending
        similarity, or {'status': 'error', 'message': ...} on failure.
    """
    try:
        search_type = request.form.get('search_type', 'text')
        query = request.form.get('query', '')
        # The threshold arrives as a percentage; normalize to [0, 1].
        similarity_threshold = float(request.form.get('similarity_threshold', 50)) / 100

        # Load the full document corpus from disk.
        data_file = os.path.join('workspace', 'data', 'documents.json')
        if not os.path.exists(data_file):
            return jsonify({
                'status': 'error',
                'message': '没有可搜索的文档'
            })

        with open(data_file, 'r', encoding='utf-8') as f:
            docs = json.load(f)

        results = []
        similarities = []

        if search_type == 'text2text':  # text query against text documents
            # .get guards against documents missing a 'tags' dict.
            text_docs = [doc for doc in docs if doc.get('tags', {}).get('type') == 'text']

            if text_docs and query:
                # TF-IDF over query + documents; row 0 is the query, and
                # since rows are unit-normalized the dot product below is
                # cosine similarity.
                vectorizer = TfidfVectorizer(
                    min_df=1, max_df=1.0,
                    analyzer='word',
                    token_pattern=r'(?u)\b\w+\b'
                )

                contents = [doc.get('content', '') for doc in text_docs]
                try:
                    tfidf_matrix = vectorizer.fit_transform([query] + contents)
                    similarities = (tfidf_matrix[0] * tfidf_matrix[1:].T).toarray()[0]
                    results = text_docs
                except Exception as e:
                    print(f"TF-IDF计算错误: {str(e)}")

        elif search_type == 'text2image':  # text query against images
            image_docs = [doc for doc in docs if doc.get('tags', {}).get('type') == 'image']

            if image_docs and query:
                # One text-feature extraction, then one image extraction per doc.
                query_features = extract_features(query, mode='text')
                if query_features is not None:
                    for doc in image_docs:
                        doc_path = os.path.join('static', doc.get('file_path', ''))
                        if os.path.exists(doc_path):
                            image_features = extract_features(doc_path, mode='image')
                            if image_features is not None:
                                similarity = compute_similarity(query_features, image_features)
                                results.append(doc)
                                similarities.append(similarity)

        elif search_type == 'image2image':  # image query against images
            if 'image' not in request.files:
                return jsonify({
                    'status': 'error',
                    'message': '请选择图片'
                })

            image_file = request.files['image']
            if not image_file:
                return jsonify({
                    'status': 'error',
                    'message': '无效的图片'
                })

            image_docs = [doc for doc in docs if doc.get('tags', {}).get('type') == 'image']

            # Persist the query image temporarily so cv2 can read it.
            query_filename = os.path.join('static', 'uploads', 'query_' + secure_filename(image_file.filename))
            os.makedirs(os.path.dirname(query_filename), exist_ok=True)
            image_file.save(query_filename)

            try:
                query_features = extract_features(query_filename, mode='image')
                if query_features is not None:
                    for doc in image_docs:
                        doc_path = os.path.join('static', doc.get('file_path', ''))
                        if os.path.exists(doc_path):
                            doc_features = extract_features(doc_path, mode='image')
                            if doc_features is not None:
                                # Image pairs use the L2-distance-based score.
                                similarity = compute_similarity(query_features, doc_features, mode='image')
                                results.append(doc)
                                similarities.append(similarity)
            finally:
                # Always remove the temporary query image.
                if os.path.exists(query_filename):
                    os.remove(query_filename)

        # Build the response, dropping results below the threshold.
        matched_results = []
        for doc, similarity in zip(results, similarities):
            if similarity < similarity_threshold:
                continue

            file_path = doc.get('file_path', '')
            if file_path:
                # Normalize Windows separators and strip the static prefix
                # so the frontend can build URLs consistently.
                file_path = file_path.replace('\\', '/')
                if file_path.startswith('static/'):
                    file_path = file_path[7:]
                elif file_path.startswith('/static/'):
                    file_path = file_path[8:]

            tags = doc.get('tags', {})
            matched_results.append({
                'id': doc['id'],
                # Fix: .get — image documents may have no 'content' field,
                # and doc['content'] raised KeyError here.
                'text': doc.get('content', ''),
                'uri': file_path,
                'type': tags.get('type', 'other'),
                'category': tags.get('category', ''),
                'tags': tags.get('tags', []),
                'similarity': float(similarity)
            })

        # Best matches first.
        matched_results.sort(key=lambda x: x['similarity'], reverse=True)

        return jsonify({
            'status': 'success',
            'results': matched_results
        })

    except Exception as e:
        print(f'搜索错误: {str(e)}')
        traceback.print_exc()
        return jsonify({
            'status': 'error',
            'message': str(e)
        })

@search_bp.route('/text', methods=['POST'])
@login_required
def text_search():
    """Text search endpoint (can target images, text and videos).

    JSON body: query, target_type ('all'/'image'/'text'/'video'),
    search_mode, similarity_threshold.

    NOTE(review): ``Document`` and ``get_flow`` are not defined or imported
    anywhere in this module — this route raises NameError as written. It
    looks like leftover Jina-Flow code; restore those imports or remove
    this route in favor of /api/search.
    """
    try:
        data = request.get_json()
        query_text = data.get('query')
        target_type = data.get('target_type', 'all')
        search_mode = data.get('search_mode', 'semantic')
        similarity_threshold = float(data.get('similarity_threshold', 0.5))

        if not query_text:
            return jsonify({'status': 'error', 'message': '请输入搜索文本'})

        # Build the query document carrying the search parameters as tags.
        doc = Document(
            text=query_text,
            tags={
                'target_type': target_type,
                'search_mode': search_mode,
                'similarity_threshold': similarity_threshold
            }
        )

        # Execute the search and measure wall-clock latency.
        # (Fix: `time` was used here but never imported at module level.)
        start_time = time.time()
        results = get_flow().post('/search',
                                inputs=doc,
                                parameters={
                                    'limit': 20,
                                    'target_type': target_type,
                                    'search_mode': search_mode,
                                    'similarity_threshold': similarity_threshold
                                })
        search_time = int((time.time() - start_time) * 1000)  # milliseconds

        formatted_results = format_search_results(results)

        return jsonify({
            'status': 'success',
            'results': formatted_results,
            'search_time': search_time
        })

    except Exception as e:
        print(f'文本搜索错误: {str(e)}')
        return jsonify({'status': 'error', 'message': str(e)})

@search_bp.route('/image', methods=['POST'])
@login_required
def image_search():
    """Image search endpoint (can target other content types).

    Form fields: query_image (file), target_type, similarity_threshold.

    NOTE(review): ``Document`` and ``get_flow`` are not defined or imported
    anywhere in this module — this route raises NameError as written. It
    looks like leftover Jina-Flow code; restore those imports or remove
    this route in favor of /api/search. The saved query image is also never
    cleaned up (it may be needed by the flow while it runs — verify).
    """
    try:
        if 'query_image' not in request.files:
            return jsonify({'status': 'error', 'message': '请选择图片'})

        target_type = request.form.get('target_type', 'all')  # all, image, text, video
        similarity_threshold = float(request.form.get('similarity_threshold', 0.5))

        file = request.files['query_image']
        if not file:
            return jsonify({'status': 'error', 'message': '无效的图片'})

        # Only accept common raster image extensions.
        if not file.filename.lower().endswith(('.png', '.jpg', '.jpeg', '.gif')):
            return jsonify({'status': 'error', 'message': '不支持的图片格式'})

        # Save the query image; secure_filename guards against path tricks.
        filename = os.path.join('static', 'uploads', 'query_' + secure_filename(file.filename))
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        file.save(filename)

        # Build the query document referencing the saved image.
        doc = Document(
            uri=filename,
            tags={
                'target_type': target_type,
                'similarity_threshold': similarity_threshold
            }
        )

        # Execute the search and measure wall-clock latency.
        # (Fix: `time` was used here but never imported at module level.)
        start_time = time.time()
        results = get_flow().post('/search',
                                inputs=doc,
                                parameters={
                                    'limit': 20,
                                    'target_type': target_type,
                                    'similarity_threshold': similarity_threshold
                                })
        search_time = int((time.time() - start_time) * 1000)

        formatted_results = format_search_results(results)

        return jsonify({
            'status': 'success',
            'results': formatted_results,
            'search_time': search_time
        })

    except Exception as e:
        print(f'图片搜索错误: {str(e)}')
        return jsonify({'status': 'error', 'message': str(e)})

def format_search_results(results):
    """Convert raw match objects from the search backend into plain dicts.

    Reads the first result's ``matches``; each match contributes id, text,
    type, similarity (the 'cosine' score when present, else 0), category,
    created_at and tags, plus 'media_url' when the match has a '_uri' tag.
    Returns an empty list when there are no results or no matches.
    """
    formatted = []

    # Guard clause: nothing to format.
    if not results or len(results) == 0 or not results[0].matches:
        return formatted

    for match in results[0].matches:
        score = float(match.scores['cosine'].value) if match.scores else 0
        entry = {
            'id': match.id,
            'text': match.text,
            'type': match.tags.get('type', 'unknown'),
            'similarity': score,
            'category': match.tags.get('category', ''),
            'created_at': match.tags.get('created_at', ''),
            'tags': match.tags.get('tags', []),
        }

        # Media URL only when the backend attached one.
        if '_uri' in match.tags:
            entry['media_url'] = match.tags['_uri']

        formatted.append(entry)

    return formatted

def compute_text_embedding(text):
    """Compute a TF-IDF feature vector (up to 128 dims) for one text.

    NOTE(review): the vectorizer is fitted on just this single text, so the
    vocabulary — and therefore the meaning of each vector dimension —
    differs on every call. Embeddings produced here are not comparable
    across texts; a shared, pre-fitted vectorizer would be required for
    meaningful similarity. Confirm whether any caller relies on this.
    """
    vectorizer = TfidfVectorizer(max_features=128)
    return vectorizer.fit_transform([text]).toarray()[0]

def compute_image_embedding(image_path):
    """Compute a crude image feature: the mean value per color channel.

    Fix: the original body called PIL's ``Image.open``, but ``Image`` is
    never imported in this module, so every call raised NameError. It now
    uses cv2, which this module already imports. Note cv2 loads channels
    in BGR order (the original PIL path would have been RGB).

    Args:
        image_path: Path to an image file readable by cv2.

    Returns:
        np.ndarray of per-channel means (scalar for grayscale input).

    Raises:
        ValueError: If the image cannot be read.
    """
    img = cv2.imread(image_path)
    if img is None:
        raise ValueError(f"无法读取图片: {image_path}")
    return np.mean(img, axis=(0, 1))