#!/usr/bin/env python3

import cv2
import tensorflow as tf
from typing import Dict, List, Tuple
from urllib.parse import urlparse
from tensorflow.keras.applications.mobilenet_v2 import decode_predictions

class Note:
    """A simple text note with a title and a plain-text body."""

    def __init__(self, title: str, content: str):
        # Values are stored verbatim; validation happens at the call sites
        # (e.g. create_note rejects empty titles/content).
        self.title = title
        self.content = content

    def __repr__(self) -> str:
        # Debug-friendly representation; content may be long, so truncate it.
        return f"Note(title={self.title!r}, content={self.content[:40]!r})"

class VideoAnalysisServer:
    """In-memory note store plus Keras-based video tagging.

    Notes live in a dict keyed by stringified integer ids.  Video analysis
    samples frames with OpenCV, classifies each sampled frame with a
    lazily-loaded Keras model, and reports the set of predicted labels as
    "tags"; results can be persisted as notes.
    """

    def __init__(self):
        # Seed the store with two example notes; ids are stringified ints.
        self.notes: Dict[str, Note] = {
            "1": Note("First Note", "This is note 1"),
            "2": Note("Second Note", "This is note 2")
        }
        # Keras model; None until load_model() runs (loaded lazily, possibly
        # in a background thread started by analyze_video()).
        self.model = None

    def load_model(self):
        """Load the Keras model from model.h5 and compile it if necessary."""
        import os
        # NOTE(review): hard-coded deployment path — this chdir affects the
        # whole process (relative 'logs/' and 'uploads/' paths elsewhere);
        # consider making it configurable.
        os.chdir('c:/Users/Administrator/Documents/Cline/MCP/video-analysis-server')
        model_path = os.path.abspath('model.h5')
        print(f"Loading model from: {model_path}")
        self.model = tf.keras.models.load_model(model_path)
        # `_is_compiled` is a private Keras attribute; getattr keeps this from
        # raising AttributeError on Keras versions that dropped it, falling
        # back to an explicit compile instead.
        if not getattr(self.model, '_is_compiled', False):
            self.model.compile(optimizer='adam',
                               loss='categorical_crossentropy',
                               metrics=['accuracy'])

    async def analyze_video(self, video_path: str, frame_skip: int = 5) -> Dict:
        """Analyze video content and generate tags.

        Every `frame_skip`-th frame is resized to 224x224, scaled to [0, 1],
        and classified; predictions are decoded with the ImageNet label map.

        Args:
            video_path: Path to a video file readable by OpenCV.
            frame_skip: Classify only every n-th frame.

        Returns:
            Dict with keys "status", "message", "tags" (unique predicted
            labels) and "statistics" (label -> sampled-frame count).
            (The original annotation said List[str], but a dict has always
            been returned; the annotation is corrected here.)

        Raises:
            ValueError: If the video cannot be opened.
        """
        import os
        import time
        import json
        from concurrent.futures import ThreadPoolExecutor
        import numpy as np

        executor = None
        model_future = None
        cap = None
        try:
            start_time = time.time()

            # Kick off the model load in the background while we open the
            # video.  (The previous `with ThreadPoolExecutor()` form blocked
            # in __exit__ until the future finished, defeating the purpose.)
            if not self.model:
                executor = ThreadPoolExecutor(max_workers=1)
                model_future = executor.submit(self.load_model)

            # Open video
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                raise ValueError(f"无法打开视频文件: {video_path}")

            # Frame count is only used for progress/ETA reporting.
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            # Progress-tracking state.
            frames = []            # predicted label per processed frame
            processed_frames = 0
            frame_counter = 0
            frame_times = []       # seconds spent per processed frame

            def show_progress():
                """Append a progress line to logs/progress_<start>.log."""
                # Guard both divisions: no timing samples yet, or a stream
                # whose frame count OpenCV reports as 0/unknown.
                if not frame_times or total_frames <= 0:
                    return
                avg_time = np.mean(frame_times)
                eta = (total_frames - processed_frames) * avg_time
                progress = processed_frames / total_frames * 100
                # Write progress to a log file only (keeps stdout clean).
                progress_content = (f"Processing: {progress:.2f}% | "
                                    f"ETA: {eta:.1f}s | "
                                    f"Avg time/frame: {avg_time:.3f}s | "
                                    f"Elapsed: {time.time()-start_time:.1f}s\n")
                os.makedirs("logs", exist_ok=True)
                with open(f"logs/progress_{int(start_time)}.log", "a") as f:
                    f.write(progress_content)

            # Process frames
            while True:
                ret, frame = cap.read()
                if not ret:
                    break

                # Sample only every `frame_skip`-th frame.
                frame_counter += 1
                if frame_counter % frame_skip != 0:
                    continue

                # Block until the background load finishes before first use.
                if not self.model:
                    model_future.result()

                frame_start = time.time()

                # Resize to the model's expected input and scale to [0, 1].
                # NOTE(review): MobileNetV2's canonical preprocessing maps to
                # [-1, 1]; /255 is assumed to match how model.h5 was trained —
                # confirm against the training pipeline.
                resized_frame = cv2.resize(frame, (224, 224))
                tensor = tf.convert_to_tensor(resized_frame, dtype=tf.float32)
                batched = tf.expand_dims(tensor / 255.0, axis=0)

                # Predict and decode the top-1 ImageNet label.
                prediction = self.model.predict(batched, verbose=0)
                decoded_predictions = decode_predictions(prediction, top=1)[0]
                predicted_class = decoded_predictions[0][1]  # human-readable label

                frames.append(predicted_class)
                processed_frames += 1
                frame_times.append(time.time() - frame_start)

                # Log progress every 10 processed frames.
                if processed_frames % 10 == 0:
                    show_progress()

            if not frames:
                return {
                    "status": "success",
                    "message": "No frames processed",
                    "tags": [],
                    "statistics": {}
                }

            # Tally label frequencies in one pass (the previous
            # set + list.count approach was O(n^2)).
            class_counts = {}
            for cls in frames:
                class_counts[cls] = class_counts.get(cls, 0) + 1
            unique_classes = list(class_counts)

            total_time = time.time() - start_time

            result = {
                "status": "success",
                "message": f"Video analysis completed in {total_time:.2f} seconds",
                "tags": unique_classes,
                "statistics": class_counts
            }
            print(json.dumps(result, indent=2))
            return result
        except Exception as e:
            print(f"Error during video analysis: {str(e)}")
            import traceback
            traceback.print_exc()
            raise
        finally:
            # Release resources even when analysis fails part-way through.
            if cap is not None:
                cap.release()
            if executor is not None:
                executor.shutdown(wait=False)

    def list_notes(self):
        """Return every note as a resource descriptor (uri/name/description)."""
        return [
            {
                "uri": f"note:///{id}",
                "mime_type": "text/plain",
                "name": note.title,
                "description": f"A text note: {note.title}"
            }
            for id, note in self.notes.items()
        ]

    def read_note(self, uri: str):
        """Read a specific note addressed as note:///<id>.

        Raises:
            ValueError: If no note exists for the id in the URI path.
        """
        url = urlparse(uri)
        note_id = url.path.lstrip('/')
        note = self.notes.get(note_id)

        if not note:
            raise ValueError(f"Note {note_id} not found")

        return {
            "uri": uri,
            "mime_type": "text/plain",
            "text": note.content
        }

    def create_note(self, title: str, content: str):
        """Create a new note and return a confirmation string.

        Raises:
            ValueError: If title or content is empty.
        """
        if not title or not content:
            raise ValueError("Title and content are required")

        # Ids are len+1; safe while notes are never deleted (no delete API).
        note_id = str(len(self.notes) + 1)
        self.notes[note_id] = Note(title, content)
        return f"Created note {note_id}: {title}"

    async def analyze_video_tool(self, video_path: str):
        """Analyze a video, persist the result as a note, return a summary string.

        Raises:
            ValueError: If video_path is empty or the analysis fails.
        """
        if not video_path:
            raise ValueError("Video path is required")

        try:
            result = await self.analyze_video(video_path)
            # Bug fix: analyze_video() returns a dict, not a tag list — the
            # original joined the dict's KEYS ("status, message, tags, ...").
            tags = result.get("tags", [])
            note_id = str(len(self.notes) + 1)
            self.notes[note_id] = Note(
                f"Video Analysis {note_id}",
                f"Video Path: {video_path}\nTags: {', '.join(tags)}"
            )

            return f"Video analysis completed. Generated tags: {', '.join(tags)}\nResults stored as note {note_id}"
        except Exception as e:
            # Chain the cause so the original traceback stays visible.
            raise ValueError(f"Video analysis failed: {str(e)}") from e

    def summarize_notes(self):
        """Build an LLM prompt structure asking for a summary of all notes."""
        # Each note is embedded as a resource message between two text prompts.
        embedded_notes = [
            {
                "type": "resource",
                "resource": {
                    "uri": f"note:///{id}",
                    "mime_type": "text/plain",
                    "text": note.content
                }
            }
            for id, note in self.notes.items()
        ]

        return {
            "messages": [
                {
                    "role": "user",
                    "content": {
                        "type": "text",
                        "text": "Please summarize the following notes:"
                    }
                },
                *[
                    {
                        "role": "user",
                        "content": note
                    }
                    for note in embedded_notes
                ],
                {
                    "role": "user",
                    "content": {
                        "type": "text",
                        "text": "Provide a concise summary of all the notes above."
                    }
                }
            ]
        }

from aiohttp import web

async def list_notes_handler(request):
    """Return every stored note as a JSON resource list."""
    notes = request.app['server'].list_notes()
    return web.json_response(notes)

async def read_note_handler(request):
    """Return the content of a single note addressed by its URI, or 404."""
    uri = request.match_info.get('uri')
    try:
        payload = request.app['server'].read_note(uri)
    except ValueError as err:
        return web.json_response({'error': str(err)}, status=404)
    return web.json_response(payload)

async def create_note_handler(request):
    """Create a note from a JSON body carrying 'title' and 'content'."""
    body = await request.json()
    try:
        message = request.app['server'].create_note(body['title'], body['content'])
    except ValueError as err:
        return web.json_response({'error': str(err)}, status=400)
    return web.json_response({'message': message})

async def analyze_video_handler(request):
    """Analyze a video given either a JSON body with 'video_path' or a
    multipart upload under the field name 'videoFile'.

    Bug fixes vs. the original: `os` was never imported in scope (NameError
    on the upload path), and the request body was consumed by request.json()
    before request.multipart() could read it, so uploads could never work.
    We now dispatch on the declared content type instead.
    """
    import os

    server = request.app['server']
    try:
        # JSON path: direct file path provided by the client.
        if request.content_type == 'application/json':
            data = await request.json()
            if 'video_path' in data:
                result = await server.analyze_video_tool(data['video_path'])
                return web.json_response({'message': result})
            return web.json_response({'error': 'video_path is required'}, status=400)

        # Multipart path: handle file upload.
        reader = await request.multipart()
        field = await reader.next()

        if field is None or field.name != 'videoFile':
            return web.json_response({'error': 'Invalid field name'}, status=400)

        # Save uploaded file.  field.filename comes from the client, so take
        # only its basename to prevent path traversal outside 'uploads/'.
        os.makedirs('uploads', exist_ok=True)
        filename = os.path.join('uploads', os.path.basename(field.filename or 'upload.bin'))

        with open(filename, 'wb') as f:
            while True:
                chunk = await field.read_chunk()
                if not chunk:
                    break
                f.write(chunk)

        result = await server.analyze_video_tool(filename)
        return web.json_response({'message': result})
    except Exception as e:
        return web.json_response({'error': str(e)}, status=500)

async def analyze_video_html_handler(request):
    """Analyze a video (by path) and render the predicted tags as HTML.

    Bug fix: analyze_video_tool() returns a human-readable STRING, so the
    original `result.get('tags', [])` raised AttributeError on every request.
    We call the analysis coroutine directly (it returns a dict carrying the
    tag list) and keep the note-storage side effect explicitly.
    """
    import html as _html

    server = request.app['server']
    try:
        data = await request.json()
        if 'video_path' not in data:
            return web.json_response({'error': 'video_path is required'}, status=400)

        analysis = await server.analyze_video(data['video_path'])
        tags = analysis.get('tags', [])

        # Persist the result as a note, mirroring the JSON endpoint.
        server.create_note(
            f"Video Analysis {len(server.notes) + 1}",
            f"Video Path: {data['video_path']}\nTags: {', '.join(tags)}"
        )

        # Escape model labels before interpolating them into markup.
        tag_markup = "".join(f'<div class="tag">{_html.escape(str(t))}</div>' for t in tags)

        html_content = f"""
        <!DOCTYPE html>
        <html>
        <head>
            <title>Video Analysis Result</title>
            <style>
                body {{ font-family: Arial, sans-serif; padding: 20px; }}
                h1 {{ color: #333; }}
                .tags {{ display: flex; flex-wrap: wrap; gap: 10px; }}
                .tag {{ background: #f0f0f0; padding: 10px; border-radius: 5px; }}
            </style>
        </head>
        <body>
            <h1>Video Analysis Result</h1>
            <div class="tags">
                {tag_markup}
            </div>
        </body>
        </html>
        """

        return web.Response(text=html_content, content_type='text/html')
    except Exception as e:
        return web.json_response({'error': str(e)}, status=500)

async def summarize_notes_handler(request):
    """Return the LLM prompt structure summarizing all stored notes."""
    try:
        return web.json_response(request.app['server'].summarize_notes())
    except Exception as err:
        return web.json_response({'error': str(err)}, status=500)

async def init_app():
    """Create and configure the aiohttp application with all routes."""
    app = web.Application()
    app['server'] = VideoAnalysisServer()

    # Bug fix: aiohttp route handlers must be coroutine functions; the
    # previous plain `lambda request: web.Response(...)` is not awaitable.
    async def index(request):
        return web.Response(text="Welcome to Video Analysis Server")

    app.router.add_get('/', index)
    app.router.add_get('/notes', list_notes_handler)
    app.router.add_get('/notes/{uri}', read_note_handler)
    app.router.add_post('/notes', create_note_handler)
    app.router.add_post('/analyze', analyze_video_handler)
    app.router.add_post('/analyze/html', analyze_video_html_handler)
    app.router.add_get('/summarize', summarize_notes_handler)

    return app

async def main():
    """Start the HTTP server on localhost:8082 and run until cancelled."""
    # Local import so main() works even when this module is imported
    # (the original relied on the `import asyncio` in the __main__ guard).
    import asyncio

    app = await init_app()
    runner = web.AppRunner(app)
    await runner.setup()
    site = web.TCPSite(runner, 'localhost', 8082)
    await site.start()
    # Bug fix: the startup message previously claimed port 8080 while the
    # site was actually bound to 8082.
    print("Server started at http://localhost:8082")

    # Run forever (until the event loop is stopped externally).
    await asyncio.Event().wait()

if __name__ == "__main__":
    import sys
    import asyncio
    
    if len(sys.argv) == 2:
        # Direct video analysis mode
        analyzer = VideoAnalysisServer()
        try:
            asyncio.run(analyzer.analyze_video_tool(sys.argv[1]))
        except Exception as e:
            print(f"Error: {str(e)}")
            sys.exit(1)
    else:
        # Server mode
        asyncio.run(main())