"""
Web
"""

import streamlit as st
from src.web.components.header import Header
from src.web.components.sidebar import Sidebar

class MainPage:
    """Landing page of the web app: intro text, navigation button, and a live system-status panel."""

    def render(self):
        """Render the full main page into the current Streamlit app.

        Draws the shared header and sidebar, then a two-column body:
        the left column holds introductory markdown and a primary button
        that navigates to the 'upload' page via ``st.session_state``;
        the right column shows system status and help expanders.
        """
        # Shared page chrome (header bar).
        Header.render()

        # Sidebar returns the user-selected settings (unused here but
        # rendering it has the side effect of drawing the sidebar).
        settings = Sidebar.render()

        # Two-column body layout, left column twice as wide as the right.
        col1, col2 = st.columns([2, 1])

        with col1:
            st.header(" ")
            st.markdown("""
            ###  
            
            1. ** ** - 
            2. ** ** - 
            3. ** ** - 
            4. ** ** - 
            5. ** ** - 
            
            ###  
            
            -  ****: 
            -  **AI**: Whisper + Ollama
            -  ****: 
            -  ****: Web
            -  ****: 
            """)

            # Primary call-to-action: switch to the upload page and
            # force an immediate rerun so the new page renders now.
            if st.button(" ", type="primary", use_container_width=True):
                st.session_state.current_page = 'upload'
                st.rerun()

        with col2:
            st.header(" ")

            # Live checks: GPU availability, Ollama server, memory usage.
            self._check_system_status()

            st.header(" ")

            with st.expander(" "):
                st.markdown("""
                ****: MP4, AVI, MKV, MOV, WMV, FLV, WebM
                
                ****: MP3, WAV, FLAC, AAC, OGG, M4A
                """)

            with st.expander(" AI"):
                st.markdown("""
                **Whisper**:
                - Tiny: 
                - Base: 
                - Large: 
                
                **Ollama**:
                - Qwen2: 
                - Llama3: 
                """)

            with st.expander(" "):
                st.markdown("""
                - GPU
                - 
                - 2
                """)

    def _check_system_status(self):
        """Probe GPU, the local Ollama server, and system memory, rendering one status line each.

        Side effects only: emits ``st.success``/``st.info``/``st.warning``/
        ``st.error`` messages. Imports are local so the heavy dependencies
        (torch, psutil) are only loaded when this panel is rendered.
        """
        import torch
        import requests
        import psutil

        # GPU check: report the device name if CUDA is usable.
        if torch.cuda.is_available():
            gpu_name = torch.cuda.get_device_name(0)
            st.success(f" GPU: {gpu_name}")
        else:
            st.info(" CPU")

        # Ollama check: hit the local API with a short timeout so a
        # downed server doesn't block page rendering.
        try:
            response = requests.get("http://localhost:11434/api/tags", timeout=2)
            if response.status_code == 200:
                models = response.json().get('models', [])
                st.success(f" Ollama ({len(models)})")
            else:
                st.warning(" Ollama")
        except requests.RequestException:
            # Only network/HTTP failures mean "server unreachable"; a bare
            # except here previously swallowed unrelated errors too.
            st.error(" Ollama")
            st.markdown("Ollama: `ollama serve`")

        # Memory check: green below 70% used, yellow below 85%, red above.
        memory = psutil.virtual_memory()
        memory_percent = memory.percent

        if memory_percent < 70:
            st.success(f"  ({memory_percent:.1f}%)")
        elif memory_percent < 85:
            st.warning(f"  ({memory_percent:.1f}%)")
        else:
            st.error(f"  ({memory_percent:.1f}%)")