"""
Web
"""

import streamlit as st
import json
from src.core.ollama_client import OllamaClient
from src.web.components.header import Header

class SettingsPage:
    """Settings page of the Streamlit web UI.

    All settings live in the flat dict ``st.session_state.settings``; each
    widget reads its current value from that dict and writes the user's
    choice straight back into it.  ``_save_settings`` persists the dict to
    ``./config/settings.json`` and ``_export_settings`` offers it as a JSON
    download.
    """

    def render(self):
        """Render the complete settings page: header, the four category tabs
        and the save / reset / export / back action bar."""
        # Shared page header component.
        Header.render()

        st.header(" ")

        # One tab per settings category.
        tab1, tab2, tab3, tab4 = st.tabs([" ", " ", " ", " "])

        with tab1:
            self._render_model_settings()

        with tab2:
            self._render_ui_settings()

        with tab3:
            self._render_file_settings()

        with tab4:
            self._render_advanced_settings()

        # Action bar.
        st.markdown("---")

        col1, col2, col3, col4 = st.columns(4)

        # Explicit key= on every button: three of them share an identical
        # (blank) label and identical parameters, which would otherwise
        # trigger Streamlit's duplicate element ID error at runtime.
        with col1:
            if st.button(" ", type="primary", use_container_width=True,
                         key="settings_save_btn"):
                # Only report success when the file was actually written;
                # _save_settings itself shows an error message on failure.
                if self._save_settings():
                    st.success(" ")

        with col2:
            if st.button(" ", use_container_width=True,
                         key="settings_reset_btn"):
                self._reset_settings()
                st.success(" ")
                st.rerun()

        with col3:
            if st.button(" ", use_container_width=True,
                         key="settings_export_btn"):
                # NOTE(review): the download button rendered by
                # _export_settings only survives until the next rerun because
                # it is gated behind this st.button — confirm this transient
                # behaviour is intended.
                self._export_settings()

        with col4:
            if st.button(" ", use_container_width=True,
                         key="settings_back_btn"):
                # Go back to the result page when a result exists, otherwise
                # to the main page.
                if 'result' in st.session_state:
                    st.session_state.current_page = 'result'
                else:
                    st.session_state.current_page = 'main'
                st.rerun()

    def _render_model_settings(self):
        """Render the AI model tab: Whisper model choice, detection of locally
        installed Ollama models, and the Ollama server host/port."""
        st.subheader(" AI")

        col1, col2 = st.columns(2)

        with col1:
            st.markdown("#### Whisper")

            # Static catalogue of the available Whisper variants (download
            # size plus relative speed/accuracy shown to the user).
            model_info = {
                'tiny': {'size': '39MB', 'speed': '', 'accuracy': ''},
                'base': {'size': '74MB', 'speed': '', 'accuracy': ''},
                'small': {'size': '244MB', 'speed': '', 'accuracy': ''},
                'medium': {'size': '769MB', 'speed': '', 'accuracy': ''},
                'large': {'size': '1550MB', 'speed': '', 'accuracy': ''}
            }

            whisper_names = list(model_info.keys())
            current_whisper = st.session_state.settings.get('whisper_model', 'base')
            selected_whisper = st.selectbox(
                "Whisper",
                options=whisper_names,
                # Fall back to the first option instead of raising
                # KeyError/ValueError when the stored model name is missing
                # or unknown (e.g. a stale config file).
                index=(whisper_names.index(current_whisper)
                       if current_whisper in whisper_names else 0),
                key="settings_whisper_model"
            )

            # Show the catalogue entry for the chosen variant.
            info = model_info[selected_whisper]
            st.info(f" : {info['size']} |  : {info['speed']} |  : {info['accuracy']}")

            st.session_state.settings['whisper_model'] = selected_whisper

        with col2:
            st.markdown("#### Ollama")
            # Detect the models installed on the local Ollama server.
            host = st.session_state.settings.get('ollama_host', 'localhost')
            port = st.session_state.settings.get('ollama_port', 11434)
            try:
                client = OllamaClient(base_url=f"http://{host}:{port}")
                models = client.list_models()
                names = [m.get('name') for m in models if m.get('name')]
            except Exception:
                # Server unreachable or incompatible response: treat as
                # "no models" and show the warning below instead of crashing.
                names = []

            if names:
                current = st.session_state.settings.get('ollama_model')
                selected_ollama = st.selectbox(
                    "Ollama",
                    options=names,
                    # Preselect the stored model if it is still installed.
                    index=names.index(current) if current in names else 0,
                    key="settings_ollama_model"
                )
                st.session_state.settings['ollama_model'] = selected_ollama
                st.info(f" {selected_ollama}")
            else:
                st.warning(" 未检测到 Ollama 模型，请先运行 `ollama serve` 并拉取模型")

        # Ollama server connection settings.
        st.markdown("#### Ollama")

        col_a, col_b = st.columns(2)

        with col_a:
            ollama_host = st.text_input(
                "Ollama",
                value=st.session_state.settings.get('ollama_host', 'localhost'),
                help="Ollama",
                key="settings_ollama_host"
            )
            st.session_state.settings['ollama_host'] = ollama_host

        with col_b:
            ollama_port = st.number_input(
                "Ollama",
                value=st.session_state.settings.get('ollama_port', 11434),
                min_value=1,
                max_value=65535,
                help="Ollama",
                key="settings_ollama_port"
            )
            st.session_state.settings['ollama_port'] = ollama_port

    def _render_ui_settings(self):
        """Render the UI tab: theme, interface language and display toggles."""
        st.subheader(" ")

        col1, col2 = st.columns(2)

        with col1:
            st.markdown("#### ")

            theme = st.selectbox(
                "",
                options=['auto', 'light', 'dark'],
                index=['auto', 'light', 'dark'].index(
                    st.session_state.settings.get('theme', 'auto')),
                format_func=lambda x: {'auto': ' ', 'light': ' ', 'dark': ' '}[x],
                key="settings_theme"
            )
            st.session_state.settings['theme'] = theme

            language_ui = st.selectbox(
                "",
                options=['zh-CN', 'en-US'],
                index=['zh-CN', 'en-US'].index(
                    st.session_state.settings.get('language_ui', 'zh-CN')),
                format_func=lambda x: {'zh-CN': ' ', 'en-US': ' English'}[x],
                key="settings_language_ui"
            )
            st.session_state.settings['language_ui'] = language_ui

        with col2:
            st.markdown("#### ")

            # key= required: blank-label checkboxes with equal runtime args
            # would otherwise collide on Streamlit's auto-generated element ID.
            auto_refresh = st.checkbox(
                "",
                value=st.session_state.settings.get('auto_refresh', True),
                help="",
                key="settings_auto_refresh"
            )
            st.session_state.settings['auto_refresh'] = auto_refresh

            show_advanced = st.checkbox(
                "",
                value=st.session_state.settings.get('show_advanced', False),
                help="",
                key="settings_show_advanced"
            )
            st.session_state.settings['show_advanced'] = show_advanced

    def _render_file_settings(self):
        """Render the file tab: output/temp directories, filename template
        and the temp-file cleanup toggle."""
        st.subheader(" ")

        col1, col2 = st.columns(2)

        with col1:
            st.markdown("#### ")

            output_dir = st.text_input(
                "",
                value=st.session_state.settings.get('output_dir', './output'),
                help="",
                key="settings_output_dir"
            )
            st.session_state.settings['output_dir'] = output_dir

            filename_template = st.text_input(
                "",
                value=st.session_state.settings.get('filename_template', '{name}_{timestamp}'),
                help=": {name}, {timestamp}, {format}",
                key="settings_filename_template"
            )
            st.session_state.settings['filename_template'] = filename_template

        with col2:
            st.markdown("#### ")

            temp_dir = st.text_input(
                "",
                value=st.session_state.settings.get('temp_dir', './temp'),
                help="",
                key="settings_temp_dir"
            )
            st.session_state.settings['temp_dir'] = temp_dir

            auto_cleanup = st.checkbox(
                "",
                value=st.session_state.settings.get('auto_cleanup', True),
                help="",
                key="settings_auto_cleanup"
            )
            st.session_state.settings['auto_cleanup'] = auto_cleanup

    def _render_advanced_settings(self):
        """Render the advanced tab: compute device, worker count, chunk
        duration, log level and intermediate-result/cache toggles."""
        st.subheader(" ")

        col1, col2 = st.columns(2)

        with col1:
            st.markdown("#### ")

            device = st.selectbox(
                "",
                options=['auto', 'cpu', 'cuda'],
                index=['auto', 'cpu', 'cuda'].index(
                    st.session_state.settings.get('device', 'auto')),
                format_func=lambda x: {'auto': ' ', 'cpu': ' CPU', 'cuda': ' GPU (CUDA)'}[x],
                key="settings_device"
            )
            st.session_state.settings['device'] = device

            max_workers = st.slider(
                "",
                min_value=1,
                max_value=8,
                value=st.session_state.settings.get('max_workers', 2),
                help="",
                key="settings_max_workers"
            )
            st.session_state.settings['max_workers'] = max_workers

            # Chunk duration in seconds (5–30 minutes, 5-minute steps).
            chunk_duration = st.slider(
                "",
                min_value=300,
                max_value=1800,
                value=st.session_state.settings.get('chunk_duration', 600),
                step=300,
                help="",
                key="settings_chunk_duration"
            )
            st.session_state.settings['chunk_duration'] = chunk_duration

        with col2:
            st.markdown("#### ")

            log_level = st.selectbox(
                "",
                options=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
                index=['DEBUG', 'INFO', 'WARNING', 'ERROR'].index(
                    st.session_state.settings.get('log_level', 'INFO')),
                key="settings_log_level"
            )
            st.session_state.settings['log_level'] = log_level

            save_intermediate = st.checkbox(
                "",
                value=st.session_state.settings.get('save_intermediate', False),
                help="",
                key="settings_save_intermediate"
            )
            st.session_state.settings['save_intermediate'] = save_intermediate

            enable_cache = st.checkbox(
                "",
                value=st.session_state.settings.get('enable_cache', True),
                help="",
                key="settings_enable_cache"
            )
            st.session_state.settings['enable_cache'] = enable_cache

    def _save_settings(self):
        """Persist the current settings dict to ``./config/settings.json``.

        Returns:
            bool: True on success, False if writing failed (an error message
            is shown in the UI in that case).
        """
        try:
            from pathlib import Path

            config_dir = Path('./config')
            config_dir.mkdir(exist_ok=True)

            config_file = config_dir / 'settings.json'

            # json is already imported at module level.
            with open(config_file, 'w', encoding='utf-8') as f:
                json.dump(st.session_state.settings, f, ensure_ascii=False, indent=2)

            return True
        except Exception as e:
            st.error(f": {e}")
            return False

    def _reset_settings(self):
        """Replace ``st.session_state.settings`` with the built-in defaults.

        The keys here are the full set read by the render methods above;
        adding a new setting means adding its default here too.
        """
        st.session_state.settings = {
            'whisper_model': 'base',
            'ollama_model': 'qwen2:7b',
            'language': 'auto',
            'summary_style': 'detailed',
            'output_formats': ['txt', 'srt', 'json'],
            'theme': 'auto',
            'language_ui': 'zh-CN',
            'auto_refresh': True,
            'show_advanced': False,
            'output_dir': './output',
            'temp_dir': './temp',
            'filename_template': '{name}_{timestamp}',
            'auto_cleanup': True,
            'device': 'auto',
            'max_workers': 2,
            'chunk_duration': 600,
            'log_level': 'INFO',
            'save_intermediate': False,
            'enable_cache': True,
            'ollama_host': 'localhost',
            'ollama_port': 11434
        }

    def _export_settings(self):
        """Offer the current settings as a downloadable JSON file."""
        settings_json = json.dumps(st.session_state.settings, ensure_ascii=False, indent=2)

        st.download_button(
            label=" ",
            data=settings_json,
            file_name="video_summary_settings.json",
            mime="application/json",
            key="settings_download_json"
        )