import requests
import json
import time
import random
from urllib.parse import quote
import mysql.connector
from typing import Dict, Optional
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class WordCrawler:
    """Crawl English word data (phonetic symbol, pronunciation audio and
    definitions) from online dictionary APIs and persist it into the MySQL
    ``word`` table, keeping the ``word_category`` counters in sync.
    """

    def __init__(self, db_config: Dict):
        """
        Args:
            db_config: connection options forwarded verbatim to
                ``mysql.connector.connect``.
        """
        self.db_config = db_config
        self.session = requests.Session()
        # Browser-like User-Agent so the dictionary endpoint does not block us.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })

    def get_connection(self):
        """Return a brand-new database connection (caller must close it)."""
        return mysql.connector.connect(**self.db_config)

    def crawl_from_iciba(self, word: str) -> Optional[Dict]:
        """Look up *word* on the iciba dictionary API.

        Returns:
            A dict with keys ``name``/``symbol``/``sound``/``desc``/
            ``word_type``, or None when the lookup fails.
        """
        try:
            # FIX: URL-encode the word. The batch reader accepts words that
            # contain apostrophes/hyphens, which would otherwise corrupt the
            # query string (the `quote` import was previously unused).
            url = (
                "http://dict-co.iciba.com/api/dictionary.php"
                f"?w={quote(word)}&key=0EAE08A016D6688F64AB3EBB2337BFB0&type=json"
            )

            response = self.session.get(url, timeout=10)
            # FIX: surface HTTP errors explicitly instead of failing later
            # with an opaque JSON decode error on an error page.
            response.raise_for_status()
            data = response.json()

            # Dump the raw response for debugging.
            logger.debug(f"爱词霸响应: {json.dumps(data, ensure_ascii=False, indent=2)}")

            if 'symbols' in data and len(data['symbols']) > 0:
                symbol_data = data['symbols'][0]

                # Phonetic symbol - prefer American, fall back to British.
                ph_am = symbol_data.get('ph_am', '')
                ph_en = symbol_data.get('ph_en', '')
                phonetic = ph_am if ph_am else ph_en
                symbol = f"[{phonetic}]" if phonetic else None

                # Pronunciation audio - American first, then British, then TTS.
                sound_url = symbol_data.get('ph_am_mp3', '') or symbol_data.get('ph_en_mp3', '') or symbol_data.get(
                    'ph_tts_mp3', '')

                result = {
                    'name': word,
                    'symbol': symbol,
                    'sound': sound_url,
                    'desc': self._parse_meanings_fixed(symbol_data.get('parts', [])),
                    'word_type': self._extract_word_type_fixed(symbol_data.get('parts', []))
                }

                return result

        except Exception as e:
            logger.error(f"爱词霸爬取失败 {word}: {e}")
            # Log the full stack trace to aid diagnosis.
            import traceback
            logger.error(f"详细错误: {traceback.format_exc()}")

        return None

    def _parse_meanings_fixed(self, parts: list) -> str:
        """Flatten the API ``parts`` list into one description string.

        Each entry becomes ``"<part-of-speech> <up to 3 meanings>"``; entries
        are joined with ``"; "``.
        """
        meanings = []

        for part in parts:
            part_speech = part.get('part', '')
            part_means = part.get('means', [])

            if part_means and isinstance(part_means, list):
                # ``means`` is a plain list of strings; keep only the first 3
                # and defensively skip any non-string entries.
                mean_texts = [mean for mean in part_means if isinstance(mean, str)][:3]

                if mean_texts:
                    meanings.append(f"{part_speech} {', '.join(mean_texts)}")

        return '; '.join(meanings)

    def _extract_word_type_fixed(self, parts: list) -> str:
        """Return the part of speech of the first ``parts`` entry ('' if none)."""
        if not parts:
            return ''

        first_part = parts[0].get('part', '')
        return first_part

    def crawl_word_info(self, word: str) -> Optional[Dict]:
        """Try each configured source in priority order until one returns a
        result that actually contains a description.
        """
        word = word.lower().strip()

        # Sources in priority order (currently only iciba).
        sources = [
            self.crawl_from_iciba
        ]

        for source_func in sources:
            try:
                result = source_func(word)
                if result and result.get('desc'):
                    logger.info(f"成功获取单词信息: {word}")
                    return result

                # Throttle before trying the next source.
                time.sleep(random.uniform(0.5, 1.5))

            except Exception as e:
                logger.error(f"爬取源执行失败 {word}: {e}")
                continue

        logger.warning(f"所有源都失败: {word}")
        return None

    def batch_crawl_from_file(self, file_path: str, class_id: str, class_title: str, course: str):
        """Crawl every word listed in *file_path* (one per line) and insert
        the results into the ``word`` table under the given category.

        Returns:
            ``(success_count, failed_words)`` tuple; failed words are also
            written to ``failed_words_<class_id>.txt`` for a later retry.
        """
        words_to_process = []

        # Read the word list; allow words with hyphens and apostrophes.
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                word = line.strip().lower()
                if word and word.replace('-', '').replace("'", '').isalpha():
                    words_to_process.append(word)

        # Deduplicate (order is not significant for the import).
        words_to_process = list(set(words_to_process))
        logger.info(f"准备处理 {len(words_to_process)} 个单词")

        success_count = 0
        failed_words = []

        conn = self.get_connection()
        cursor = conn.cursor()

        try:
            for i, word in enumerate(words_to_process, 1):
                try:
                    # Skip words already stored for this category.
                    cursor.execute(
                        "SELECT id FROM word WHERE name = %s AND class_id = %s",
                        (word, class_id)
                    )

                    if cursor.fetchone():
                        logger.info(f"单词已存在，跳过: {word}")
                        continue

                    logger.info(f"[{i}/{len(words_to_process)}] 正在爬取: {word}")
                    word_info = self.crawl_word_info(word)

                    if word_info and word_info.get('desc'):
                        logger.info(f"准备插入数据: {word_info}")

                        # Parameterized insert; commit per word so a late
                        # failure does not discard earlier successes.
                        cursor.execute("""
                            INSERT INTO word (name, symbol, sound, description, course, class_id, class_title, word_type, create_time, update_time)
                            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, NOW(), NOW())
                        """, (
                            word_info['name'],
                            word_info.get('symbol', ''),
                            word_info.get('sound', ''),
                            word_info.get('desc', ''),
                            course,
                            class_id,
                            class_title,
                            word_info.get('word_type', '')
                        ))

                        affected_rows = cursor.rowcount
                        logger.info(f"SQL执行影响行数: {affected_rows}")

                        conn.commit()
                        success_count += 1
                        logger.info(
                            f"[{i}/{len(words_to_process)}] ✅ 成功插入: {word} - {word_info.get('desc', '')[:50]}...")
                    else:
                        failed_words.append(word)
                        logger.warning(f"[{i}/{len(words_to_process)}] ❌ 获取信息失败: {word}")

                    # Random delay to avoid getting the IP banned.
                    time.sleep(random.uniform(1.5, 3.0))

                except mysql.connector.Error as db_error:
                    logger.error(f"❌ 数据库错误 {word}: {db_error}")
                    failed_words.append(word)
                    conn.rollback()
                except Exception as e:
                    logger.error(f"❌ 处理单词失败 {word}: {e}")
                    import traceback
                    logger.error(f"详细错误: {traceback.format_exc()}")
                    failed_words.append(word)
                    conn.rollback()

        finally:
            cursor.close()
            conn.close()

        logger.info(f"🎉 批量处理完成: 成功 {success_count}, 失败 {len(failed_words)}")

        # Persist the failed words so they can be retried later.
        if failed_words:
            failed_file = f'failed_words_{class_id}.txt'
            with open(failed_file, 'w', encoding='utf-8') as f:
                for word in failed_words:
                    f.write(f"{word}\n")
            logger.info(f"失败的单词已保存到 {failed_file}")

        # Refresh the category's word counter.
        self.update_category_word_count(class_id)

        return success_count, failed_words

    def insert_single_word(self, word: str, class_id: str, class_title: str, course: str):
        """Crawl one word and insert it into the database.

        Returns:
            True on successful insert; False when the word cannot be fetched,
            already exists, or the insert fails.
        """
        try:
            word_info = self.crawl_word_info(word)

            if not word_info or not word_info.get('desc'):
                logger.error(f"无法获取单词信息: {word}")
                return False

            conn = self.get_connection()
            cursor = conn.cursor()

            try:
                # Skip if already present for this category.
                cursor.execute(
                    "SELECT id FROM word WHERE name = %s AND class_id = %s",
                    (word, class_id)
                )

                if cursor.fetchone():
                    logger.info(f"单词已存在: {word}")
                    return False

                logger.info(f"插入单词: {word_info}")

                cursor.execute("""
                    INSERT INTO word (name, symbol, sound, description, course, class_id, class_title, word_type, create_time, update_time)
                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, NOW(), NOW())
                """, (
                    word_info['name'],
                    word_info.get('symbol', '') or '',
                    word_info.get('sound', '') or '',
                    word_info.get('desc', '') or '',
                    course,
                    class_id,
                    class_title,
                    word_info.get('word_type', '') or ''
                ))

                conn.commit()
                logger.info(f"✅ 成功插入单词: {word}")
                return True

            except mysql.connector.Error as db_error:
                logger.error(f"❌ 数据库插入失败: {db_error}")
                conn.rollback()
                return False
            finally:
                cursor.close()
                conn.close()

        except Exception as e:
            logger.error(f"❌ 插入单词异常: {e}")
            import traceback
            logger.error(f"详细错误: {traceback.format_exc()}")
            return False

    def update_category_word_count(self, class_id: str):
        """Recount the words belonging to *class_id* and store the result in
        ``word_category.word_num``. Best-effort: errors are logged, not raised.
        """
        # FIX: pre-initialize so the finally block cannot raise NameError
        # when get_connection() itself fails before conn/cursor are bound.
        conn = None
        cursor = None
        try:
            conn = self.get_connection()
            cursor = conn.cursor()

            # Count the words currently stored for this category.
            cursor.execute("SELECT COUNT(*) FROM word WHERE class_id = %s", (class_id,))
            word_count = cursor.fetchone()[0]

            # Write the counter back to the category table.
            cursor.execute(
                "UPDATE word_category SET word_num = %s WHERE class_id = %s",
                (word_count, class_id)
            )

            conn.commit()
            logger.info(f"已更新分类 {class_id} 的单词数量: {word_count}")

        except Exception as e:
            logger.error(f"更新分类统计失败: {e}")
        finally:
            if cursor is not None:
                cursor.close()
            if conn is not None:
                conn.close()

    def test_single_word(self, word: str):
        """Crawl one word and pretty-print the result (manual smoke test)."""
        logger.info(f"测试爬取单词: {word}")

        result = self.crawl_word_info(word)

        if result:
            print("=" * 50)
            print(f"单词: {result['name']}")
            print(f"音标: {result['symbol']}")
            print(f"发音: {result['sound']}")
            print(f"释义: {result['desc']}")
            print(f"词性: {result['word_type']}")
            print("=" * 50)
        else:
            print(f"爬取失败: {word}")

        return result


def main():
    """Example entry point: batch-import a word-list file into the database."""

    # Connection settings for the target MySQL server.
    db_config = dict(
        host='192.168.0.13',
        port=3306,
        user='cfx2000',
        password='cfx2000',
        database='english',
        charset='utf8mb4',
        auth_plugin='mysql_native_password',  # pin the authentication plugin
        autocommit=True,
        connect_timeout=30,
        use_unicode=True,
    )

    crawler = WordCrawler(db_config)

    # Batch import from a file.
    print("\n=== 从文件批量导入 ===")

    imported, missed = crawler.batch_crawl_from_file(
        file_path='cet4_core.txt',
        class_id='CET4_CORE',
        class_title='四级核心词汇',
        course='1',
    )

    print(f"批量导入完成: 成功 {imported} 个，失败 {len(missed)} 个")


if __name__ == "__main__":
    main()