# -*- coding: utf-8 -*-
import psutil
from django.core.management.base import BaseCommand
from django.conf import settings
import os
import csv
import time
import numpy as np
import dashscope
import socket
from http import HTTPStatus
from DrissionPage import WebPage, ChromiumOptions
from crawler.models import HotWord
from django.utils import timezone

# Alibaba Cloud Qwen (DashScope) API configuration.
# SECURITY: hard-coded API key committed to source — move this into an
# environment variable / secrets store and rotate the key.
DEFAULT_API_KEY = "sk-204590a345b64fa9bfa933a50d7558c8"

# Classification tuning parameters.
# NOTE(review): an earlier comment claimed "lowered from 0.4 to 0.3", but the
# effective value below is 0.1 — the history note was stale.
CLASSIFICATION_THRESHOLD = 0.1  # minimum cosine similarity for an embedding-based match
API_RETRY_TIMES = 3  # number of retries for DashScope API calls
API_RETRY_DELAY = 2  # delay between retries (seconds)

# Category -> keyword description. Each description serves two purposes:
# it is embedded (prefixed with the category name) to produce the category
# vector, and it is split into individual keywords for the substring-matching
# fallback classifier. Keywords are separated by the ideographic comma '、'.
CATEGORIES = {
    "娱乐": "明星、跳舞、唱歌、演唱会、金鸡奖、女配、女主、男配、男主、电影、综艺、红毯、饭圈、热搜、选秀、恋情塌房、八卦绯闻、演员、歌手、男团、女团、内娱、韩娱、电视剧、网剧票房综艺、爱豆、粉丝应援、影视金曲",
    "科技": "AI大模型、芯片制造、智能手机、笔记本电脑、自动驾驶、SpaceX、量子计算、iOS安卓系统、4G5G6G通信、元宇宙区块链机器人、无人机技术、VRAR设备、新能源科技",
    "体育": "足球世界杯、篮球NBA、CBA联赛、梅西C罗、奥运会、电竞S赛、世锦赛冠军、比赛直播、运动员转会、球队战绩、体育彩票、健身教练、运动装备",
    "财经": "股市行情、A股港股美股、美联储政策、汇率波动、基金理财、房企债务比特币、GDP增速、油价调整、关税政策、通胀数据降息加息、银行理财投资技巧",
    "社会": "打人事件、山火救援、高考分数线、高铁晚点、政策解读、退休年龄、网红直播、城管执法、社会冲突、事故现场、维权事件、就业数据、职场压力、性别议题婚姻家庭",
    "健康": "新冠疫情、疫苗接种减肥方法、医保政策猝死预防、癌症筛查、抑郁症、症状自查医院挂号、医生建议、药品价格、养生常识、健身教程、饮食健康睡眠质量",
    "教育": "双减政策、考研人数、学区房价格、清北录取、高考作文、数学竞赛、校园霸凌、在线教育平台、学校排名教师工资学生减负作业改革、考试大纲、录取通知书毕业典礼",
    "美食": "烧烤夜市、奶茶新品、预制菜争议、米其林餐厅、山姆会员店草莓季、大学食堂、火锅底料、外卖优惠、食材选购、烹饪技巧、菜谱教程、地方小吃网红零食",
    "旅游": "民宿推荐、堵车路况、特种兵旅游免签国家专列旅游迪士尼攻略、酒店优惠景区门票、机票打折旅游攻略打卡景点假期出行、城市漫游、旅行vlog",
    "游戏": "原神新版本王者荣耀活动LOL赛事Steam折扣网易暴雪游戏电竞战队防沉迷政策、版本更新英雄调整皮肤上架、电竞赛事外挂举报手游端游主机游戏",
    "国际": "美国大选、俄乌冲突、联合国会议G7峰会一带一路、各国签证政策、国际关系、国际组织全球经济气候峰会、外交动态、海外疫情、跨国企业、难民危机",
    "天气": "暴雨预警、高温橙色预警、台风路径、沙尘暴、寒潮蓝色预警、暴雪预警、季节交替、气候异常、厄尔尼诺、拉尼娜、极端天气、天气预报空气质量",
    "军事": "国防白皮书、航母下水、导弹试射、军演动态、武器装备、退伍军人政策、国际军备竞赛军事科技、边防巡逻、军事演习、军工企业、战争冲突",
    "文化": "非遗传承传统节日、文化遗产博物馆展览、考古发现、文学作品影视改编、艺术展览民族习俗、文化交流汉服复兴国潮品牌、文化政策",
    "汽车": "新能源汽车电动车续航、自动驾驶技术、汽车召回油价调整、车展新车发布特斯拉比亚迪、汽车保养、充电桩建设政策补贴、汽车销量",
    "国内": "全国两会、政府工作报告、部委新规、五年规划、乡村振兴、共同富裕、南水北调、西电东送、GDP数据、CPI数据、PMI指数、央企动态、国产大飞机、国产航母、国产芯片、国产操作系统、数字人民币、雄安新区、成渝双城经济圈、长三角一体化、粤港澳大湾区、京津冀协同发展、自贸试验区、进博会、广交会、服贸会、消费扶贫、新基建、东数西算、碳达峰碳中和、长江黄河生态保护、三北防护林",
}


def kill_process_on_port(port):
    """Kill whichever process currently has a socket on *port*.

    Scans all processes and inspects their connections; the first process
    found with a local address on *port* is killed.

    Args:
        port: local port number (int) to free up.

    Returns:
        True if a matching process was found and killed, False otherwise
        (including when the scan itself fails).
    """
    try:
        for proc in psutil.process_iter(['pid', 'name']):
            try:
                for conn in proc.net_connections():
                    # laddr may be an empty tuple for some socket states —
                    # guard before touching .port.
                    if conn.laddr and conn.laddr.port == port:
                        print(f"发现端口 {port} 被进程 {proc.info['name']} (PID: {proc.info['pid']}) 占用，正在终止...")
                        proc.kill()
                        # Give the OS a moment to actually release the port.
                        time.sleep(2)
                        return True
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                # NoSuchProcess was previously not caught here, so a process
                # exiting mid-scan aborted the whole sweep via the outer
                # except. Skip the vanished/inaccessible process instead.
                pass
        return False
    except Exception as e:
        print(f"检查端口占用时出错: {e}")
        return False


def find_available_port(start_port=9222, max_attempts=10):
    """Return the first locally bindable port in
    [start_port, start_port + max_attempts), or None if none is usable.

    When a candidate is busy, an attempt is made to evict the occupying
    process (via kill_process_on_port) and the bind is retried once.
    """
    for candidate in range(start_port, start_port + max_attempts):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
            try:
                probe.bind(('localhost', candidate))
            except OSError:
                # Busy: try to evict the occupant, then re-probe once.
                if not kill_process_on_port(candidate):
                    continue
                try:
                    probe.bind(('localhost', candidate))
                except OSError:
                    continue
            return candidate
    return None


class QwenAPI:
    """Thin wrapper around DashScope's text-embedding endpoint."""

    def __init__(self, api_key=None):
        # Resolution order: explicit argument > DASHSCOPE_API_KEY env var >
        # module-level default.
        resolved = api_key or os.getenv("DASHSCOPE_API_KEY") or DEFAULT_API_KEY
        if not resolved:
            raise ValueError("未找到有效的API密钥，请检查配置")
        self.api_key = resolved

    def get_embedding(self, text):
        """Return the embedding vector for *text*, or None after all retries fail."""
        attempt = 0
        while attempt < API_RETRY_TIMES:
            try:
                resp = dashscope.TextEmbedding.call(
                    model=dashscope.TextEmbedding.Models.text_embedding_v1,
                    input=text,
                    api_key=self.api_key,
                )
            except Exception as exc:
                print(f"获取文本嵌入向量异常(尝试 {attempt + 1}/{API_RETRY_TIMES}): {exc}")
            else:
                if resp.status_code == HTTPStatus.OK:
                    return resp.output["embeddings"][0]["embedding"]
                print(f"获取文本嵌入向量失败(尝试 {attempt + 1}/{API_RETRY_TIMES}): {resp.message}")
            attempt += 1
            # Back off only when another attempt is still coming.
            if attempt < API_RETRY_TIMES:
                time.sleep(API_RETRY_DELAY)
        return None


def cosine_similarity(vec1, vec2):
    """Return the cosine similarity of two vectors, or 0 on any degenerate input."""
    a = np.array(vec1)
    b = np.array(vec2)

    # Degenerate inputs score 0 rather than raising.
    if len(a) == 0 or len(b) == 0:
        print("余弦相似度计算失败: 输入向量为空")
        return 0
    if len(a) != len(b):
        print(f"余弦相似度计算失败: 向量长度不匹配 ({len(a)} vs {len(b)})")
        return 0
    if not a.any() or not b.any():
        print("余弦相似度计算失败: 输入向量为零向量")
        return 0

    norm_a = np.linalg.norm(a)
    norm_b = np.linalg.norm(b)
    if norm_a == 0 or norm_b == 0:
        print("余弦相似度计算失败: 向量范数为零")
        return 0

    return np.dot(a, b) / (norm_a * norm_b)


def get_category_keywords(categories=None):
    """Extract the per-category keyword lists for the fallback matcher.

    Args:
        categories: optional mapping of category name -> keyword string;
            defaults to the module-level CATEGORIES table.

    Returns:
        dict mapping category name -> list of individual keyword strings.

    Bug fix: the CATEGORIES descriptions separate keywords with the
    ideographic comma '、', but the old code split only on the ASCII comma
    ',' — every category therefore collapsed into a single giant "keyword"
    and the substring fallback classifier almost never matched anything.
    """
    if categories is None:
        categories = CATEGORIES
    category_keywords = {}
    for category, keywords_str in categories.items():
        # Normalize all comma variants ('、', '，', ',') before splitting.
        normalized = keywords_str.replace('、', ',').replace('，', ',')
        keywords = [kw.strip() for kw in normalized.split(',') if kw.strip()]
        category_keywords[category] = keywords
        print(f"为 {category}类别提取到 {len(keywords)}个关键词")
    return category_keywords


def keyword_based_classify(keyword, category_keywords):
    """Fallback classifier based on case-insensitive substring matching.

    Counts, per category, how many of its keywords occur inside *keyword*
    and returns the category with the most hits (first seen wins ties),
    or "未分类" when nothing matches. Match details are printed for debugging.
    """
    needle = keyword.lower()
    best_category, max_matches = "未分类", 0
    match_details = {}

    for category, candidates in category_keywords.items():
        hits = [kw for kw in candidates if kw.lower() in needle]
        match_details[category] = {'count': len(hits), 'matches': hits}
        if len(hits) > max_matches:
            max_matches, best_category = len(hits), category

    # Dump per-category hit counts to aid debugging.
    print(f"[关键词匹配调试] {keyword}:")
    for category, details in match_details.items():
        if details['count'] > 0:
            print(f"  {category}: {details['count']}个匹配 ({', '.join(details['matches'])})")

    return best_category if max_matches > 0 else "未分类"


def get_category_embeddings(qwen_api):
    """Fetch an embedding vector for every category in CATEGORIES.

    The embedded text is "<category>: <keywords>" so the category name itself
    sharpens separation between vectors. Categories whose embedding cannot be
    obtained are simply absent from the returned dict (a warning is printed).

    NOTE(review): qwen_api.get_embedding already retries internally, so the
    retry loop here multiplies attempts (up to API_RETRY_TIMES ** 2 calls per
    category on persistent failure) — confirm whether that is intended.
    """
    print("正在获取分类类别嵌入向量...")
    embeddings = {}

    for category, keywords_str in CATEGORIES.items():
        embedding_text = f"{category}: {keywords_str}"
        obtained = False
        attempt = 0
        while not obtained and attempt < API_RETRY_TIMES:
            vector = qwen_api.get_embedding(embedding_text)
            if vector:
                embeddings[category] = vector
                print(f"成功获取 '{category}' 类别嵌入向量 (维度: {len(vector)})")
                obtained = True
            else:
                print(f"获取 '{category}' 类别嵌入向量失败(尝试 {attempt + 1}/{API_RETRY_TIMES})")
                if attempt < API_RETRY_TIMES - 1:
                    time.sleep(API_RETRY_DELAY)
                attempt += 1

        if not obtained:
            print(f"警告:无法获取 '{category}' 类别嵌入向量将影响该类别分类准确性")

    return embeddings


def classify_keyword(keyword, category_embeddings, category_keywords, qwen_api, threshold=CLASSIFICATION_THRESHOLD):
    """Classify *keyword* into the most similar category.

    Strategy: embedding cosine similarity first; when embeddings are missing,
    the keyword embedding call fails, or the best score is below *threshold*,
    fall back to substring matching via keyword_based_classify.

    Returns:
        (category, similarity) — similarity is 0 on the fallback path.
    """
    # Path 1: embedding-based similarity.
    if category_embeddings:
        kw_vec = qwen_api.get_embedding(keyword)
        if kw_vec:
            scores = {
                cat: cosine_similarity(kw_vec, vec)
                for cat, vec in category_embeddings.items()
            }

            # Log the top-3 candidates for debugging.
            ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
            print(f"[分类调试] {keyword}: {[(cat, f'{sim:.3f}') for cat, sim in ranked[:3]]}")

            if ranked:
                top_cat, top_score = ranked[0]
                if top_score >= threshold:
                    return top_cat, top_score

    # Path 2: keyword substring fallback (similarity reported as 0).
    print(f"[备用分类] {keyword} 使用关键词匹配进行分类")
    return keyword_based_classify(keyword, category_keywords), 0





def insert_hot_data(hot_data, category_embeddings, category_keywords, qwen_api):
    """Classify each hot word and upsert it into the HotWord table.

    Args:
        hot_data: list of (rank, keyword, hot_value) tuples.
        category_embeddings: category -> embedding vector (may be empty,
            in which case keyword matching is used for everything).
        category_keywords: category -> keyword list for the fallback matcher.
        qwen_api: QwenAPI instance used to embed each keyword.

    Returns:
        True on success, False when hot_data is empty or the upsert loop fails.
    """
    if not hot_data:
        return False

    if not category_embeddings:
        print("警告:无法获取分类嵌入向量将使用关键词匹配分类")

    try:
        stats = {
            'total': 0,
            'embedding_based': 0,
            'keyword_based': 0,
            'unclassified': 0,
            'categories': {},
        }

        created_count = 0
        for rank_num, keyword, hot_value in hot_data:
            stats['total'] += 1

            category, similarity = classify_keyword(
                keyword, category_embeddings, category_keywords, qwen_api, CLASSIFICATION_THRESHOLD
            )

            # Track how each item was classified (similarity > 0 means the
            # embedding path produced the result; 0 means the fallback).
            if category == "未分类":
                stats['unclassified'] += 1
            else:
                if similarity > 0:
                    stats['embedding_based'] += 1
                else:
                    stats['keyword_based'] += 1
                stats['categories'][category] = stats['categories'].get(category, 0) + 1

            # update_or_create replaces the previous get_or_create + manual
            # field-assign + save() sequence: create the row, or update the
            # defaults in place, in a single ORM call.
            _, created = HotWord.objects.update_or_create(
                keyword=keyword.strip(),
                defaults={
                    'category': category,
                    'hot_value': hot_value,
                    'similarity': similarity,
                },
            )
            if created:
                created_count += 1

            print(f"已分类: '{keyword.strip()}' -> {category} (相似度: {similarity:.3f})")

        # Batch summary.
        print("\n[分类统计]")
        print(f"总分类数: {stats['total']}")
        print(f"嵌入向量分类: {stats['embedding_based']}")
        print(f"关键词匹配分类: {stats['keyword_based']}")
        print(f"未分类: {stats['unclassified']}")
        print("类别分布:")
        for category, count in stats['categories'].items():
            print(f"  {category}: {count} ({count / stats['total'] * 100:.1f}%)")

        print(f"\n成功插入/更新 {created_count} 条新数据（含分类信息）")
        return True
    except Exception as e:
        print(f"数据插入失败: {e}")
        return False


class Command(BaseCommand):
    """Scrape Douyin's hot-search list, classify each hot word with Qwen
    embeddings (keyword substring matching as fallback), and persist the
    results to both a CSV file and the HotWord table."""

    help = '抖音热点数据爬取命令'

    def handle(self, *args, **options):
        try:
            print("抖音热点爬取程序（带分类优化功能v2）开始执行...")

            # --- Pick a free remote-debugging port for the browser ---
            port = find_available_port(9222)
            if not port:
                self.stdout.write(self.style.ERROR("无法找到可用端口"))
                return

            print(f"找到可用端口: {port}")

            # --- Locate an installed Edge/Chrome binary (Windows paths) ---
            browser_paths = [
                r"C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe",
                r"C:\Program Files\Microsoft\Edge\Application\msedge.exe",
                r"C:\Program Files\Google\Chrome\Application\chrome.exe",
                r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe"
            ]

            browser_path = None
            for path in browser_paths:
                if os.path.exists(path):
                    browser_path = path
                    break

            if not browser_path:
                print("未找到浏览器路径请手动安装Chrome或Edge浏览器")
                return

            print(f"使用浏览器路径: {browser_path}")

            # --- Launch a headless browser via DrissionPage ---
            try:
                co = ChromiumOptions()
                co.set_browser_path(browser_path)
                co.set_argument(f"--remote-debugging-port={port}")
                co.set_argument("--no-sandbox")
                co.set_argument("--disable-dev-shm-usage")
                co.set_argument("--disable-gpu")
                co.set_argument("--disable-extensions")
                co.headless(True)

                print("浏览器配置成功正在初始化浏览器...")
                page = WebPage(chromium_options=co)
                print("浏览器初始化成功")

            except Exception as e:
                print(f"浏览器初始化失败: {str(e)}")
                print("请尝试以下解决方案:")
                print(f"1. 确认浏览器路径正确: {browser_path}")
                print(f"2. 检查端口 {port} 是否被占用可尝试重启电脑")
                print(f"3. 手动启动浏览器并添加参数:")
                print(f"   {browser_path} --remote-debugging-port={port} --headless")
                return

            # --- Initialize the classification pipeline ---
            try:
                qwen_api = QwenAPI()
                category_embeddings = get_category_embeddings(qwen_api)
                category_keywords = get_category_keywords()  # fallback matcher data
            except Exception as e:
                print(f"初始化分类系统失败: {e}")
                page.quit()
                return

            print("使用Django ORM存储数据...")

            # Initialized BEFORE the try block so the finally below can always
            # report len(hot_data). Previously hot_data was first assigned
            # inside the try, so any failure before extraction (e.g. page.get
            # raising) caused a NameError inside finally that masked the
            # original exception.
            hot_data = []
            try:
                # --- Load the hot-search page ---
                print("正在访问抖音热点页面...")
                page.get('https://www.douyin.com/hot')

                page.wait.load_start()
                try:
                    page.wait.ele('#hot-search-list', timeout=20)
                    print("热点列表加载成功")
                except Exception:
                    print("未找到热点列表元素继续执行...")

                time.sleep(5)

                # Make sure the page is in "d" (driver) mode before API calls.
                if hasattr(page, 'mode') and page.mode != 'd':
                    page.change_mode()

                # --- Try the JSON API first ---
                api_url = 'https://www.douyin.com/aweme/v1/web/hot/search/list/'
                params = {
                    'device_platform': 'webapp',
                    'aid': '6383',
                    'channel': 'channel_pc_web',
                    'detail_list': '1',
                    'source': '6',
                    'pc_client_type': '1',
                    'version_code': '170400',
                    'version_name': '17.4.0'
                }

                try:
                    print("尝试获取API数据...")
                    response = page.get(api_url, params=params)

                    # DrissionPage exposes the response JSON differently per
                    # mode; probe the likely locations in order.
                    json_data = {}
                    if hasattr(response, 'json'):
                        json_data = response.json()
                    elif hasattr(page, 'json'):
                        json_data = page.json
                    else:
                        import json
                        json_data = json.loads(response.text) if hasattr(response, 'text') else {}

                except Exception as e:
                    print(f"API请求失败: {str(e)}")
                    json_data = {}

                # --- Extract (rank, keyword, hot_value) tuples ---
                if json_data and json_data.get('data') and 'word_list' in json_data['data']:
                    for item in json_data['data']['word_list']:
                        rank_num = item.get('position', 0)
                        keyword = item.get('word', '未知')
                        hot_value = item.get('hot_value', 0)

                        try:
                            rank_num = int(rank_num)
                            hot_value = int(hot_value)
                            hot_data.append((rank_num, keyword.strip(), hot_value))
                            print(f"爬取: 排名 {rank_num} - {keyword[:20]}...")
                        except ValueError:
                            print(f"跳过异常数据: 排名={rank_num}, 热度={hot_value}")
                            continue

                # --- Fallback: scrape the DOM directly ---
                if not hot_data:
                    print("尝试从页面元素中获取热点数据...")
                    try:
                        for item in page.eles('.bQNkHmPk'):
                            try:
                                rank_elem = item.ele('.bQNkHmPk-index')
                                keyword_elem = item.ele('.bQNkHmPk-title')
                                hot_elem = item.ele('.bQNkHmPk-hotValue')

                                if rank_elem and keyword_elem:
                                    rank_num = int(rank_elem.text.strip('.'))
                                    keyword = keyword_elem.text
                                    hot_value = int(hot_elem.text) if hot_elem else 0

                                    hot_data.append((rank_num, keyword.strip(), hot_value))
                                    print(f"从页面获取: 排名 {rank_num} - {keyword[:20]}...")
                            except Exception:
                                continue
                    except Exception as e:
                        print(f"从页面元素获取数据失败: {e}")

                # --- Persist: CSV first, then database ---
                # NOTE(review): classify_keyword is called once here for the
                # CSV and again inside insert_hot_data for the DB — each hot
                # word is embedded twice. Consider classifying once and
                # passing the results through.
                if hot_data:
                    with open('抖音热点热词分类.csv', 'w', encoding='utf-8-sig', newline='') as f:
                        writer = csv.writer(f)
                        writer.writerow(['排名', '热词', '热度值', '分类', '相似度'])

                        for rank_num, keyword, hot_value in hot_data:
                            category, similarity = classify_keyword(
                                keyword, category_embeddings, category_keywords, qwen_api
                            )
                            writer.writerow([rank_num, keyword, hot_value, category, f"{similarity:.3f}"])

                    print(f"已保存 {len(hot_data)} 条带分类和相似度数据到CSV")

                    insert_hot_data(hot_data, category_embeddings, category_keywords, qwen_api)
                else:
                    print("未获取到有效热点数据")

            except Exception as e:
                print(f"爬取过程失败: {str(e)}")
                import traceback
                traceback.print_exc()
            finally:
                print(f"爬取完成共获取 {len(hot_data)} 条热点数据")
                page.quit()

        except Exception as e:
            print(f"爬虫执行过程中发生未捕获的异常: {str(e)}")
            import traceback
            traceback.print_exc()
            raise  # re-raise so the calling view can observe the failure

        finally:
            print("抖音热点爬取程序执行完毕")


# Allows running this module directly (outside `manage.py`).
# NOTE(review): Django settings/app registry are likely not configured when
# invoked this way — confirm whether direct execution is actually supported.
if __name__ == '__main__':
    Command().handle()
