# Startup banner ("download videos and carousel images").
print("Hello, 下载视频以及轮播图片")

# pip install DrissionPage

# One-time environment setup: register the local chrome.exe with DrissionPage.
# from DrissionPage import ChromiumOptions
# path = r'C:\Program Files\Google\Chrome\Application\chrome.exe' # location of chrome.exe on this machine
# ChromiumOptions().set_browser_path(path).save()

import requests
from DrissionPage import ChromiumPage

import re
import os
from datetime import datetime

# pip install pymysql
import pymysql

# HTTP headers for the media downloads: Douyin's CDN checks the referer,
# and a desktop Chrome user-agent string avoids trivial bot filtering.
headers = {
    'referer': 'https://www.douyin.com/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36'
}
dp = ChromiumPage()  # automatically opens a Chrome browser window
# Listen for network packets whose request path matches
# https://www.douyin.com/aweme/v1/web/aweme/post/?xxx (the creator's post-feed API).
dp.listen.start('aweme/v1/web/aweme/post/')

# Paste one creator's profile URL here, formatted as "<display name>@@<profile url>".
# NOTE(review): this assignment is dead — it is unconditionally overwritten by
# the active "MH@@..." assignment further down; consider commenting it out
# like the other alternatives.
name_page_url = "演绎超@@https://www.douyin.com/user/MS4wLjABAAAAqQU0t7Zh_2HTHQgBxE9e9Fjqlu-vzhQxYskSOjTdTH0"
# name_page_url = "凹菟嫚@@https://www.douyin.com/user/MS4wLjABAAAAvTYdZqPuDlLpSz5eSjGuY3IU-FzBFqWHjP0SZjRC2V0"
# name_page_url = "汤圆🍒可甜了@@https://www.douyin.com/user/MS4wLjABAAAAnh3F_7QpVj7ECOKV1zLyQuqjwJD_jsKbpoHk9_6mtbE"
# name_page_url = "佚柔姐姐（微胖版）@@https://www.douyin.com/user/MS4wLjABAAAAPRCMGPAFM1VGcJrxRuvTXgJp0Sk95EW1DynNmbKSPg8"
# name_page_url = "大反派（@@https://www.douyin.com/user/MS4wLjABAAAAF73Rad0pwAr01H-e0N2SIgws3uDz_7R-lE4aexCD1ui5yhdXbW4fR20DdfYdtu5u"
# name_page_url = "来点塔斯汀🍔@@https://www.douyin.com/user/MS4wLjABAAAAnPDk1Tcx4T2PZXnUdNv47D1SPbW7iFKUGQP46GPBjoM"
# name_page_url = "涵儿摄影室@@https://www.douyin.com/user/MS4wLjABAAAAVEQ0ZKhBxFwQNAN08u9P6P-8-uO8rA5pw7EPASZilfc"
# name_page_url = "小熙oni@@https://www.douyin.com/user/MS4wLjABAAAAFmwG-bgIJswLRdpEGoDBZRf50VtRRlffdzYZ3iZqBtcxXF0uRYRnLd2KevNZlwLK"
# name_page_url = "小优优子@@https://www.douyin.com/user/MS4wLjABAAAAOgbrBdbGlLM6DhllNzgj7pI6_cEEzkm5hxIA2vyQrO8"
# name_page_url = "俄洛伊@@https://www.douyin.com/user/MS4wLjABAAAAs54OHCNTgC6zKJ5bjJ7oVnOvyDj2v_uoRlv-AhGw9N0"
# name_page_url = "王德芙@@https://www.douyin.com/user/MS4wLjABAAAAiXeL8UUfi0KfVrjbpc2LJKSGiPXEBomMz5i_DCbDsSYXhCJ6PZm9c7DUE1KCQ2cy"
# name_page_url = "Miya林夕夕@@https://www.douyin.com/user/MS4wLjABAAAAjqdWtXICLxzgI7SxxEWwVo-uMqfoR1SsO78WXRUQlPg"
# name_page_url = "猜迷语@@https://www.douyin.com/user/MS4wLjABAAAAZAIOMroKRRZk9b3DNNKnlJ8gnIOjCPOY730jeofINnb1H4GquOS3pz777wGN8Ww7"
# name_page_url = "兔子快点@@https://www.douyin.com/user/MS4wLjABAAAAxGUJOXa-llDInDvSKL5nzIu-SbaDfAD4OGP7WHXmnXIMzZYOEBRPphTqRLKz66_4"
# name_page_url = "鱼蛋爱吃甜@@https://www.douyin.com/user/MS4wLjABAAAAAjdvq_hnAsEPrZNTvENw_-DInq2PevVXpzue7FNeH6yh3cPAmDiN7-dPxaW4eEaj"
# name_page_url = "大鹅（减肥版）@@https://www.douyin.com/user/MS4wLjABAAAAIjvD83wZh5vKC6iO0q1vSfkfYGQLgsis8_-nYzaBkGmBNggZ1jWkXJwTZKH7hpf7"
# name_page_url = "是小七呀@@https://www.douyin.com/user/MS4wLjABAAAALhtCJQG_SY8DMyn4tX2yFfXahUPUH2aGKqXUETY_Bo4WYwJ5_w0HE60X96720AdG"
# name_page_url = "阿椒啊@@https://www.douyin.com/user/MS4wLjABAAAAbGcziADDQoxHP1b8Dd7y0M2Q8vJB4CWhMixCQ0GHQTU"
# name_page_url = "徐櫻菡@@https://www.douyin.com/user/MS4wLjABAAAAINFYZ3-Dc7WA5Dh4lOgQv7QS7vl6B5LhGdzX8-PqYPY"
# name_page_url = "千歳@@https://www.douyin.com/user/MS4wLjABAAAAFqfiim2EPsO6jl_qPwk6IBrEo5Tkm2kCFYAAcK_o_6ipSLi-Cc2UY0Ueurck2tmN"
# Currently-active creator: "<display name>@@<profile url>".
name_page_url = "MH@@https://www.douyin.com/user/MS4wLjABAAAA_s1xTRuXrZa2rqgwpSQ6b1IKz53L94H0U7w80tCC3JrHsw4ph8yZ42l4QwEmXuJK"


# Split the "@@"-joined pair once and take both halves from the same list.
_name_url_parts = name_page_url.split("@@")
# Creator display name (used for the download folder and DB rows).
name = _name_url_parts[0]
# Creator profile-page URL.
page_url = _name_url_parts[1]



# Navigate the controlled browser to the creator's profile page; this triggers
# the first post-feed request that the packet listener captures.
dp.get(page_url)
# Per-creator download folder, e.g. D:\douyin\video\<name>.
save_dir = r'D:\douyin\video' + '\\' + name
# Create the folder if it does not exist yet.
if not os.path.exists(save_dir):
    os.makedirs(save_dir)

def clean_filename(filename):
    """Return *filename* made safe for use as a Windows file name.

    Replaces characters Windows forbids in file names (< > : " / \\ | ? *)
    and all whitespace with underscores, truncates to 20 characters, then
    strips trailing spaces/dots (invalid at the end of a Windows name).
    """
    # BUG FIX: the original character class was missing '|', which Windows
    # also forbids in file names.
    safe = re.sub(r'[<>:"/\\|?*\s]', "_", filename)
    # BUG FIX: truncate first, then strip — the original stripped before
    # slicing, so the 20-char cut could reintroduce a trailing dot.
    return safe[:20].rstrip(' .')

# 使用当前时间生成 格式为2025-01-01_22_22_22的日期字符串
def get_date_str():
    """Return the current local time formatted as YYYY-MM-DD_HH_MM_SS."""
    now = datetime.now()
    return now.strftime("%Y-%m-%d_%H_%M_%S")

def video_id_exists(video_id, create_time):
    """Check whether a video is already recorded in the database.

    :param video_id: the video's aweme_id
    :param create_time: the video's create_time (queried together with the
        id, matching the existing schema of the ``art`` table)
    :return: True if a matching row exists, otherwise False
    """
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='root', charset='utf8mb4', db='video')
    try:
        # Cursor as a context manager so it is closed even if execute() raises.
        with conn.cursor() as cursor:
            # SELECT 1 ... LIMIT 1: only existence matters, not the row data.
            sql = "SELECT 1 FROM art WHERE aweme_id = %s AND create_time = %s LIMIT 1"
            cursor.execute(sql, [video_id, create_time])
            return cursor.fetchone() is not None
    finally:
        # BUG FIX: the original leaked the connection (and cursor) when the
        # query raised; always release it.
        conn.close()

def save_to_mysql(author_user_id, name, title, video_id, create_time=None):
    """Insert one video record into the ``art`` table.

    :param author_user_id: the creator's numeric user id
    :param name: the creator's display name
    :param title: the video description text
    :param video_id: the video's aweme_id
    :param create_time: the video's create_time; when None, falls back to the
        module-level ``create_time`` set by the scrape loop, so existing
        4-argument call sites keep working unchanged.
    """
    if create_time is None:
        # BUG FIX: the original silently read the global `create_time`; make
        # that dependency explicit while staying backward compatible.
        create_time = globals().get('create_time')
    # Pre-bind so the finally block is safe even if connect() itself raises
    # (BUG FIX: the original hit NameError on `conn`/`cursor` in that case).
    conn = None
    cursor = None
    try:
        conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='root', charset='utf8mb4', db='video')
        cursor = conn.cursor()

        add_time = datetime.now()
        # Parameterized INSERT (no string building, safe against injection).
        insert_query = "INSERT INTO art (author_user_id, user_name, title, aweme_id, create_time, add_time) VALUES (%s, %s, %s, %s, %s, %s)"
        cursor.execute(insert_query, (author_user_id, name, title, video_id, create_time, add_time))

        # Commit the transaction.
        conn.commit()
        print(f'{name} {title} {video_id} 已保存到数据库')

    except pymysql.Error as err:
        # BUG FIX: was `except conn.Error` — pymysql connections have no
        # `Error` attribute (and `conn` may be unbound); the exception class
        # lives on the module as `pymysql.Error`.
        print(f"插入数据时出错: {err}")

    finally:
        # Close cursor and connection whether or not the insert succeeded.
        if cursor:
            cursor.close()
        if conn:
            conn.close()


# Current page counter (1-based), used only for progress logging.
page_i = 1

# Pagination loop: harvest each captured post-feed response, then scroll to
# the bottom of the page so the site requests the next page of posts.
#for page in range(1, 70):
while True:

    print("\r\n")
    print(f'正在采集{page_i}页的内容')  # "collecting page {page_i}"

    try:
        # Wait up to 10s for the next post-feed packet. On timeout the return
        # value has no usable `.response`, which raises below, is caught by
        # the outer except, and ends the loop.
        resp = dp.listen.wait(timeout=10)

        json_data = resp.response.body
        ## print(json_data)
        info_list = json_data['aweme_list']
        print(f'当前页码 {page_i} 页 共 {len(info_list)} 条数据')  # page number / item count
        # NOTE(review, translated): a `is None` check here could never fire,
        # because listen.wait blocks while no packet arrives — the empty-list
        # check below is the working end-of-data test.
        # if info_list is None:
        #     # no data
        #     print(f'没有数据了 当前页码 {page_i} 页，退出循环')
        #     break

        if not info_list:
            print(f'没有数据了 当前页码 {page_i} 页，退出循环')  # no more data -> exit loop
            break

        for index in info_list:
            # Per-post metadata pulled from the feed JSON.
            title = index['desc']
            video_id = index['aweme_id']
            create_time = index['create_time']  # also read as a global by save_to_mysql
            author_user_id = index['author_user_id']

            # Skip posts already recorded in MySQL (keyed by aweme_id +
            # create_time); otherwise record them before downloading.
            if video_id_exists(video_id, create_time):
                print(f'{name} {title} {video_id} 已经存在')  # already exists
                continue
            else:
                # Save the record into MySQL, then download the media below.
                print(f'{name} {title} {video_id} 开始下载')  # start download
                save_to_mysql(author_user_id, name, title, video_id)


            media_type = index['media_type']
            if media_type == 4:
                # media_type 4: a regular video post.
                try:
                    # First play address; for album posts this URL is an mp3.
                    video_url = index['video']['play_addr']['url_list'][0]
                    # print(title, video_url)
                    # Sanitize the title for use in the file name.
                    clean_title = clean_filename(title)
                    print(name, clean_title, video_url)

                    video_content = requests.get(url=video_url, headers=headers).content
                    with open(f'{save_dir}\\' + clean_title + '-' + video_id + '.mp4', 'wb') as f:
                        f.write(video_content)
                except Exception as e:
                    print('没有视频')  # "no video"
                    print(e)
                    continue
            if media_type == 2:
                # media_type 2: an image-carousel (album) post.
                try:
                    # Sanitize the title for use in the file name.
                    clean_title = clean_filename(title)
                    # List of carousel images for this post.
                    image_list = index['images']
                    num = 0
                    # Timestamp used to build names like 2025-01-01_22_01_09-001.
                    dateStr = get_date_str()
                    for index_pic in image_list:
                        num = num + 1
                        name_index = f"{name}-{dateStr}-{num:03d}"
                        image_url = index_pic['url_list'][0]
                        print(f'图片地址 {name} {image_url}')  # image URL
                        # e.g. https://p3-pc-sign.douyinpic.com/tos-cn-i-0813/...~tplv-dy-aweme-images:q75.webp?...
                        picResp = requests.get(url=image_url, headers=headers)
                        with open(f'{save_dir}\\' + clean_title + '-' + name_index + '.webp', 'wb') as f:
                            f.write(picResp.content)
                except Exception as e:
                    print('没有图片')  # "no images"
                    print(e)
                    continue

        # Locate the bottom-of-page element (NOTE: this CSS class name is
        # site-generated and may break when Douyin ships a new build).
        down = dp.ele('css:.ayFW3zux')
        # Scroll it into view so the site loads the next page of posts.
        dp.scroll.to_see(down)

        page_i = page_i + 1

    except Exception as e:
        print(e)
        print(f'超时了，没有收到数据 当前页码 {page_i} 页，退出循环')  # timed out, no data -> exit loop
        dp.quit()
        break

