#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :china_cnr_gdgg.py
# @Time      :2024/3/21 
# @Author    :CL
# @email     :1037654919@qq.com
# 实现对中国之声·每日滚动新闻广播的爬取  url="https://china.cnr.cn/gdgg/index.html"
from  datetime import datetime
import requests
from bs4 import BeautifulSoup
from retrying import retry

from utils import mongo_manager, proxies
import os
import re
import shutil
import subprocess
tts_china_cnr_gdgg = mongo_manager("tts_china_cnr_gdgg", db='public_data')


# 下载.m3u8链接中的音频文件
def download_m3u8_audio(folder_name,m3u8_url):
    # 下载.m3u8文件内容
    response = requests.get(m3u8_url)
    m3u8_content = response.text

    # 匹配所有的.ts文件链接并添加正确的schema
    base_url = '/'.join(m3u8_url.split('/')[:-1]) + '/'

    ts_urls = [base_url + line.strip() for line in m3u8_content.split('\n') if line.endswith('.ts')]
    # print(ts_urls)

    # 下载每个.ts文件
    for index, ts_url in enumerate(ts_urls, start=1):
        response = requests.get(ts_url,proxies=proxies)
        file_path = os.path.join(folder_name, f"audio_{index}.ts")
        with open(file_path, 'wb') as f:
            f.write(response.content)

    print("音频文件下载完成")
# 合并所有的.ts文件为单个音频文件
def merge_ts_files(input_folder, output_file):
    """Concatenate the .ts segments in *input_folder* into *output_file*.

    Uses ffmpeg's concat demuxer with stream copy (no re-encode).
    On success the intermediate list file and the whole segment folder
    are removed; on ffmpeg failure CalledProcessError is raised and the
    segments are left on disk for inspection.
    """
    ts_files = [f for f in os.listdir(input_folder) if f.endswith('.ts')]
    # Sort by the embedded segment number: a plain lexicographic sort()
    # orders audio_10.ts before audio_2.ts and scrambles the audio,
    # because download_m3u8_audio names segments without zero-padding.
    ts_files.sort(key=lambda fname: int(re.search(r'\d+', fname).group()))
    file_list_path = os.path.join(input_folder, 'file_list.txt')

    # The concat demuxer reads its input list from a text file,
    # one "file '<name>'" entry per line.
    with open(file_list_path, 'w') as f:
        for ts_file in ts_files:
            f.write(f"file '{ts_file}'\n")

    # -c copy: stream copy, no re-encode; aac_adtstoasc rewrites the ADTS
    # headers so the AAC stream is valid outside an MPEG-TS container.
    # check=True: never fall through to deleting the source segments
    # when ffmpeg failed (the original silently ignored failures).
    subprocess.run(['ffmpeg', '-f', 'concat', '-safe', '0', '-i', file_list_path,
                    '-c', 'copy', '-bsf:a', 'aac_adtstoasc', '-y', output_file],
                   check=True)

    # Clean up the intermediate list file and the segment folder.
    os.remove(file_list_path)
    shutil.rmtree(input_folder)
@retry(stop_max_attempt_number=3, wait_fixed=10000)
def get_gdgg(url="https://china.cnr.cn/gdgg/index.html"):
    """Fetch one listing page of the rolling-news index and return its HTML.

    Page numbering: index.html is page 1, index_1.html is page 2, etc.
    Raises requests.HTTPError on a 4xx/5xx status so that @retry
    actually re-attempts failed fetches (the original returned the
    error page body and never retried).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:124.0) Gecko/20100101 Firefox/124.0",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
        "Accept-Encoding": "gzip, deflate, br",
        "Connection": "keep-alive",
        "Referer": "https://china.cnr.cn/gdgg/index_3.html",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1"
    }
    cookies = {
        "cna": "UIiCHpaUcDkCAXrhkm7kH4bF",
        "wdcid": "548d7e29ea4a95a3",
        "wdlast": "1711069946",
        "wdses": "4fa1713066c1a992"
    }
    # timeout added so a hung connection cannot stall the crawl forever.
    response = requests.get(url, headers=headers, cookies=cookies, proxies=proxies, timeout=30)
    response.raise_for_status()
    # The site serves GB-encoded HTML; force the decode charset.
    response.encoding = 'gb2312'
    # Dropped the original `requests.session().close()`: it created a
    # brand-new Session and closed it immediately — a no-op that released
    # nothing used by this request.
    print(response.url, response)
    return response.text


def get_gdgg_day(max_pages=50):  # harvest the current rolling-news listing
    """Walk the listing pages and insert every article link into MongoDB.

    Stops when a page yields fewer than 10 items (last page reached),
    when the expected markup is missing, or after *max_pages* pages —
    a safety cap so a layout change cannot turn this into an infinite
    loop (the original looped unconditionally).
    """
    page = 0

    while page < max_pages:
        if page == 0:
            url = "https://china.cnr.cn/gdgg/index.html"
        else:
            # index_1.html is page 2, index_2.html page 3, and so on.
            url = f"https://china.cnr.cn/gdgg/index_{page}.html"

        soups = BeautifulSoup(get_gdgg(url=url), 'html.parser')
        article_list = soups.find('div', class_='articleList')
        if article_list is None:
            # Unexpected page layout: stop cleanly instead of crashing
            # with AttributeError on .find_all.
            break
        datas = article_list.find_all('div', class_='item url_http')
        for data in datas:
            href = data.find('a').get('href')

            try:
                # _id == href gives natural de-duplication: re-inserting
                # a known article raises and is simply logged.
                tts_china_cnr_gdgg.insertOne({'_id': href, 'title': data.text.strip(),
                                              'href': href, 'date': datetime.now()})
            except Exception as e:
                print(e)
        # A short page (< 10 items) is the last page.
        if len(datas) < 10:
            break
        page += 1

#下载音频和文本
# Download the audio clips and transcript text of one article page.
def down_video2txt(url = "https://china.cnr.cn/gdgg/20240317/t20240317_526629919.shtml"):
    """Scrape an article page: save every embedded audio (m3u8 or mp3)
    and write the paragraphs that follow each clip into a matching .txt.

    Files land under a per-day, per-article directory built from the URL.
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Pragma": "no-cache",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-User": "?1",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "sec-ch-ua": "\"Not_A Brand\";v=\"8\", \"Chromium\";v=\"120\", \"Google Chrome\";v=\"120\"",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "\"Linux\""
    }
    cookies = {
        "wdcid": "527af4fb80183ff4",
        "wdlast": "1704330276",
        "cna": "MvIcHr9qzCgCAXjo69I/84Ng"
    }
    response = requests.get(url, headers=headers, cookies=cookies, timeout=30)
    response.encoding = 'gb2312'  # GB-encoded article HTML
    print(response.url, response)
    soups = BeautifulSoup(response.text, 'html.parser')
    datas = soups.find('div', class_='article-content')
    texts_mapping = {}  # audio file name -> list of paragraphs after that clip
    name = ''           # name of the most recently seen audio clip
    today = datetime.now().strftime('%Y%m%d')
    path = f"/media/chenglei3/77D014CED257D1411/china_cnr/china.cnr.gdgg_{today}/{url.split('/')[-1].split('.')[0]}/"
    os.makedirs(path, exist_ok=True)
    for tag in datas.children:
        if tag.name == 'p':  # paragraph node
            if name:  # attach the text to the current audio clip, if any
                texts_mapping[name].append(tag.text)
        elif tag.name == 'div' and tag.has_attr('style'):  # audio player node
            # current_url is the URL carrying the audio file.
            try:
                current_url = tag.find('div', class_='HiRadioPlayer')['data-url']
            except (TypeError, KeyError):
                # No HiRadioPlayer child (find -> None, TypeError on [..])
                # or no data-url attribute (KeyError): not an audio div.
                continue
            name = current_url.split('/')[-1].split('?')[0]
            if name.endswith('.m3u8'):
                # Download the playlist's segments into a temp folder,
                # then merge them into a single .aac next to the texts.
                folder_name = f"{path}{name.split('.m3u8')[0]}/"
                os.makedirs(folder_name, exist_ok=True)
                download_m3u8_audio(folder_name, current_url)
                # merge_ts_files removes folder_name itself on success.
                # The original called os.rmdir(folder_name) afterwards,
                # which always raised FileNotFoundError and made every
                # m3u8 article fail — that call is removed.
                merge_ts_files(input_folder=folder_name, output_file=path + name.replace('.m3u8', '.aac'))
            elif name.endswith('.mp3'):  # plain mp3: save it directly
                response = requests.get(current_url, timeout=60)
                if response.status_code == 200:
                    with open(path + name, 'wb') as file:
                        file.write(response.content)

            print(f'Current URL: {current_url}, Name: {name}')
            texts_mapping[name] = []

    # After walking the article, write each clip's transcript to disk.
    for name, texts in texts_mapping.items():
        txt_name = name.split('.')[0] + '.txt'
        with open(path + txt_name, "w", encoding="utf-8") as file:
            file.write("\n".join(texts))
def main():
    """Harvest today's article list, then process every seed that has no
    status yet: download its audio/text and mark it 'done' or 'error'."""
    get_gdgg_day()

    # Seeds with status None have not been attempted yet.
    seeds = tts_china_cnr_gdgg.findAll({'status': None})
    for seed in seeds:
        print(seed)
        url = seed['href']
        today = datetime.now().strftime('%Y%m%d')
        # NOTE(review): this /home/... path differs from the /media/...
        # path that down_video2txt actually writes into, so the directory
        # created here (and removed on error below) is not where the data
        # lands — confirm which location is intended.
        path = f"/home/chenglei3/work/data/china.cnr.gdgg_{today}/{url.split('/')[-1].split('.')[0]}/"
        # print('path:',path)
        os.makedirs(path, exist_ok=True)
        try:
            down_video2txt(url =url)
            seed['status']= 'done'
        except  Exception as e:
            print(e)
            # Remove path and everything under it on failure.

            shutil.rmtree(path)
            seed['status']= 'error'
        # Persist the outcome so the seed is not retried next run.
        tts_china_cnr_gdgg.updateOne({'_id': seed['_id']}, seed)
if __name__ == '__main__':
    print()
    # Uncomment to re-queue previously failed seeds before a run:
    # tts_china_cnr_gdgg.updateMany({'status': 'error'}, {'status': None})
    main()

    # Release the MongoDB connection once the crawl finishes.
    tts_china_cnr_gdgg.close()
