import requests, re, pymysql
import concurrent.futures
from dbutils.pooled_db import PooledDB
# from bs4 import BeautifulSoup # type: ignore

# API token sent in every request body (the get_* functions below reuse it).
token = "20240920iQVVUN8CTAmTZ17PkCg"
# Raw session cookie captured from the app.
# NOTE(review): looks session-scoped — likely expires; refresh when requests start failing.
cookie = "acw_tc=0b32823a17270537397871843e0bcbe198059221f0e97269a284a1d52bc45a; SERVERID=bb67fa45d81410ac7830efa9e139446e|1727053805|1727053805"
# Common HTTP headers imitating the Android client build.
headers = {
    "User-Agent": "KKWeight_Android",
    "X-DUSHU-APP-PLT": "2",
    "X-DUSHU-BUILD-IDENTIFIER": "com.kebida.dushu",
    "X-DUSHU-APP-VER": "6.3.0",
    "X-DUSHU-APP-SYSVER": "14",
    "X-DUSHU-APP-DEVTOKEN": "ArqYvGYExFidAt5yYrKHDWMQErDznwFamqfVcyhy2uQg",
    "X-DUSHU-APP-CHN": "oppo",
    "Content-Type": "application/json;charset=UTF-8",
    "Cookie": cookie
}


def remove_html_tags(text):
    """Strip HTML comments, <script>/<style> blocks and all remaining tags
    from *text*, then drop everything before the "读完本文约需..." reading-time
    marker when it is present.

    Returns the cleaned plain text (unchanged except for the stripping when
    the marker is absent).
    """
    # Alternation order matters: comments/script/style blocks (including
    # their inner content) go first, then any leftover tag.  DOTALL lets
    # those blocks span multiple lines.
    pattern = r'<!--.*?-->|<script.*?>.*?</script>|<style.*?>.*?</style>|<[^>]+>'
    cleaned_text = re.sub(pattern, '', text, flags=re.DOTALL)

    # Keep only the article body: everything from the marker onward.
    # Slicing from match.start() (rather than taking match.group(0) of a
    # '.*' pattern, which stops at the first newline without DOTALL)
    # preserves the lines that follow the marker as well.
    match = re.search(r'读完本文约需', cleaned_text)
    if match:
        cleaned_text = cleaned_text[match.start():]
    return cleaned_text

# Extract an image URL from an HTML fragment.
def get_image_link(content):
    """Return the URL of the first <img src="..."> tag in *content*, or None."""
    found = re.search(r'<img src="([^"]*)">', content)
    return found.group(1) if found else None


def get_book_info(bookId):
    """Fetch the detail payload for one book from the dushu365 API.

    Returns the ``data`` dict of the JSON response on HTTP 200, otherwise an
    "Error: <status>" string — callers must check the type before indexing.
    """
    url = "http://gw1-ipv6.dushu365.com/resource-orchestration-system/book/v101/content"
    payload = {
        "appId": "2001",
        "token": token,  # shared module-level token (was a duplicated literal)
        "bookId": bookId
    }
    response = requests.post(url, headers=headers, json=payload)
    if response.status_code == 200:
        return response.json()['data']
    else:
        return f"Error: {response.status_code}"


def get_lists():
    """Fetch the full classified book list (page 1, up to 1000 entries).

    Returns the parsed JSON dict on HTTP 200, otherwise an
    "Error: <status>" string — callers must check the type before indexing.
    """
    url = "https://gw1-ipv6.dushu365.com/resource-orchestration-system/book/classify/v100/listClassifyBook"
    payload = {
        "publishYear": 0,
        "sortType": 1,
        "classifyIds": "",  # empty = all categories (e.g. [10054] would be 亲子关系)
        "pageNo": 1,
        "appId": "2001",
        "pageSize": 1000,
        "businessType": 1,
        "token": token,  # shared module-level token (was a duplicated literal)
        "bookReadStatus": -1
    }
    response = requests.post(url, headers=headers, json=payload)
    if response.status_code == 200:
        return response.json()
    else:
        return f"Error: {response.status_code}"


def get_book_text(fragmentId):
    """Download the transcript fragment and return its tag-stripped text,
    or an "Error: <status>" string on a non-200 response."""
    url = "https://gw1-ipv6.dushu365.com/resource-orchestration-system/book/v101/content"
    payload = {
        "appId": "2001",
        "fragmentId": fragmentId,
        "prekey": "",
        "token": token
    }
    resp = requests.post(url, headers=headers, json=payload)
    if resp.status_code != 200:
        return f"Error: {resp.status_code}"
    # Strip the HTML before returning — callers store plain text only.
    return remove_html_tags(resp.json()['data']['content'])

def get_book_mind(fragmentId):
    """Download the mind-map fragment and return its image URL.

    Returns None when the fragment contains no <img> tag, or an
    "Error: <status>" string on a non-200 response.
    """
    url = "https://gw1-ipv6.dushu365.com/resource-orchestration-system/book/v101/content"
    payload = {
        "appId": "2001",
        "fragmentId": fragmentId,
        "prekey": "",
        "token": token
    }
    resp = requests.post(url, headers=headers, json=payload)
    if resp.status_code != 200:
        return f"Error: {resp.status_code}"
    body = resp.json()
    link = get_image_link(body['data']['content'])
    # Debug trace: the extracted link, or the raw payload when none was found.
    if link:
        print(link)
    else:
        print(body)
    return link

# Insert or update one book row in the database (upsert on primary key).
def save_book_to_db(book):
    """Persist *book* into the ``books`` table, updating the row if the id exists.

    Expects the keys listed in the column order below; optional keys
    (fragmentId, publishTime, score, videoCoverUrl, mindMapUrl) may be absent.
    Commits on success; rolls back and logs the error on failure.
    """
    # Sentinel so the except/finally branches are safe even when
    # pool.connection() itself raises (previously that left `connection`
    # unbound and the handlers raised NameError, masking the real error).
    connection = None
    try:
        # Borrow a fresh connection from the pool for this call only.
        connection = pool.connection()
        with connection.cursor() as cursor:
            sql = """
            INSERT INTO books (id, title, author, bookCoverUrl, speaker, bookCategoryName, recommendInfo, audioUrl, videoUrl, fragmentId, bookText,publishTime,score,videoCoverUrl,mindMapUrl)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            ON DUPLICATE KEY UPDATE 
                title=VALUES(title), 
                author=VALUES(author), 
                bookCoverUrl=VALUES(bookCoverUrl), 
                speaker=VALUES(speaker), 
                bookCategoryName=VALUES(bookCategoryName), 
                recommendInfo=VALUES(recommendInfo), 
                audioUrl=VALUES(audioUrl), 
                videoUrl=VALUES(videoUrl), 
                fragmentId=VALUES(fragmentId), 
                bookText=VALUES(bookText),
                publishTime=VALUES(publishTime),
                score=VALUES(score),
                videoCoverUrl=VALUES(videoCoverUrl),
                mindMapUrl=VALUES(mindMapUrl)
            """
            cursor.execute(sql, (
                book['id'],
                book['title'],
                book['author'],
                book['bookCoverUrl'],
                book['speaker'],
                book['bookCategoryName'],
                book['recommendInfo'],
                book['audioUrl'],
                book['videoUrl'],
                book.get('fragmentId', None),
                book['bookText'],
                # publishTime/score arrive as floats/strings; coerce to int,
                # storing NULL when missing or falsy.
                int(book.get('publishTime', 0)) if book.get('publishTime') else None,
                int(book.get('score', 0)) if book.get('score') else None,
                book.get('videoCoverUrl', None),
                book.get('mindMapUrl', None)
            ))
        connection.commit()
        print(f"Book {book['title']} (ID: {book['id']}) 保存成功")
    except Exception as e:
        # .get() here so a book dict missing keys can't raise inside the handler.
        print(f"Book {book.get('title')} (ID: {book.get('id')}) 保存失败: {e}")
        if connection is not None:
            connection.rollback()
    finally:
        if connection is not None:
            connection.close()





# Create a MySQL connection pool (pymysql driver). Worker threads borrow
# short-lived connections from it inside save_book_to_db.
pool = PooledDB(
    creator=pymysql,  # DB-API module used to open the raw connections
    maxconnections=10,  # hard cap on simultaneous connections
    mincached=2,  # connections opened eagerly when the pool starts
    blocking=True,  # wait (rather than raise) when the pool is exhausted
    host='localhost',
    user='root',
    password='wxmp2014',
    database='fdds',
    charset='utf8mb4',
    cursorclass=pymysql.cursors.DictCursor  # rows come back as dicts
)

# Fetch the catalogue once; each entry is processed concurrently below.
response_data = get_lists()
print("一共获取：", len(response_data['data']), "本书")
# NOTE(review): books_data appears unused in this file — confirm before removing.
books_data = []

# Fetch, assemble and persist a single book record (thread-pool worker).
def process_book(item):
    """Build the full book dict for one catalogue *item* and save it.

    Swallows and logs all errors so one bad book never kills the pool worker.
    """
    # Seed id/title before any network call so the except handler below can
    # always report which book failed (previously `book` was assigned after
    # get_book_info, so an early failure raised NameError in the handler).
    book = {'id': item.get('id'), 'title': item.get('title')}
    try:
        bookinfo = get_book_info(item["id"])
        info = bookinfo['bookInfo']
        book['author'] = info['author']
        book['bookCoverUrl'] = info['bookCoverUrl']
        book['speaker'] = info['speaker']
        # API timestamp is in milliseconds; store seconds
        # (save_book_to_db casts it to int).
        book['publishTime'] = int(info['publishTime']) / 1000
        book['score'] = info['score']
        book['bookCategoryName'] = info['bookCategoryName']
        book['recommendInfo'] = bookinfo['recommendVO']['recommendInfo']
        book['audioUrl'] = bookinfo['audioInfo']['audioUrl']

        # Locate the transcript ("文字稿") and mind-map ("思维导图") fragments.
        mindId = None  # guard: not every book has a mind-map article
        for article in bookinfo['articles']:
            if article['moduleName'] == "文字稿":
                book['fragmentId'] = article['fragmentId']
            if article['moduleName'] == "思维导图":
                mindId = article['fragmentId']

        if 'content' not in bookinfo:
            # No inline content — skip this book (matches the original flow).
            return
        book['mindMapUrl'] = get_book_mind(mindId) if mindId else None
        if not book['mindMapUrl'] and mindId:
            # One retry: the first fetch occasionally yields no image link.
            print(book['title'])
            book['mindMapUrl'] = get_book_mind(mindId)

        if 'videoInfo' in bookinfo:
            book['videoUrl'] = bookinfo['videoInfo'].get('videoUrl', None)
            book['videoCoverUrl'] = bookinfo['videoInfo'].get('cover', None)
        else:
            book['videoUrl'] = None

        if 'fragmentId' not in book:
            # Explicit, descriptive failure instead of a bare KeyError later.
            raise KeyError("fragmentId (no 文字稿 article found)")
        book['bookText'] = get_book_text(book['fragmentId'])
        save_book_to_db(book)
    except Exception as e:
        print(f"Error processing book {book['id']}{book['title']}: {e}")

# Fan the per-book work out over 5 threads (I/O-bound: HTTP + MySQL).
# NOTE(review): executor.map results are never consumed, which would normally
# hide exceptions — but process_book catches and logs its own, so none are lost.
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    executor.map(process_book, response_data['data'])
