import urllib.request
from urllib.parse import quote
from lxml import etree
import json
import logging
from MySqlHelper import MySqlHelper
import neocities

# Configure logging: INFO and above, appended to app.log.
logging.basicConfig(
    level=logging.INFO,
    filename="app.log",  # log file path and name
    filemode="a",  # append mode: new entries are written to the end of the file
    format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# MySQL table names used throughout this module.
TABLE_VIDEO = 'videos'
TABLE_VIDEO_DETAIL = 'video_details'
# NOTE(review): database credentials and API key are hard-coded in source;
# consider moving them to environment variables or a secrets store.
DB_HOST = 'mysql-tramper.sqlpub.com'
DB_USER = 'tookit-tramper'
DB_PASSWORD = '5ff868f73852edae-tramper'
DB_NAME = 'tookit-tramper'
NEO_API_KEY = 'ec5ee03afb9a3a082bd0b02d82f6156e-tramper'

async def getFromDuanJu(djName):
    """Search duanju.one for *djName*, scrape every episode's direct video
    URL, persist the results to MySQL and return them.

    Returns a list of dicts: {"name": series title, "no": episode label,
    "url": direct video URL}. Returns [] when nothing is found.
    """
    print('外部请求参数为：' + djName)
    baseUrl = 'https://duanju.one'
    html = await get_web_html(baseUrl + '/vodsearch/-------------.html?wd=' + quote(djName))
    tree = etree.HTML(html)
    # One node per search result.
    elements = tree.xpath("//div[@class='module-search-item']")
    videos = []
    if elements:
        # Ensure the target tables exist — once, not once per search result.
        await initTable()
    for element in elements:
        print('************列表item信息(多个搜索结果)开始************')
        # "Play now" link for this result. The path must be relative
        # (".//a") — an absolute "//a" searches the whole document and
        # returns the FIRST result's link for every element.
        djLinks = element.xpath('.//a[contains(@class, "btn-important")]/@href')

        print('获取每个搜索结果中的视频连接信息（立即播放）')
        if djLinks:
            itemHtml = await get_web_html(baseUrl + '/' + djLinks[0])
            itemTree = etree.HTML(itemHtml)
            # Episode-list containers on the series page.
            details = itemTree.xpath('//div[@class="module-blocklist scroll-box scroll-box-y"]/div[@class="scroll-content"]')
            # Walk every episode of the series and fetch its page.
            for detail in details:
                hrefs = detail.xpath('./a/@href')
                for href in hrefs:
                    hrefHtml = await get_web_html(baseUrl + '/' + href)
                    hrefTree = etree.HTML(hrefHtml)
                    videoUrl = hrefTree.xpath('//div[@class="player-info"]/a[@id="bfurl"]/@href')
                    title = hrefTree.xpath('//div[@class="video-info-header"]/h1/a/@title')
                    name = hrefTree.xpath('//div[@class="video-info-header"]/span/text()')
                    # Skip pages whose markup does not match instead of
                    # crashing with IndexError on [0].
                    if not (videoUrl and title and name):
                        logger.warning("页面解析失败，跳过: %s", href)
                        continue
                    logger.info(f"采集到视频链接为： {videoUrl[0]}")
                    logger.info(f"采集到视频标题为： {title[0]}")
                    logger.info(f"采集到视频集数为： {name[0]}")
                    videos.append({"name": title[0], "no": name[0], "url": videoUrl[0]})

        print('************列表item信息(多个搜索结果)结束************')

    # Persist before returning so callers always see saved data.
    await saveVideos(videos)
    return videos
    
async def get_web_html(url):
    """Fetch *url* with a GET request and return the raw body as bytes.

    NOTE(review): urllib is blocking, so despite the async signature this
    call blocks the event loop — consider an async HTTP client if
    concurrency ever matters.
    """
    # Context manager guarantees the response is closed even on error
    # (the original leaked the connection).
    with urllib.request.urlopen(url) as response:
        return response.read()
    
async def initTable():
    """Create the video tables if they do not already exist."""
    helper = MySqlHelper(DB_HOST, 3306, DB_USER, DB_PASSWORD, DB_NAME)
    # Parent table: one row per series, unique by name.
    fields = "id INT PRIMARY KEY AUTO_INCREMENT, name VARCHAR(255) UNIQUE"
    helper.create_table(TABLE_VIDEO, fields)

    # Child table: one row per episode; vid references videos.id.
    fields = "id INT PRIMARY KEY AUTO_INCREMENT, vid INT(11), no VARCHAR(255), url VARCHAR(255) UNIQUE"
    helper.create_table(TABLE_VIDEO_DETAIL, fields)
    # Release the connection (the original leaked it; getVideos closes its own).
    helper.close()
    
async def saveVideos(data):
    """Persist scraped episodes to MySQL.

    data: list of {"name", "no", "url"} dicts, all for the same series
    (the series name is taken from data[0]).

    Inserts the series row if it is new, otherwise wipes its old episode
    rows, then inserts one detail row per episode. Returns [] on empty
    input (preserving the original early-return contract), else None.
    """
    # Check before opening a connection we would never use.
    if not data:
        return []

    helper = MySqlHelper(DB_HOST, 3306, DB_USER, DB_PASSWORD, DB_NAME)
    name = data[0]['name']
    # NOTE(review): the condition is built by string concatenation — a
    # scraped title containing quotes broke the SQL (and is injectable).
    # Escaping here; a parameterized MySqlHelper API would be better.
    safeName = name.replace('\\', '\\\\').replace('"', '\\"')
    videos = helper.select(TABLE_VIDEO, 'id, name', 'name = "' + safeName + '"')
    if not videos:
        # New series: insert the parent row and keep its generated id.
        vid = helper.insert(TABLE_VIDEO, {'name': name})
    else:
        vid = videos[0][0]
        # Re-scrape of a known series: wipe old episode rows first.
        helper.delete(TABLE_VIDEO_DETAIL, 'vid = ' + str(vid))

    for item in data:
        # Distinct name here — the original rebound "data" inside the loop,
        # shadowing the parameter being iterated.
        row = {'vid': vid, 'no': item['no'], 'url': item['url']}
        helper.insert(TABLE_VIDEO_DETAIL, row)
    helper.close()
        
async def getVideos():
    """Return every saved series with its episode URLs as a table payload.

    Shape: {"code": 0, "data": [{"id", "name", "url"}, ...], "count": N},
    where "url" joins all episode URLs with <br/> for HTML display.
    """
    helper = MySqlHelper(DB_HOST, 3306, DB_USER, DB_PASSWORD, DB_NAME)
    sql = "SELECT vd.vid,v.NAME,group_concat(vd.url) FROM video_details AS vd LEFT JOIN videos AS v ON v.id = vd.vid GROUP BY vd.vid,v.NAME;"
    videos = helper.getBySql(sql)
    # Release the connection as soon as the result set is in hand.
    helper.close()

    print(videos)
    rows = []
    for vid, seriesName, urlCsv in videos:
        print(seriesName)
        entry = {"id": vid, "name": seriesName, "url": '<br/>'.join(urlCsv.split(','))}
        print(entry)
        rows.append(entry)

    return {"code": 0, "data": rows, "count": len(videos)}
        
async def pushToNc():
    """Dump every series' episode URLs to dj.json and upload it to
    Neocities as data.json. Returns the raw grouped DB rows.

    The JSON shape is {1: [{"url": ...}, ...], 2: [...], ...} with
    1-based series positions as keys.
    """
    helper = MySqlHelper(DB_HOST, 3306, DB_USER, DB_PASSWORD, DB_NAME)
    fields = 'vid, group_concat(url)'
    condition = ''
    groupBy = 'vid'
    # One row per series: (vid, "url1,url2,...") via GROUP_CONCAT.
    rows = helper.select(TABLE_VIDEO_DETAIL, fields, condition, groupBy)
    # Release the connection before the slow file/network work
    # (the original never closed it).
    helper.close()
    print(rows)

    dj = {}
    # enumerate replaces the original hand-rolled 1-based counter.
    for index, row in enumerate(rows, start=1):
        urls = []
        for url in row[1].split(','):
            print(url)
            urls.append({'url': url})
        dj[index] = urls

    # Local file to write, and the name it gets on Neocities.
    filePath = "dj.json"
    ncFileName = "data.json"
    with open(filePath, "w") as file:
        file.write(json.dumps(dj))

    # NOTE(review): this literal key differs from the NEO_API_KEY constant
    # above (no "-tramper" suffix) — confirm which value is correct before
    # unifying them.
    nc = neocities.NeoCities(api_key='ec5ee03afb9a3a082bd0b02d82f6156e')
    response = nc.upload((filePath, ncFileName),)

    return rows
    