import random
import re
import time

import pandas as pd
from lxml import etree
import requests
from requests.adapters import HTTPAdapter
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Request a batch of rotating proxies from the proxy-vendor API.
# NOTE(review): account credentials are embedded in the URL — consider
# moving uid/vkey into configuration instead of source code.
response = requests.get(
    "http://api.xiequ.cn/VAD/GetIp.aspx?act=get&uid=146487&vkey=FF9B14700D36FA974A3F5C301DFB4DD9&num=200&time=30&plat=0&re=0&type=0&so=1&ow=1&spl=1&addr=&db=1")
proxy_data = response.json()

# The API signals success with code == 0.
if proxy_data["code"] == 0:
    # Extract the proxy list from the JSON payload.
    proxy_list = proxy_data['data']
    print(proxy_list)
    # Build `requests`-compatible proxy mappings.
    # BUG FIX: scheme keys must be lowercase ("http"/"https") — requests
    # matches proxies keys against the lowercased URL scheme from
    # urlparse, so uppercase keys ("HTTPS") were never selected and every
    # request went out unproxied. Each entry now routes both http and
    # https traffic through the same proxy endpoint.
    proxies = [
        {
            "http": f"http://{proxy['IP']}:{proxy['Port']}",
            "https": f"http://{proxy['IP']}:{proxy['Port']}",
        }
        for proxy in proxy_list
    ]
    print(proxies)


    def getData(url, headers, proxy):
        """Fetch one Bilibili video page and scrape its statistics.

        Args:
            url: Video page URL.
            headers: Request headers (must carry a User-Agent).
            proxy: `requests`-style proxies mapping used for this request.

        Returns:
            dict of scraped fields (missing values fall back to 'N/A'),
            or None when the request or parsing fails.
        """
        # Use the session as a context manager so its connection pool is
        # always released (it was previously never closed — a leak per call).
        with requests.Session() as sess:
            # Retry transient failures up to 3 times on both schemes.
            sess.mount('http://', HTTPAdapter(max_retries=3))
            sess.mount('https://', HTTPAdapter(max_retries=3))

            try:
                # stream=True was pointless here: rs.text is consumed
                # immediately, which downloads the full body anyway.
                rs = sess.get(url, headers=headers, timeout=(5, 5),
                              proxies=proxy, verify=False)
                rs.raise_for_status()
                html = etree.HTML(rs.text)
                title = html.xpath('string(//div[@class="video-info-title-inner"]/h1)').strip('\n') or 'N/A'
                must_see = html.xpath('string(//div[@class="honor-text"])').strip('\n') or 'N/A'
                view_counts = html.xpath('string(//div[@class="view-text"])') or 'N/A'
                danmu = html.xpath('string(//div[@class="dm-text"])') or 'N/A'
                video_time = html.xpath('string(//div[@class="pubdate-ip-text"])') or 'N/A'
                like = html.xpath('string(//div[@class="video-like video-toolbar-left-item"]/span)') or 'N/A'
                insert_coins = html.xpath('string(//div[@class="video-coin video-toolbar-left-item"]/span)') or 'N/A'
                collect = html.xpath('string(//div[@class="video-fav video-toolbar-left-item"]/span)') or 'N/A'
                transmit = html.xpath('string(//span[@class="video-share-info video-toolbar-item-text"])').strip(
                    '\n') or 'N/A'
                intro = html.xpath('string(//span[@class="desc-info-text"])').strip().strip('\n') or 'N/A'
                video_type = str(html.xpath('//a[@class="tag-link"]/text()')).replace('\\n', '').replace("'", "").replace(
                    '[', '').replace(']', '').replace(' ', '').replace(',', '/') or 'N/A'

                # BUG FIX: the comment count lives in an inline JSON blob;
                # when the pattern is absent, .group(1) on None raised
                # AttributeError and the whole row was discarded by the
                # except below. Fall back to 'N/A' instead.
                reply_match = re.search(r'"reply":(\d+),', rs.text)
                comment = reply_match.group(1) if reply_match else 'N/A'

                # Uploader name: VIP accounts use a different anchor class.
                name = html.xpath('string(//a[@class="up-name vip"])').strip().strip('\n')
                if not name:
                    name = html.xpath('string(//a[@class="up-name"])').strip().strip('\n') or 'N/A'

                attention = html.xpath(
                    'string(//div[@class="default-btn follow-btn b-gz not-follow"]/span)').strip() or 'N/A'

                return {
                    "title": title, "must_see": must_see, "view_counts": view_counts, "danmu": danmu,
                    "video_time": video_time, "like": like, "insert_coins": insert_coins, "collect": collect,
                    "transmit": transmit, "intro": intro, "video_type": video_type, "comment": comment,
                    "name": name, "attention": attention
                }

            except Exception as e:
                # Best-effort scraper: log and skip this URL on any failure.
                print(f"Error fetching data from {url}: {e}")
                return None


    if __name__ == '__main__':
        # Pool of User-Agent strings; one is picked at random per run so
        # consecutive runs don't present an identical fingerprint.
        USER_AGENTS = [
            "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
            "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
            "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
            "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36 Edg/129.0.0.0",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
        ]
        headers = {'User-Agent': random.choice(USER_AGENTS)}

        # Video page URLs to scrape, one per row of the CSV.
        df = pd.read_csv('video_url1.csv')
        url_list = df['videoUrl'].tolist()

        data_list = []

        # Walk the URL list, rotating through the proxy pool round-robin.
        for i, url in enumerate(url_list):
            proxy = proxies[i % len(proxies)]
            data = getData(url, headers, proxy)
            if data:
                data_list.append(data)
                print(f'爬取第{i}个视频完成!')
                time.sleep(0.1)  # brief pause between successful fetches

        # Persist the scraped rows to Excel.
        df = pd.DataFrame(data_list)
        df.to_excel('data/video_data1.xlsx')
else:
    # The proxy API returned a non-zero code: no proxies are available,
    # so abort without scraping anything.
    print("未能成功获取代理列表。请检查 API 调用是否正确。")
