from bs4 import BeautifulSoup
import re
import ast
import requests


def main(args):
    """Entry point placeholder; intentionally does nothing yet."""



def get_element_1(html_content, wanna_type):
    """Return the ``content`` attribute of the <meta> tag whose ``name``
    equals *wanna_type*, or "" when no such tag exists.

    Args:
        html_content: Raw HTML text to parse.
        wanna_type: Value of the <meta> tag's ``name`` attribute to look for.
    """
    # Parse the HTML with Beautiful Soup.
    parsed = BeautifulSoup(html_content, 'html.parser')

    # Locate the requested <meta> tag; bail out early when it is absent.
    tag = parsed.find('meta', {'name': wanna_type})
    if not tag:
        print("未找到指定的<meta>标签")
        return ""

    # Pull out the value of the content attribute.
    content_value = tag.get('content')
    print(wanna_type + " content:", content_value)
    return content_value



def get_last_update_time(html_content):
    """Extract the numeric ``lastUpdateTime`` value embedded in the page
    source.

    Args:
        html_content: Raw HTML/JS text to scan.

    Returns:
        The digits of ``lastUpdateTime`` as a string, or "" when the key
        is not present.
    """
    # The value is serialized in the page as e.g. `"lastUpdateTime": 1700000000`.
    match = re.search(r'"lastUpdateTime":\s*(\d+)', html_content)
    if match:
        last_update_time_value = match.group(1)
        print("lastUpdateTime value:", last_update_time_value)
        return last_update_time_value
    print("未找到lastUpdateTime的值")
    return ""





def get_ip_location(html_content):
    """Return the value of the ``ipLocation`` JSON field found in the page
    source, or "" when the field is absent."""
    found = re.search(r'"ipLocation":"(.*?)"', html_content)
    if found is None:
        print("未找到匹配的字符串")
        return ""
    location = found.group(1)
    print(location)
    return location





def get_comment_num(html_content):
    """Return the comment-count text from the note page, or "" when the
    expected markup is missing."""
    # Parse the HTML with Beautiful Soup.
    soup = BeautifulSoup(html_content, 'html.parser')

    # Outer wrapper: <span class="chat-wrapper" data-v-3eba1daa>.
    wrapper = soup.find('span', {'class': 'chat-wrapper', 'data-v-3eba1daa': True})
    if not wrapper:
        print("未找到符合条件的<span class='chat-wrapper'>标签")
        return ""

    # Inner counter: <span class="count" data-v-3eba1daa>.
    inner = wrapper.find('span', {'class': 'count', 'data-v-3eba1daa': True})
    if not inner:
        print("未在<span class='chat-wrapper'>中找到符合条件的<span class='count'>标签")
        return ""

    value = inner.text
    print("count value:", value)
    return value



def get_collect_num(html_content):
    """Return the collect-count text from the note page, or "" when the
    expected markup is missing."""
    # Parse the HTML with Beautiful Soup.
    soup = BeautifulSoup(html_content, 'html.parser')

    # Outer wrapper: <span class="collect-wrapper" data-v-a0c2bbd8>.
    wrapper = soup.find('span', {'class': 'collect-wrapper', 'data-v-a0c2bbd8': True})
    if not wrapper:
        print("未找到符合条件的<span class='collect-wrapper'>标签")
        return ""

    # Inner counter: <span class="count" data-v-a0c2bbd8>.
    inner = wrapper.find('span', {'class': 'count', 'data-v-a0c2bbd8': True})
    if not inner:
        print("未在<span class='collect-wrapper'>中找到符合条件的<span class='count'>标签")
        return ""

    value = inner.text
    print("count value:", value)
    return value


def get_like_num(html_content):
    """Return the like-count text from the note page, or "" when the
    expected markup is missing."""
    # Parse the HTML with Beautiful Soup.
    soup = BeautifulSoup(html_content, 'html.parser')

    # Outer wrapper: <span class="like-lottie" data-v-6138c4f4>.
    wrapper = soup.find('span', {'class': 'like-lottie', 'data-v-6138c4f4': True})
    if not wrapper:
        print("未找到符合条件的<span class='like-lottie'>标签")
        return ""

    # Inner counter: <span class="count" data-v-6138c4f4>.
    inner = wrapper.find('span', {'class': 'count', 'data-v-6138c4f4': True})
    if not inner:
        print("未在<span class='like-lottie'>中找到符合条件的<span class='count'>标签")
        return ""

    value = inner.text
    print("count value:", value)
    return value



def get_note_author_info(html_content):
    """Extract the note author's display name and profile link.

    Args:
        html_content: Raw HTML of a note page.

    Returns:
        Tuple ``(author_name, author_link)``; either element is "" when
        the corresponding markup cannot be found.
    """
    # Parse the page with Beautiful Soup.
    soup = BeautifulSoup(html_content, 'html.parser')

    # Locate the <div> that holds the author information.
    author_container = soup.find('div', class_='author-container')
    if not author_container:
        print("未找到作者信息")
        return "", ""

    # Guard each sub-lookup: the original dereferenced .text / ['href']
    # directly and crashed when the container rendered without the
    # username span or the profile anchor.
    name_tag = author_container.find('span', class_='username')
    link_tag = author_container.find('a', class_='name')
    author_name = name_tag.text if name_tag else ""
    author_link = link_tag.get('href', "") if link_tag else ""

    print("作者名字:", author_name)
    print("作者链接:", author_link)
    return author_name, author_link




def get_pic_url_list(html):
    """Collect the unique image ids referenced by the note's infoList blocks.

    Args:
        html: Raw page source containing serialized ``"traceId"``/``"infoList"``
            JSON fragments.

    Returns:
        A set of image-id strings (the path segment preceding ``!`` in each
        image URL); empty set when nothing matches.
    """
    # Each image group is serialized as `"traceId":"...","infoList":[...]`.
    # Compile the patterns once instead of rebuilding them every iteration.
    block_pattern = re.compile(r'"traceId":"(.*?)","infoList":\[(.*?)\]')
    url_pattern = re.compile(r'"url":"(.*?)"')
    # URLs escape '/' as \u002F; after substituting backslashes, the image id
    # is the final backslash-delimited segment before '!'.
    id_pattern = re.compile(r'\\([^\\]+)!')

    trace_id_list = []
    for trace_id, info_list in block_pattern.findall(html):
        print(f"Trace ID: {trace_id}")
        print(f"infoList: {info_list}")

        # Find every url inside this infoList fragment.
        urls = url_pattern.findall(info_list)
        print('\n')
        print(urls)
        print('\n')

        for url in urls:
            print(f"URL: {url}")
            text = url.replace('\\u002F', '\\')
            id_match = id_pattern.search(text)
            if id_match:
                extracted_content = id_match.group(1)
                print(extracted_content)
            else:
                print("未找到匹配的内容")
                extracted_content = ""
            trace_id_list.append(extracted_content)

    # NOTE: the original printed `urls` here, which raised NameError when no
    # infoList block matched at all; print the collected ids instead.
    print(trace_id_list)
    print('\n')
    print('当前图片个数为:')
    print(len(set(trace_id_list)))
    print('\n')
    return set(trace_id_list)




# Parse the note's video link and download the file.
def get_video_url(html, file_path, file_name):
    """Locate the note's ``originVideoKey`` in the page source, download the
    video to ``<file_path>/video/<file_name>.mp4``, and return the key.

    Args:
        html: Raw page source.
        file_path: Directory expected to contain a ``video`` subdirectory.
        file_name: Basename (without extension) for the saved file.

    Returns:
        The ``originVideoKey`` string, or None when the page has no video
        (in which case nothing is downloaded).
    """
    pattern = r'"consumer":\s*{\s*"originVideoKey":\s*"([^"]+)"'
    match = re.search(pattern, html)
    if match is None:
        # The original concatenated None into the URL and raised TypeError;
        # bail out cleanly instead so callers can detect "no video".
        return None
    origin_video_key = match.group(1)

    video_url = "http://sns-video-bd.xhscdn.com/" + origin_video_key
    # Keys may embed \uXXXX escapes; decode them into real characters.
    video_url = video_url.encode().decode('unicode-escape')
    print("video_url: ", video_url)

    # Stream the download in small chunks to keep memory use flat.
    r = requests.get(video_url, stream=True)
    with open(file_path + '/' + 'video' + '/' + file_name + ".mp4", "wb") as f:
        for chunk in r.iter_content(chunk_size=512):
            f.write(chunk)
    return origin_video_key



def get_three_data(source_code):
    """Pull the liked/collected/comment/share counters out of the page source.

    Args:
        source_code: Raw page source containing the serialized interaction
            counters, e.g. ``"likedCount":"13602",...``.

    Returns:
        ``[liked, collected, comment, share]`` where each entry is the digit
        string, or None when that counter is absent.
    """
    # Extract each counter with the same `"<field>":"<digits>"` shape.
    fields = ('likedCount', 'collectedCount', 'commentCount', 'shareCount')
    counts = []
    for field in fields:
        found = re.search(r'"%s":"(\d+)"' % field, source_code)
        counts.append(found.group(1) if found else None)

    for value in counts:
        print(value)
    return counts



def extract_image_ids(html_string):
    """Extract only the image ids from the HTML string.

    The id is the path segment immediately before ``!`` in each
    ``og:image`` URL.
    """
    # Every image URL appears as an og:image <meta> tag pointing at the CDN.
    url_pattern = r'<meta name="og:image" content="(http://sns-webpic-qc\.xhscdn\.com/[^"]+)"'

    ids = []
    for url in re.findall(url_pattern, html_string):
        id_match = re.search(r'/([^/]+)!', url)
        if not id_match:
            continue
        ids.append(id_match.group(1))
    return ids