import json
import time
import requests
import re
from lxml import html
import random
# from setting import *
# from .tools_utils import *
def fet_article_content(url, group_name):
    """Fetch the raw HTML of an article page after a random polite delay.

    Args:
        url: page URL to download.
        group_name: group label (accepted for a uniform call signature;
            not used by the request itself).

    Returns:
        The response body as text on HTTP 200, otherwise "".
    """
    # Random 5-10s pause to avoid hammering the site.
    delay = random.uniform(5, 10)
    print(f"等待 {delay:.2f} 秒...")
    time.sleep(delay)

    ua = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
          'AppleWebKit/537.36 (KHTML, like Gecko) '
          'Chrome/91.0.4472.124 Safari/537.36')
    try:
        resp = requests.get(url, headers={'User-Agent': ua}, timeout=15)
        # Force UTF-8 so Chinese text decodes correctly regardless of the
        # server's declared charset.
        resp.encoding = 'utf-8'
        if resp.status_code == 200:
            return resp.text
        return ""
    except Exception as exc:
        print(f"请求错误: {exc}")
        return ""

def clean_text(text):
    """Normalize *text*: collapse whitespace runs to single spaces, trim,
    and drop C0/C1 control characters. Falsy input yields ""."""
    if not text:
        return ""
    collapsed = re.sub(r'\s+', ' ', text).strip()
    return re.sub(r'[\x00-\x1f\x7f-\x9f]', '', collapsed)

def extract_urls_from_text(text):
    """Return every http(s) URL found in *text*, in order of appearance."""
    return re.findall(r'https?://[^\s]+', text)

def parse_single_article(html_content, url, group_name, item_id=None):
    """Parse one fetched article page into a structured dict.

    Args:
        html_content: raw HTML of the article page ("" / None -> None).
        url: source URL, stored in the result.
        group_name: group label propagated into the result and nested fetches.
        item_id: optional article id stored under 'id'.

    Returns:
        A dict with keys id/title/userName/publicTime/avatar/contents/url/
        group_name, or None when the page is empty or parsing fails.
    """
    if not html_content:
        return None

    try:
        # Fix: fromstring() previously ran outside the try, so malformed
        # HTML raised out of this function instead of returning None like
        # every other failure path.
        tree = html.fromstring(html_content)

        # Static placeholder avatar; the scraped page does not expose one.
        avatar = 'https://img3.doubanio.com/icon/up120152310-13.jpg'

        # Try several author markups; first match wins.
        username_patterns = [
            '//span[@class="author"]/a/text()',
            '//a[@class="author"]/text()',
            '//div[@class="author"]/a/text()'
        ]

        userName = "未知用户"
        for pattern in username_patterns:
            elements = tree.xpath(pattern)
            if elements:
                userName = clean_text(elements[0])
                break

        publicTime = "未知时间"
        time_elements = tree.xpath('//time[@class="time"]/@title')
        if time_elements:
            publicTime = clean_text(time_elements[0])

        title = "未知标题"
        title_elements = tree.xpath("//article//h1[@class='art-title']/text()")
        if title_elements:
            title = clean_text(title_elements[0])

        # Article body container; contents stay empty when it is missing.
        content_nodes = tree.xpath('//div[@class="art-content"]//div[@class="article-content"]')
        if content_nodes:
            structured_contents = parse_article_content(content_nodes[0], group_name)
        else:
            structured_contents = []

        article_data = {
            'id': item_id,
            'title': title,
            'userName': userName,
            'publicTime': publicTime,
            'avatar': avatar,
            'contents': structured_contents,
            'url': url,
            'group_name': group_name,
        }

        print(f"抓取链接文章: {title}")
        print(f"用户: {userName}")
        print(f"时间: {publicTime}")
        print(f"内容块数: {len(structured_contents)}")

        return article_data

    except Exception as e:
        print(f"解析链接文章时出错: {e}")
        return None

def _absolutize(link):
    """Make a scraped URL absolute: '//x' gets an https scheme, '/x' gets
    the site root prepended; anything else is returned unchanged."""
    if link.startswith('//'):
        return 'https:' + link
    if link.startswith('/'):
        return 'https://news.xianbao.fun' + link
    return link


def _append_text(text, out):
    """Append already-cleaned *text* to *out*, splitting any embedded URLs
    into separate 'link' items so they stay clickable."""
    if not extract_urls_from_text(text):
        out.append({
            'type': 'text',
            'content': text
        })
        return
    # Split on URLs while keeping them (capturing group), then emit each
    # piece as either a link or plain text.
    for part in re.split(r'(https?://[^\s]+)', text):
        if not part:
            continue
        if part.startswith('http'):
            out.append({
                'type': 'link',
                'content': part,
                'url': part
            })
        else:
            out.append({
                'type': 'text',
                'content': part
            })


def _append_image(element, out):
    """Append an 'image' item for an <img> element (skipped if no src)."""
    src = element.get('src', '')
    if src:
        out.append({
            'type': 'image',
            'url': _absolutize(src)
        })


def _append_anchor(element, group_name, out):
    """Handle an <a> element: inline douban-maizu article links are fetched
    and embedded as 'linked_article'; everything else (or any fetch/parse
    failure) is kept as a plain 'link' item."""
    href = element.get('href', '')
    link_text = clean_text(element.text_content())
    if not href:
        return
    href = _absolutize(href)
    plain_link = {
        'type': 'link',
        'content': link_text,
        'url': href
    }

    if 'douban-maizu' in href and href.endswith('.html'):
        print(f"发现文章链接: {link_text} -> {href}")
        linked_html = fet_article_content(href, group_name)
        if linked_html:
            # The numeric article id is embedded in the URL path.
            id_match = re.search(r'/(\d+)\.html', href)
            article_id = int(id_match.group(1)) if id_match else None
            linked_article = parse_single_article(linked_html, href, group_name, article_id)
            if linked_article:
                out.append({
                    'type': 'linked_article',
                    'content': linked_article
                })
            else:
                out.append(plain_link)
        else:
            out.append(plain_link)
    else:
        out.append(plain_link)


def parse_article_content(content_node, group_name):
    """Walk the article body node and return a structured content list.

    Traversal is deliberately two levels deep (direct children, then their
    children); at the second level, non-img/non-a tags are flattened to
    their text content — same behavior as before, but the previously
    triplicated text/URL handling and duplicated img/anchor handling now
    live in the _append_* helpers above.

    Args:
        content_node: lxml element wrapping the article body.
        group_name: label forwarded to nested article fetches.

    Returns:
        List of dicts with 'type' in {'text', 'link', 'image',
        'linked_article'}.
    """
    structured_content = []

    for element in content_node.xpath('./*|./text()'):
        if isinstance(element, str):  # direct text node
            text = clean_text(element)
            if text:
                _append_text(text, structured_content)
            continue

        tag = element.tag.lower()
        if tag == 'img':
            _append_image(element, structured_content)
        elif tag == 'a':
            _append_anchor(element, group_name, structured_content)
        else:
            # Other wrapper tag: process its immediate children.
            for child in element.xpath('./*|./text()'):
                if isinstance(child, str):  # nested text node
                    text = clean_text(child)
                    if text:
                        _append_text(text, structured_content)
                    continue

                child_tag = child.tag.lower()
                if child_tag == 'img':
                    _append_image(child, structured_content)
                elif child_tag == 'a':
                    _append_anchor(child, group_name, structured_content)
                else:
                    # Deeper structure is flattened to its text content.
                    text = clean_text(child.text_content())
                    if text:
                        _append_text(text, structured_content)

    return structured_content

def fet_article_details(urls, group_name):
    """Fetch and parse every article in *urls*.

    Args:
        urls: mapping of article id -> article URL.
        group_name: group label forwarded to fetch/parse helpers.

    Returns:
        List of parsed article dicts; failed fetches/parses are skipped.
    """
    results = []

    for item_id, url in urls.items():
        try:
            print(f"处理文章 {item_id}: {url}")
            page_html = fet_article_content(url, group_name)
            if not page_html:
                print(f"获取文章 {item_id} 内容失败")
                continue

            parsed = parse_single_article(page_html, url, group_name, item_id)
            if parsed:
                results.append(parsed)

            # Small extra pause between articles.
            time.sleep(1)
        except Exception as exc:
            print(f"处理文章 {item_id} 时发生错误: {exc}")

    return results

# Push one parsed article to the remote database endpoint(s).
def toDatabase(data):
    """POST *data* (a parsed article dict) to each configured endpoint.

    Returns:
        True when every endpoint acknowledges with "success" or "接收成功"
        in its response body, False otherwise (including an empty endpoint
        list).
    """
    # Round-trip through JSON: verifies the payload is serializable and
    # yields an independent copy (string replacements used to be applied
    # to the serialized form at this point).
    data = json.loads(json.dumps(data, ensure_ascii=False))

    REMOTE_DB_PUSH_URL = [
        "https://xf-162-test.lottefuture.com/api/doubanpushnew",
        #"https://xf-162.lottefuture.com/api/doubanpushnew",
    ]

    # Fix: the response used to be inspected only AFTER the loop, so only
    # the last endpoint's reply mattered and an empty endpoint list raised
    # NameError on `req`. Now every response is checked.
    all_ok = bool(REMOTE_DB_PUSH_URL)
    for push_url in REMOTE_DB_PUSH_URL:
        # NOTE(review): verify=False disables TLS certificate validation —
        # confirm this is intentional for these endpoints.
        resp = requests.post(push_url, json=data, verify=False)
        body = resp.content.decode()
        if "success" in body or "接收成功" in body:
            print(f"推送成功:  URL = {data['url']} 完成采集,{str(data)[:300]}...")
        else:
            print("推送请求失败: ", data)
            all_ok = False

    return all_ok

    

if __name__ == "__main__":
    # Smoke-test run against a single known article.
    urls = {
        #5335565: 'https://news.xianbao.fun/douban-maizu/5335565.html',
        '5335514': 'https://news.xianbao.fun/douban-maizu/5335514.html'
    }
    results = fet_article_details(urls, '买组')

    print(f"\n处理完成，共获取 {len(results)} 篇文章")
    for result in results:
        # Fix: this used to rebind `results` (the list being iterated) to a
        # JSON string; use a distinct name for the serialized form.
        result_json = json.dumps(result, ensure_ascii=False, indent=2)
        toDatabase(result)
        print(result_json)
