#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from docx import Document #先引入python-docx
from datetime import datetime
import io
from PIL import Image
import urllib.request
import os
def fetch_page(url):
    """Download *url* and return it parsed as a BeautifulSoup tree.

    Returns None (after printing the error) when the request fails or the
    server answers with an HTTP error status.
    """
    request_headers = {'User-Agent': 'Mozilla/5.0 (compatible; YourBot/0.1)'}
    try:
        response = requests.get(url, headers=request_headers, timeout=10)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        print(f"Error fetching {url}: {e}")
        return None
    return BeautifulSoup(response.content, "html.parser")

# def parse_article(soup):
#     """解析文章内容"""
#     str_content = all_content(soup)
#     article_data = {
#         'title': soup.find('h1').text.strip() if soup.find('h1') else "No Title",
#         'content': "\n".join(str_content) #[p.text.strip() for p in soup.find_all('p')]
#     }
#     return article_data

def all_content(soup, path, video_src):
    """Export one article page into a Word document under *path*.

    Walks the page's <p> tags in document order, adding images and text
    paragraphs to the .docx as they appear; optionally downloads the
    article's video next to it.

    Args:
        soup: BeautifulSoup tree of the article page.
        path: existing output directory for the .docx (and .mp4) files.
        video_src: direct video URL, or the sentinel " " meaning "no video".
    """
    doc = Document()
    article_data = {
        'title': soup.find('h1').text.strip() if soup.find('h1') else "No Title",
    }

    # Callers pass a single space as the "no video" sentinel.
    if video_src != " ":
        video_url = video_src
        print("视频链接" + video_url)
        urllib.request.urlretrieve(
            video_url, os.path.join(path, article_data['title'] + '.mp4'))

    # Process <p> tags in order, emitting images or text as encountered.
    doc.add_heading(article_data['title'], level=1)
    for p in soup.find_all('p'):
        img_tag = p.find('img')
        if img_tag is not None:  # this paragraph carries an image
            try:
                img_name = img_tag['src']
                # The site stores images under /<yyyymmdd>/<suffix>/<name>;
                # both URL path pieces are recoverable from the file name.
                prefix1 = img_name[0:8]
                print(prefix1)
                prefix2 = img_name.split("_")[0][8:]
                print(prefix2)
                img_url = (r'https://www.news.cn/politics/leaders/' + prefix1
                           + r"/" + prefix2 + r'/' + img_name)
                print(img_url)
                img_response = requests.get(img_url, timeout=10)
                img_bytes = io.BytesIO(img_response.content)
                img = Image.open(img_bytes)
                # python-docx wants a file path, so round-trip via a temp file
                # (cleaned up after the document is saved).
                temp_name = f'temp_{img_url.split("/")[-1]}'
                img.save(temp_name)
                doc.add_picture(temp_name)
            except Exception:  # narrow from bare except; keep best-effort skip
                print(article_data['title'] + "图片获取失败")
                continue
        else:
            text = p.text.strip()
            if text:
                doc.add_paragraph(text)

    # Save the document; "|" is illegal in Windows file names.
    name = str(article_data['title']).replace("|", " - ")
    print(name)
    word_filename = f'{name}.docx'
    output_path = os.path.join(path, word_filename)
    doc.save(output_path)
    print(f"Article saved to {output_path}")

    # Remove the temporary image files created above.
    for file in os.listdir('.'):
        if file.startswith('temp_'):
            os.remove(file)


        

# def save_to_word(article_data, filename):
#     """将文章数据保存为Word文档"""
#     doc = Document()
#     doc.add_heading(article_data['title'], level=1)
#     doc.add_paragraph(article_data['content'])
#     doc.save(filename)
#     print(f"Article saved to {filename}")

# def save_to_pdf(article_data, filename):
#     """将文章数据保存为PDF文档"""
#     html_content = f"<h1>{article_data['title']}</h1><p>{article_data['content']}</p>"
#     pdfkit.from_string(html_content, filename)
#     print(f"Article saved to {filename}")

def main(base_url, path, video_src):
    """Fetch the article at *base_url* and export it under *path*.

    Args:
        base_url: URL of the article page.
        path: output directory; created (with parents) if missing.
        video_src: video URL, or " " when the article has no video.
    """
    if not os.path.exists(path):
        os.makedirs(path)  # fix: os.mkdirs does not exist (AttributeError)
    soup = fetch_page(base_url)
    if soup:
        all_content(soup, path, video_src)

# if __name__ == "__main__":
#     main()
