
import requests
from bs4 import BeautifulSoup
import re

class Article:
    """A forum post: its title, link URL, body text and image URLs."""

    def __init__(self, title=None, href=None, content=None, images=None):
        # title/href are known from the index page; content/images are
        # filled in later by parse_article().
        self.title = title
        self.href = href
        self.content = content
        self.images = images

    def __str__(self):
        # f-string renders None as 'None' instead of raising TypeError
        # (the original `title + '\t' + href` crashed when either was None).
        return f'{self.title}\t{self.href}'

    # Accessor methods are kept for backward compatibility with existing
    # callers; plain attribute access (article.href) works just as well.
    def get_href(self):
        return self.href

    def get_title(self):
        return self.title

    def get_content(self):
        return self.content

    def get_images(self):
        return self.images


# Index page listing the author's main posts ("主帖目录")
post_index = 'https://www.taoguba.com.cn/user/blog/moreTopic?userID=6530171'

# Relative hrefs scraped from the index are joined onto this base.
base_url = 'https://www.taoguba.com.cn/'

# NOTE(review): the Cookie below embeds live session credentials
# (JSESSIONID, tgbpwd, ...). They will expire and should not be committed
# to version control — consider loading them from a config file or env var.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
    'Cookie':'gdp_user_id=gioenc-81ac6902%2C3e66%2C51g3%2Ca7e6%2C7be400ae36c6; 893eedf422617c96_gdp_sequence_ids=%7B%22globalKey%22%3A566%2C%22PAGE%22%3A317%2C%22CUSTOM%22%3A214%2C%22VISIT%22%3A37%7D; agree=enter; Actionshow=1693929600000; tgbuser=8539162; tgbpwd=e6b7f699ff03a0af5f7dc32c70e67644f81aa02b8ce0e62d09486f3ec15b2671f4mvt5zx7oaw6x9; 893eedf422617c96_gdp_gio_id=gioenc-9428073; 893eedf422617c96_gdp_cs1=gioenc-9428073; creatorStatus8539162=true; Actionshow2=true; acw_tc=0a5cc92616942167608696562e92ec3cdea25a02f182ce951f30e4b3b8b8f0; JSESSIONID=MTQ5NTdiYzMtNjRlMi00MTZjLTlkNTAtYjE1ZTA5NWRhZmI2; 893eedf422617c96_gdp_session_id=a8bc463a-5b77-4f8d-b3d5-328707638099; Hm_lvt_cc6a63a887a7d811c92b7cc41c441837=1693920531,1694216763; wsStatus=true; showStatus8539162=true; 893eedf422617c96_gdp_sequence_ids=%7B%22globalKey%22%3A939%2C%22PAGE%22%3A563%2C%22CUSTOM%22%3A308%2C%22VISIT%22%3A70%7D; 893eedf422617c96_gdp_session_id_a8bc463a-5b77-4f8d-b3d5-328707638099=true; Hm_lpvt_cc6a63a887a7d811c92b7cc41c441837=1694216791'
}

# Output markdown file name.
file = '难得狂跌主帖.md'

def get_articles(url):
    """Fetch the post-index page and return a list of Article(title, url).

    Returns an empty list on HTTP failure so callers can always iterate
    the result (the original returned None, which crashed the
    module-level `for article in articles` loop).
    """
    response = requests.get(url=url, headers=headers)
    if response.status_code != 200:
        return []
    soup = BeautifulSoup(response.content.decode('utf-8'), 'lxml')
    articles = []
    # Each 'td .suh' cell holds one post link; title/href live on its <a>.
    for item in soup.select('td .suh'):
        link = item.select('a')[0]
        article = Article(link.attrs['title'], base_url + link.attrs['href'])
        print(article)
        articles.append(article)
    return articles

def parse_article(article):
    """Download an article page and fill in article.content / article.images.

    Returns the article on success, or None on HTTP failure — callers
    should skip None results.
    """
    response = requests.get(article.get_href(), headers=headers)
    if response.status_code != 200:
        return None
    soup = BeautifulSoup(response.content.decode('utf-8'), 'lxml')
    body = soup.select('#first')[0]
    # getText() already strips all markup, so the original
    # .replace('<br/>', '<br/>\n') step could never match and was dropped.
    article.content = body.getText().replace(' ', '').replace('\n', '')
    # Lazy-loaded images keep the real URL in 'src2'; fall back to 'src'
    # and skip tags with neither, instead of raising KeyError.
    images = []
    for img_item in body.select('img'):
        img_url = img_item.attrs.get('src2') or img_item.attrs.get('src')
        if img_url:
            images.append(img_url)
    article.images = images
    return article

def write_markdown(articles):
    """Write articles to the markdown output file, one '# title' section each.

    None entries (failed parses) and None content/images are skipped
    instead of raising AttributeError/TypeError as the original did.
    """
    markdown_content = []
    for article in articles:
        # parse_article returns None on HTTP failure; '\n'.join also
        # can't handle a None content — skip both cases.
        if article is None or article.content is None:
            continue
        paragraph = ['# ' + article.title, article.content]
        for img in article.images or []:
            paragraph.append('![](%s)' % img)
        markdown_content.append('\n'.join(paragraph) + '\n')
    with open(file, mode='w', encoding='utf-8') as f:
        f.writelines(markdown_content)


# Script entry: scrape the index, parse every post, write the markdown file.
articles = get_articles(post_index)

articles2 = []
for article in articles:
    parsed = parse_article(article)
    # Keep only successfully parsed articles; parse_article returns None
    # on HTTP failure, and None entries would crash write_markdown.
    if parsed is not None:
        articles2.append(parsed)
    print('解析完文档： %s' % article.title)
write_markdown(articles2)