import requests
from bs4 import BeautifulSoup
import chardet

# Step 1: collect article links

# Browser-like request headers used when fetching basic.10jqka.com.cn pages
# (passed to requests.get in get_article_content).
# NOTE(review): 'Authority' is sent as a literal header name, not the HTTP/2
# ':authority' pseudo-header — presumably harmless, but confirm the site
# actually needs it.
headers = {
    'Authority': 'basic.10jqka.com.cn',
    'Referer': 'https://basic.10jqka.com.cn/002517/index.html',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
}
def get_article_links():
    """Scrape the OSChina front page and collect article URLs.

    Returns:
        list[str]: every <a> href that contains the substring 'article'.
    """
    url = 'https://www.oschina.net/'
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')

    # Bug fix: the list was initialised as [1]; that stray integer would
    # later be passed to requests.get() by callers and crash.
    article_links = []

    for link in soup.find_all('a'):
        href = link.get('href')
        if href and 'article' in href:
            article_links.append(href)

    return article_links

# Step 2: fetch each article page and print its title/date rows
def get_article_content(article_links):
    """Fetch each URL, decode it with its sniffed charset, and print every
    table row inside the page's 'viewport' div as "date->link text".

    Args:
        article_links: iterable of page URLs to fetch.

    Returns:
        The BeautifulSoup node of the LAST page's 'viewport' div, or None
        when no page yielded one.  (The original raised NameError on an
        empty input and AttributeError when the div was missing.)
    """
    content = None  # defined up front so an empty input returns cleanly

    for link in article_links:
        response = requests.get(link, headers=headers)
        raw = response.content
        # Pages on this site are not reliably UTF-8, so sniff the charset
        # from the bytes instead of trusting response.text's guess.
        encoding = chardet.detect(raw)['encoding']
        print(encoding)
        soup = BeautifulSoup(raw, 'html.parser', from_encoding=encoding)

        content = soup.find('div', class_='viewport')
        if content is None:
            # Layout differs or the request was blocked; skip this page
            # rather than crash on None.find_all.
            continue

        for tr in content.find_all('tr'):
            cells = tr.find_all('td')
            if not cells:
                continue  # header rows carry no <td>
            date = cells[0].text
            span = tr.find('span')
            anode = span.find('a') if span else None
            trcontent = anode.text.strip() if anode else ''
            print(date + "->" + trcontent)

    return content

# Step 3: write article contents to files
def write_articles_to_files(articles, out_dir='F:/tmp/1/2'):
    """Write each (title, content) pair to <out_dir>/<title>.txt as UTF-8.

    Args:
        articles: iterable of (title, content) string pairs.
        out_dir: destination directory; the default preserves the original
            hard-coded Windows path for backward compatibility.

    NOTE(review): titles containing path separators or other characters
    illegal in filenames will make open() fail — sanitize upstream if the
    titles come from scraped pages.
    """
    for title, content in articles:
        with open(f'{out_dir}/{title}.txt', 'w', encoding='utf-8') as file:
            file.write(title)
            file.write('\n')
            file.write(content)


if __name__ == '__main__':
    # Crawl a single hard-coded 10jqka stock page; the OSChina link
    # discovery step is kept below but disabled.
    # article_links = get_article_links()
    target_urls = {'https://basic.10jqka.com.cn/002517/index.html'}
    articles = get_article_content(target_urls)
    # write_articles_to_files(articles)
    # print(articles)