import bs4
import requests
from urllib.parse import urlencode
from bs4 import BeautifulSoup
import time
import os
import random

# --- Scraper configuration for tgb.cn (Taoguba) blog replies ---

# Site root; prepended to the relative reply hrefs scraped from each list page.
base_website = 'https://www.tgb.cn/'

# Endpoint serving paginated "more replies" fragments; the query string is appended.
base_url = 'https://www.tgb.cn/user/blog/moreReplyMod?'



# Query parameters for the reply listing. pageNo is filled in per request by
# parse_page(). NOTE(review): 'time' and 'userID' are hard-coded for one
# specific user/date — confirm they should not be parameterized.
params = {
    'time': '2025-07-06',
    'userID' : '6530171',
    'pageNo': None
}

# Request headers sent with every GET.
# WARNING: the Cookie below carries a live login session token — hard-coded
# credentials like this should not be committed to source control.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
    'Cookie':'gdp_user_id=gioenc-3157gge1%2Cd1ea%2C5986%2Ccg74%2Caa40dccdg564; Hm_lvt_cc6a63a887a7d811c92b7cc41c441837=1744355905,1744358863,1744473374,1744551457; loginStatus=phone; tgbuser=11475917; tgbpwd=6442e6a9cc449a8b20a4c1f4e428105e80a6f56e7cc9db91c5d6b06b6eab42c08yomea7qikm3u16; 893eedf422617c96_gdp_gio_id=gioenc-00564806; 893eedf422617c96_gdp_cs1=gioenc-00564806; creatorStatus11475917=true; Actionshow2=true; acw_tc=0a45646a17517616196832158efeaa8440c21dc304606a718a50ebe1a12668; JSESSIONID=ZTZmMTAxMTUtNDhhOC00MDYyLWI2NmMtNzZmZDE3ODk2N2Jm; 893eedf422617c96_gdp_session_id=afd40d62-f068-453a-adb8-df58be5174e9; wsStatus=true; showStatus11475917=true; 893eedf422617c96_gdp_sequence_ids=%7B%22globalKey%22%3A3969%2C%22VISIT%22%3A255%2C%22PAGE%22%3A2428%2C%22CUSTOM%22%3A1288%7D; 893eedf422617c96_gdp_session_id_afd40d62-f068-453a-adb8-df58be5174e9=true; tfstk=geK-3qgGPjclYs-RnLu0thfHClMmSqvrDQJ_x6fuRIdvCdPlR6xlJEdBi_alZY7BMBdfZQjWEBQBseYltB4k0BdNVw8lt_fdvpRtnfmijLJPYwGisc0N3mtdY6ihAqjfOrjvxfmijLg5UMvjsHYl7QCcdM65O9_jH9XQNWOCFtMAd9V7RuOIHxBhKWa7P6_bhOX1A6OCAxIfg9sCFBsINEMRg06avF0CPI3QYsZQAnBRlqv5Gk62DTQR1Lt8YkJNeaC6FsGaz0E1lp7pYJrdJLTwTOO7NfbydEdAeMn0PG9vRCB2VcVR3UvXIs9TLkJO29Q1AZeQAKIPaNtcfcNA3E9kR3R8pk6yqhbd_ZHQYwjXjNT6wJkwhg6BTN-n_u5Wdd-wSGn0PG9vRCLd4FxMX4Yzs1B3PxHY8y753mg_fjvowVT1H1D5PyzFWnWAsxM_8y7mbtCinQUU8ZjC.'
}

class follow_post():
    """A single follow-up reply scraped from a blog post.

    Plain mutable record: every field starts as None and is filled in
    incrementally via the set_* methods while the scraper extracts data.
    Fields: author (display name), response_time (timestamp string),
    text_reply (reply body text), imgs (list of image URLs or None),
    quote_name / quote_content (quoted reply's author and text, or None).
    """

    def __init__(self, author=None, response_time=None, text_reply=None,
                 imgs=None, quote_name=None, quote_content=None):
        self.author = author
        self.response_time = response_time
        self.text_reply = text_reply
        self.imgs = imgs
        self.quote_name = quote_name
        self.quote_content = quote_content

    def set_author(self, author):
        """Record the reply author's display name."""
        self.author = author

    def set_response_time(self, response_time):
        """Record the reply timestamp string."""
        self.response_time = response_time

    def set_text_reply(self, text_reply):
        """Record the reply body text."""
        self.text_reply = text_reply

    def set_imgs(self, imgs):
        """Record the list of image URLs embedded in the reply."""
        self.imgs = imgs

    def set_quote_name(self, quote_name):
        """Record the quoted reply's author name."""
        self.quote_name = quote_name

    def set_quote_content(self, quote_content):
        """Record the quoted reply's text content."""
        self.quote_content = quote_content

    def print(self):
        """Dump the record to stdout for debugging; optional fields are skipped."""
        print(self.author + ':' + self.response_time)
        print(self.text_reply)
        if self.imgs is not None:
            print(self.imgs)
        if self.quote_name is not None:
            print(self.quote_name + ':')
        if self.quote_content is not None:
            print(self.quote_content)


def parse_page(page_no, temp_list):
    """Fetch one page of follow-up replies and append follow_post records to temp_list.

    For each reply on the listing page, fetches the individual reply page,
    extracts timestamp, text, image URLs and any quoted reply, and appends
    a follow_post to temp_list (mutated in place). Pages or replies that
    fail to fetch or parse are skipped silently.

    Args:
        page_no: page number sent as the ``pageNo`` query parameter.
        temp_list: output list of follow_post objects (appended to in place).
    """
    params['pageNo'] = page_no
    # urlencode already joins keys and values with a literal '='; the old
    # .replace('%3D', '=') post-processing was a no-op for these params.
    url = base_url + urlencode(params)
    print(url)
    # timeout prevents the scraper from hanging forever on a stalled socket.
    response = requests.get(url, headers=headers, timeout=30)
    print(response)
    if response.status_code != 200:
        return
    soup = BeautifulSoup(response.content.decode('utf-8'), 'lxml')
    tables = soup.select('#replyTable')
    if not tables:
        # Layout changed or the page is empty — nothing to parse.
        return
    reply_list = tables[0]
    for blog_reply in reply_list.select('.blogReply'):
        reply_id = blog_reply.select('div.blogReply-top > a')[0].attrs['href']
        href = base_website + reply_id
        # Fetch the individual reply page.
        reply_resp = requests.get(href, headers=headers, timeout=30)
        if reply_resp.status_code != 200:
            continue
        post = follow_post()
        reply_soup = BeautifulSoup(reply_resp.content.decode('utf-8'), 'lxml')
        # The href's fragment identifies the reply element id on the page.
        anchor = '#reply' + reply_id.split('#')[-1]
        matches = reply_soup.select(anchor)
        if not matches:
            continue
        reply_content = matches[0]
        # Reply timestamp (layout-specific selector); strip embedded newlines.
        resp_time = reply_content.parent.select(
            'div.comment-data-user > div.left > div > span.pcyclspan.left.c999'
        )[0].getText().replace('\n', '')
        quote_person = reply_content.parent.select(
            'div.comment-data-quote > div.data-quote-right.left > div:nth-child(1) > a'
        )
        # NOTE(review): author is hard-coded — presumably the display name of
        # userID 6530171 from the module-level params; confirm.
        post.set_author('难得狂跌')
        post.set_response_time(resp_time)
        quote_name = None
        # select() always returns a list, so only the length check is needed.
        if len(quote_person) == 1:
            quote_name = quote_person[0].getText().replace(' ', '').replace('\n', '')

        post.set_text_reply(reply_content.getText().replace(' ', '').strip())
        imgs_reply = []
        for img_item in reply_content.select('img'):
            # Lazy-loaded images carry the real URL in src2 / data-original;
            # fall back to src, then to the literal placeholder 'null'.
            img_url = 'null'
            for attr_name in ('src2', 'data-original', 'src'):
                value = img_item.attrs.get(attr_name)
                if value is not None:
                    img_url = value
                    break
            imgs_reply.append(img_url)
        if imgs_reply:
            post.set_imgs(imgs_reply)
        if quote_name is not None:
            post.set_quote_name(quote_name)
        # Quoted content lives in sibling tags following the reply element.
        quote_content = ''
        for quote_item in reply_content.next_siblings:
            if not isinstance(quote_item, bs4.Tag):
                continue
            for paragraph in quote_item.select('p'):
                quote_content = quote_content + paragraph.getText() + '\n'
        if quote_content:
            post.set_quote_content(quote_content)
        temp_list.append(post)
        post.print()
        print('*' * 40)


def write_to_file(out_path, post_list):
    """Render the collected replies as markdown and write them to out_path.

    Each reply becomes: "author: time", the reply text, optional image
    links, and (when a quoted author is present) the quote followed by a
    "***" horizontal rule.
    """
    rendered = []
    for post in post_list:
        pieces = [
            post.author + ': ' + post.response_time + '\n',
            post.text_reply + '\n',
        ]
        if post.imgs is not None:
            pieces.extend('![](%s)' % url + '\n' for url in post.imgs)
        if post.quote_name is not None:
            pieces.append('> ' + post.quote_name + ':\n')
            if post.quote_content is not None:
                pieces.append(post.quote_content + '\n')
            pieces.append('***\n')
        rendered.append(''.join(pieces))
    with open(out_path, mode='w', encoding='utf-8') as out_file:
        out_file.writelines(rendered)

# NOTE(review): book_index appears unused anywhere in this file — confirm
# before deleting it.
book_index = -1
temp_list = []

# Scrape reply pages 21-25 and write one markdown file per page to ./data/.
save_dir = './data/'
# Create the output directory once, before the loop (idempotent).
os.makedirs(save_dir, exist_ok=True)
for page_no in range(21, 26):
    output_path = save_dir + str(page_no) + '.md'
    temp_list = []
    parse_page(page_no, temp_list)
    write_to_file(output_path, temp_list)
    # Randomized delay so we don't hammer the server between pages.
    time.sleep(random.uniform(1, 5))
    print('处理完 page %d' % page_no + '---' * 20)

