import html
import requests,re,os,csv
from bs4 import BeautifulSoup
from translate import Translator

# 翻译函数
def translate_to_english(text):
    """Translate Chinese *text* to English via the `translate` library.

    Builds a fresh ZH->EN translator per call and returns the translated
    string.
    """
    zh_en = Translator(from_lang="ZH", to_lang="EN")
    return zh_en.translate(text)

def spider_info():
    """Scrape Douban search results for "酱香拿铁" and collect group comments.

    Queries the Douban mobile search API, then fetches each hit's group page
    and extracts entries from the <ul id="comments"> / <ul id="popular-comments">
    lists.

    Returns:
        list[list[str]]: rows of [group owner, title, author, comment date,
        comment location, comment text, English comment]. The English comment
        field is left empty because translation is disabled (see the
        commented-out call below).
    """
    data = []
    headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'Referer': 'https://www.douban.com',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
    'origin': 'https://www.douban.com',
    'sec-ch-ua': '"Google Chrome";v="123", "Not:A-Brand";v="8", "Chromium";v="123"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
}
    url = 'https://m.douban.com/rexxar/api/v2/search?q=%E9%85%B1%E9%A6%99%E6%8B%BF%E9%93%81&type=&loc_id=&start=0&count=10&sort=relevance'
    respon = requests.get(url=url, headers=headers)
    all_info = respon.json()['contents']['items']
    for item in all_info:
        xiaozu = item['target']['owner']
        title = item['target']['title']
        target_id = item['target_id']
        commit_url = f'https://www.douban.com/group/{target_id}'
        respon_commit = requests.get(url=commit_url, headers=headers)
        # BUG FIX: pass the response body to BeautifulSoup, not the
        # requests.Response object itself.
        soup = BeautifulSoup(respon_commit.text, 'html.parser')
        ul_tags = soup.find_all('ul', id=["comments", "popular-comments"])
        # Walk every <li> under each comment list.
        for ul_tag in ul_tags:
            for li_tag in ul_tag.find_all('li'):
                # BUG FIX: img_tag was never defined (NameError in the
                # original) — the author name is the alt text of the
                # avatar <img> inside this <li>.
                img_tag = li_tag.find('img')
                auther = img_tag.get('alt') if img_tag else ''
                time_tag = li_tag.find('span', {'class': 'pubtime'})
                addrestime = time_tag.get_text() if time_tag else ''
                # The pubtime span holds date and (optionally) a location,
                # separated by spaces — TODO confirm exact format on live pages.
                time_place = addrestime.split(' ')
                committime = time_place[0]
                # BUG FIX: keep only the location portion instead of
                # storing the whole split list.
                commitaddres = ' '.join(time_place[1:])
                commit_tag = li_tag.find('p')
                commit = commit_tag.get_text() if commit_tag else ''
                # encommit = translate_to_english(commit)  # translation disabled
                # Trailing '' keeps the row aligned with the 7-column
                # CSV header written in __main__ ('英文评论').
                data.append([xiaozu, title, auther, committime, commitaddres, commit, ''])
    return data



if __name__ == '__main__':
    # Dump the scraped comment rows into spider_db.csv in the current
    # working directory, with a Chinese column header row first.
    output_path = os.path.join(os.getcwd(), 'spider_db.csv')
    header_row = ['豆瓣小组名称', '标题', '作者','评论时间','评论地点','评论内容','英文评论']
    with open(output_path, 'w', newline='', encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(header_row)
        csv_writer.writerows(spider_info())
    print("数据写入完成！")
    
