'''
Scheme: web information scraping
Target: the Weibo topic "酱香拿铁跨界融合" (Jiangxiang Latte crossover collaboration)
Implementation: code below

'''
import requests,re,csv,os

def _collect_comments(screen_name, mblog_id, data):
    """Fetch the hot comments of one post and append rows to *data*.

    Each appended row is ``[screen_name, comment_count, comment_text]``.
    When the endpoint returns no usable comment list (e.g. nobody has
    commented), a single row is appended whose count is 0 and whose third
    field is the API's ``msg`` string, mirroring the original behaviour.
    """
    # Comment endpoint for one post (mobile Weibo API).
    commit_url = f'https://m.weibo.cn/comments/hotflow?id={mblog_id}&mid={mblog_id}&max_id_type=0'
    try:
        payload = requests.get(url=commit_url, timeout=10).json()
    except (requests.RequestException, ValueError):
        # Unreachable endpoint or non-JSON reply: skip this post entirely.
        return
    try:
        comments = payload['data']['data']  # list of comment dicts
    except (KeyError, TypeError):
        # No comment data: record the API's message instead of a comment.
        # (The original read a possibly-unbound `commit_text` here; 0 is the
        # honest count for a post with no retrievable comments.)
        data.append([screen_name, 0, payload.get('msg', '')])
        return
    for comment in comments:
        data.append([screen_name, len(comments), comment['text']])


def spider_date():
    """Scrape search results for the topic "酱香拿铁跨界融合" and collect
    the hot comments of each post found.

    Returns:
        list[list]: rows of ``[screen_name, comment_count, comment_text]``;
        empty when the search request fails or returns no cards.
    """
    data = []
    # Search-result feed URL for the topic (mobile Weibo API).
    connext_url = 'https://m.weibo.cn/api/container/getIndex?containerid=100103type%3D1%26q%3D%E9%85%B1%E9%A6%99%E6%8B%BF%E9%93%81%E8%B7%A8%E7%95%8C%E8%9E%8D%E5%90%88&page_type=searchall'
    try:
        respon = requests.get(url=connext_url, timeout=10)
        cards = respon.json()['data']['cards']
    except (requests.RequestException, ValueError, KeyError, TypeError):
        # Network failure or unexpected payload shape: nothing to scrape.
        return data

    for item in cards:
        try:
            if item.get('card_type') == 9:
                # Single-post card.
                # NOTE(review): mobile-API payloads usually nest the name under
                # mblog['user']['screen_name'] — this path kept as the original
                # wrote it; confirm against a live response.
                screen_name = item['mblog']['screen_name']
                # The post id is embedded in the actionlog "ext" string
                # between "&mid=" and the following "&".
                mblog_id = re.findall(r'(?<=&mid=).*?(?=&)', str(item['actionlog']['ext']))[0]
                _collect_comments(screen_name, mblog_id, data)
            else:
                # Grouped card: 'card_group' holds a list of sub-cards, each
                # of which may carry its own 'mblog'.  (The original indexed
                # the list like a dict, which raised TypeError and — because
                # of the bare except around the whole loop — silently aborted
                # the entire scrape.)
                for sub_info in item.get('card_group', []):
                    mblog = sub_info.get('mblog')
                    if not mblog:
                        continue
                    _collect_comments(mblog['user']['screen_name'], mblog['id'], data)
        except (KeyError, IndexError, TypeError):
            # This card didn't match the expected shape; skip it and keep
            # scraping the rest instead of stopping at the first oddity.
            continue

    return data

if __name__ == '__main__':
    # Write the scraped rows next to the current working directory.
    current_dir = os.getcwd()
    filepath = os.path.join(current_dir, 'spider_wb.csv')
    with open(filepath, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        # Each data row is [account name, comment count, comment text], so the
        # header needs three columns (the original listed only two, mislabeling
        # the data).
        writer.writerow(['微博账号名称', '评论条数', '评论内容'])
        writer.writerows(spider_date())
    print("数据写入完成！")




