import os
import time

import pandas as pd

from InfoCrawler import WeiboInfoCrawler

# HTTP request headers replayed from a logged-in desktop browser session so
# that weibo.com treats the crawler's requests as an authenticated XHR call.
# NOTE(review): the 'cookie' value embeds live session credentials (SUB/SUBP,
# XSRF-TOKEN, WBPSESS). These expire and are account-specific — they must be
# refreshed from the browser before each run, and should not be committed to
# version control.
headers = {
    'authority': 'weibo.com',
    'x-requested-with': 'XMLHttpRequest',
    'sec-ch-ua-mobile': '?0',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'content-type': 'application/x-www-form-urlencoded',
    'accept': '*/*',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-mode': 'cors',
    'sec-fetch-dest': 'empty',
    'referer': 'https://weibo.com/1192329374/KnnG78Yf3?filter=hot&root_comment_id=0&type=comment',
    'accept-language': 'zh-CN,zh;q=0.9,en-CN;q=0.8,en;q=0.7,es-MX;q=0.6,es;q=0.5',
    'cookie': 'SCF=Ang1PsM9AoPOH6r2JMfsf_cG-DF5ce8cSUt-u4AhK1i5wQRp7tw8QyalBIidf_baiVUx4v-heMf80G-h4cl7hGc.; '
              'UOR=www.baidu.com,weibo.com,www.baidu.com; SINAGLOBAL=6060355668604.674.1709552585531; '
              'XSRF-TOKEN=uCLI3Tk_ivtQ_fDeY4XJO4vn; PC_TOKEN=ad3dca5ec7; '
              'login_sid_t=09f7d91860d66dcdcab3ad828aab6233; cross_origin_proto=SSL; WBStorage=267ec170|undefined; '
              '_s_tentry=-; Apache=83995416395.35046.1715139103403; '
              'ULV=1715139103409:5:1:1:83995416395.35046.1715139103403:1711421204088; '
              'WBtopGlobal_register_version=2024050811; ALF=1717731171; '
              'SUB=_2A25LPoIzDeRhGeNI7FEQ-CbIyD2IHXVoNZv7rDV8PUJbkNB-LWeskW1NSCMrRmzODeSGO6pgDIRp-UKrfx8u5aaC; '
              'SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWSJ_7WUJKD_-HWMMWH1YY_5JpX5KzhUgL.Fo'
              '-cS0ep1hnXe022dJLoIEBLxKqL1KnL1-qLxK-L1h-L1h.LxK-LB.-LB--LxKqLB-BLBK-t; '
              'WBPSESS=DZfzo8PBycpQXciQLyzIdlhjpu3GSrUjHdDzGdeTxn6_FTe0MIZZN5H_tmED0x6_DyEheg9EuSsRBmW52A16ei1fd8lcFQDc1p3LI_DsxBzG-Q5fdf7BZOJO_t5Q4OW-UqMdVyPGgnJ-Rph9oe0shA=='
}
# Absolute path of the input CSV produced by the weibo-search project; it is
# also what the output path is derived from (same folder, "1" suffix).
path = r'D:\CodeRepo\python\weibo-search\结果文件\董宇辉\董宇辉.csv'

def main():
    """Enrich the crawled Weibo CSV with IP location and gender columns.

    Uses the module-level ``headers`` (authenticated session) and ``path``
    (input CSV). Fetches each user's IP location and gender via proxied
    requests, maps them onto the crawler's DataFrame by ``user_id``, and
    writes the result next to the input file with a ``1`` suffix
    (``<name>.csv`` -> ``<name>1.csv``). Prints progress and total runtime.
    """
    start_time = time.time()
    crawler = WeiboInfoCrawler(headers=headers, path=path)
    print('初始化成功...')
    uids = crawler.get_uid()
    print('获取uid成功...')
    proxies = crawler.getValidProxies(page_range=3)
    print('获取代理ip成功...')
    print('length of proxies:', len(proxies))
    print(proxies)
    ip_location_dict, gender_dict = crawler.getIPlocationGenderDict(uids, proxies)
    print('获取ip地理位置和性别成功...')
    print('开始写入文件...')
    # Reuse the DataFrame the crawler already loaded rather than re-reading
    # the CSV from disk.
    df = crawler.raw_data
    df['IP_location'] = df['user_id'].map(ip_location_dict)
    df['gender'] = df['user_id'].map(gender_dict)
    # Rows whose user_id had no lookup result become NaN; report the count.
    print('IP空值数量：', df['IP_location'].isnull().sum())
    # Derive the output path from the input path instead of hard-coding a
    # second absolute path that must be kept in sync by hand.
    base, ext = os.path.splitext(path)
    df.to_csv(base + '1' + ext, index=False, encoding='utf-8')
    print('写入文件成功...')
    end_time = time.time()
    print('耗时：', end_time - start_time)


if __name__ == '__main__':
    # Guard the entry point so importing this module does not trigger
    # network crawling and file writes as a side effect.
    main()