import requests
from lxml import etree
import threading
import pandas as pd
import time
from concurrent.futures import ThreadPoolExecutor
# import urllib3
# urllib3.disable_warnings()
# import os
# os.environ['NO_PROXY'] = 'weibo.com'



# Browser-like request headers for Weibo's authenticated AJAX endpoints.
# NOTE(review): the cookie below is a hard-coded, expiring session credential —
# requests will start failing (non-200 / non-JSON) once it expires; it has to be
# refreshed manually. Consider loading it from a config file or env var instead.
headers = {
    'authority': 'weibo.com',
    'x-requested-with': 'XMLHttpRequest',
    'sec-ch-ua-mobile': '?0',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'content-type': 'application/x-www-form-urlencoded',
    'accept': '*/*',
    'sec-fetch-site': 'same-origin',
    'sec-fetch-mode': 'cors',
    'sec-fetch-dest': 'empty',
    'referer': 'https://weibo.com/1192329374/KnnG78Yf3?filter=hot&root_comment_id=0&type=comment',
    'accept-language': 'zh-CN,zh;q=0.9,en-CN;q=0.8,en;q=0.7,es-MX;q=0.6,es;q=0.5',
    'cookie': 'SCF=Ang1PsM9AoPOH6r2JMfsf_cG-DF5ce8cSUt-u4AhK1i5wQRp7tw8QyalBIidf_baiVUx4v-heMf80G-h4cl7hGc.; '
              'UOR=www.baidu.com,weibo.com,www.baidu.com; SINAGLOBAL=6060355668604.674.1709552585531; '
              'ULV=1709552585538:1:1:1:6060355668604.674.1709552585531:; XSRF-TOKEN=Up1y5N9BMS-OEZE2f1nAmtO6; '
              'ALF=1712824665; SUB=_2A25I9GQJDeRhGeNI7FEQ-CbIyD2IHXVriPnBrDV8PUJbkNB-LXjhkW1NSCMrRn'
              '-09qYuh7cnbvie_tTPr1BM4rl5; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWSJ_7WUJKD_-HWMMWH1YY_5JpX5KMhUgL'
              '.Fo-cS0ep1hnXe022dJLoIEBLxKqL1KnL1-qLxK-L1h-L1h.LxK-LB.-LB--LxKqLB-BLBK-t; '
              'WBPSESS=DZfzo8PBycpQXciQLyzIdlhjpu3GSrUjHdDzGdeTxn6_FTe0MIZZN5H_tmED0x6_DyEheg9EuSsRBmW52A16epCDHtMn95z8untpF5c1czvTkOBDuN6f0LIVsm6ZiF-rygc-UjWhLBHxdfhdrdJ0zA=='
}
# Minimal anonymous headers with 'Connection: close' (no cookie).
# NOTE(review): appears unused in the visible part of this file — presumably
# intended for the proxy-list sites; verify before removing.
headers1 = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36 HBPC/12.1.3.306',
    'Connection': 'close',
}
def parseUid(uid):
    """Resolve a custom (vanity) Weibo handle, e.g. 'xiena', to its numeric user id.

    Returns the numeric id, or None when the handle cannot be resolved
    (network failure, non-JSON response, or unexpected payload shape).
    """
    try:
        response = requests.get(
            url=f'https://weibo.com/ajax/profile/info?custom={uid}',
            headers=headers,
            timeout=10,  # was unbounded: a dead connection would hang the caller
        )
        return response.json()['data']['user']['id']
    except (requests.RequestException, ValueError, KeyError, TypeError):
        # Narrowed from a bare `except:`; any of these means "not resolvable".
        # ValueError covers a non-JSON body (error/captcha page).
        return None
def getUserInfo(uid):
    """Fetch a Weibo user's profile details.

    `uid` may be numeric (int or digit string) or a custom handle such as
    'xiena', which is first resolved to a numeric id via parseUid().

    Returns:
        dict of profile fields on success;
        dict with 'errorMsg' when the account is gone/banned (HTTP 400);
        None when the uid cannot be resolved or the payload has no data.
    """
    try:
        uid = int(uid)
    except (TypeError, ValueError):
        # Not numeric — treat it as a vanity handle (e.g. 'xiena').
        uid = parseUid(uid)
        if not uid:
            return None
    response = requests.get(url=f'https://weibo.com/ajax/profile/detail?uid={uid}', headers=headers)
    if response.status_code == 400:
        return {
            'errorMsg': '用户可能注销或者封号',
            'location': None,
            'user_link': f'https://weibo.com/{uid}'
        }
    data = response.json().get('data', None)
    if not data:
        return None

    def _nested(outer_key, inner_key):
        # Safely read data[outer_key][inner_key]; either level may be missing.
        outer = data.get(outer_key) or {}
        return outer.get(inner_key, None)

    return {
        'sunshine_credit_level': _nested('sunshine_credit', 'level'),
        'school': _nested('education', 'school'),
        'location': data.get('location', None),
        'gender': data.get('gender', None),
        'birthday': data.get('birthday', None),
        'created_at': data.get('created_at', None),
        'description': data.get('description', None),
        # how many of the accounts I follow also follow this user
        'followers_num': _nested('followers', 'total_number'),
        'ip_location': data.get('ip_location', None)
    }

def getIPlocation(uid):
    """Fetch and print the IP-based location shown on a user's profile.

    Contract kept from the original: prints the location (or the HTTP status
    on failure) and always returns None.
    """
    time.sleep(1)  # crude rate limiting to avoid tripping Weibo's anti-crawl
    response = requests.get(url=f'https://weibo.com/ajax/profile/detail?uid={uid}', headers=headers)
    if response.status_code != 200:
        print(response.status_code)
    resp_json = None  # was "": None is the proper "no data" sentinel
    try:
        resp_json = response.json().get('data', None)
    except ValueError:
        # Body was not JSON (e.g. an HTML error/captcha page); narrowed from
        # a blanket `except Exception`.
        print(response.status_code)
    if not resp_json:
        return None
    ip_location = resp_json.get('ip_location', None)
    if ip_location is None:  # was `== None`
        return None
    # Field looks like 'IP属地：北京' — keep only the part after the
    # full-width colon when present.
    if '：' in ip_location:
        ip_locationRes = ip_location.split('：')[1]
    else:
        ip_locationRes = ip_location
    print(f'{uid}的ip_location是：{ip_locationRes}')
class MultiThreadCrawler:
    """Looks up the IP location of many uids concurrently, one thread per uid."""

    def __init__(self, uids):
        # Sequence of Weibo user ids to crawl.
        self.uids = uids

    def start(self):
        """Spawn a getIPlocation worker for every uid and wait for all of them."""
        workers = [
            threading.Thread(target=getIPlocation, args=(uid,))
            for uid in self.uids
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

def get_uid(path):
    """Load the crawl-result CSV at `path` and return its 'user_id' column as a list."""
    frame = pd.read_csv(path)
    return list(frame['user_id'])

def get_IPlocation_proxy(uid, proxy):
    """Fetch and print a user's IP location, routing the request through `proxy`.

    Same contract as getIPlocation() plus a proxy and a 10s timeout:
    prints the result (or HTTP status on failure) and always returns None.
    """
    # Map the proxy for both schemes: the target URL is https, so the original
    # 'http'-only mapping silently bypassed the proxy entirely.
    response = requests.get(
        url=f'https://weibo.com/ajax/profile/detail?uid={uid}',
        headers=headers,
        proxies={'http': proxy, 'https': proxy},
        timeout=10,
    )
    try:
        if response.status_code != 200:
            print(response.status_code)
        resp_json = None  # was "": None is the proper "no data" sentinel
        try:
            resp_json = response.json().get('data', None)
        except ValueError as e:
            # Body was not JSON (HTML error/captcha page); narrowed from
            # a blanket `except Exception`.
            print(response.status_code, e)
        if not resp_json:
            return None
        ip_location = resp_json.get('ip_location', None)
        if ip_location is None:  # was `== None`
            return None
        # 'IP属地：北京' — keep only the part after the full-width colon.
        if '：' in ip_location:
            ip_locationRes = ip_location.split('：')[1]
        else:
            ip_locationRes = ip_location
        print(f'{uid}的ip_location是：{ip_locationRes}')
    finally:
        # The original only closed on the success path, leaking the
        # connection on every early return; close unconditionally.
        response.close()

def assign_urls_with_proxy(uid_subset, proxy):
    """Sequentially resolve the IP location of every uid in `uid_subset` through `proxy`."""
    for single_uid in uid_subset:
        get_IPlocation_proxy(single_uid, proxy)

def remove_newlines_tabs(input_string):
    """Return `input_string` with every newline and tab character removed."""
    # One C-level pass instead of two chained .replace() calls.
    return input_string.translate(str.maketrans('', '', '\n\t'))

def remove_return(input_string):
    """Return `input_string` with every carriage-return character stripped."""
    return ''.join(ch for ch in input_string if ch != '\r')
# Fetch free proxy IPs from 89ip.cn
def get_IP_list_from_89():
    """Scrape the free-proxy table on www.89ip.cn.

    Returns a list of 'https://ip:port' strings with stray newlines/tabs removed.
    """
    page = requests.get(url='https://www.89ip.cn/')
    dom = etree.HTML(page.text)
    ips = dom.xpath('//table[@class="layui-table"]/tbody/tr/td[1]/text()')
    ports = dom.xpath('//table[@class="layui-table"]/tbody/tr/td[2]/text()')
    return [
        remove_newlines_tabs(f'https://{ip}:{port}')
        for ip, port in zip(ips, ports)
    ]

def get_IP_list_from_3366():
    """Scrape the free-proxy table on www.ip3366.net.

    Returns a list of 'https://ip:port' strings with stray newlines/tabs removed.
    """
    page = requests.get(url='http://www.ip3366.net/')
    dom = etree.HTML(page.text)
    table_xpath = '//table[@class="table table-bordered table-striped"]/tbody/tr'
    ips = dom.xpath(table_xpath + '/td[1]/text()')
    ports = dom.xpath(table_xpath + '/td[2]/text()')
    return [
        remove_newlines_tabs(f'https://{ip}:{port}')
        for ip, port in zip(ips, ports)
    ]

def get_IP_list_from_zhima():
    """Fetch a batch of proxies from the zhima (paid) API.

    The API returns one 'ip:port' per line; each non-empty line becomes an
    'http://ip:port' string with any trailing carriage return stripped.
    """
    url = 'http://webapi.http.zhimacangku.com/getip?neek=321a408a&num=400&type=1&time=1&pro=0&city=0&yys=0&port=1&pack=0&ts=0&ys=0&cs=0&lb=1&sb=&pb=4&mr=1&regions='
    body = requests.get(url, headers=headers).content.decode()
    lines = body.strip().split('\n')
    return [f'http://{remove_return(entry)}' for entry in lines if entry != '']

def proxy_test(proxy):
    """Probe `proxy` by fetching baidu through it and print whether it is usable."""
    try:
        probe = requests.get(url='https://www.baidu.com', proxies={'http': proxy}, timeout=5)
    except requests.exceptions.RequestException as e:
        print(f'{proxy}不可用.错误{e}')
        return
    if probe.status_code == 200:
        print(f'{proxy}可用')
    else:
        print(f'{proxy}不可用')


def main():
    """Entry point: load uids from a CSV, gather and test proxies, then crawl
    each uid's IP location on its own thread (proxies reused round-robin)."""
    uids = get_uid(r'D:\CodeRepo\python\weibo-search\结果文件\董宇辉\董宇辉.csv')  # hard-coded input path
    # print(uids)
    # Multithreading via a thread pool (earlier approach, kept for reference):
    # with ThreadPoolExecutor(max_workers=25) as executor:
    #     features =[executor.submit(getIPlocation, uid) for uid in uids]
    #     for feature in features:
    #         print(feature.result())
    # Multithreading + proxy IPs
    proxies = get_IP_list_from_3366()
    print(proxies)
    for proxy in proxies:
        proxy_test(proxy)
    # num_threads = len(proxies)
    # uids_per_thread = len(uids) // num_threads
    # uid_subsets = [uids[i:i + uids_per_thread] for i in range(0, len(uids), uids_per_thread)]
    #
    # with ThreadPoolExecutor(max_workers=num_threads) as executor:
    #     for uid_subset,proxy in zip(uid_subsets,proxies):
    #         feature = executor.submit(assign_urls_with_proxy, uid_subset,proxy)
    #         print(feature.result())
    # One thread per uid, proxies assigned round-robin.
    # NOTE(review): this spawns len(uids) threads at once — unbounded. A pool
    # (like the commented ThreadPoolExecutor variant above) would be safer for
    # large inputs. Also crashes with ZeroDivisionError if `proxies` is empty.
    threads = []
    for i in range(len(uids)):
        t = threading.Thread(target=get_IPlocation_proxy, args=(uids[i],proxies[i%len(proxies)]))
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()




if __name__ == '__main__':
    # Time the whole crawl run end to end.
    start_time = time.time()  # fixed typo: was `strat_time`
    main()
    elapsed_time = time.time() - start_time
    print(f'程序运行时间：{elapsed_time}秒')
