import requests
from lxml import etree
import threading
import pandas as pd


class WeiboInfoCrawler(object):
    """Crawler for Weibo user profiles.

    Reads a CSV of user ids, fetches profile details from Weibo's ajax
    endpoints, and can harvest/validate free proxies in order to resolve
    the IP location and gender of many users concurrently.
    """

    def __init__(self, headers, path):
        """
        :param headers: request headers (should carry a valid Weibo cookie)
        :param path: path to a UTF-8 CSV file containing a ``user_id`` column
        """
        self.headers = headers
        self.path = path
        self.iplocation_dict = {}  # uid -> IP location string (None on failure)
        self.gender_dict = {}      # uid -> gender code
        self.valid_proxies = []    # proxies that passed the liveness check
        self.raw_data = self.get_raw_data(self.path)

    def get_raw_data(self, path):
        """Load the input CSV into a DataFrame."""
        return pd.read_csv(path, encoding='utf-8')

    # Get the uid list from the input file
    def get_uid(self):
        """Return all values of the ``user_id`` column as a list."""
        return self.raw_data['user_id'].tolist()

    def parse_uid(self, uid):
        """Resolve a custom (alphanumeric) profile name to a numeric uid.

        :return: the numeric uid, or ``None`` when the lookup fails
        """
        response = requests.get(url=f'https://weibo.com/ajax/profile/info?custom={uid}', headers=self.headers)
        try:
            return response.json()['data']['user']['id']
        except (KeyError, TypeError, ValueError):
            # narrowed from a bare ``except``: covers a non-JSON body,
            # missing keys, and an unexpected payload shape
            return None

    # Fetch a user's profile details
    def get_user_info(self, uid):
        """Fetch profile details for ``uid``.

        :param uid: a numeric uid, or a custom name such as ``xiena``
        :return: a dict of profile fields; an error dict for banned/closed
                 accounts (HTTP 400); or ``None`` when nothing is available
        """
        try:
            uid = int(uid)
        except (TypeError, ValueError):
            # uid is a custom English name; resolve it to the numeric id first
            uid = self.parse_uid(uid)
            if not uid:
                return None
        response = requests.get(url=f'https://weibo.com/ajax/profile/detail?uid={uid}', headers=self.headers)
        if response.status_code == 400:
            # account is probably deactivated or banned
            return {
                'errorMsg': '用户可能注销或者封号',
                'location': None,
                'user_link': f'https://weibo.com/{uid}'
            }
        resp_json = response.json().get('data', None)
        if not resp_json:
            return None
        # ``or {}`` collapses both a missing key and an explicit null,
        # replacing the original if/else ladders
        sunshine_credit_level = (resp_json.get('sunshine_credit') or {}).get('level')
        school = (resp_json.get('education') or {}).get('school')
        # how many of the people I follow also follow this user
        followers_num = (resp_json.get('followers') or {}).get('total_number')
        return {
            'sunshine_credit_level': sunshine_credit_level,
            'school': school,
            'location': resp_json.get('location', None),
            'gender': resp_json.get('gender', None),
            'birthday': resp_json.get('birthday', None),
            'created_at': resp_json.get('created_at', None),
            'description': resp_json.get('description', None),
            'followers_num': followers_num,
            'ip_location': resp_json.get('ip_location', None)
        }

    # Fetch IP location and gender through a proxy (thread target)
    def get_iplocation_gender_proxy(self, uid, proxy):
        """Fetch ``ip_location`` and ``gender`` for ``uid`` via ``proxy`` and
        record the results in the shared dicts.
        """
        try:
            # BUG FIX: the target URL is https, so the proxy must also be
            # registered under the 'https' scheme or it was never used
            response = requests.get(url=f'https://weibo.com/ajax/profile/detail?uid={uid}', headers=self.headers,
                                    proxies={'http': proxy, 'https': proxy}, timeout=10)
        except requests.exceptions.RequestException as e:
            # a dead proxy or timeout must not kill the worker thread
            print(e)
            self.iplocation_dict[uid] = None
            return None
        if response.status_code != 200:
            self.iplocation_dict[uid] = None
            # BUG FIX: the original fell through and parsed the failed response
            return None
        try:
            resp_json = response.json().get('data', None)
        except ValueError as e:
            print(e)
            resp_json = None
        if not resp_json:
            return None
        ip_location = resp_json.get('ip_location', None)
        # the field looks like 'IP属地：北京'; keep what follows the full-width
        # colon (guard added: the field may be null, which used to raise)
        if ip_location and '：' in ip_location:
            ip_location = ip_location.split('：')[1]
        self.iplocation_dict[uid] = ip_location
        self.gender_dict[uid] = resp_json.get('gender', None)

    # Scrape proxy ips from 89ip
    def get_ip_list_from_89(self, page_range):
        """Scrape proxy URLs from www.89ip.cn pages 0..page_range inclusive."""
        def single_page_parse(page_num):
            # parse one listing page into cleaned 'https://ip:port' strings
            url = f'https://www.89ip.cn/index_{page_num}.html'
            response = requests.get(url=url)
            html_element = etree.HTML(response.text)
            ip_list = html_element.xpath('//table[@class="layui-table"]/tbody/tr/td[1]/text()')
            port_list = html_element.xpath('//table[@class="layui-table"]/tbody/tr/td[2]/text()')
            proxy_list = [f'https://{ip}:{port}' for ip, port in zip(ip_list, port_list)]
            return [self.remove_newlines_tabs(proxy) for proxy in proxy_list]

        return [proxy for page in range(0, page_range + 1) for proxy in single_page_parse(page)]

    # Scrape proxy ips from ip3366
    def get_ip_list_from_3366(self):
        """Scrape proxy URLs from the www.ip3366.net front page."""
        url = 'http://www.ip3366.net/'
        response = requests.get(url=url)
        html_element = etree.HTML(response.text)
        ip_list = html_element.xpath('//table[@class="table table-bordered table-striped"]/tbody/tr/td[1]/text()')
        port_list = html_element.xpath('//table[@class="table table-bordered table-striped"]/tbody/tr/td[2]/text()')
        proxy_list = [f'https://{ip}:{port}' for ip, port in zip(ip_list, port_list)]
        return [self.remove_newlines_tabs(proxy) for proxy in proxy_list]

    # Fetch proxy ips from the Zhima proxy API
    def get_ip_list_from_zhima(self):
        """Fetch proxy URLs from the Zhima paid-proxy API (one ip:port per line)."""
        url = 'http://webapi.http.zhimacangku.com/getip?neek=321a408a&num=400&type=1&time=1&pro=0&city=0&yys=0&port=1&pack=0&ts=0&ys=0&cs=0&lb=1&sb=&pb=4&mr=1&regions='
        responses = requests.get(url, headers=self.headers)
        html = responses.content.decode().strip().split('\n')
        return [f'http://{self.remove_return(proxy)}' for proxy in html if proxy != '']

    # Strip newlines and tabs
    def remove_newlines_tabs(self, proxy):
        """Return ``proxy`` with all newline and tab characters removed."""
        return proxy.replace('\n', '').replace('\t', '')

    # Strip carriage returns (and newlines)
    def remove_return(self, proxy):
        """Return ``proxy`` with all CR and LF characters removed."""
        return proxy.replace('\r', '').replace('\n', '')

    # Liveness check for one proxy
    def valid_proxy_check(self, proxy):
        """Probe Baidu through ``proxy``; append it to ``valid_proxies`` on success.

        :return: True when the proxy answered with HTTP 200
        """
        url = 'https://www.baidu.com'
        try:
            # BUG FIX: register the proxy under 'https' too — the probe URL is
            # https, so an 'http'-only mapping bypassed the proxy entirely
            response = requests.get(url=url, proxies={'http': proxy, 'https': proxy}, timeout=5)
        except requests.exceptions.RequestException:
            return False
        if response.status_code == 200:
            self.valid_proxies.append(proxy)
            return True
        return False

    # Filter live proxies with one thread per candidate
    def get_valid_proxies(self, page_range):
        """Scrape 89ip and return only the proxies that pass the liveness check."""
        proxies = self.get_ip_list_from_89(page_range=page_range)
        threads = [threading.Thread(target=self.valid_proxy_check, args=(proxy,))
                   for proxy in proxies]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        return self.valid_proxies

    # Build the IP-location and gender dicts concurrently
    def get_iplocation_gender_dict(self, uids, proxies):
        """Resolve IP location and gender for every uid, one thread per uid,
        assigning proxies round-robin.

        :return: (iplocation_dict, gender_dict)
        """
        if not proxies:
            # guard added: the round-robin below would divide by zero
            return self.iplocation_dict, self.gender_dict
        threads = [threading.Thread(target=self.get_iplocation_gender_proxy,
                                    args=(uid, proxies[i % len(proxies)]))
                   for i, uid in enumerate(uids)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        return self.iplocation_dict, self.gender_dict
