import requests
from twitter.scraper import Scraper
import time
import csv
import pandas as pd

"""获取时间"""


def get_time():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS | '.

    Used as a prefix for log/print messages throughout this module.
    """
    stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    return f"{stamp} | "


"""添加换行"""


def add_space(value):
    """Return *value* coerced to a string with a trailing tab appended.

    The trailing tab keeps long numeric ids intact when the CSV output is
    opened in spreadsheet software.
    """
    return ''.join((str(value), '\t'))


class Followers:
    """Scrape the follower and following ID lists of one Twitter account.

    Credentials and the target account are read from a plain-text config file
    (one value per line: email, username, password, organization screen name,
    proxy host:port).  IDs are fetched through the v1.1 `ids` endpoints and
    written, de-duplicated, to a CSV file named
    ``<organization_username>_id_list.csv``.
    """

    def __init__(self, config_path):
        # Path to the config file; parsed by read_file().
        self.config_path = config_path

        # Login email
        self.email = ''
        # Login username
        self.username = ''
        # Login password
        self.password = ''
        # Screen name of the organization account to scrape
        self.organization_username = ''
        # Proxy address as "host:port"; filled in by read_file()
        self.proxy = ''

        self.url_followers_only_id_v1 = "https://api.twitter.com/1.1/followers/ids.json"

        self.url_following_only_id_v1 = "https://api.twitter.com/1.1/friends/ids.json"

        # Shared query parameters; user_id and cursor are set per request.
        # stringify_ids makes the API return ids as strings, so they can be
        # concatenated directly when written to CSV.
        self.params_only_id_v1 = {
            "user_id": "",
            "count": 5000,
            "stringify_ids": True,
            "cursor": -1
        }
        # Proxy mapping for requests; rebuilt by read_file() once the real
        # proxy address is known (self.proxy is still empty at this point).
        self.proxies = {
            'http': 'http://' + self.proxy,
            'https': 'http://' + self.proxy
        }
        # Request headers copied from a logged-in browser session.
        # NOTE(review): the csrf token / cookie values are account-specific
        # and will expire; they should eventually come from configuration.
        self.headers_v1 = {
            "x-csrf-token": "31ce2be3340c1d57fb67faa049aae9999f69852087db12c9cfd1f40229ac751511cfc01e9d95ac750d3c352cf8cc267f613c2c6160356da1d1e7d4ecca806f4340404f9e03fe5c40709d83003de1cce9",
            "x-twitter-active-user": "yes",
            "x-twitter-auth-type": "OAuth2Session",
            "x-twitter-client-language": "en",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 Edg/109.0.1518.55",
            "sec-fetch-site": "same-origin",
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "authorization": "Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA",
            "sec-ch-ua": "\" Not;A Brand\";v=\"99\", \"Microsoft Edge\";v=\"103\", \"Chromium\";v=\"103\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\";",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "cookie": "guest_id_marketing=v1%3A167353305004005851; guest_id_ads=v1%3A167353305004005851; personalization_id=\"v1_fhGTytiCvIIAc73r3eWVsg==\"; guest_id=v1%3A167353305004005851; des_opt_in=Y; _gcl_au=1.1.2128905904.1673533085; kdt=BVEOudjwXLaylztM4MZMyQuQZn4dSZTtxCyip13x; auth_token=033a43abd193b0ce0115cad8d2f29b7d4b4e64d0; ct0=31ce2be3340c1d57fb67faa049aae9999f69852087db12c9cfd1f40229ac751511cfc01e9d95ac750d3c352cf8cc267f613c2c6160356da1d1e7d4ecca806f4340404f9e03fe5c40709d83003de1cce9; twid=u%3D1613557794900447232; dnt=1; external_referer=padhuUp37zixoA2Yz6IlsoQTSjz5FgRcKMoWWYN3PEQ%3D|0|8e8t2xd8A2w%3D; _ga_BYKEBDM7DS=GS1.1.1673575671.1.1.1673575796.0.0.0; at_check=true; _gid=GA1.2.1299708304.1673929741; lang=en; mbox=PC#b3d8896cfa664f74b3b0eb9380f68c17.35_0#1737175748|session#d2a04d8c02f041bab0bea1c8b5c5111c#1673932808; _ga=GA1.1.1977811539.1673533064; _ga_34PHSZMC42=GS1.1.1673929754.5.1.1673931101.0.0.0"
        }
        # True once the CSV header row has been written.
        self.has_title = False
        # Output CSV filename; set in run().
        self.filename_output = ''
        # Maximum number of network retries per request.
        self.retry_times = 10

    def read_file(self):
        """Parse the config file and rebuild the proxy mapping.

        Returns (email, username, password, organization_username, proxy).
        Missing trailing lines leave the corresponding attributes unchanged.
        """
        with open(self.config_path, "r") as fs:
            content = fs.read().strip().split('\n')
        if len(content) >= 1:
            self.email = content[0].strip()
        if len(content) >= 2:
            self.username = content[1].strip()
        if len(content) >= 3:
            self.password = content[2].strip()
        if len(content) >= 4:
            self.organization_username = content[3].strip()
        if len(content) >= 5:
            self.proxy = content[4].strip()
        self.proxies = {
            'http': 'http://' + self.proxy,
            'https': 'http://' + self.proxy
        }

        return self.email, self.username, self.password, self.organization_username, self.proxy

    def write_csv(self, list_one_person):
        """Append scraped id records to the output CSV, then de-duplicate.

        *list_one_person* is a list of pages, each page a list of dicts with
        at least ``id`` and ``type`` keys.  After appending, the whole file is
        rewritten with duplicate ids removed.  A tab is appended to each id so
        spreadsheet tools treat it as text instead of truncating the number.
        """
        header = ['id', 'type']
        # with-statement fixes the file-handle leak in the original code.
        with open(self.filename_output, 'a', encoding='utf-8-sig') as f:
            writer = csv.writer(f, lineterminator='\n')
            if not self.has_title:
                writer.writerow(header)
                self.has_title = True
            for page in list_one_person:
                for record in page:
                    writer.writerow([record['id'] + '\t', record['type']])

        # Re-read the file and drop rows with duplicate ids.
        df = pd.read_csv(self.filename_output, header=0)
        df = df.drop_duplicates(subset=['id'])
        # Keep the trailing-tab convention on the id column.
        df['id'] = df['id'].astype(str) + '\t'
        df.to_csv(self.filename_output, index=False)
        print(f"{self.filename_output}写入完成")

    def _get_with_retries(self, url):
        """GET *url* with the shared params/headers, retrying on network errors.

        Returns the Response, or None once self.retry_times retries have all
        failed.  Uses a local attempt counter so the retry budget does not
        leak across requests (the original mutated self.retry_times and reset
        it to 100 on success, making the "retry 10 times" message wrong).
        """
        attempts = 0
        while True:
            try:
                return requests.get(url=url,
                                    proxies=self.proxies,
                                    headers=self.headers_v1,
                                    params=self.params_only_id_v1)
            except requests.exceptions.RequestException:
                attempts += 1
                if attempts > self.retry_times:
                    return None
                print(get_time() + f"网络异常，正在重试，重试{attempts}次，重试10次后结束！")
                time.sleep(1)

    def _request_only_ids_by_v1(self, url, user_id, follow_type):
        """Page through a v1.1 ids endpoint for *user_id*.

        Returns a list of pages; each page is a list of user dicts tagged with
        *follow_type* ("followers" or "following").  Always returns a list:
        when the retry budget is exhausted, or the API answers without an
        "ids" field (e.g. rate-limited), the pages collected so far are
        returned — the original code could loop forever or crash (None.json())
        on those paths.
        """
        self.params_only_id_v1["user_id"] = user_id
        self.params_only_id_v1["cursor"] = -1
        list_one_person = []
        while True:
            response = self._get_with_retries(url)
            if response is None:
                # Retry budget exhausted: return what we have instead of crashing.
                return list_one_person
            response_json = response.json()
            if "ids" not in response_json:
                # Unexpected payload (error/rate limit): stop instead of
                # re-requesting the same cursor forever.
                return list_one_person
            page = []
            for twitter_id in response_json["ids"]:
                page.append({
                    "id": twitter_id,
                    "name": "",
                    "username": "",
                    "type": follow_type,
                    "derived_id": '',
                    "derived_name": 'name',
                    "derived_username": '',
                })
            list_one_person.append(page)
            print(get_time() + user_id + "已完成一轮only id_V1.0")
            # next_cursor_str == "0" marks the final page.
            if "next_cursor_str" in response_json and response_json["next_cursor_str"] != "0":
                self.params_only_id_v1["cursor"] = response_json["next_cursor_str"]
            else:
                return list_one_person

    def request_to_twitter_for_followers_only_id_by_v1(self, user_id):
        """Fetch all follower ids of *user_id* (see _request_only_ids_by_v1)."""
        return self._request_only_ids_by_v1(self.url_followers_only_id_v1, user_id, "followers")

    def request_to_twitter_for_following_only_id_by_v1(self, user_id):
        """Fetch all following ids of *user_id* (see _request_only_ids_by_v1)."""
        return self._request_only_ids_by_v1(self.url_following_only_id_v1, user_id, "following")

    def run(self):
        """Resolve the organization's numeric id, scrape followers and
        following, write both to CSV and return them.

        Returns {"followers": [...pages...], "following": [...pages...]}.
        """
        self.read_file()

        scraper = Scraper(self.email, self.username, self.password, debug=1, save=True)

        user_info = scraper.users([self.organization_username])
        print(user_info)
        user_id = ''  # renamed from `id` to avoid shadowing the builtin
        try:
            user_id = user_info[0]["data"]["user"]["result"]["rest_id"]
        except (KeyError, IndexError, TypeError):
            # Narrowed from a bare except: only lookup failures are expected here.
            print("读取id失败！！")

        self.filename_output = self.organization_username + "_id_list.csv"
        followers = self.request_to_twitter_for_followers_only_id_by_v1(user_id)
        self.write_csv(followers)
        following = self.request_to_twitter_for_following_only_id_by_v1(user_id)
        self.write_csv(following)

        return {"followers": followers, "following": following}
