import requests
from twitter.scraper import Scraper
import time
import csv
import pandas as pd
import numpy as np
import logging
import re


class MyLogHandler(logging.Handler):
    """Logging handler that buffers formatted records in memory."""

    # Matches ANSI SGR (terminal color) escape sequences.
    _ANSI_SGR = re.compile(r'\x1b\[[0-9;]*m')

    def __init__(self):
        super().__init__()
        # Collected log lines, stripped of terminal color codes.
        self.logs = []

    def emit(self, record):
        """Format *record*, drop ANSI color codes, and store the text."""
        text = self._ANSI_SGR.sub('', self.format(record))
        self.logs.append(text)


def write_csv(list_one_person, title=False, path=None):
    """Append one person's row to the output CSV.

    Args:
        list_one_person: [id, name, username] triple; the id gets a trailing
            tab so spreadsheet apps treat it as text rather than a number.
        title: False on the very first call only -- the header row (and the
            UTF-8 BOM for Excel) is written just that once.
        path: output file; defaults to the module-level ``filename_output``.
    """
    if path is None:
        path = filename_output
    header = ['id', 'twitter_name', 'twitter_username']
    # Write the BOM only when creating the file: appending with 'utf-8-sig'
    # on every call embeds a stray BOM before each subsequent row, which the
    # reader would see as a '\ufeff' glued onto the id field.
    encoding = 'utf-8' if title else 'utf-8-sig'
    with open(path, 'a', encoding=encoding) as f:
        writer = csv.writer(f, lineterminator='\n')
        if not title:
            writer.writerow(header)
        writer.writerow([list_one_person[0] + '\t', list_one_person[1], list_one_person[2]])


# Local HTTP proxy endpoint (change to your own machine's proxy port).
# NOTE(review): defined but not used anywhere in this chunk -- presumably
# consumed elsewhere or by the scraper; verify before removing.
proxy = "127.0.0.1:7890"

# Input spreadsheet holding the seed accounts (the 'twitter' column is read).
excel_path = r'D:\Code\程序\python\twitter\myTwitter\data\seed\RAND Corporation_seed.xlsx'

# Output CSV receiving id / twitter_name / twitter_username rows.
filename_output = r"D:\Code\程序\python\twitter\myTwitter\data\seed\RAND Corporation_out1_id_list.csv"


def log_analyze(log_handler):
    """Inspect buffered scraper logs for rate-limit info and sleep if needed.

    Assumes ``log_handler.logs[2]`` contains the remaining-request count and
    ``logs[3]`` the reset time in minutes (format produced by the scraper's
    debug logging -- TODO confirm against twitter.scraper's output).
    Always prints 'error' and clears the buffer before returning.
    """
    print(log_handler.logs)
    # Guard: the scraper may not have produced the expected 4+ log lines yet;
    # the original indexed logs[2]/logs[3] unconditionally and raised.
    if len(log_handler.logs) < 4:
        print('error')
        log_handler.logs.clear()
        return
    remain_times = re.findall(r'\d+', log_handler.logs[2])
    reset_digits = re.findall(r'\d+', log_handler.logs[3])
    # +1 minute of slack on top of the advertised reset time.
    wait_time = (int(reset_digits[0]) + 1) * 60 if reset_digits else 60
    # Only one request left in the window: sleep until the limit resets.
    if remain_times and int(remain_times[0]) == 1:
        print(gettime() + "开始等待，预计等待{}分钟".format(wait_time // 60))
        time.sleep(wait_time)
        print(gettime() + "等待结束")
    print('error')
    log_handler.logs.clear()


def gettime():
    """Return the current local time as 'YYYY-MM-DD HH:MM:SS|' for log prefixes."""
    stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    return stamp + "|"


# NOTE(review): hard-coded account credentials committed in plain text --
# move to environment variables / a config file and rotate these.
email, username, password = 'daimergouethc@hotmail.com', 'EudoraBron72039', 'kyob4924'
# debug=1 makes the scraper emit the rate-limit log lines that MyLogHandler
# captures; save=False presumably disables writing API responses to disk --
# confirm against the twitter.scraper documentation.
scraper = Scraper(email, username, password, debug=1, save=False)
log_handler = MyLogHandler()
scraper.logger.addHandler(log_handler)

# False until write_csv() has emitted the CSV header row once.
title = False

# Seed spreadsheet; only the 'twitter' column (screen names) is iterated.
df = pd.read_excel(excel_path, header=0)
df = df['twitter']

count = 0

# Resolve each seed screen name: fetch its rest_id + display name, append to
# the CSV, and honour the scraper's rate-limit log messages between requests.
for data in df:
    count += 1
    print(count)
    # Persist progress so an interrupted run can be resumed manually.
    # A distinct name is used so the iterated `df` series is not shadowed.
    progress_df = pd.DataFrame({'data': [count]})
    # Save the DataFrame as a txt file without the index column.
    progress_df.to_csv(r'D:\Code\程序\python\twitter\myTwitter\data\seed\test_count.txt', index=False, header=None)
    try:
        # Retry the lookup until the network call succeeds. `except Exception`
        # (not bare `except`) so Ctrl+C / SystemExit still abort the run.
        while True:
            try:
                screname = scraper.users([data])
                break
            except Exception:
                print(gettime() + str(data) + "网络异常！1s后重试")
                time.sleep(1)
        user_id = screname[0]['data']['user']['result']['rest_id']
        name = screname[0]['data']['user']['result']['legacy']['name']
        write_csv([user_id, name, data], title=title)
        title = True  # header is written only for the very first row
        print(log_handler.logs)
        # logs[2]/logs[3] are assumed to hold the remaining-request count and
        # the reset time in minutes (scraper debug format -- TODO confirm);
        # parse failures fall through to the `except` below.
        remain_times = re.findall(r'\d+', log_handler.logs[2])
        wait_time = (int(re.findall(r'\d+', log_handler.logs[3])[0]) + 1) * 60
        if int(remain_times[0]) == 1:
            # Only one request left in the window: sleep until it resets.
            print(gettime() + "开始等待，预计等待{}分钟".format(wait_time // 60))
            time.sleep(wait_time)
            print(gettime() + "等待结束")
        log_handler.logs.clear()

    except Exception:
        # Rate-limit / parse-failure handling, shared with log_analyze();
        # it also prints 'error' and clears the log buffer.
        log_analyze(log_handler)
        continue
