#!/usr/bin/python3
# -*- coding:utf-8 -*-
import requests
import pandas as pd

import json
import time

# 批量爬虫函数
def batch_crawler(ip, page_no=1, page_size=10):
    """Fetch the action records for one IP and pivot them into a one-row DataFrame.

    Queries the internal generate API for ``ip``, then pivots the returned
    records so each record's ``datasource`` becomes a column holding that
    record's ``action``.

    Args:
        ip: IP address to query (also becomes the result's index value).
        page_no: page number for the paginated API (default 1).
        page_size: records per page (default 10).

    Returns:
        A one-row ``pd.DataFrame`` indexed by ``ip`` with one column per
        ``datasource``, or ``None`` on a non-200 response or request error.
    """
    base_url = "http://10.193.51.34/api/v1/generate/"
    params = {
        'pageNo': page_no,
        'pageSize': page_size,
        'ip': ip
    }

    try:
        # Explicit timeout so one dead endpoint cannot hang the whole batch.
        response = requests.get(base_url, params=params, timeout=10)
        if response.status_code == 200:
            # NOTE: was `json = response.json()`, which shadowed the stdlib
            # `json` module imported at the top of the file.
            payload = response.json()
            datas = payload['result']['data']
            # Pivot via a plain dict, then build the frame once. The old code
            # assigned columns one-by-one into an empty DataFrame, which is
            # fragile (length-1 list assigned against a 0-row index).
            pivoted = {'ip': ip}
            for record in datas:
                # Later duplicates of the same datasource overwrite earlier
                # ones, matching the original column-assignment behavior.
                pivoted[record['datasource']] = record['action']
            new_df = pd.DataFrame([pivoted])
            new_df.set_index('ip', drop=True, inplace=True)
            return new_df
        else:
            print("请求失败，状态码：", response.status_code)
            return None
    except requests.exceptions.RequestException as e:
        print("请求异常：", e)
        return None

# 主函数
def main():
    """Read IPs from ip.csv, crawl each one, and write check_result.csv.

    Reads the ``ip`` column of ``ip.csv``, calls :func:`batch_crawler` for
    each address with a small delay between requests, concatenates the
    per-IP frames, prints them, and saves them to ``check_result.csv``.
    """
    # Read the CSV file listing the IP addresses to check.
    ip_df = pd.read_csv('ip.csv')

    # Collect per-IP frames and concatenate once at the end:
    # DataFrame.append was removed in pandas 2.0, and repeated appends
    # were quadratic anyway.
    frames = []
    for _, row in ip_df.iterrows():
        result = batch_crawler(ip=row['ip'])
        # batch_crawler returns None on failure; appending None would raise.
        if result is not None:
            frames.append(result)
        # Throttle so we don't hammer the endpoint.
        time.sleep(0.1)

    results = pd.concat(frames) if frames else pd.DataFrame()
    print(results)
    results.to_csv('check_result.csv')

# Script entry point: run the crawl only when executed directly, not on import.
if __name__ == "__main__":
    main()