import logging
import math
import os
import re

import multiprocessing as mp

import aiohttp
import asyncio
import itertools

# Logging setup: timestamped INFO-level output.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
)

# Shared aiohttp ClientSession; populated by make_asyncio_beat() per batch.
session = None
# Total per-request timeout budget, in seconds.
timeout = 30
# File-write cadence in seconds (only used by the commented-out writer helper).
write_sleep = 5
# Concurrency cap handed to the semaphore in scrape_api().
scrape_num = 100000
# Number of worker processes started per round.
multi = 5
# First source-file id to process (minimum 0).
start_num = 70
# End of the source-file id range, exclusive (maximum 256).
end_num = 75


# Successfully scraped (url, title) pairs -- only consumed by the commented-out
# writer helpers below; scrape_api writes straight to the log file instead.
t_list = []
# Hosts that answered with a non-200 status.
not_list = []
# Output directories for the per-source log files.
os.makedirs('result', exist_ok=True)
os.makedirs('no_result', exist_ok=True)
# Active data sources, one entry per source-file id, shaped:
#   [id, success-log handle, failure-log handle, data-source handle]
# NOTE: handles are opened at import time and stay open for the process life.
total_num_list_y = []
for i in range(start_num, end_num):
    # Ids 0/127/224/225 are excluded -- presumably invalid sources; confirm.
    if i in [0, 127, 224, 225]:
        continue
    total_num_list_y.append([
        i,
        open(rf'result/log{i}.txt', 'a', encoding='utf-8'),      # successes
        open(rf'no_result/log{i}.txt', 'a', encoding='utf-8'),   # failures
        open(rf'D:\code\BaiduSyncdisk\PY\yecao\tkWifi\文件夹0\file{i}.txt', 'r', encoding='utf8'),  # input hosts
    ])
# Log the roster once, after the loop; the original logged the entire growing
# list on every iteration, producing quadratic log output.
logging.info(total_num_list_y)


# How many full rounds of `multi` processes are needed, plus the remainder
# that does not fill a whole round.
cycles_num, cycles_num_n = divmod(len(total_num_list_y), multi)
logging.info(f'{len(total_num_list_y)}个数据源{multi}个进程应需循环{cycles_num}次,和剩余{cycles_num_n}次')

# File-reading configuration.
line_num = 256*256*256  # 16,777,216 lines per source file
line_num_n = 100000  # lines pulled per batch
# Number of batches needed to cover the whole file.  BUG FIX: the original
# computed math.ceil(line_num // line_num_n) -- `//` already floors, making
# the ceil a no-op (167), so the final partial batch of 77,216 lines was
# never scheduled.  True division restores the intended round-up (168).
line_read_num = math.ceil(line_num / line_num_n)


# Fetch one endpoint and record its <title> on success.
async def scrape_api(url, page):
    """Fetch ``http://<url[0]>`` and, on a 200 with a <title>, append
    ``url,title`` to the success log for *page*.

    url  -- single-element list produced by url_open() (the host string).
    page -- source-file id; locates the matching entry in total_num_list_y.

    Best-effort: network failures are swallowed, non-200 hosts go to
    not_list.
    """
    # logging.info(url)
    index = next((i for i, entry in enumerate(total_num_list_y) if entry[0] == page), None)
    # BUG FIX: the original built a fresh Semaphore on every call, so it never
    # limited anything.  Lazily create ONE shared semaphore per process so the
    # scrape_num knob actually caps concurrency.
    sem = getattr(scrape_api, '_semaphore', None)
    if sem is None:
        sem = scrape_api._semaphore = asyncio.Semaphore(scrape_num)
    async with sem:
        try:
            async with session.get(f'http://{url[0]}') as response:
                if response.status == 200:
                    html_content = await response.text()
                    # Explicit match check instead of the original bare
                    # `except: pass` around .group(1).
                    match = re.search(r'<title>(.*?)</title>', html_content)
                    if match:
                        title = match.group(1).strip()
                        logging.info('%s,%s', url, title)
                        total_num_list_y[index][1].write(f'{url},{title}\n')
                        total_num_list_y[index][1].flush()
                        # t_list.append([url, title])
                else:
                    not_list.append(url)
        except Exception:
            # Unreachable/slow hosts are expected during a bulk scan; ignore.
            pass

# Pull the next batch of line_num_n lines (100000 -- not the 10000 the old
# note claimed) from the file numbered `page`; call repeatedly to drain it.
async def url_open(page):
    """Read the next ``line_num_n`` lines from the data source for *page*.

    Returns a list of single-element lists (one stripped host string each);
    an empty list signals the file is exhausted.  The file handle lives in
    total_num_list_y, so successive calls continue where the last stopped.
    """
    logging.info('读取数据')
    index = next((pos for pos, entry in enumerate(total_num_list_y) if entry[0] == page), None)
    source_file = total_num_list_y[index][3]
    batch = [raw.strip().split('\n') for raw in itertools.islice(source_file, line_num_n)]
    if not batch:
        logging.info(f'文件{page}读取完了')
    return batch

# Build and run one batch of scrape tasks.
async def make_asyncio_beat(page):
    """Scrape one batch of hosts for *page* concurrently.

    Opens a fresh ClientSession per batch and publishes it via the module
    global ``session`` so scrape_api() can reach it.  BUG FIX: the session is
    now managed with ``async with``, so it is closed even when gather()
    raises; the original's explicit close() was skipped on error, leaking the
    connector.
    """
    global session
    async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as sess:
        session = sess
        tasks = [asyncio.ensure_future(scrape_api(host, page)) for host in await url_open(page)]
        logging.info(f'{page}创建任务完成')
        await asyncio.gather(*tasks)



# Root coroutine for a single worker process.
async def main(page):
    """Drain the whole data source for *page*, one batch per iteration."""
    logging.info('调用主函数')
    logging.info(f'{page}开始创建任务')
    batches_done = 0
    while batches_done < line_read_num:
        await make_asyncio_beat(page)
        batches_done += 1

#
# # 辅助写入文件定时器
# def run(page):
#     time.sleep(write_sleep)
#     write(page)
#     run(page)
#
# def write(page):
#     # total_num_list_y[ip,成功，失败，数据源]
#     logging.info(f'正在写入log{page}.txt文件')
#     index = next((i for i, page_index in enumerate(total_num_list_y) if page_index[0] == page), None)
#
#     for _ in range(len(t_list)):
#         data = t_list.pop()
#         try:
#             total_num_list_y[index][1].write(f'{data[0]},{data[1]}\n')
#         except Exception as e:
#             logging.error(e)
#
#     for _ in range(len(not_list)):
#         data = not_list.pop()
#         total_num_list_y[index][2].write(f'{data}\n')
#     total_num_list_y[index][2].flush()
#     total_num_list_y[index][1].flush()

# Per-process entry point.
def mp_run_main(page):
    """Process entry point: run the async main() for one source-file id.

    Uses asyncio.run() instead of the deprecated get_event_loop() /
    run_until_complete() pair; it also closes the loop when main() returns,
    which the original never did.
    """
    asyncio.run(main(page))


# One worker process per active data source; each worker receives the
# source-file id as its only argument.
process_list = [
    mp.Process(target=mp_run_main, args=(source[0],))
    for source in total_num_list_y
]

# Scratch list holding the processes of the round currently running.
process_list_temp = []
def root_run():
    """Launch workers in rounds of ``multi`` processes, joining every round
    before starting the next; consumes ``cycles_num`` rounds (a global,
    decremented in place)."""
    global cycles_num
    while cycles_num:
        logging.info('进程轮回')
        for _ in range(multi):
            worker = process_list.pop(0)
            worker.start()
            process_list_temp.append(worker)
        for _ in range(multi):
            process_list_temp.pop(0).join()
        cycles_num -= 1

if __name__ == '__main__':
    # Multiprocess run: all full rounds first; when the source count does not
    # divide evenly by `multi`, run one final smaller round for the remainder
    # (root_run reads the globals `multi` and `cycles_num` on each call).
    root_run()
    if cycles_num_n > 0:
        logging.info('进程1')
        multi = cycles_num_n
        cycles_num += 1
        root_run()

    # Single-process alternative: call mp_run_main(<source-file id>) directly.