import requests
from lxml import etree
import csv
import time
import random
import asyncio
import aiohttp

# Accumulated output rows; the first row is the CSV header written at the end.
data = [['title', 'Status', 'CommissioningYear', 'NameplateCapacity', 'Technology', 'Location', 'Coordinates']]

# 1-based listing-page counter (used only for progress logging).
page_size = 1
# NOTE(review): data_page is only referenced by the commented-out CSV dump in
# the __main__ block — presumably a leftover from an earlier solar-farm run.
data_page = [['index', 'page_url']]

# Shared HTTP headers for every request: session cookie, referer and a
# desktop Chrome user-agent string.
header = {
    'Cookie': '_ga=GA1.1.1762055020.1733582188; _ga_90LMYRYHEQ=GS1.1.1733655043.6.0.1733655043.60.0.0',
    'referer': 'https://www.gem.wiki/VNG_AG_solar_farm',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
}
# Base URL of the Global Energy Monitor wiki; all relative hrefs are joined onto it.
root_url = "https://www.gem.wiki"


async def tbody_handler(tbody):
    """Parse a wikitable ``<tbody>`` element into a list of row dicts.

    The first ``<tr>`` is treated as the header row: its ``<th>`` texts
    become the dict keys. Every following ``<tr>``'s ``<td>`` texts become
    the values. Newlines are stripped from both keys and values.

    :param tbody: lxml element (anything exposing ``.xpath``) for the body.
    :return: list of ``{header: cell_text}`` dicts, one per data row.
    """
    tr_list = tbody.xpath('tr')
    # Header cells supply the column names.
    head_td_list = tr_list[0].xpath('th')
    list_data = []
    for tr in tr_list[1:]:
        data_map = {}
        # zip() tolerates a th/td count mismatch instead of raising IndexError;
        # empty text() results (blank cells) fall back to '' instead of crashing.
        for head_td, td in zip(head_td_list, tr.xpath('td')):
            head_texts = head_td.xpath('text()')
            td_texts = td.xpath('text()')
            head_text = head_texts[0] if head_texts else ''
            td_value = td_texts[0] if td_texts else ''
            data_map[head_text.replace("\n", "")] = td_value.replace("\n", "")
        list_data.append(data_map)
    return list_data


async def get_value(detail, target):
    """Case-insensitive substring lookup of *target* among *detail*'s keys.

    Returns the value of the first key containing *target* (ignoring case),
    or ``''`` when no key matches.

    The original wrapped the scan in a ``while index < len(detail)`` loop
    that re-ran the identical full-dict scan ``len(detail)`` times on a
    miss — accidental O(n²) with no effect on the result. One pass suffices.

    :param detail: dict of header-text -> cell-text.
    :param target: substring to look for in the keys.
    """
    target_lower = target.lower()
    for name, value in detail.items():
        if target_lower in name.lower():
            return value
    return ''


async def put_data(text):
    """Extract one plant page's HTML into rows of the global ``data`` table.

    Reads the page title and, from the first two ``wikitable`` tables
    (project details and location), the Status / Commissioning / Nameplate /
    Technology / Location / Coordinates columns. One output row is appended
    per detail-table entry. Any parsing failure is logged and swallowed so
    a malformed page never aborts the crawl.
    """
    try:
        doc = etree.HTML(text)
        # Page title, e.g. the wind-farm name.
        title = doc.xpath('//*[@id="firstHeading"]/span/text()')[0]
        # First wikitable holds project details, the second one location info.
        tables = doc.xpath('//*[@id="mw-content-text"]//table[@class="wikitable"]/tbody')
        detail_rows = await tbody_handler(tables[0])
        location_rows = await tbody_handler(tables[1])

        for idx, detail_row in enumerate(detail_rows):
            # An IndexError here (fewer location rows than detail rows) is
            # handled by the except below, just like any other parse failure.
            location_row = location_rows[idx]
            row = [title]
            for field in ('Status', 'Commissioning', 'Nameplate', 'Technology'):
                row.append(await get_value(detail_row, field))
            for field in ('Location', 'Coordinates'):
                row.append(await get_value(location_row, field))
            print(f'存入数据：【{row}】')
            data.append(row)
    except Exception as e:
        print(f'数据处理异常{e}')


async def detail_handler(detail_href):
    """Fetch one detail page and hand its HTML to ``put_data``.

    :param detail_href: site-relative path (e.g. ``/Some_wind_farm``)
        appended to ``root_url``.
    """
    detail_url = f"{root_url}{detail_href}"
    print(f'开始获取详情：{detail_url}')
    async with aiohttp.ClientSession() as session:
        # session.get(...) is itself an async context manager; the original's
        # extra `await` before `async with` was redundant.
        async with session.get(url=detail_url, headers=header) as response:
            detail_response = await response.text(encoding='utf-8')
            try:
                await put_data(detail_response)
            except Exception as e:
                # Best-effort: log and continue so one bad page doesn't kill the task.
                print(f'【{detail_url}】详情处理异常{e}')


def list_request(category_url):
    """Fetch a category listing page, retrying up to 60 times on errors.

    The first attempt returns whatever body the server sends; after a
    request exception, retries sleep between attempts and only accept an
    HTTP 200 response.

    :param category_url: site-relative path appended to ``root_url``.
    :return: decoded HTML text, or ``None`` when every retry failed.
    """
    list_url = f"{root_url}{category_url}"
    print(f'请求列表url:{list_url}')
    try:
        response = requests.get(url=list_url, headers=header)
        response.encoding = 'UTF-8'
        return response.text
    except Exception as e:
        print(f'【列表】请求出现异常：{e},开始重试')
        sleep_seconds = random.randint(1, 3)
        print(f"线程开始休眠 {sleep_seconds} 秒")
        time.sleep(sleep_seconds)
        print('线程休眠结束，开始执行')
        # Bounded retry loop replaces the original while/flag/counter machinery.
        for continue_num in range(1, 61):
            print(f'【列表】正在重新调用接口，重试次数：{continue_num}')
            try:
                response = requests.get(url=list_url, headers=header)
                if response.status_code == 200:
                    response.encoding = 'UTF-8'
                    return response.text
            except Exception:
                # Narrowed from the original bare `except:` which would also
                # have swallowed KeyboardInterrupt/SystemExit.
                time.sleep(sleep_seconds)
        # All retries exhausted; the original fell off the end implicitly.
        return None


"""
获取列表
"""


def cattegoryList(category_url, page_size):
    print(f'正在请求第{page_size}页>>>>>>>>>>>>>>>>')

    response = list_request(category_url)
    et_list = etree.HTML(response)
    li_list = et_list.xpath('//*[@id="mw-pages"]//li')
    task = []
    for li in li_list:
        try:
            detail_href = li.xpath('a/@href')[0]
            print(f'请求：{detail_href}')
            task.append(asyncio.ensure_future(detail_handler(detail_href)))
        except Exception as e:
            print(f'请求{detail_href}出现异常：{e}')
            pass
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.wait(task))
    next_page_flag = 'next page'
    next_page = et_list.xpath('//*[@id="mw-pages"]/a/text()')
    if next_page_flag in next_page:
        href_list = et_list.xpath('//*[@id="mw-pages"]/a')
        for href in href_list:
            href_text = href.xpath('text()')[0]
            if href_text == next_page_flag:
                next_href = href.xpath('@href')[0]
                print(f'开始查询下一页：{next_href}')
                page_size += 1
                cattegoryList(next_href, page_size)
                break


if __name__ == '__main__':
    # Entry point: crawl every listing page of the Wind_farms category,
    # then dump the accumulated rows to CSV.
    category_url = "/Category:Wind_farms"
    cattegoryList(category_url, page_size)
    # Leftover from an earlier solar-farm run (writes the unused data_page list):
    # filename = 'solar_farm_pages.csv'
    # with open(filename, 'w', encoding='utf-8', newline='') as file:
    #     writer = csv.writer(file)
    #     writer.writerows(data_page)
    # Save the scraped table; the first row of `data` is the CSV header.
    filename = 'wind_farms.csv'
    with open(filename, 'w', encoding='utf-8', newline='') as file:
        writer = csv.writer(file)
        writer.writerows(data)
