import re

import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from tqdm import tqdm


#
# PhantomJS install notes (Linux):
# 1. Remove any old version:
#    apt purge phantomjs    or    sudo apt-get autoremove phantomjs
# 2. Download PhantomJS with wget:
#    wget https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-2.1.1-linux-x86_64.tar.bz2
# 3. Unpack the archive:
#    tar xvjf phantomjs-2.1.1-linux-x86_64.tar.bz2
# 4. Move the phantomjs binary into /usr/bin/:
#    sudo cp phantomjs-2.1.1-linux-x86_64/bin/phantomjs /usr/bin/


# 此函数用于加载网页，并返回无头浏览器全部渲染过的数据，即所见即所得
def get_url(url):
    driver.get(url)  # 加载网页
    driver.implicitly_wait(5)  # 隐式等待5秒钟，智能等待网页加载
    source = driver.page_source  # 获取网页信息
    return source


def get_info(source):
    """Parse a rendered sales-ranking page into a table of car rows.

    Each table row on the site has 7 ``<td>`` cells; the 7th (a link cell)
    is dropped.  The 6th cell holds a price range such as ``"10.00-15.00"``,
    which is split into separate min/max price columns.

    Parameters
    ----------
    source : str
        Fully rendered HTML, as returned by ``get_url``.

    Returns
    -------
    numpy.ndarray
        2-D string array with 7 columns per row: the first five original
        cells, then the minimum price and the maximum price.
    """
    soup = BeautifulSoup(source, "lxml")  # parse the rendered HTML with BS4
    cells = [td.get_text() for td in soup.select('table > tbody > tr > td')]

    rows = []
    for start in range(0, len(cells), 7):  # 7 cells per table row on the site
        row = cells[start:start + 6]       # keep 6 cells, skip the 7th (link)
        if len(row) < 6:
            continue  # guard against a trailing partial row
        price = row[-1]
        if "-" in price:
            # split only on the first dash so "10.00-15.00" -> ("10.00", "15.00")
            min_price, max_price = (p.strip() for p in price.split("-", 1))
        else:
            # single price or placeholder text: use the same value for both bounds
            min_price = max_price = price.strip()
        row[-1] = min_price
        row.append(max_price)
        rows.append(row)

    return np.array(rows)  # final scraped table


def links(month, province_dict):
    """Build the list of ranking-page URLs to scrape.

    For every month from 2022-06 up to *month* (inclusive) and every
    province, fetch page 1 of the ranking to read the total record count,
    then derive how many 50-row pages exist and emit one URL per page.

    Example page URL: https://xl.16888.com/city-2022-06-2-0-0-0-0-6-1.html

    Parameters
    ----------
    month : str
        Last month to include, formatted ``"YYYYMM"`` (e.g. ``"202404"``).
    province_dict : dict[str, str]
        Maps the site's numeric province code (as a string) to the
        province name.

    Returns
    -------
    list[tuple[str, str]]
        ``(page_url, province_name)`` pairs, one per result page.
    """
    start = "202206"
    end_year = int(month[:4])
    # Generate every month in the range, in chronological order.
    # (The original hand-written list skipped every May and listed
    # Jan-Apr after Dec; generating the range fixes both.)
    months = [
        f"{year}-{m:02d}"
        for year in range(2022, end_year + 1)
        for m in range(1, 13)
        if start <= f"{year}{m:02d}" <= month
    ]

    page_links = []
    for ym in months:
        for province_code, province_name in province_dict.items():
            # Visit page 1 of this month/province to discover the page count.
            first_page_url = f'https://xl.16888.com/city-{ym}-{province_code}-0-0-0-0-6-1.html'
            first_page_soup = BeautifulSoup(get_url(first_page_url), "lxml")

            # The pager text contains the total record count; 50 records per page.
            page_info = first_page_soup.select_one('div.xl-data-page-r > div > span').get_text()
            total_records = int(re.findall(r"\d+", page_info)[0])
            page_count = -(-total_records // 50)  # ceiling division

            print(page_count)
            for page in range(1, page_count + 1):
                page_url = f'https://xl.16888.com/city-{ym}-{province_code}-0-0-0-0-6-{page}.html'
                print(page_url)
                page_links.append((page_url, province_name))  # tag URL with its province
    return page_links


def get_ev_data(driver_path=r"D:\Develop\phantomjs-2.1.1-windows\bin\phantomjs",
                month="202404", province_dict=None):
    """Scrape monthly EV sales tables per province and dump them to CSV.

    Writes one ``./data/2206_2404新能源汽车总体销量数据_<province>.csv`` file
    per province, covering 2022-06 through *month*.

    Parameters
    ----------
    driver_path : str
        Path to the PhantomJS executable.
    month : str
        Last month to scrape, formatted ``"YYYYMM"``.
    province_dict : dict[str, str] | None
        Province code -> name mapping; defaults to all 31 provinces below.
    """
    # get_url() reads the module-level ``driver``; assigning a local here
    # (as the original did) leaves the global unset and breaks get_url().
    global driver
    if province_dict is None:
        province_dict = {
            "2": "北京",
            "3": "安徽",
            "4": "福建",
            "5": "甘肃",
            "6": "广东",
            "7": "广西",
            "8": "贵州",
            "9": "海南",
            "10": "河北",
            "11": "河南",
            "12": "黑龙江",
            "13": "湖北",
            "14": "湖南",
            "15": "吉林",
            "16": "江苏",
            "17": "江西",
            "18": "辽宁",
            "19": "内蒙古",
            "20": "宁夏",
            "21": "青海",
            "22": "山东",
            "23": "山西",
            "24": "陕西",
            "25": "上海",
            "26": "四川",
            "27": "天津",
            "28": "西藏",
            "29": "新疆",
            "30": "云南",
            "31": "浙江",
            "32": "重庆"
        }
    # Headless browser; see the selenium docs to swap in Firefox/Chrome.
    driver = webdriver.PhantomJS(executable_path=driver_path)
    try:
        url = links(month, province_dict)
        for province_name in province_dict.values():
            all_info_ = pd.DataFrame()  # accumulator for this province
            province_urls = [(u, p) for u, p in url if p == province_name]
            for page_url, _ in tqdm(province_urls):
                print(page_url)
                all_info = get_info(get_url(page_url))
                if len(all_info) == 0:
                    continue  # page had no table rows
                # The URL embeds the month as ".../city-YYYY-MM-...";
                # reformat it to "YYYY/MM" for the date column.
                m = re.search(r"city-(\d{4})-(\d{2})-", page_url)
                date_month = f"{m.group(1)}/{m.group(2)}"
                date = np.array([date_month] * len(all_info))
                all_info = pd.DataFrame(np.c_[all_info, date])
                all_info_ = pd.concat([all_info_, all_info])
            print(all_info_)
            all_info_.to_csv(f"./data/2206_2404新能源汽车总体销量数据_{province_name}.csv")
    finally:
        driver.quit()  # always release the PhantomJS process
    print("Finished")


if __name__ == '__main__':
    # Module-level assignment: get_url() reads this global ``driver``.
    # Headless browser; see the selenium docs to swap in Firefox/Chrome.
    driver = webdriver.PhantomJS(
        executable_path=r"D:\Develop\phantomjs-2.1.1-windows\bin\phantomjs"
    )
    month = '202404'  # last month to scrape (YYYYMM)
    province_dict = {
        "2": "北京",
        "3": "安徽",
        "4": "福建",
        "5": "甘肃",
        "6": "广东",
        "7": "广西",
        "8": "贵州",
        "9": "海南",
        "10": "河北",
        "11": "河南",
        "12": "黑龙江",
        "13": "湖北",
        "14": "湖南",
        "15": "吉林",
        "16": "江苏",
        "17": "江西",
        "18": "辽宁",
        "19": "内蒙古",
        "20": "宁夏",
        "21": "青海",
        "22": "山东",
        "23": "山西",
        "24": "陕西",
        "25": "上海",
        "26": "四川",
        "27": "天津",
        "28": "西藏",
        "29": "新疆",
        "30": "云南",
        "31": "浙江",
        "32": "重庆"
    }
    try:
        url = links(month, province_dict)
        for province_name in province_dict.values():
            all_info_ = pd.DataFrame()  # accumulator for this province
            province_urls = [(u, p) for u, p in url if p == province_name]
            for page_url, _ in tqdm(province_urls):
                print(page_url)
                source = get_url(page_url)
                all_info = get_info(source)
                # The URL embeds the month as ".../city-YYYY-MM-...";
                # reformat it to "YYYY/MM" for the date column.
                m = re.search(r"city-(\d{4})-(\d{2})-", page_url)
                date_month = f"{m.group(1)}/{m.group(2)}"
                date = np.array([date_month] * len(all_info))
                all_info = pd.DataFrame(np.c_[all_info, date])
                all_info_ = pd.concat([all_info_, all_info])
            print(all_info_)
            all_info_.to_csv(f"./data/2206_2404新能源汽车总体销量数据_{province_name}.csv")
        print("Finished")
    finally:
        driver.quit()  # always release the PhantomJS process

