import csv

import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np


class CPU:
    """Plain record holding the scraped specification fields of one CPU product."""

    def __init__(self, Name, PID, Platform, Price, CoreNumbers, Frequency, Power, Slot, Performance, Architecture,
                 Process, RAMType):
        # Bind every constructor argument to the attribute of the same name.
        fields = (('Name', Name), ('PID', PID), ('Platform', Platform), ('Price', Price),
                  ('CoreNumbers', CoreNumbers), ('Frequency', Frequency), ('Power', Power),
                  ('Slot', Slot), ('Performance', Performance), ('Architecture', Architecture),
                  ('Process', Process), ('RAMType', RAMType))
        for attr, value in fields:
            setattr(self, attr, value)


def slove(begin, after):
    """Return *after* when *begin* is a recognised spec label, else None.

    The original built a dict that mapped every known label to the same
    value; a set membership test states the same contract directly.
    """
    known_labels = {
        '适用类型',
        '核心数量',
        'CPU主频',
        '热设计功耗(TDP)',
        '插槽类型',
        'CPU架构',
        '制作工艺',
        '内存类型',
    }
    return after if begin in known_labels else None


def tranToCpu(begin, after, cpu):
    """Store the scraped value *after* on the matching attribute of *cpu*.

    BUG FIX: the original compared *begin* to string literals with ``is``
    (object identity). Strings scraped from HTML are not interned, so every
    comparison was False and no field was ever assigned; ``dict.get`` uses
    equality and restores the intended behaviour. The TDP key is also made
    consistent with the '热设计功耗(TDP)' label that slove() recognises
    (the original's bare '热设计功耗' could never produce a value).
    Unknown labels are silently ignored, as before.
    """
    field_by_label = {
        '适用类型': 'Platform',
        '核心数量': 'CoreNumbers',
        'CPU主频': 'Frequency',
        '热设计功耗(TDP)': 'Power',
        '插槽类型': 'Slot',
        'Performance': 'Performance',
        'CPU架构': 'Architecture',
        '制作工艺': 'Process',
        '内存类型': 'RAMType',
    }
    field = field_by_label.get(begin)
    if field is not None:
        setattr(cpu, field, slove(begin, after))


def get_html(url):
    """Fetch *url* with a browser-like User-Agent and return the HTML text.

    Returns None (after printing a failure notice) when the request raises
    a network error or the server answers with a non-200 status. A timeout
    is set so a stalled connection cannot hang the scraper forever.
    """
    # 模拟浏览器访问 (mimic a real browser so the site serves the page)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.54',
        'accept-language': 'zh-CN,zh;q=0.9'
    }
    print("--> 正在获取网站信息")
    try:
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException:
        # Network failure (DNS, connection, timeout, ...) — report and bail out.
        print("获取网站信息失败！")
        return None
    if response.status_code == 200:
        return response.text  # page source on success
    print("获取网站信息失败！")
    return None  # explicit None instead of the original implicit fall-through


if __name__ == '__main__':
    # Context manager guarantees the CSV is flushed and closed even on error
    # (the original never closed csv_file).
    with open('CPUInfo.csv', 'w', newline='', encoding='utf-8') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(
            ['Name', 'PID', 'Platform', 'Price', 'CoreNumbers', 'Frequency', 'Power', 'Slot', 'Performance',
             'Architecture', 'Process', 'RAMType'])

        # Product ids, picture paths, names and prices collected earlier.
        data = pd.read_csv("../IDs/CPU.csv")
        ids = np.array(data["ID"])
        picPath = np.array(data['PicPath'])
        names = np.array(data['Name'])
        prices = np.array(data['Price'])

        for i in range(len(ids)):
            search_url = 'https://detail.zol.com.cn/1428/' + str(ids[i]) + '/param.shtml'
            html = get_html(search_url)
            if html is None:
                # get_html returns None on failure; skip instead of crashing
                # BeautifulSoup with a None document.
                continue
            soup = BeautifulSoup(html, 'lxml')
            beginList = soup.find_all('th')
            afterList = soup.find_all('td', class_='hover-edit-param')
            cpu = CPU('null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null', 'null')
            cpu.Name = names[i]
            cpu.PID = picPath[i]
            cpu.Price = prices[i]
            # BUG FIX: the inner loop reused `i`, clobbering the outer index;
            # zip also tolerates a th/td count mismatch without IndexError.
            for th, td in zip(beginList, afterList):
                tranToCpu(th.span.get_text(), td.span.get_text(), cpu)
            # BUG FIX: the original built `cpu` but never wrote it out, so the
            # CSV contained only the header row.
            writer.writerow([cpu.Name, cpu.PID, cpu.Platform, cpu.Price, cpu.CoreNumbers, cpu.Frequency,
                             cpu.Power, cpu.Slot, cpu.Performance, cpu.Architecture, cpu.Process, cpu.RAMType])
