import json
import scrapy
import testPro.utils.RegularExpression as RE
from testPro.items import AliItem,AliQuota,AliQuotaMap


class AliSpider(scrapy.Spider):
    """Scrape Aliyun instance-family pages and yield one AliItem per family.

    URLs, XPath expressions and regex patterns come from conf/config.json
    (keyed by this spider's ``name``); static per-family overrides come
    from conf/data.json.
    """
    name = 'ali-family'
    allowed_domains = ['aliyun.com']

    def __init__(self, category=None, *args, **kwargs):
        super(AliSpider, self).__init__(*args, **kwargs)
        # Paths are relative to the scrapy project root (the cwd scrapy runs in).
        # Explicit UTF-8: the config files contain Chinese text.
        with open("./testPro/conf/config.json", 'r', encoding='utf-8') as load_f1:
            self.config = json.load(load_f1)
        with open("./testPro/conf/data.json", 'r', encoding='utf-8') as load_f2:
            self.table = json.load(load_f2)
        self.start_urls = self.config[self.name]["urlList"]

    def parse(self, response):
        for section in response.xpath(self.config[self.name]["XPATH-section"]):
            # Use the section title to decide whether this section describes
            # an instance family; sections without one are skipped.
            title = section.xpath(self.config[self.name]["XPATH-title"]).extract()
            family = RE.find_unchinese(title[0].strip())
            if not family:
                continue

            # One item corresponds to one row of the output excel sheet.
            item = AliItem()
            for key in ("Family", "CPU", "ClockSpeed", "Proportion", "Usescenes"):
                item[key] = '-'
            # Family name with Chinese punctuation stripped, lower-cased.
            item["Family"] = RE.subLowwer(RE.find_unchinesesin(family, temp=r'[（），。]'))

            # Full section text with spaces and redundant newlines removed.
            text = section.xpath('string(.)').extract()[0].replace(' ', '')
            text = RE.emplace_enter(text)

            # 1. CPU model: extract, then strip clock-speed fragments and
            # Chinese separators.
            item["CPU"] = RE.find_target(text, self.config[self.name]["RE-CPU"], True)
            item['CPU'] = RE.del_target(item['CPU'], r'-?\d+\.?\d*e?-?\d*GHz?')
            item['CPU'] = RE.del_target(item['CPU'], r'，')
            item['CPU'] = RE.del_target(item['CPU'], r'、')

            # 2. Clock speed: normalise the CPU name via CPUMap, then prefer
            # the static override from data.json, else parse it from the text.
            for cpu_key in self.table["CPUMap"]:
                if cpu_key in item["CPU"]:
                    item["CPU"] = self.table["CPUMap"][cpu_key] + ' ' + RE.find_CPUnum(item["CPU"])

            if item["Family"] in self.table[self.name]:
                item["CPU"] = self.table[self.name][item["Family"]]["CPU"]
                item["ClockSpeed"] = self.table[self.name][item["Family"]]["ClockSpeed"]
            else:
                item["ClockSpeed"] = RE.fing_ClockSpeed(item["CPU"])

            # 3. CPU/memory ratio.
            memory = RE.find_target(text, self.config[self.name]["RE-memory"][0], True) \
                + RE.find_target(text, self.config[self.name]["RE-memory"][1], True)
            if memory == "--":
                # NOTE(review): this searches the "--" placeholder itself, not
                # the section text — looks like it should be `text`; confirm
                # against the intended RE-memory[2] pattern before changing.
                memory = RE.find_target(memory, self.config[self.name]["RE-memory"][2], True)
            memory = RE.emplace_enter(memory)
            item["Proportion"] = RE.match_target(memory, r'\d[:]\d')

            # 4. Use scenes.
            usescenes = RE.find_target(text, self.config[self.name]["RE-usescenes"])
            item["Usescenes"] = RE.cutTails(usescenes)

            # The combined 'xn4、n4、mn4、e4' section is skipped here and
            # emitted as four separate static items below.
            if 'xn4、n4、mn4、e4' in item['Family']:
                continue
            yield item

        # Families whose data is not scraped from the page: emit the static
        # values recorded in data.json.
        for family in ('xn4', 'n4', 'mn4', 'e4'):
            item = AliItem()
            item["Family"] = family
            static = self.table[self.name][family]
            item["CPU"] = static["CPU"]
            item["ClockSpeed"] = static["ClockSpeed"]
            item["Proportion"] = static["Proportion"]
            item["Usescenes"] = static["Usescenes"]
            yield item



class AliQuotaSpider(scrapy.Spider):
    """Scrape Aliyun instance-detail tables and yield one AliQuota per row.

    Table header texts are mapped onto AliQuota field names through
    AliQuotaMap; cells whose header has no mapping land in ``Remarks``.
    """
    name = 'ali-family-details'
    allowed_domains = ['aliyun.com']

    def __init__(self, category=None, *args, **kwargs):
        super(AliQuotaSpider, self).__init__(*args, **kwargs)
        # Explicit UTF-8: the config files contain Chinese text.
        with open("./testPro/conf/config.json", 'r', encoding='utf-8') as load_f1:
            self.config = json.load(load_f1)
        with open("./testPro/conf/data.json", 'r', encoding='utf-8') as load_f2:
            self.table = json.load(load_f2)
        self.start_urls = self.config[self.name]["urlList"]

    def parse(self, response):
        for table in response.xpath(self.config[self.name]["XPATH-section"] + "/div/table"):
            # Header cell texts, aligned by position with each row's <td> cells.
            headers = table.xpath("thead/tr/th/text()").extract()

            for row in table.xpath("tbody/tr"):
                item = AliQuota()
                # Default every mapped field so columns missing from this
                # table still carry a '-' placeholder.
                for field in AliQuotaMap.values():
                    item[field] = '-'

                for index, content in enumerate(row.xpath("td/text()").extract()):
                    try:
                        if headers[index] in AliQuotaMap:
                            item[AliQuotaMap[headers[index]]] = RE.find_details_translator(content)
                        else:
                            item["Remarks"] = content
                    except (IndexError, KeyError):
                        # Cell without a matching header, or a mapped field the
                        # item does not declare: skip the cell, keep the row.
                        # (Was a bare `except:` that swallowed everything and
                        # printed a blank line.)
                        continue
                yield item
