# -*- coding: utf-8 -*-
import datetime
from urllib.parse import urlencode, urlparse
import os
import requests
import scrapy
import re
from pyquery import PyQuery as pq
from digkey_icnet.items import PartsDetailItem
from digkey_icnet.items import PartsDollarItem
from digkey_icnet.items import PartsRMBItem
from digkey_icnet.items import DollorRateItem
from digkey_icnet.items import CompanyinfoItem
from digkey_icnet.items import ICnettotalItem

from selenium import webdriver
from selenium.common.exceptions import TimeoutException,NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.proxy import Proxy
from selenium.webdriver.common.proxy import ProxyType

from scrapy_redis.spiders import RedisSpider

class IrdaDataSpider(scrapy.Spider):
    """Scrape part data from digikey.com.cn and cross-search ic.net.cn.

    Pipeline:
      1. ``parse_class1``  - top-level Digikey category listing; creates one
         local archive directory per category.
      2. ``parse_class2``  - second-level category pages; follows each part
         link and the pager.
      3. ``parse_parts_detail`` - part detail pages; archives the raw HTML,
         yields a ``PartsDetailItem`` plus one ``PartsDollarItem`` per price
         tier, and can seed an ic.net.cn search.
      4. ``ICnet_parse``   - ic.net.cn search results rendered via PhantomJS;
         yields ``ICnettotalItem`` and ``CompanyinfoItem`` supplier rows.
    """
    name = 'ICdata'
    allowed_domains = ['www.digikey.com.cn','www.ic.net.cn','www.digikey.cn']
    #start_urls = ['https://www.digikey.com.cn/products/zh/']

    # Root directory where fetched detail pages are archived as HTML.
    path="/digkey_html"

    # PhantomJS configuration.
    # NOTE(review): PhantomJS support was removed from newer Selenium
    # releases -- confirm the pinned selenium version still provides
    # webdriver.PhantomJS before upgrading.
    SERVICE_ARGS = ['--load-images=false', '--disk-cache-path=false']
    browser = webdriver.PhantomJS(service_args=SERVICE_ARGS)
    proxy = webdriver.Proxy()
    proxy.proxy_type = ProxyType.MANUAL
    wait_timeout = 10

    # Digikey URL templates.
    #   {query} - next-page path+query scraped from the pager
    #   {cate}  - category path;  {parts} - part-detail path
    digikey_url = "https://www.digikey.com.cn{query}"
    digikey_url_1="https://www.digikey.com.cn/products/zh/"
    digikey_url_2="https://www.digikey.com.cn{cate}"
    digikey_url_detail="https://www.digikey.com.cn{parts}"
    digikey_url_detail1 = "https://www.digikey.cn{parts}"

    # ic.net.cn search template.
    #   {part_number} - pre-urlencoded "key=<part>" pair
    #   {page}        - 1-based result page number
    search_url="http://www.ic.net.cn/search.php?IC_Method=icsearch&{part_number}&isExact=0&mfg=&pack=&dc=&qty=&searchAreaCode=0&stockDate=90&stockType=0&page={page}&sort="

    digi_header={
      'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
      'Accept-Language': 'zh-CN,zh;q=0.9',
      'Cache-Control': 'max-age=0',
      'Connection': 'keep-alive',
      'Host': 'www.digikey.com.cn',
      'Upgrade-Insecure-Requests': '1',
      'Referer':'https://www.digikey.com.cn/products/zh',
      'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
    }
    # Session cookies captured from a live browser session.
    # NOTE(review): several values (TS*, utag_main, sc_ext_session) look
    # session-bound and will expire -- confirm they are still required.
    cookies={
        '_ga': 'GA1.2.411105391.1520390241',
        '_msuuid_6fzke3kdm0': '7134E5EA-8BBE-47F2-998D-2C0299CEE3DB',
        'sc_ext_contact': 'd46976ab3c1541b9959d93f4f57f9f68|True',
        '_gid': 'GA1.2.152676448.1520598872',
        'csscxt': '1439631882.20480.0000',
        'dtCookie': '374136F1E984D034434C430900D9F023|X2RlZmF1bHR8MQ',
        'TS01d64338': '01460246b6c4a8ae6433bf44eb9faa3812f6f90ecaba632790d86dc3d9482395dc9cf60a73cfefda64b276fe4684aa01fb1cee0847',
        'TS01f6a51d': '01460246b6c23dbc87027b2a8ebc8cb2d5d151c8d7a6784c3dee34031da33510a41b2d67a8f665d64e9e0054e99c167973c06d0b19',
        'i10c.sid': '1520660650681',
        'sc_ext_session': 'gsso3mgdlu1hyxo3oi22ojwo',
        #'clientipaddr': '61.187.92.238',
        'TS017613a9': '01460246b6ebb7c5ce13855b107435ec05b4c9751621c62a12a6bd93fc9e41fe2b1ea65368264562fb905e53bebda9fcb1e7917d5d',
        'TS018060f7': '01460246b634b02a5e7b22c87cf27b361fa776716b8c3388d112d4dfc73e07c0da4573f998398b8e35ce9bce088c1541e8ec18b5f0',
        'TS01d5128f': '01460246b69d0c586b43d39d95c24df0711383deac034636f59174b76c3f83f6bf698cc4ec60cb64a23950b651ac80041a801f4e9b',
        'utag_main': 'v_id:0161fe51f23e00195b5cd9c1cec504072006e06a00978$_sn:9$_ss:0$_st:1520663191744$ses_id:1520660651068%3Bexp-session$_pn:5%3Bexp-session',
        '_gat_Production': '1',
        'dtPC': '-',
        'dtLatC': '377',
        #'dtCookie': '374136F1E984D034434C430900D9F023|X2RlZmF1bHR8MQ',
        'dtSa': 'true%7CKD116%7C-1%7CPage%3A%206134689%7C-%7C1520661408026%7C261391328_398%7Chttps%3A%2F%2Fwww.digikey.cn%2Fproduct-detail%2Fzh%2Fmaxim-integrated%2FMAX30105EFD-%2FMAX30105EFD-ND%2F6134689%7CMAX30105EFD%2B%20Maxim%20Integrated%20%5Ep%20%E4%BC%A0%E6%84%9F%E5%99%A8%EF%BC%8C%E5%8F%98%E9%80%81%E5%99%A8%20%5Ep%20DigiKey%7C1520661397384%7C',
    }

    def start_requests(self):
        """Entry point; all seed requests are currently commented out.

        Representative seeds kept for reference:
        #yield scrapy.Request(self.digikey_url_1,headers=self.digi_header,callback=self.parse_class1)
        #url="https://www.digikey.com.cn/products/zh/%E4%BC%A0%E6%84%9F%E5%99%A8-%E5%8F%98%E9%80%81%E5%99%A8/%E4%B8%93%E7%94%A8%E4%BC%A0%E6%84%9F%E5%99%A8/561"
        #yield scrapy.Request(url, callback=self.parse_class2)
        #url="https://www.digikey.com.cn/product-detail/zh/maxim-integrated/MAX86141ENP-T/MAX86141ENP-TTR-ND/7804058"
        #yield scrapy.Request(url, headers=self.digi_header,callback=self.parse_parts_detail)
        # part="S87C751-1DB"
        # part_number = urlencode({"key": part})
        # page=1
        # yield scrapy.Request(self.search_url.format(part_number=part_number,page=page),meta={"partNumber":part,"page":page},callback=self.ICnet_parse)
        """
        # Fix: with every yield commented out this method used to return
        # None, which crashes Scrapy (start_requests must return an
        # iterable).  ``yield from ()`` keeps it a valid empty generator.
        yield from ()

    def parse_class1(self,response):
        """Collect every top-level Digikey category URL and create one local
        archive directory per category, named "<family>#<id>"."""
        cates = response.css(".catfiltertopitem .catfiltersub .catfilterlink").xpath("@href").extract()
        if not os.path.exists(self.path):
            os.mkdir(self.path)

        for cate in cates:
            # Category href looks like ".../<family>/<id>"; use the last two
            # path segments as the directory name.
            s = cate.split('/')
            fname = str(s[-3]) + "#" + str(s[-2])
            path = self.path.strip() + "/" + fname
            path = re.sub(" ", "", path)  # strip embedded spaces

            if not os.path.exists(path):
                os.mkdir(path)
                print(path)
            else:
                print("目录已经存在")
            #yield scrapy.Request(self.digikey_url_2.format(cate=cate),headers=self.digi_header, callback=self.parse_class2)

    def parse_class2(self,response):
        """Follow every part link on a second-level category page, then the
        pager's "next" link (if any) back into this callback."""
        # Part-detail links on the current page.
        cate2=response.css("#digikeyPartNumberLnk").xpath('@href').extract()
        for cate in cate2:
            yield scrapy.Request(self.digikey_url_detail.format(parts=cate),headers=self.digi_header,cookies=self.cookies,callback=self.parse_parts_detail)

        # Next page of results.
        if response.css('#paginationBtnNext').xpath("@href"):
            query=response.css('#paginationBtnNext').xpath("@href").extract_first()
            yield scrapy.Request(self.digikey_url.format(query=query),headers=self.digi_header,callback=self.parse_class2)

    def parse_parts_detail(self,response):
        """Parse a Digikey part-detail page.

        Yields a ``PartsDetailItem`` plus one ``PartsDollarItem`` per price
        tier.  Empty pages are retried on the .cn mirror; pages whose URL
        and on-page part number disagree are archived and refetched.  An
        ``AttributeError``/``TypeError`` (regex or xpath came back empty,
        i.e. a mangled page) triggers an unfiltered refetch.
        """
        try:
            # Part number embedded in the URL vs. the one on the page.
            djpartNumber1 = urlparse(response.url).path.split('/')[-2]
            djpartNumber = response.xpath('//td[@id="PartNumber"]/text()').extract_first()
            if djpartNumber is None:
                # Empty page: retry against the .cn domain.
                print("*****空页面尝试.cn域名访问******")
                yield scrapy.Request(self.digikey_url_detail1.format(parts=urlparse(response.url).path),
                                     headers=self.digi_header, cookies=self.cookies, dont_filter=True,
                                     callback=self.parse_parts_detail)
            else:
                # Compare URL part number with on-page part number,
                # ignoring '+'/'-' separators.  (Fix: re.S was previously
                # passed as re.sub's positional *count* argument.)
                va1 = re.sub(r"\+|-", "", djpartNumber1)
                va2 = re.sub(r"\+|-", "", djpartNumber)
                fpath = response.xpath('//*[@id="familyLnk"]/@href').extract_first()
                if va1 != va2:
                    # Mismatch: archive what we got, then refetch the URL.
                    print("*****域名和部件名不匹配，重新访问******",djpartNumber1,djpartNumber)
                    self.save_html(fpath,djpartNumber,response.text)
                    yield scrapy.Request(response.url, headers=self.digi_header, cookies=self.cookies, dont_filter=True,
                                         callback=self.parse_parts_detail)
                else:
                    # Normal page: archive and extract.
                    print("***正常获取页面***")
                    self.save_html(fpath, djpartNumber, response.text)
                    partNumber = response.xpath('//meta[@itemprop="name"]/@content').extract_first()
                    manufacturer = response.xpath('//span[@itemprop="name"]/text()').extract_first()
                    # The "封装/外壳" (package/case) cell arrives as HTML
                    # numeric entities; decode via unicodetoascii.
                    encapsulations = re.search(r'&#23553;&#35013;/&#22806;&#22771;[\s\S]*?<td>([\s\S]*?)</td>', response.text,
                                               re.S)
                    if encapsulations:
                        encapsulation = self.unicodetoascii(encapsulations.group(1))
                    else:
                        encapsulation = ""
                    class1 = response.xpath('//*[@id="categoryLnk"]/text()').extract_first()
                    class2 = response.xpath('//*[@id="familyLnk"]/text()').extract_first()
                    img_url = response.xpath('//img[@itemprop="image"]/@src').extract_first()
                    parts_detail_item = PartsDetailItem()
                    parts_detail_item['Part_Number'] = partNumber
                    parts_detail_item['Manufacturer'] = manufacturer
                    parts_detail_item['Encapsulation'] = encapsulation
                    parts_detail_item['class1'] = class1
                    parts_detail_item['class2'] = class2
                    parts_detail_item['img_url'] = img_url
                    parts_detail_item['url'] = response.url
                    parts_detail_item['dj_Part_Number']= djpartNumber
                    yield parts_detail_item

                    # Price-tier table: one row per (quantity, unit price).
                    # Each tier's upper bound is the next tier's quantity
                    # minus one; the last tier is open-ended (999999).
                    prices = re.search(r'总价([\s\S]*?)</table>', response.text, re.S)
                    prices = re.sub(',', '', prices.group(1))
                    price = re.findall(r"center'>([\s\S]*?)</td>[\s\S]*?right'>([\s\S]*?)</td>[\s\S]*?</tr>", prices)
                    i = 0
                    for price_item in price:
                        parts_dollar_item = PartsDollarItem()
                        i += 1
                        parts_dollar_item['dj_Part_Number'] = djpartNumber
                        parts_dollar_item['Part_Number'] = partNumber
                        parts_dollar_item['Number'] = price_item[0]
                        parts_dollar_item['Num_Low'] = price_item[0]
                        if len(price) > i:
                            parts_dollar_item['Num_Higher'] = str(int(price[i][0]) - 1)
                        else:
                            parts_dollar_item['Num_Higher'] = "999999"
                        parts_dollar_item['Unit_Price_Dollar'] = price_item[1]
                        parts_dollar_item['Grab_Time'] = self.now()
                        yield parts_dollar_item
                    part_number = urlencode({"key": partNumber})
                    # Seed the ic.net.cn search, starting at page 1.
                    #yield scrapy.Request(self.search_url.format(part_number=part_number,page=1),meta={"partNumber":partNumber, 'page':1,'proxy':None},callback=self.ICnet_parse)
        except AttributeError:
            # A regex .group() on None: page was mangled -- refetch.
            yield scrapy.Request(response.url, headers=self.digi_header, cookies=self.cookies, dont_filter=True,
                                  callback=self.parse_parts_detail)
        except TypeError:
            # An xpath extract came back None where a str was expected.
            yield scrapy.Request(response.url, headers=self.digi_header, cookies=self.cookies, dont_filter=True,
                                  callback=self.parse_parts_detail)

    def ICnet_parse(self,response):
        """Render an ic.net.cn search-result page with PhantomJS and yield
        supplier data.

        Expects ``response.meta`` to carry ``partNumber``, ``page`` and
        ``proxy`` (an "http://host:port" string).  On page 1 also yields an
        ``ICnettotalItem`` summary; then one ``CompanyinfoItem`` per
        supplier row, and follows the pager.  Timeouts and missing elements
        trigger an unfiltered retry of the same URL.
        """
        Parts_Number=response.meta['partNumber']
        page = int(response.meta['page'])
        try:
            # Route PhantomJS through the per-request proxy, then load the
            # page in the shared browser instance.
            self.proxy.http_proxy =re.sub(r"http://","",response.meta['proxy'])
            self.proxy.add_to_capabilities(webdriver.DesiredCapabilities.PHANTOMJS)
            self.browser.start_session(webdriver.DesiredCapabilities.PHANTOMJS)
            wait = WebDriverWait(self.browser, self.wait_timeout)
            self.browser.implicitly_wait(self.wait_timeout)
            self.browser.set_page_load_timeout(self.wait_timeout)
            self.browser.get(response.url)
            wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, ".right_results"))
            )
            print("****执行完毕****")
            html=self.browser.page_source
            doc = pq(html)
            if page == 1:
                # First page: emit the per-part summary (supplier counts and
                # total result pages).
                ICtotal_Item=ICnettotalItem()
                iccp = response.css(".result_icons .iccp")
                sscp = response.css(".result_icons .sscp")
                ICCP_SSCP_total=len(iccp)+len(sscp)
                if doc(".pagepicker:first li"):
                    page_total = len(doc(".pagepicker:first li"))
                elif doc("#searchResult_empty"):
                    page_total = 0
                else:
                    page_total = 1

                ICtotal_Item['Parts_Number']=Parts_Number
                ICtotal_Item['ICCP_SSCP_total']=ICCP_SSCP_total
                ICtotal_Item['page_total'] = page_total
                yield ICtotal_Item

            # One CompanyinfoItem per supplier row (skip the header row).
            for company_items in doc('.stair_tr:gt(0)').items():
                Company_item = CompanyinfoItem()
                Company_item['Name'] = company_items('.result_supply .result_goCompany').attr('title')
                Company_item['Part_Number'] = company_items('.product_number').text()
                # Supplier level is encoded in the icon's CSS class,
                # e.g. "iconvip3" -> "3".
                level = company_items('div.result_supply .result_icons a:first').attr('class')
                if level:
                    Company_item['Level']=re.sub("icon|vip", "",level)
                else:
                    Company_item['Level'] = ""
                Company_item['User_Date'] = company_items('div.result_supply .detailLayer .layer_mainContent .orangenumber').text()
                Company_item['Telephone'] = company_items('div.result_supply .detailLayer .layer_mainContent .layer_telNumber').text()
                Company_item['ContactMan'] = company_items('div.result_supply .detailLayer .layer_mainContent .layer_contactName').text()
                Company_item['Mobilephone'] = company_items('div.result_supply .detailLayer .layer_mainContent .layer_otherContentphone').text()
                Company_item['Facsimile'] = company_items('div.result_supply .detailLayer .layer_mainContent .layer_otherTitle_fax').next().text()
                # Strip non-breaking spaces from the two address fields.
                office_address = company_items('div.result_supply .detailLayer .layer_mainContent .company_address').text()
                if office_address:
                    Company_item['Office_address'] = re.sub("\xa0", "",office_address)
                else:
                    Company_item['Office_address'] = office_address

                address = company_items('div.result_supply .detailLayer .layer_mainContent .company_address2').text()
                if address:
                    Company_item['address'] = re.sub("\xa0", "",address)
                else:
                    Company_item['address'] = address

                Company_item['QQ'] = company_items('.result_askPrice a:first').attr('title')
                Company_item['Batch_Number'] = company_items('.result_batchNumber').attr('title')
                Company_item['Inventory'] = company_items('.result_totalNumber').text()
                Company_item['Pacakage'] = company_items('.result_pakaging').text()
                Company_item['Remarks'] = company_items('.result_prompt .result_explain').text()
                Company_item['Issue_Date'] = company_items('.result_date input').attr('value')
                Company_item['Grap_Date'] = self.now()
                if Company_item['Name']:
                    yield Company_item

            # Follow the pager.  The site answers 400 when cookies/headers
            # are sent with this request, so send neither.
            if doc('.pagepicker a').has_class('nextpage'):
                page+=1
                part_number = urlencode({"key": Parts_Number})
                yield scrapy.Request(self.search_url.format(part_number=part_number, page=page),meta={"partNumber": Parts_Number, "page": page},
                                     callback=self.ICnet_parse)
        except TimeoutException:
            # Render timed out or the page was empty: retry unfiltered.
            yield scrapy.Request(response.url,meta={"partNumber": Parts_Number,"page":page}, callback=self.ICnet_parse,dont_filter=True)
        except NoSuchElementException:
            yield scrapy.Request(response.url,meta={"partNumber": Parts_Number,"page":page}, callback=self.ICnet_parse,dont_filter=True)

    def save_html(self,fpath,djpartNumber,text):
        """Archive *text* under "<path>/<family>#<id>/<djpartNumber>.html".

        *fpath* is the family link href whose last two path segments name
        the category directory (which parse_class1 is expected to have
        created).  Existing files are never overwritten.
        """
        s = fpath.split('/')
        fname = str(s[-3]) + "#" + str(s[-2])  # "<family>#<id>"
        path = self.path.strip() + "/" + fname
        path = re.sub(" ", "", path)  # strip embedded spaces
        path = path + "/" + djpartNumber + ".html"
        if not os.path.exists(path):
            with open(path, "wb") as f:
                f.write(bytes(text,encoding="utf8"))
            print("save:"+path)
        else:
            print(path+"已经存在！")

    def get_proxy(self,port):
        """Fetch a random proxy ("host:port") from a local proxy-pool
        service, or None when the pool answers with its error page."""
        url="http://localhost:{port}/random"
        proxy = requests.get(url.format(port=port)).text
        doc = pq(proxy)
        if doc('.debugger'):
            # The pool returned its Flask debugger page: no proxy available.
            proxy = None
        else:
            proxy=str(proxy)
        print("***********using proxy**************:", proxy)
        return proxy

    def unicodetoascii(self, text):
        """Decode HTML numeric character references ("&#NNNN;") in *text*.

        Single-pass replacement of every entity with its character.  (The
        previous loop-based version passed ``re.S`` as ``re.sub``'s
        positional *count* argument, silently capping replacements at 16,
        and shadowed the ``list`` builtin.)
        """
        return re.sub(r'&#(\d+);', lambda m: chr(int(m.group(1))), text)

    def now(self):
        """Current local time as 'YYYY-MM-DD HH:MM:SS' (used for the
        Grab_Time / Grap_Date item fields)."""
        return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')








