# -*- coding: utf-8 -*-
from selenium import webdriver
import scrapy
import json
import time



class JdSpider(scrapy.Spider):
    """Spider that scrapes web-template links from elements.envato.com.

    A real Chrome browser (Selenium) is driven to perform the sign-in,
    reusing cookies previously exported to ``elements.json``; the Scrapy
    response is only used to extract the per-template links.

    NOTE(review): the cookie file is read at class-definition (import)
    time, so ``elements.json`` must exist in the working directory before
    this module is imported.
    """

    name = 'abcd'
    allowed_domains = ['elements.envato.com']
    start_urls = ['https://elements.envato.com/web-templates']

    base_url = 'https://elements.envato.com'
    base_page = 'https://elements.envato.com/web-templates/pg-'
    totalPages = 50   # total listing pages (used by the disabled pagination code)
    currentPage = 2   # first "next" page number

    # Load the saved login cookies exported by a previous browser session.
    with open('elements.json', 'r', encoding='utf-8') as f:
        listCookies = json.loads(f.read())
    # Build a "name=value; name=value" Cookie header from the cookie dicts.
    cookie = [item["name"] + "=" + item["value"] for item in listCookies]
    cookiestr = '; '.join(cookie)

    headers = {
        'cookie': cookiestr,
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
    }

    def parse(self, response):
        """Log in via Selenium, then extract each template link on the page.

        :param response: the Scrapy response for a web-templates listing page.
        """
        login_url = 'https://elements.envato.com/sign-in'
        down_url = 'https://elements.envato.com/trudel-moving-business-html-template-5K4KSN'

        # SECURITY: credentials hard-coded in source — move them to Scrapy
        # settings or environment variables before sharing this file.
        acc = 'xiaoshan'
        pas = 'ZuO6OHY)g'

        browser = webdriver.Chrome()
        browser.get(down_url)

        # Replay the saved cookies into the live browser session.
        # Work on copies so the shared class-level ``listCookies`` is never
        # mutated (Selenium rejects the 'expiry' key on add_cookie).
        for saved in self.listCookies:
            ck = dict(saved)
            ck.pop('expiry', None)
            browser.add_cookie(ck)

        browser.get(login_url)
        time.sleep(3)

        # Fill in the username.
        account = browser.find_element_by_id('signInUsername')
        account.clear()
        account.send_keys(acc)
        time.sleep(3)
        # Fill in the password.
        password = browser.find_element_by_id('signInPassword')
        password.clear()
        password.send_keys(pas)
        time.sleep(3)
        # Locate the sign-in button.
        login = browser.find_elements_by_xpath('//*[@id="app"]/div[1]/main/div/div/div[2]/div/div/div/form/button')[0]

        # Long pause before clicking — presumably to let a human solve a
        # captcha / verification step in the open browser window. TODO confirm.
        time.sleep(80)

        login.click()

        # Parse the listing page: one <li> per template card.
        li_list = response.xpath('/html/body/div[2]/div[1]/main/div/div/section/div[2]/div[2]/div/div[1]/ul/li')

        # Extract the link of every template (XPath positions are 1-based).
        for i in range(1, len(li_list) + 1):
            data = {}
            data['模版名字'] = response.xpath('/html/body/div[2]/div[1]/main/div/div/section/div[2]/div[2]/div/div[1]/ul/li[%s]/div/a/@href' % (i)).extract_first()
            new_url = self.base_url + data['模版名字']

            # NOTE(review): this JS looks broken — querySelector() returns a
            # single element, so indexing it with [1]/[2] yields undefined and
            # .querySelector on it will throw. Likely querySelectorAll was
            # intended; verify against the live DOM before relying on it.
            js = "document.getElementById('app').querySelector('div')[1].querySelector('main').querySelector('div').querySelector('div').querySelector('div')[2].querySelector('div').querySelector('div')[2].querySelector('button').click()"
            browser.execute_script(js)

            # yield scrapy.Request(url=new_url, headers=self.headers,
            #                      callback=self.memary, dont_filter=True)

