import scrapy,sys,os

import json
# Make the project root importable (three directory levels up from this
# file) so the `temp.*` imports below resolve when the spider is run
# outside of Scrapy's normal project discovery.
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(path)

from temp.items import huaItem
from temp.utils.utils import Utils
class huaSpider(scrapy.Spider):
    """Spider for www.hua.com product listings.

    Walks a category listing page, follows every product card, and yields
    a ``huaItem`` carrying the title, sell/original price, per-section
    detail text, and the main/detail image URLs (under ``file_urls`` for
    the download pipeline).
    """
    # handle_httpstatus_list = [500]
    name = 'hua'
    allowed_domains = ['www.hua.com', 'img01.hua.com']
    offset = 1  # listing-page counter; pagination is currently disabled below
    orgPath = os.getcwd()
    # tarPath = "%s/torrent" % orgPath

    def __init__(self, offset=1, *args, **kwargs):
        # FIX: call the base initializer so Scrapy wires the crawler,
        # settings and logger onto this spider instance.
        super().__init__(*args, **kwargs)
        # self.start_urls = ['https://www.hua.com/aiqingxianhua/?r=0&pg=%d' % (offset)]
        self.start_urls = ['https://www.hua.com/businessFlower/kaiyehualan/']

    def parse(self, response):
        """Parse one listing page and schedule a request per product.

        FIX: removed a leftover debugging ``exit()`` that killed the
        process before any request could be yielded, stopped shadowing
        the ``list`` builtin, and replaced the bare ``except`` (which
        silently swallowed everything and printed ``11111111``) with a
        logged best-effort handler.
        """
        try:
            # Chocolate layout:
            # products = response.xpath("//body/div[@class='container']/div[@class='wrapper']/div[2]/ul/li")
            # Preserved-flower layout:
            # products = response.xpath("//body/section[@class='product-list']/div/ul/li")
            # Cake / fresh-flower layout (currently active):
            products = response.xpath("//body/div[@class='container']/div[@class='wrapper']/div[1]/div[@class='grid-wrapper']/div[@class='grid-item']")
            # Gift layout:
            # products = response.xpath("//body/div[@class='container']/div[@class='wrapper']/div[@class='product']/ul/li")
            for product in products:
                url = product.xpath(".//a/@href").extract_first()
                if not url:
                    # Card without a link — skip it instead of raising
                    # IndexError (the old extract()[0]) for the whole page.
                    continue
                yield scrapy.Request('https://www.hua.com%s' % (url), callback=self.down_parse)
        except Exception:
            # Best-effort: keep the crawl alive on a malformed page, but
            # record why instead of printing a magic number.
            self.logger.exception('listing page failed: %s', response.url)
        if self.offset < 8:
            self.offset += 1
            # Pagination intentionally disabled; re-enable by yielding:
            # yield scrapy.Request('https://www.hua.com/aiqingxianhua/?r=0&pg=%d' % (self.offset), callback=self.parse)

    def down_parse(self, response):
        """Parse one product detail page and yield a populated ``huaItem``."""
        item = huaItem()
        # Common ancestor of the product-info fields on the detail page.
        info = "//body/div[@class='product-wrap']/div[@class='product-intro']/div[@class='product-info']/div"
        try:
            title = response.xpath(info + "/div[@class='title']/p").xpath('string(.)').extract()[0].strip()

            price = response.xpath(info + "/div[@class='price']/div[1]/div[@class='price-box']/span[@class='sell-price']").xpath('string(.)').extract()[0].strip()
            orgPrice = response.xpath(info + "/div[@class='price']/div[1]/div[@class='price-box']/span[@class='original-price']").xpath('string(.)').extract()[0].strip().replace('￥', '')
            item['price'] = price
            item['org_price'] = orgPrice

            # One entry per "huayu" section: {'subname': label, 'context': text}.
            detailArr = []
            detaillist = response.xpath(info + "/div[@class='huayu']/div[@class='huayu-item']")
            for detailInfo in detaillist:
                subName = detailInfo.xpath(".//span[@class='huayu-label']").xpath('string(.)').extract()[0].strip()
                if subName == '材料':
                    # FIX: join the list of extracted paragraphs. The old
                    # code took extract()[0] (a single string) and joined
                    # it, which inserted a space between every character.
                    contextarr = detailInfo.xpath(".//p").xpath('string(.)').extract()
                    context = ' '.join(part.strip() for part in contextarr)
                else:
                    context = detailInfo.xpath(".//p/text()").extract()[0].strip()
                detailArr.append({'subname': subName, 'context': context})
            item['detail'] = detailArr

            # Main gallery images. The title doubles as the download folder
            # name, so strip characters that break paths.
            imgs = response.xpath("//body/div[@class='product-wrap']/div[@class='product-intro']/div[@class='product-preview']/div/div[2]/div")
            path1 = title.replace('/', '').replace('，', '').replace(',', '')
            urls = {'main': [], 'detail': [], 'path': path1, 'mainPath': '主图', 'detailPath': '详情'}
            for eachInfo in imgs:
                downurl = eachInfo.xpath(".//img/@src").extract()[0].replace('https:', '')
                urls['main'].append('https:%s' % downurl)

            # Detail images; fall back to the nested layout when the flat
            # one matches at most one node.
            detailImgs = response.xpath("//body/div[@class='wrap clearfix']/div[@class='detail']/div[@id='product']/div/div[2]/img")
            if len(detailImgs) <= 1:
                detailImgs = response.xpath("//body/div[@class='wrap clearfix']/div[@class='detail']/div[@id='product']/div/div[2]/div/img")
            # Fresh-flower layout (kept for reference):
            # detailImgs = response.xpath("//body/div[@class='wrap clearfix']/div[@id='product']/div/div[2]/img")
            for eachInfo1 in detailImgs:
                downurl1 = eachInfo1.xpath(".//@data-original").extract()[0].replace('https:', '')
                urls['detail'].append('https:%s' % downurl1)
            item['file_urls'] = urls

            yield item
        except Exception:
            # Best-effort: pages whose layout doesn't match are skipped,
            # but the reason is logged instead of printing 22222222222.
            self.logger.exception('product page failed: %s', response.url)

    def getDownUrl(self, response):
        # Debug stub left by the author; not referenced as a callback by
        # any Request in this file.
        print(88)
        print(response)

