import os

import scrapy
from bs4 import BeautifulSoup
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from twisted.internet import reactor

# Scrapy settings handed to the CrawlerRunner in start_spider().
SETTINGS = {
    "FEEDS": {
        # Export scraped items to an Excel file, replacing output from any previous run.
        "products.xlsx": {"format": "xlsx", "overwrite": True},
    },
    'USER_AGENT': 'xxx',  # placeholder UA — presumably meant to be replaced with a real browser string
    'FEED_EXPORTERS': {
        # NOTE(review): xlsx export relies on the third-party scrapy-xlsx package being installed.
        'xlsx': 'scrapy_xlsx.XlsxItemExporter',
    },
    # Column order of the exported spreadsheet; names match the FruugoItem fields below.
    'FEED_EXPORT_FIELDS': ['title', 'desc', 'pic1', 'pic2', 'pic3', 'pic4', 'pic5', 'price', 'category', 'ean'],
}


class FruugoItem(scrapy.Item):
    """A single scraped Fruugo product listing; one row in the xlsx feed."""
    # define the fields for your item here like:
    title = scrapy.Field()     # product title (page <h1> text)
    desc = scrapy.Field()      # description HTML-ish text, newlines turned into <br>
    pic1 = scrapy.Field()      # up to five gallery image URLs; unused slots stay ''
    pic2 = scrapy.Field()
    pic3 = scrapy.Field()
    pic4 = scrapy.Field()
    pic5 = scrapy.Field()
    price = scrapy.Field()     # displayed price string, whitespace-stripped
    category = scrapy.Field()  # breadcrumb trail joined with ' > '
    ean = scrapy.Field()       # EAN read from the product spec list

class FruugoSpider(scrapy.Spider):
    """Scrape the Fruugo product pages listed in urls.txt into FruugoItem rows.

    If a proxy.txt file exists next to the script, its first line is used as
    an HTTP proxy for every request (an http:// scheme is prepended when the
    line carries no scheme).
    """

    name = "fruugo"

    def start_requests(self):
        """Yield one request per non-blank line of urls.txt."""
        with open('urls.txt', 'r', encoding='utf-8') as f:
            urls = [line.strip() for line in f]
        # urls = urls[:1]

        proxy = None
        if os.path.exists('proxy.txt'):
            print("只会读取proxy.txt内第一行作为http代理，默认为http代理。仍需其他协议的代理，请务必带协议头")
            with open('proxy.txt', 'r', encoding='utf-8') as f:
                proxy = f.readline().strip()
            # Default to the http:// scheme when none was supplied.
            if not proxy.startswith('http://'):
                proxy = 'http://' + proxy

        for url in urls:
            if proxy:
                # Build a fresh meta dict per request: request.meta is mutated
                # by downloader middlewares, so it must never be shared.
                yield scrapy.Request(url=url, meta={'proxy': proxy})
            else:
                yield scrapy.Request(url=url)

    def parse(self, response):
        """Extract one FruugoItem from a product detail page.

        NOTE(review): assumes every selector below exists on the page; a
        missing element raises AttributeError and the URL yields no item.
        """
        soup = BeautifulSoup(response.text, 'html.parser')
        title = soup.find('h1').get_text()
        desc = soup.find('div', class_='Product__Description-text').get_text().replace('\n ', '<br>').strip()
        category = soup.find('ol', class_='breadcrumb').get_text().strip().replace('\n', ' > ')

        # Gallery images: use the thumbnail strip when present, otherwise fall
        # back to the single hover-zoom image.
        thumbs = soup.find('div', class_='ProductGallery--thumbs')
        if thumbs:
            # Reuse the node already located above instead of re-running the
            # identical soup.find() a second time (redundant lookup in the
            # original).
            pics = [div['data-image'] for div in thumbs.find_all('div')]
        else:
            pics = [soup.find('div', class_='ProductGallery--hover-zoom').find('img').attrs['src']]
        # Pad/truncate to exactly five entries so pic1..pic5 are always defined.
        pic1, pic2, pic3, pic4, pic5 = (pics + [''] * 5)[:5]

        price = soup.find('span', class_='price').get_text().strip()
        # EAN sits in the second <li> of the last spec list on the page.
        ean = soup.find_all('ul', class_='product-description-spec-list')[-1].find_all('li')[1].span.get_text()

        return FruugoItem(title=title, desc=desc, pic1=pic1, pic2=pic2, pic3=pic3, pic4=pic4, pic5=pic5, price=price,
                          category=category, ean=ean)


def start_spider():
    """Run the Fruugo spider once, blocking until the crawl completes."""
    configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})
    crawler_runner = CrawlerRunner(SETTINGS)
    deferred = crawler_runner.crawl(FruugoSpider)
    # Stop the reactor whether the crawl succeeds or fails, then block on it.
    deferred.addBoth(lambda _: reactor.stop())
    reactor.run()


def html_parse_debug():
    """Load a locally saved page (debug.html) for offline parser development.

    Returns the raw HTML string so extraction helpers can be exercised
    against it without re-crawling the live site.
    """
    # f.read() replaces the roundabout ''.join(f.readlines()) of the original.
    with open('debug.html', 'r', encoding='utf-8') as f:
        html = f.read()
    # content = get_today_event_content(html)
    # print(content)
    return html


if __name__ == '__main__':
    # Entry point: run the live crawl. Swap in html_parse_debug() when
    # iterating on the parsing logic against a saved page.
    start_spider()

    # html_parse_debug()
