#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from bs4 import BeautifulSoup
from logs.Logger import Logger
from crawler import config
from crawler.downloader.ImageDownloader import ImageDownloader
from date_manage.date_manage import TimeHelper
import file_md5.FilePrint as fp
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo
import time
import os
import common.const as const


# Output / logging / seed-file locations come from the shared crawler config.
path = config.generalConfig.little_red_book_output_path
log_path = config.generalConfig.log_path
urls_path = config.generalConfig.feed_file
# Desktop Firefox UA so the site serves the regular HTML layout.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# Logger initialisation
log = Logger("D:/workspace/logs/crawler.log", level='info')


# Crawl page-layout types
# TYPE1: older layout — image URLs embedded in <span style="..."> inside <ul> lists
const.TYPE1 = 1
# TYPE2: newer layout — image URLs in background-image styles of swiper-slide <div>s
const.TYPE2 = 2
# TYPE3: newest layout — <img> tags inside the article carry the URL directly in src
const.TYPE3 = 3
# article中的img组件src下直接是图片

class LittleRedBookCrawler:
    """Extracts image URLs from saved Little Red Book (Xiaohongshu) HTML pages.

    Three page layouts are supported; ``selectAnalyzor`` dispatches on a
    ``const.TYPE*`` flag to the matching parser. Each parser returns a list
    of image-URL strings.
    """

    def readSeedFile(self, path):
        """Return the full text of the saved HTML file at *path* (UTF-8)."""
        with open(path, 'r', encoding='UTF-8') as f:
            return f.read()

    def selectAnalyzor(self, content, type):
        """Dispatch *content* to the parser for layout *type*.

        TYPE1 -> analysisContent, TYPE3 -> analysisContent3,
        anything else (incl. TYPE2) -> analysisContent2.
        """
        if type == const.TYPE1:
            return self.analysisContent(content)
        elif type == const.TYPE3:
            return self.analysisContent3(content)
        else:
            return self.analysisContent2(content)

    def analysisContent(self, content):
        """Parse the old <ul>/<span> layout: protocol-less URLs live in style attrs."""
        tree = BeautifulSoup(content, "lxml")
        img_list = []
        for ul in tree.find_all("ul"):
            for span in ul.find_all("span"):
                style = span.get("style")
                if not style:
                    # <span> without an inline style carries no image
                    continue
                start = style.find("//ci")
                end = style.find("\");")
                if start == -1 or end == -1:
                    # style attr does not contain an image URL — skip instead
                    # of appending a garbage slice
                    continue
                img_list.append("http:" + style[start:end])
        print(img_list)
        return img_list

    def analysisContent2(self, content):
        """Parse the swiper layout: URLs sit in background-image style attrs."""
        marker = "background-image: url(\""
        tree = BeautifulSoup(content, "lxml")
        img_list = []
        for div in tree.find_all('div', {'class': 'swiper-slide'}):
            style = div.get("style")
            if not style:
                continue
            start = style.find(marker)
            end = style.find("\");")
            if start == -1 or end == -1:
                # no usable background-image URL in this slide
                continue
            url = style[start + len(marker):end]
            img_list.append(url)
            print(url)
        return img_list

    def analysisContent3(self, content):
        """Parse the newest layout: every <img> src is an image URL."""
        tree = BeautifulSoup(content, "lxml")
        img_list = []
        for img_tag in tree.find_all('img'):
            src = img_tag.get("src")
            if not src:
                # <img> without a src has nothing to download
                continue
            img_list.append(src)
            print(src)
        return img_list

    def getExtention(self, item):
        """Derive a file extension from an image URL.

        The CDN URLs typically end in ``...format/webp|imageMogr2/strip``;
        the last '/'-separated segment is treated as the extension. ``webp``
        and anything longer than 4 chars (not a plausible extension) fall
        back to ``jpg``.
        """
        ext = item.split("/")[-1]
        if ext == 'webp' or len(ext) > 4:
            return 'jpg'
        return ext


if __name__ == "__main__":
    time_helper = TimeHelper()
    littleBook = LittleRedBookCrawler()
    txt = littleBook.readSeedFile("D:/data/小红书.txt")

    # Pick the parser matching the saved page's layout (see const.TYPE*).
    # A TYPE2 page, for reference, looks like:
    #   <div class="swiper-slide ..." style='background-image:
    #   url("https://sns-img-hw.xhscdn.com/...?imageView2/2/w/1920/format/webp|imageMogr2/strip"); ...'>
    img_list = littleBook.selectAnalyzor(txt, const.TYPE3)
    downloader = ImageDownloader()
    db = DBOperator()
    # One batch name per run; files are numbered sequentially inside the batch.
    batch_name = time_helper.getCurrentTimeStr('%Y%m%d%H%M%S')
    i = 0

    for img in img_list:
        if img == '':
            continue
        ext = littleBook.getExtention(img)
        # 'strip' comes from the "...|imageMogr2/strip" CDN suffix, not a real extension
        if ext in ('strip', 'jpg'):
            ext = 'jpg'
        title = batch_name + "_" + str(i + 1).zfill(5) + '.' + ext
        res_path = downloader.image_downloader(img, title, path, time_helper.getCurrentTimeStr('%Y%m%d'))
        img_info = ImageInfo()
        img_info.url = img
        img_info.parent_url = img
        img_info.abbr_parent_url = img
        img_info.crawled_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

        # Fingerprint the downloaded file so duplicates can be detected in the DB.
        file_print = fp.get_img_print(res_path)
        img_info.image_print = file_print

        res = db.check_image_info(img_info)
        if len(res) > 0:
            # Duplicate confirmed: log it, then delete the freshly downloaded file.
            time.sleep(2)
            log.logger.warning(
                img_info.parent_url + " with image_print: " + str(img_info.image_print) + "have duplicated!")
            log.logger.warning("File Name is " + res_path)
            os.remove(res_path)
        else:
            db.add_image_info(img_info)
            log.logger.debug(img)
        i += 1
    print("Finished!")
