import requests
from bs4 import BeautifulSoup
from crawler import config
from selenium import webdriver
from crawler import Common as c
from date_manage.date_manage import TimeHelper
from crawler.downloader.ImageDownloader import ImageDownloader
from logs.Logger import Logger
import file_md5.FilePrint as fp
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo
import time
import datetime
import os
import common.const as const
import Common as common

# Browser headers used for plain-HTTP fetches (mimics desktop Firefox).
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# 谷歌浏览器位置 (Chrome binary location, used by selenium webdriver)
chrome_location = r'C:\Program Files\Google\Chrome\Application\chrome.exe'
# 日志初始化 (logger writing to the shared crawler log file)
log = Logger("D:/logs/crawler.log", level='info')

# Output directory for downloaded images.
# NOTE(review): the original code first assigned the toutiao output path and
# immediately overwrote it with this one; the dead assignment was removed.
path = config.generalConfig.little_red_book_output_path

class WeiboCrawler:
    """Extracts full-size image URLs from a crawled HTML page."""

    def analysisContent(self, content):
        """Parse HTML and collect image URLs, upgraded to full resolution.

        :param content: raw HTML string (the saved page source)
        :return: list of image ``src`` URLs with the "orj360" thumbnail
                 segment replaced by "large"
        """
        tree = BeautifulSoup(content, "lxml")
        img_list = []
        for img_element in tree.find_all("img"):
            img = img_element.get("src")
            print(img)
            # <img> tags without a src attribute yield None, which would
            # crash str.replace below — skip them.
            if img is None:
                continue
            img_list.append(img.replace("orj360", "large"))
        return img_list


if __name__ == "__main__":
    time_helper = TimeHelper()
    weiboCrawler = WeiboCrawler()
    # Seed file holds the saved HTML page whose images we want to download.
    txt = common.readSeedFile("D:/data/小红书.txt")
    img_list = weiboCrawler.analysisContent(txt)
    downloader = ImageDownloader()
    db = DBOperator()
    # Batch name doubles as the filename prefix for this crawl run.
    batch_name = time_helper.getCurrentTimeStr('%Y%m%d%H%M%S')

    i = 0
    for img in img_list:
        # Skip blank entries; `not img` also covers None, which the parser
        # can yield for <img> tags lacking a src attribute (the original
        # `img == ''` test missed that case).
        if not img:
            continue
        # e.g. "20240101120000_00001"
        title = batch_name + "_" + str(i + 1).zfill(5)
        res_path = downloader.image_downloader(img, title, path, time_helper.getCurrentTimeStr('%Y%m%d'))

        img_info = ImageInfo()
        img_info.url = img
        img_info.parent_url = img
        img_info.abbr_parent_url = img
        img_info.crawled_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        # Fingerprint (MD5) of the downloaded file, used for deduplication.
        img_info.image_print = fp.get_img_print(res_path)

        res = db.check_image_info(img_info)
        if len(res) > 0:
            # Duplicate fingerprint found in the DB: wait briefly so any
            # file handle is released, then delete the redundant download.
            time.sleep(2)
            # logger.warn is a deprecated alias — use warning().
            log.logger.warning(
                img_info.parent_url + " with image_print: " + str(img_info.image_print) + " have duplicated!")
            log.logger.warning("File Name is " + res_path)
            os.remove(res_path)
        else:
            db.add_image_info(img_info)
            log.logger.debug(img)
        i += 1
    print("Finished!")