from crawler import config
from logs.Logger import Logger
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium import webdriver
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo
from crawler.downloader.ImageDownloader import ImageDownloader
import time
from bs4 import BeautifulSoup
from date_manage.date_manage import TimeHelper
import file_md5.FilePrint as fp
import os
from crawler import Common as c
import random

# Output directory for downloaded images (shared "toutiao" output path from
# the crawler configuration).
path = config.generalConfig.toutiao_output_path
# Default HTTP request headers.
# NOTE(review): not referenced elsewhere in this module as shown — confirm
# whether it is still needed.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# Location of the Chrome browser binary (currently unused; see the
# commented-out '-binary' option in AdvancedLittleRedBookCrawler.__init__).
# NOTE(review): this raw string literally contains doubled backslashes;
# Windows collapses repeated separators so it still resolves, but single
# backslashes (as in user_data_dir below) were likely intended.
chrome_location = r'C:\\Dev\\chrome131\\chrome.exe'
# Chrome user-data (profile) directory reused by the crawler.
user_data_dir = r'C:\Users\zhang\AppData\Local\Google\Chrome\User Data\Default'

# Logger initialization
log = Logger("D:/workspace/logs/crawler.log", level='info')


# Pool of User-Agent strings (desktop, mobile, and legacy browsers) used to
# disguise crawler requests. Built once at import time as an immutable tuple
# instead of being re-created on every call, which the original did.
# NOTE: entries are kept verbatim from the original list — including the two
# duplicate "MSIE 7.0; Windows NT 5.1" entries — so the random selection
# distribution is unchanged.
USER_AGENTS = (
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
    "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    "Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    "Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
    "Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
    "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
    "Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
    "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    "UCWEB7.0.2.37/28/999",
    "NOKIA5700/ UCWEB7.0.2.37/28/999",
    "Openwave/ UCWEB7.0.2.37/28/999",
    "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
    # iPhone 6:
    "Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
)


def get_random_user_agent():
    """Return a User-Agent string chosen uniformly at random.

    Returns:
        str: one entry of ``USER_AGENTS``.
    """
    return random.choice(USER_AGENTS)

class AdvancedLittleRedBookCrawler:
    """Crawls image URLs from Xiaohongshu (Little Red Book) note pages.

    Drives a headless Chrome via Selenium (the pages are JS-rendered and
    guarded by anti-scraping redirects), then parses the rendered HTML with
    BeautifulSoup to collect image links.
    """

    def __init__(self):
        # Randomize the User-Agent per crawler instance to look less bot-like.
        user_agent = get_random_user_agent()
        # user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'
        self.chrome_options = Options()
        # self.chrome_options.add_argument('-binary={0}'.format(chrome_location))
        self.chrome_options.add_argument('user-agent={0}'.format(user_agent))
        # self.chrome_options.add_argument('--proxy-server=http://127.0.0.1:7890')
        # Run Chrome in headless (no-window) mode.
        self.chrome_options.add_argument('--headless')  # headless option
        # Flags commonly required for stable headless runs.
        self.chrome_options.add_argument('--no-sandbox')  # headless option
        self.chrome_options.add_argument('--disable-dev-shm-usage')  # headless option
        self.chrome_options.add_argument('window-size=10,10')  # headless option
        self.chrome_options.add_argument('--disable-gpu')  # without this, element location sometimes misbehaves
        # Skip loading images in the browser itself; we only need their URLs.
        self.chrome_options.add_argument('blink-settings=imagesEnabled=false')
        self.chrome_options.add_argument(f'--user-data-dir={user_data_dir}') # reuse the local profile; any already-running Chrome must be closed first
        # self.driver = webdriver.Chrome("D:/Dev/006_Python/chromedriver-win64.V126/chromedriver.exe",
        #                                options=self.chrome_options)
        self.service = ChromeService(executable_path=r"C:\\Dev\\chromedriver131\\chromedriver.exe")

        # Initialize the Chrome WebDriver with the service and options above.
        self.driver = webdriver.Chrome(service=self.service, options=self.chrome_options)

        # Collected download links (unused in this module as shown).
        self.download_links = []

    def crawler_images(self, url):
        """Open ``url`` in the driver and return the list of image URLs found.

        The first element is the cover image taken from the page's
        ``og:image`` meta tag; the rest come from ``div.img-container``
        elements, skipping the entry that duplicates the cover.
        """
        self.driver.get(url)
        # Work around the anti-scraping redirect: sleep briefly, read the
        # current URL, then load the page again.
        print(url)
        time.sleep(2)
        real_url = self.driver.current_url
        # NOTE(review): the assignment above is dead — the replace() below is
        # applied to the original ``url``, not to ``self.driver.current_url``.
        # Confirm which was intended.
        real_url = url.replace('discovery/item', 'explore')
        # self.driver.current_url = real_url
        self.driver.get(real_url)
        print(self.driver.current_url)
        imgs = []
        html = self.driver.page_source
        soup = BeautifulSoup(html, 'lxml')
        # NOTE(review): Open Graph tags conventionally use property="og:image",
        # not name= — confirm this matches the site's markup. If the tag is
        # missing, ``meta`` is None and the next line raises AttributeError.
        meta = soup.find('meta', {'name': 'og:image'})
        full_img = meta.get('content')
        imgs.append(full_img)
        # Cover-image key: the segment between the last '/' and the last '!'
        # of its URL, used below to skip the duplicate of the cover image.
        key_img = full_img[full_img.rfind('/')+1:full_img.rfind('!')]
        div_list = soup.find_all('div', {'class': 'img-container'})
        # print(full_img)
        for div in div_list:
            img = div.find('img')['src']
            if key_img in img:
                pass
            else:
                imgs.append(img)
                print(img)
        return imgs

    def getExtention(self, item):
        """Return the file extension to use for ``item``.

        Currently hard-coded to ``'jpg'`` regardless of input, so the
        non-jpg branch in the ``__main__`` loop is unreachable.
        """
        return 'jpg'

if __name__ == '__main__':
    # Crawl every configured Xiaohongshu URL: download each image, fingerprint
    # it, and either register it in the database or — when the fingerprint is
    # already known — log the duplicate and delete the fresh download.
    littleBook = AdvancedLittleRedBookCrawler()
    urls = c.read_urls("小红书")
    time_helper = TimeHelper()

    for url in urls:
        db = DBOperator()
        img_list = littleBook.crawler_images(url)
        downloader = ImageDownloader()
        # One timestamp batch name per source URL; files are numbered within it.
        batch_name = time_helper.getCurrentTimeStr('%Y%m%d%H%M%S')
        i = 0

        for img in img_list:
            if img == '':
                continue

            # 'strip' and 'jpg' both map to a .jpg filename; anything else
            # keeps the reported extension.
            ext = littleBook.getExtention(img)
            suffix = 'jpg' if ext in ('strip', 'jpg') else ext
            title = f"{batch_name}_{i + 1:05d}.{suffix}"

            res_path = downloader.image_downloader(img, title, path, time_helper.getCurrentTimeStr('%Y%m%d'))

            img_info = ImageInfo()
            img_info.url = img
            img_info.parent_url = img
            img_info.abbr_parent_url = img
            img_info.crawled_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            img_info.image_print = fp.get_img_print(res_path)

            duplicates = db.check_image_info(img_info)
            if duplicates:
                # Same fingerprint already on record: log and delete the file.
                time.sleep(2)
                log.logger.warn(
                    f"{img_info.parent_url} with image_print: {img_info.image_print}have duplicated!")
                log.logger.warn(f"File Name is {res_path}")
                os.remove(res_path)
            else:
                db.add_image_info(img_info)
                log.logger.debug(img)
            i += 1

    print("Finished!")

