# -*- coding: utf-8 -*-
# ☯ Author: ChinaPython
# ☯ Email : chinapython@yeah.net
# ☯ Date  : 2021/11/2 16:05
import re
import cv2
import time
import queue
import requests
import numpy as np
from lxml import etree
import paddlehub as hub
from selenium import webdriver
from selenium.webdriver.common.by import By
from urllib import parse


class ScanHelper:
    """Crawl a set of seed domains with Selenium and report prohibited words
    found in page text or recognized (via OCR) inside page images.

    Inputs (working directory):
        ./chromedriver.exe       - Chrome driver binary
        ./prohibited_words.txt   - one prohibited word per line
        ./domain.txt             - one seed URL per line
    Outputs: report.txt / image_report.txt (appended hit logs).
    """

    def __init__(self):
        # NOTE(review): `executable_path` is deprecated in Selenium 4 in
        # favor of Service objects; kept for compatibility — confirm the
        # installed Selenium version before changing.
        self.driver = webdriver.Chrome(executable_path="./chromedriver.exe")
        # scan_href_queue: URLs still to visit.
        # urls_history:    URLs already visited (or reported).
        # scan_href_list:  every URL ever queued, for duplicate suppression.
        self.scan_href_queue = queue.Queue()
        self.urls_history = list()
        self.scan_href_list = list()
        with open("./prohibited_words.txt", "r", encoding="utf8") as f:
            self.words = [line.strip() for line in f if line.strip()]
        with open("./domain.txt", "r", encoding="utf8") as f:
            for line in f:
                seed = line.strip()
                if not seed:
                    continue
                self.scan_href_queue.put(seed)
                self.scan_href_list.append(seed)
        # Only links containing exactly one of these domains are followed.
        self.limit_domain = ["sinovoice.com", "aicloud.com"]
        self.ocr = hub.Module(name="chinese_ocr_db_crnn_mobile")
        # Image URLs already run through OCR (dedup across pages).
        self.image_history = list()

    def __del__(self):
        # Best-effort cleanup; quit() may already have run in run(), and
        # __del__ must never raise during interpreter teardown.
        try:
            self.driver.quit()
        except Exception:
            pass

    def distinguish(self, np_image):
        """Run the PaddleHub Chinese OCR model on one image.

        :param np_image: ndarray of shape [H, W, C] in BGR order.
        :return: the model's recognition result structure.
        """
        return self.ocr.recognize_text(
            images=[np_image],   # image data, ndarray [H, W, C], BGR
            use_gpu=False,       # set CUDA_VISIBLE_DEVICES first if enabling GPU
            output_dir=r"C:\test",  # where visualization images are saved
            visualization=True,  # save the recognition result as an image
            box_thresh=0.5,      # confidence threshold for text boxes
            text_thresh=0.5      # confidence threshold for recognized text
        )

    @staticmethod
    def download(url):
        """Download *url* and decode it into a BGR image array.

        Returns None when the request fails or the payload does not look
        like a JPEG/PNG/BMP (cheap magic-byte sniff).
        """
        try:
            image = requests.get(url).content
            if b"JFIF" not in image and b"PNG" not in image and b"BM" not in image:
                return None
            # BUGFIX: np.fromstring is deprecated and removed in recent
            # NumPy; np.frombuffer is the supported equivalent.
            return cv2.imdecode(np.frombuffer(image, np.uint8), 1)
        except Exception as e:
            print(e)
            return None

    def image_detection(self, url):
        """Download an image and return its OCR text joined with '/',
        or None on any failure."""
        content = self.download(url)
        if content is None:
            print("image download failed")
            return None
        try:
            items = self.distinguish(content)
            # Pull every 'text' field out of the (stringified) OCR result.
            return "/".join(re.findall("'text': '(.*?)',", str(items)))
        except Exception as e:
            print(e)
            return None

    # Collect all in-scope links on the current page.
    def get_all_urls(self):
        """Harvest <a href> links from the current page, filter them to the
        allowed domains, and queue any not seen before."""
        urls = set()
        for selector in self.driver.find_elements(By.XPATH, "//a"):
            url = selector.get_attribute("href")
            # Exactly one allowed domain must occur in the link
            # (str() also guards against href being None).
            if sum(1 for d in self.limit_domain if d in str(url)) != 1:
                continue
            if "http" not in url:
                continue
            url = url.split("#")[0].strip()  # drop the fragment
            url = url.strip('/ ')            # drop trailing slash / space
            if any(ext in url for ext in (".apk", ".rar", ".zip", ".exe")):
                continue
            if "login" in url or "wx_qrcode" in url or not url.startswith('http'):
                continue
            if url in self.urls_history:
                continue
            # Treat the http/https variant of an already-queued URL as a dup.
            if url.replace('http://', 'https://') in self.scan_href_list:
                continue
            if url.replace('https://', 'http://') in self.scan_href_list:
                continue
            urls.add(url)
        for url in urls:
            self.scan_href_queue.put(url)
            self.scan_href_list.append(url)

    # Check the page body text.
    def check(self, url):
        """Scan all text nodes of the current page for prohibited words and
        append any hit to report.txt."""
        html_etree = etree.HTML(self.driver.page_source)
        if html_etree is None:
            return
        content = "\n".join(html_etree.xpath('//text()'))
        words = [w for w in self.words if w in content]
        if not words:
            return
        # Redirects may have moved us; report the URL actually loaded.
        current_url = self.driver.current_url.split('#')[0].strip('/')
        url = current_url
        if url not in self.urls_history:
            self.urls_history.append(url)
        info = "当前URL:{} ===>>> 发现违禁词:{}".format(url, "、".join(words))
        with open("report.txt", "a+", encoding="utf8") as f:
            f.write("%s\n" % info)
            f.flush()

    def image_check(self, url):
        """OCR every image referenced on the current page and log prohibited
        words found in the recognized text to image_report.txt."""
        page_source = self.driver.page_source
        temps = set()
        # <img src="..."> style attributes.
        for match in re.findall(r'(src|img)="(.*?\.(jpg|png|bmp))"', page_source):
            if len(match) != 3 or "/" not in str(match) or "." not in str(match):
                continue
            temps.add(match[1])
        # CSS url('...') backgrounds.
        for match in re.findall(r"url\('?(.*?\.(jpg|png|bmp))'?\)", page_source):
            if len(match) != 2 or "/" not in str(match) or "." not in str(match):
                continue
            temps.add(match[0])
        for temp in temps:
            image_url = parse.urljoin(url, temp)
            if image_url in self.image_history:
                continue
            # BUGFIX: the original never appended to image_history, so the
            # dedup check above could never fire and images were re-OCR'd
            # on every page that referenced them.
            self.image_history.append(image_url)
            items = self.image_detection(image_url)
            print("当前URL:{} -->> 识别结果:{}".format(image_url, str(items)))
            words = [w for w in self.words if w in str(items)]
            if not words:
                # BUGFIX: the original `return`ed here, silently skipping
                # every remaining image on the page after the first clean one.
                continue
            info = "当前URL:{} ===>>> 发现违禁词:{}".format(image_url, "、".join(words))
            print(info)
            with open("image_report.txt", "a+", encoding="utf8") as f:
                f.write("%s\n" % info)
                f.flush()

    def scan(self, url):
        """Visit *url*, harvest new links, and run the image check."""
        # Mark visited first so a crash mid-page does not loop forever.
        self.urls_history.append(url)
        print("start view url:{}".format(url))
        self.driver.get(url)
        # Queue follow-up links found on this page.
        self.get_all_urls()
        # Body-text check is currently disabled; re-enable if needed.
        # self.check(url)
        self.image_check(url)

    def run(self):
        """Drain the URL queue, scanning each not-yet-visited URL, and shut
        the browser down when done (even on an unexpected error)."""
        try:
            while not self.scan_href_queue.empty():
                url = self.scan_href_queue.get()
                if url in self.urls_history:
                    continue
                try:
                    self.scan(url)
                except Exception as e:
                    # Best-effort crawl: log and move on to the next URL.
                    print(e)
        finally:
            # BUGFIX: the original called self.__del__() explicitly, which
            # is an anti-pattern and risks a double quit(); release the
            # browser directly instead.
            self.driver.quit()


if __name__ == '__main__':
    # Build the scanner (launches the browser, loads word/domain lists)
    # and crawl until the URL queue is exhausted.
    helper = ScanHelper()
    helper.run()
