import functools
import time
import uuid
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor

import requests
from bs4 import BeautifulSoup

from . import mylogger as log

myLogger = log.Mylogger()
# 根据cup设置线程数
MAX_THREAD = 4

def singleton(cls):
    instances = {}

    def _singleton(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]

    return _singleton

class TagFilter:

    def __init__(self,filter_tags:[str],filter_classes:[str]):
        self.filter_tags = filter_tags
        self.filter_classes = filter_classes
    
# @singleton
class Spider:

    headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'}

    def __init__(self,url:str,encode="",secondPagReg=[''],secondPageClasses=[''],titleClasses=['']):
        self.url = url
        self.encode = encode
        self.secondPagReg = secondPagReg
        self.secondPageClasses = secondPageClasses
        self.titleClasses = titleClasses
    
    def isUrlVailid(self,url:str) -> bool:
        try:
            r = requests.get(url,headers=self.headers)
            myLogger.info(str(r.status_code) + " url is valid")
        except Exception as e:
            myLogger.debug("Exception: {}".format(e))
            return False
        else:
            return r.status_code==200
    
    def execute(self,url:str,surplus:TagFilter) -> BeautifulSoup:
        if self.isUrlVailid(url):
            r = requests.get(url,headers=self.headers,timeout=5) 
            myLogger.debug(self.encode)
            if self.encode!= "":
                r.encoding=self.encode
            soup = BeautifulSoup(r.text, "html.parser")
            if surplus:
                for i,f in enumerate(surplus.filter_tags):
                    if len(surplus.filter_classes[i]) == 0 :
                        for tag in soup.find_all(f):
                            tag.decompose()
                        myLogger.debug("no sense")
                    else:
                        query = f +"." + '.'.join(cls for cls in surplus.filter_classes[i])
                        myLogger.debug(query + "==================")
                        # for tag in soup.find_all(f,surplus.filter_classes[i]):
                        for tag in soup.select(query):
                            tag.decompose()
            return soup
        else:
            myLogger.debug("=======url is not valid======")
            return None 

    def executeWithFilter(self,filter:TagFilter,surplus:TagFilter) -> {}:
        results = {"_id": str(uuid.uuid1())}
        soup = self.execute(self.url,surplus)
        if soup is not None:
            results['title'] = soup.title.string   
            results['tag_list']=[] 
            # myLogger.debug(results['title'])
            for name in filter.filter_tags:
                results['tag_list'].append(name)
                results[name] = []
            for i,f in enumerate(filter.filter_tags):
                if len(filter.filter_classes[i]) == 0 :
                    for tag in soup.find_all(f):
                        if tag.get_text() is not None and str.strip(tag.get_text())!="" :
                            # results[tag.name].append(tag.get_text())
                            results[tag.name].append(tag.get_text())
                else:
                    for tag in soup.find_all(f,filter.filter_classes[i]):
                        if tag.get_text() is not None and tag.get_text()!="":
                            # myLogger.debug(tag.get_text())
                            # results[tag.name].append(tag.get_text())
                            results[tag.name].append(tag.get_text())
        try:
            myLogger.debug(results)
        except Exception as e:
            myLogger.debug("Exception: {}".format(e))
        return results
        
    def deepExecute(self,surplus:TagFilter,preUrl:str="") -> {}:
        results={}
        urlArray = []
        soup = self.execute(self.url,surplus)
        if soup is not None:
            results['title'] = soup.title.string
            results['contents'] = []
            urlArray = [tag['href'] if tag['href'].startswith("http") else preUrl + tag['href']  for tag in soup.find_all(href=True)]
            
        # for url in urlArray:
        #     myLogger.debug(url)
        if self.secondPagReg[0] != "" :
            targetUrls = list(filter(self.isValidUrl, urlArray))
        else:
            targetUrls = urlArray

        targetUrls = self.remove_duplicate(targetUrls)

        startTime = time.time()
        myLogger.debug("====start time :======="+str(startTime)  + "==== site num: === " + str(len(targetUrls)) + "=============")
        # for i,u in enumerate(targetUrls) :
        #     myLogger.debug("=============" + str(i) +"==============" + u)
            
        #     try:
        #         soup_tmp = self.execute(u,None)
        #     except Exception as e:
        #         myLogger.debug("Exception: {}".format(e))
        #     else:
        #         if soup_tmp:
        #             content = ""
        #             images = ''
        #             videos = ''
        #             title = ""
        #             for t in soup_tmp.select("." + '.'.join(cls for cls in self.titleClasses)):
        #                 title += t.get_text()
        #             # for c in soup_tmp.find_all(class_=self.isClassesCollection):
        #             for c in soup_tmp.select("." + '.'.join(cls for cls in self.secondPageClasses)):
        #                 #内容
        #                 # myLogger.debug(str(c))
        #                 if c.p:
        #                    content += "".join(str(tag_p) for tag_p in c.findAll('p'))
        #                 # content += c.get_text()

        #                 # 采集图片
        #                 if c.img:
        #                     images = [img['src'] for img in c.findAll('img')]
        #                     # images = "\n".join (img['src'] for img in c.findAll('img'))
                            
        #                 # 采集视频
        #                 if c.video:
        #                     videos = [video['src'] for video in c.findAll('video')]
        #                     # videos = "\n".join (video['src'] for video in c.findAll('video'))
        #                 # content += str(c)
        #             results['contents'].append({"_id":str(uuid.uuid1()),"index":i,"title":title,"url":u ,"article":content,"image":images,"video":videos})
        
        with ThreadPoolExecutor(max_workers=MAX_THREAD) as pool:
            thread_results = pool.map(self.findArticle, targetUrls)
        results['contents'] = [r for r in thread_results]
        myLogger.debug("====end time :======="+str( time.time()) + "==== total time:  === "+ str(time.time() - startTime) + "seconds =========")
        # myLogger.debug(results['contents'])
        return results

    def isClassesCollection(self,css_class:str)->bool:
        for cls in self.secondPageClasses:
            if str(css_class) == cls:
                return True
        return False

    def isValidUrl(self,url:str) -> bool:
        for reg in self.secondPagReg:
            # myLogger.debug(reg + " : " + url)
            if reg in url and url.startswith("http"):
                return True
            else:
                return False
        return False

    def findArticle(self,url:str):
        try:
            soup_tmp = self.execute(url,None)
        except Exception as e:
            myLogger.debug("Exception: {}".format(e))
        else:
            if soup_tmp:
                content = ""
                images = ''
                videos = ''
                title = ""
                if self.titleClasses[0] != "":
                    for t in soup_tmp.select("." + '.'.join(cls for cls in self.titleClasses)):
                        title += t.get_text()
                # for c in soup_tmp.find_all(class_=self.isClassesCollection):
                if self.secondPageClasses[0] != "":
                    for c in soup_tmp.select("." + '.'.join(cls for cls in self.secondPageClasses)):
                        #内容
                        # myLogger.debug(str(c))
                        if c.p:
                            content += "".join(str(tag_p) for tag_p in c.findAll('p'))
                        # content += c.get_text()

                        # 采集图片
                        if c.img:
                            images = [img['src'] for img in c.findAll('img')]
                            # images = "\n".join (img['src'] for img in c.findAll('img'))
                            
                        # 采集视频
                        if c.video:
                            videos = [video['src'] for video in c.findAll('video')]
                            # videos = "\n".join (video['src'] for video in c.findAll('video'))
                        # content += str(c)
                return {"_id":str(uuid.uuid1()),"title":title,"url":url ,"article":content,"image":images,"video":videos}
            return {}

    def remove_duplicate(self,urls:[str]):
        uni_urls = []
        if len(urls) == 0:
            return uni_urls
        for u in urls:
            if u in uni_urls:
                continue
            else:
                uni_urls.append(u)
        return uni_urls
