import json
import re
import time
import urllib
from urllib.parse import urlparse

from bs4 import BeautifulSoup as bs
from sqlalchemy import exists, and_

# from model import DBSession, WebSite, WebSiteLib, SensitiveWord, VioContent, SensitiveWordCategory
import model
# from proxy import RequestByProxyPool
import proxy
# from web_content import WebContent
import web_content


class Queue:
    """A minimal FIFO queue backed by a plain list."""

    def __init__(self):
        # Items are appended at the tail and removed from the head.
        self.items = []

    def enqueue(self, item):
        """Append ``item`` at the tail of the queue."""
        self.items.append(item)

    def dequeue(self):
        """Remove and return the head item, or None (with a notice) if empty.

        Note: list.pop(0) is O(n); acceptable for the crawl sizes used here.
        """
        if self.is_Empty():
            print("当前队列为空！！")
        else:
            return self.items.pop(0)

    def is_Empty(self):
        """Return True when the queue holds no items."""
        return not self.items

    def size(self):
        """Return the number of queued items."""
        return len(self.items)

    def front(self):
        """Return the head item without removing it, or None if empty.

        Bug fix: this previously returned ``items[len(items) - 1]`` — the
        *tail* element — contradicting its documented FIFO contract.
        """
        if self.is_Empty():
            print("当前队列为空！！")
        else:
            return self.items[0]


class InterLinks:
    """Crawl a website's internal links and scan each page for sensitive words.

    Built from a ``model.WebSite`` row; findings are persisted through
    ``model.DBSession`` (link inventory, accessibility flags, violations).
    """

    def __init__(self, website):
        # ``website`` is a model.WebSite row; keep only the fields used later.
        self.base_url = website.website_domain
        self.id = website.id
        self.website_name = website.website_name
        self.user_id = website.user_id

    # 获取链接
    def _get_links(self, html, url, urls_queue, urls_list):
        """Collect this site's internal links from one parsed page.

        :param html: BeautifulSoup document of the page.
        :param url: the page's own URL (its scheme/netloc define "internal").
        :param urls_queue: crawl frontier (Queue), extended in place.
        :param urls_list: every URL seen so far, used for de-duplication.
        :return: the (urls_queue, urls_list) pair.
        """
        inter_link = '{}://{}'.format(urlparse(url).scheme, urlparse(url).netloc)

        # Accept hrefs that carry this site's origin, are protocol-relative,
        # or are (root-)relative, i.e. do not start with a scheme or "www.".
        # The previous pattern '^([^http, www]|…)' was a character class and
        # wrongly rejected any href beginning with one of "h t p , space w".
        href_re = re.compile(
            '^(?:{}|//|(?!https?:)(?!www\\.))'.format(re.escape(inter_link)))

        for link in html.find_all('a', href=href_re):
            if link.attrs['href'] == '#' or link.attrs['href'].startswith('javascript'):
                continue

            # Percent-encode non-ASCII (e.g. Chinese) characters in the href.
            link.attrs['href'] = urllib.parse.quote(link.attrs['href'], safe='?=&:/')
            href = link.attrs['href']

            # Skip empty links and links to downloadable documents.
            if not href or href.endswith(
                    (".doc", '.docx', '.pdf', '.xlsx', '.xls', '.csv', '.txt')):
                continue

            # urljoin resolves root-relative ("/x"), protocol-relative
            # ("//host/x"), page-relative ("x") and absolute hrefs correctly.
            # The old code prefixed the origin unconditionally (three identical
            # branches) and produced URLs such as "http://hosthttp://host/x"
            # for absolute internal hrefs.
            full_url = urllib.parse.urljoin(inter_link + '/', href)

            # Keep internal links only, once each.
            if full_url.startswith(inter_link) and full_url not in urls_list:
                urls_queue.enqueue(full_url)
                urls_list.append(full_url)
        return urls_queue, urls_list

    def _deep(self, urls_queue, urls_list):
        """Breadth-first crawl of every queued URL.

        Unreachable links are recorded with status '3', reachable ones with
        status '0'; each reachable page is parsed and may extend the queue.

        :return: list of unsaved model.WebSiteLib rows.
        """
        website_lib = []
        req = proxy.RequestByProxyPool()  # one pool for the whole crawl

        # Drain until empty. The previous guard ``while num > 1`` compared
        # against a stale size snapshot, so a single queued URL was never
        # crawled at all.
        while not urls_queue.is_Empty():
            current = urls_queue.dequeue()
            print(current)
            r, status_code = req.response(current)
            if not r:
                print('链接无法访问', current)
                website_lib.append(model.WebSiteLib(
                    website_label='',
                    website_link=current,
                    grab_time=int(time.time()),
                    response_code=status_code,
                    status='3',  # 3 = link not reachable
                    website_domain_id=self.id,
                    user_id=self.user_id,
                ))
                continue

            website_lib.append(model.WebSiteLib(
                website_label='',
                website_link=current,
                grab_time=int(time.time()),
                response_code=status_code,
                status='0',  # 0 = not yet content-scanned
                website_domain_id=self.id,
                user_id=self.user_id,
            ))
            origin = '{}://{}'.format(urlparse(current).scheme, urlparse(current).netloc)
            html = bs(r.content, 'html.parser')
            self._get_links(html, origin, urls_queue, urls_list)
        return website_lib

    def all_links(self):
        """Fetch the home page, record site accessibility, and crawl.

        :return: list of unsaved model.WebSiteLib rows ([] when the site is
                 unreachable).
        """
        urls_queue = Queue()
        urls_list = []

        req = proxy.RequestByProxyPool()
        r, status_code = req.response(self.base_url)
        if not r:
            print('站点无法访问')
            # Site unreachable: persist is_access = 0.
            model.DBSession.query(model.WebSite).filter(model.WebSite.id == self.id).update(
                {model.WebSite.is_access: '0'})
            model.DBSession.commit()
            return []

        # Site reachable: persist is_access = 1.
        model.DBSession.query(model.WebSite).filter(model.WebSite.id == self.id).update(
            {model.WebSite.is_access: '1'})
        model.DBSession.commit()

        # Parse the home page, then crawl outward from its links.
        html = bs(r.content, 'html.parser')
        domain = '{}://{}'.format(urlparse(self.base_url).scheme, urlparse(self.base_url).netloc)
        home_page_queue, home_page_urls = self._get_links(html, domain, urls_queue, urls_list)
        return self._deep(home_page_queue, home_page_urls)

    def update_website_ip(self, ip, port):
        """Persist the resolved "ip:port" string on the WebSite row."""
        w = model.DBSession.query(model.WebSite).filter(model.WebSite.id == self.id).first()
        w.ip_address = '{}:{}'.format(ip, port)
        model.DBSession.commit()

    def word_list_with_users(self) -> list:
        """Load every sensitive word selected by this user's grab configs.

        Each GrabConfig row stores a comma-separated category-id string; the
        words of all those categories are concatenated (duplicates possible,
        as in the original behavior).
        """
        configs = model.DBSession.query(model.GrabConfig).filter(
            model.GrabConfig.user_id == self.user_id).all()

        result = []
        for c in configs:
            cate_id_list = c.sensitive_words_cate.split(',')
            result.extend(model.DBSession.query(model.SensitiveWord).filter(
                model.SensitiveWord.cid.in_(cate_id_list)).all())
        return result

    def crawling_and_save(self):
        """Crawl the site, save new links, then scan unscanned pages.

        Pages containing configured sensitive words are recorded as
        model.VioContent rows plus rows in the word/category link table.
        """
        website_lib = self.all_links()

        # Keep only links not already stored for this site/user.
        new_website_lib = [
            obj for obj in website_lib
            if not model.DBSession.query(exists().where(and_(
                model.WebSiteLib.website_domain_id == self.id,
                model.WebSiteLib.user_id == self.user_id,
                model.WebSiteLib.website_link == obj.website_link))).scalar()
        ]

        if new_website_lib:
            # Persist the de-duplicated links.
            model.DBSession.add_all(new_website_lib)
            model.DBSession.commit()

        # Scan every link whose status is still 0 (never content-scanned).
        no_scrawling = model.DBSession.query(model.WebSiteLib).filter(
            model.WebSiteLib.website_domain_id == self.id,
            model.WebSiteLib.user_id == self.user_id,
            model.WebSiteLib.status == '0').all()
        if not no_scrawling:
            return

        req = proxy.RequestByProxyPool()
        # Sensitive words selected by this user's grab configuration.
        word_list = self.word_list_with_users()

        for lib in no_scrawling:
            # Status 1 = scan in progress.
            model.DBSession.query(model.WebSiteLib).filter(model.WebSiteLib.id == lib.id).update(
                {model.WebSiteLib.status: '1'})
            model.DBSession.commit()

            r, _ = req.response(lib.website_link)
            if not r:
                # Link became unreachable since discovery: mark it 3 instead
                # of crashing on ``r.content`` (the old code assumed success).
                model.DBSession.query(model.WebSiteLib).filter(model.WebSiteLib.id == lib.id).update(
                    {model.WebSiteLib.status: '3'})
                model.DBSession.commit()
                continue
            h = bs(r.content, 'html.parser')

            w = web_content.WebContent(h, word_list)
            result = w.filter_context()  # extract page text and match words
            if not result:
                # No sensitive word matched: status 2 = done.
                model.DBSession.query(model.WebSiteLib).filter(model.WebSiteLib.id == lib.id).update(
                    {model.WebSiteLib.status: '2'})
                model.DBSession.commit()
                continue

            # Unpack the de-duplicated matched-word objects.
            cate_id_list = []
            after_word_list = []
            word_id_list = []
            right_word_list = []
            for obj in w.unrepeated_word:
                cate_id_list.append(obj.cid)
                after_word_list.append(obj.words)
                word_id_list.append([obj.id, obj.cid])
                right_word_list.append(obj.right_words)

            # Resolve category names for display ('' when none found).
            cate_name_list = model.DBSession.query(model.SensitiveWordCategory.cate_name).filter(
                model.SensitiveWordCategory.id.in_(cate_id_list)).all()
            if not cate_name_list:
                cate_name_string = ''
            else:
                cate_name_string = '|'.join(i[0] for i in cate_name_list)

            # Persist the violation record.
            data = model.VioContent(
                main=self.website_name,
                title=w.get_title,
                link=lib.website_link,
                category=cate_name_string,
                word='|'.join(after_word_list),
                right_word="|".join(right_word_list),
                content=w.context,
                snapshot=w.snapshot,
                website_lib_id=lib.id,
                website_id=self.id,
                user_id=self.user_id,
            )

            model.DBSession.add(data)
            model.DBSession.flush()  # populates data.id for the link table

            # vio_content <-> sensitive_words association rows. Bound
            # parameters replace the old str.format() SQL construction,
            # which was an injection-prone pattern.
            insert_sql = (
                "insert into fa_vio_content_sensitive_words "
                "(vio_content_id, sensitive_words_id, sensitive_words_category_id, "
                "website_id, create_time, user_id) "
                "values(:vio_id, :word_id, :cate_id, :site_id, :ctime, :uid)")
            now = int(time.time())
            for word_id, cate_id in word_id_list:
                model.DBSession.execute(insert_sql, {
                    'vio_id': data.id,
                    'word_id': word_id,
                    'cate_id': cate_id,
                    'site_id': self.id,
                    'ctime': now,
                    'uid': self.user_id,
                })
            model.DBSession.commit()

            # Status 2 = done, violation recorded.
            model.DBSession.query(model.WebSiteLib).filter(
                model.WebSiteLib.id == lib.id,
                model.WebSiteLib.user_id == self.user_id).update(
                {model.WebSiteLib.status: '2'})
            model.DBSession.commit()
            print('链接：{} 中的内容存在违规，已记录'.format(lib.website_link))
