# -*- coding:UTF-8 -*-
import urllib
import codecs

from Tools.scripts.treesync import raw_input
from bs4 import BeautifulSoup
import urllib.request
import ssl
import re
import os
import jieba

ssl._create_default_https_context = ssl._create_unverified_context  # globally disable SSL certificate verification

# Fake a desktop-browser User-Agent so the target site does not reject the crawler
headers = {'User-Agent':
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
           }


# opener used by UrlAct.is_live for liveness probes (separate UA string)
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/49.0.2')]
# NOTE(review): url_name / message_name appear unused in this file — confirm before removing
url_name = int(1)
message_name = int(1)
origin_path = ""  # root directory for saved data; assigned in __main__
origin_url = ""  # seed URL of the crawl; assigned in __main__

# Web page URL handling
class UrlAct(object):
    """Fetch pages and extract, normalise, persist and probe the URLs they contain."""

    def __init__(self):
        # counters kept for parity with the original design; not read elsewhere in this file
        self.url_number = 1
        self.message_number = 1

    # Fetch the page source of *url*.
    def get_html(self, url):
        """Return the page decoded as GB2312 ('ignore' errors), or None on any failure.

        The original used a bare ``except:``; narrowed to ``except Exception`` so
        KeyboardInterrupt/SystemExit are no longer swallowed, while keeping the
        best-effort "return None on failure" contract callers rely on.
        """
        try:
            req = urllib.request.Request(url=url, headers=headers)
            # urlopen is a context manager, so the connection is always closed
            with urllib.request.urlopen(req) as resp:
                return resp.read().decode('GB2312', 'ignore')
        except Exception:
            return None

    # Extract candidate links from a page:
    #  - root-relative links ('/...') are joined onto origin_url (minus its last 2 chars)
    #  - '#' anchors, 'j...' (javascript:) links and links containing Chinese are skipped
    def get_url(self, page_code):
        """Return the list of link targets found in *page_code*."""
        url_lists = []
        util = Util()
        soup_tree = BeautifulSoup(page_code, 'lxml')
        for box in soup_tree.find_all('a'):
            url = str(box.get('href', ''))
            if url.startswith('/'):
                # NOTE(review): origin_url is truncated by 2 chars before joining —
                # presumably to drop a trailing "/" plus one more char; confirm input format
                url_lists.append(origin_url[0:len(origin_url) - 2] + url)
            elif url.startswith('#') or url.startswith('j'):
                pass  # in-page anchor or javascript: pseudo-link
            elif util.is_contain_chinese(url):
                print("中文" + url)
            else:
                url_lists.append(url)
        return url_lists

    def filtration_char(self, url):
        """Strip characters that are illegal/awkward in filenames from *url*.

        Net effect of the original chained .replace() calls: delete every
        ':', '/', '.', '?' and newline (the '//' pass was subsumed by '/').
        """
        return url.translate(str.maketrans('', '', ':/.?\n'))

    # Persist the discovered URLs.
    def save_page_url(self, url, url_lists):
        """Append every entry of *url_lists* to ``url_<url>.txt``, one per line."""
        with open('url_' + url + ".txt", 'a', encoding='utf-8') as fp:
            for box in url_lists:
                fp.write(box + '\n')

    # Print the links (debug aid).
    def print_urls(self, url_lists):
        """Print each URL and return the list unchanged."""
        for box in url_lists:
            print(box)
        return url_lists

    # A url_<...>.txt file on disk marks the page as already visited.
    def is_exist(self, url):
        """Return True if *url* was already crawled (its URL file exists)."""
        # use self instead of constructing a throwaway UrlAct as the original did
        return os.path.exists('url_' + self.filtration_char(url) + ".txt")

    # Is the link reachable?
    def is_live(self, url):
        """Return True if *url* answers; False on HTTP/URL/encoding errors."""
        try:
            opener.open(url)
            print(url + '没问题')
            return True
        except urllib.error.HTTPError:
            print(url + '=访问页面出错')
            return False
        except urllib.error.URLError:
            print(url + '=访问页面出错')
            return False
        except UnicodeEncodeError:
            print(url + '=编码出错')
            return False


# Web page content handling
class ContextAct(object):
    """Extract, classify and persist the Chinese text of a page."""

    # Keep only the Chinese characters of the page, one contiguous run per line.
    def translate(self, str):
        """Return the Chinese-only text of *str*, runs separated by newlines.

        Note: the parameter name shadows the builtin ``str``; kept to preserve
        the original signature for keyword callers.
        """
        # \u4e00-\u9fa5 is the CJK unified ideograph range
        non_chinese = re.compile(u'[^\u4e00-\u9fa5]')
        runs = " ".join(non_chinese.split(str)).strip()
        joined = ",".join(runs.split())
        return joined.replace(',', '\n')

    # Does the extracted Chinese text mention the key words?
    def judge_type(self, key_words, chinese_context):
        """Return True if *key_words* occurs in *chinese_context*.

        Bug fix: the original called ``chinese_context.find_all(...)``, which
        does not exist on str and always raised AttributeError.
        """
        return key_words in chinese_context

    # Persist the extracted text.
    def save_context(self, chinese_context, name_by_url):
        """Append *chinese_context* to ``chinese_context_<name_by_url>``."""
        with open("chinese_context_" + name_by_url, 'a', encoding='utf-8') as op:
            op.write(chinese_context)

    def jieba_deal(self, key_words):
        """Segment *key_words* into words with jieba; returns a generator of tokens."""
        return jieba.cut(key_words)


# File management
class FileManage(object):
    """Create directories and persist crawled URLs / page text to disk."""

    def __init__(self):
        pass

    # Create a directory tree.
    def mk_dir(self, path):
        """Create *path* (and parents). Return True if created, False if it existed.

        ``os`` comes from the module-level import; the original re-imported it here.
        """
        # normalise: strip surrounding whitespace and a trailing backslash
        path = path.strip().rstrip("\\")
        if os.path.exists(path):
            # already there — do not create, signal "existed"
            return False
        os.makedirs(path)
        return True

    def save_url_list(self, txt_name, url_list):
        """Append each URL in *url_list* to *txt_name*, one per line."""
        print(txt_name)
        with open(txt_name, 'a', encoding='utf-8') as fp:
            for box in url_list:
                fp.write(box + '\n')

    def save_page_context(self, txt_name, chinese_context):
        """Append *chinese_context* to *txt_name*."""
        with open(txt_name, 'a', encoding='utf-8') as op:
            op.write(chinese_context)

    def save_message(self, folder_path, url, context, url_list):
        """Save *url_list* and *context* under ``folder_path/<sanitised url>/``."""
        url_act = UrlAct()
        # file/directory name derived from the URL
        simple_url = url_act.filtration_char(url)
        # NOTE(review): drops the last character — presumably a trailing artefact; confirm
        simple_url = simple_url[0:len(simple_url) - 1]
        dir1 = os.path.join(folder_path, simple_url)
        self.mk_dir(dir1)
        self.save_url_list(os.path.join(dir1, 'url_' + simple_url + ".txt"), url_list)
        self.save_page_context(os.path.join(dir1, 'chinese_context_' + simple_url + ".txt"), context)


# Crawl strategies
class CrawlStrategy(object):
    """Drive the crawl: recursive ('interview') and breadth-first strategies."""

    # Matches absolute http/ftp/https URLs. Hoisted to a class attribute so it is
    # compiled once, and made a raw string to avoid invalid-escape warnings.
    _URL_PATTERN = re.compile(
        r'((http|ftp|https)://)(([a-zA-Z0-9\._-]+\.[a-zA-Z]{2,6})|([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}))(:[0-9]{1,4})*(/[a-zA-Z0-9\&%_\./-~-]*)?')

    def __init__(self):
        pass

    # Depth-first crawl — not implemented.
    def deep_crawl(self):
        pass

    # Recursively visit every well-formed, not-yet-visited URL.
    def interview(self, url_lists):
        """Depth-first recursive visit of *url_lists*; saves each page's links."""
        py = UrlAct()
        for url in url_lists:
            # skip malformed URLs and already-visited pages (guard clauses)
            if re.match(self._URL_PATTERN, url) is None:
                continue
            if py.is_exist(url):
                continue
            print("hello  " + url)
            page_code = py.get_html(url)
            if page_code is None:
                continue
            # fresh name instead of clobbering the iterated url_lists
            child_urls = py.get_url(page_code)
            py.save_page_url(py.filtration_char(url), child_urls)
            py.print_urls(child_urls)
            # Bug fix: the original called py.interview(...), but UrlAct has no
            # interview method (AttributeError); recurse on self instead.
            self.interview(child_urls)

    def breadthCrawlBody(self, url_lists):
        """Visit each URL of *url_lists* once, saving its links and Chinese text."""
        ua = UrlAct()
        for url in url_lists:
            if re.match(self._URL_PATTERN, url) is None:
                print(url + '链接无效')
            elif ua.is_live(url):
                if ua.is_exist(url):
                    print(url + '已访问')
                else:
                    url_act = UrlAct()
                    page_code = url_act.get_html(url)
                    if page_code is None:
                        # Bug fix: get_html returns None on failure; the original
                        # passed it straight to get_url and crashed.
                        continue
                    page_urls = url_act.get_url(page_code)
                    # extract the page's Chinese text
                    context_act = ContextAct()
                    context = context_act.translate(page_code)
                    # persist under the data root
                    file_manage = FileManage()
                    file_manage.save_message(origin_path, url, context, page_urls)
            else:
                print(url + "链接无效")

    # Breadth-first crawl starting from the seed *url*.
    def breadthCrawl(self, url):
        """Fetch the seed page, save it, then breadth-crawl its links."""
        url_act = UrlAct()
        page_code = url_act.get_html(url)
        if page_code is None:
            # Bug fix: original crashed in get_url when the seed fetch failed
            return
        url_lists = url_act.get_url(page_code)
        context_act = ContextAct()
        context = context_act.translate(page_code)
        file_manage = FileManage()
        file_manage.save_message(origin_path, url, context, url_lists)
        self.breadthCrawlBody(url_lists)


# Utilities
class Util(object):
    """Small helper routines shared by the crawler."""

    # Remove newline characters from both ends of the string.
    def vanishSFC(self, input):
        """Return *input* without leading/trailing newline characters."""
        cleaned = input
        while cleaned.startswith('\n'):
            cleaned = cleaned[1:]
        while cleaned.endswith('\n'):
            cleaned = cleaned[:-1]
        return cleaned

    # Does the string contain any Chinese character?
    def is_contain_chinese(self, check_str):
        """Return True when *check_str* contains at least one CJK ideograph."""
        return any(u'\u4e00' <= ch <= u'\u9fff' for ch in check_str)

if __name__ == "__main__":

    util = Util()
    # Bug fix: the original used raw_input imported from Tools.scripts.treesync,
    # which is not a public API and is absent from normal Python 3 installs;
    # the builtin input() does the same job here.
    origin_path = util.vanishSFC(input("信息保存路径(F:\python_project\practice1\data):"))
    origin_url = util.vanishSFC(input("origin url(http://www.cqtn.gov.cn/index/):"))  # seed URL
    file_manage = FileManage()
    file_manage.mk_dir(origin_path)

    # run the breadth-first crawl from the seed URL
    crawl_strategy = CrawlStrategy()
    crawl_strategy.breadthCrawl(origin_url)

# example seed: http://www.cqtn.gov.cn/index/
