import requests, time, os
from lxml import etree
from re import findall, match
from voice.model.article import Article
from voice.database import Db
from xx_spider.logger import Logger
from xx_spider.parser import Parser
from xx_spider.util import FileUtil, Washer


class BaseSpider:
    """Base class for site-specific article spiders.

    Subclasses customize behaviour through class attributes: ``domain``,
    ``datas_path`` (xpath selecting list items), ``content_path`` or
    ``content_regex`` (article body extraction), and optionally
    ``summary_path``, ``creator_path``, ``createdate_regex``, plus the
    ``invalid_*`` filter lists below.
    """

    # Default xpaths, evaluated relative to each list-item node.
    url_path = "@href"
    title_path = "text()"
    # Origin metadata; ``source``/``category`` are set via set_origin().
    source, category, domain = "", "", ""
    parser = Parser()
    logger = Logger("voice")
    # Articles whose title contains any of these keywords are skipped.
    invalid_title_key_list = []
    # A content page matching any of these xpaths is considered invalid.
    invalid_content_xpath_list = []
    # Pretend to be a desktop Chrome browser; some sites block default UAs.
    request_headers = {'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36"}
    # Network timeout in seconds; without it requests.get() can hang forever.
    request_timeout = 30

    def __init__(self, img_dir, debug):
        # img_dir: root directory where downloaded images are stored.
        # debug: when True, read local fixture files instead of the network.
        self.img_dir = img_dir
        self.debug = debug
        self.db = Db()

    def set_origin(self, source, category):
        """Record which site (source) and section (category) we crawl next."""
        self.source = source
        self.category = category

    def crawl(self, path, text=False):
        """Fetch ``path`` and return the page.

        In debug mode ``path`` is a local file; otherwise it is a URL
        (relative paths are joined onto ``self.domain``). Returns the raw
        text when ``text`` is True, an lxml element tree otherwise, and
        None if the network request fails.
        """
        if self.debug:
            with open(path, encoding="u8") as f:
                content = f.read()
        else:
            path = path if "http" in path else FileUtil.fuse_path([self.domain, path])
            encoding = "utf-8"
            try:
                response = requests.get(path, headers=self.request_headers,
                                        timeout=self.request_timeout)
                # Honor the charset declared inside the page itself, if any.
                charset = findall(r"charset=(\w+)\"", response.text)
                if charset:
                    encoding = charset[0]
                    # gb2312 is a subset of gbk; decoding as gbk is safer.
                    encoding = "gbk" if encoding.lower() == "gb2312" else encoding
                response.encoding = encoding
                content = response.text
                self.logger.debug("page path: " + path)
                self.logger.debug("page encoding: " + encoding)
            except Exception as e:
                self.logger.error(e)
                return None
        if text:
            return content
        return etree.HTML(content)

    def parse_datas(self, page):
        """Return the list-item nodes selected by the subclass ``datas_path``."""
        if page is None:
            return []
        return page.xpath(self.datas_path)

    def parse_param(self, data, path):
        """Evaluate xpath ``path`` on ``data``; first hit or '' if none."""
        params = data.xpath(path)
        return params[0] if len(params) > 0 else ""

    def parse_self_param(self, data, attr, not_null=False):
        """Evaluate the xpath stored in class attribute ``attr``, if defined.

        Returns None when the attribute is absent, unless ``not_null`` is
        set, in which case an Exception is raised.
        """
        if hasattr(self, attr):
            return self.parse_param(data, getattr(self, attr))
        if not_null:
            raise Exception("can't find attr: " + attr)
        return None

    def parse_title(self, data):
        """Extract and wash the article title from a list item."""
        title = data.xpath(self.title_path)[0]
        return self.wash(title)

    def parse_summary(self, data):
        """Extract the summary if the subclass defines ``summary_path``."""
        if hasattr(self, "summary_path"):
            return self.parse_param(data, self.summary_path)
        return None

    def get_createdate(self, data):
        # NOTE(review): attribute name is "createdate" here but "creator_path"
        # in parse_creator — presumably subclasses define it without a
        # "_path" suffix; confirm against subclasses before renaming.
        return self.parse_self_param(data, "createdate")

    def parse_createdate(self, data):
        """Return the article creation timestamp.

        If no date can be found, today at 08:00 is used; a bare
        YYYY-MM-DD date is normalized by appending " 08:00:00".
        """
        createdate = self.get_createdate(data)
        if createdate is None:
            createdate = time.strftime("%Y-%m-%d", time.localtime()) + " 08:00:00"
        elif match(r"^\d+-\d+-\d+$", createdate.strip()):
            createdate += " 08:00:00"
        return createdate

    def parse_creator(self, data):
        """Extract the author if the subclass defines ``creator_path``."""
        return self.parse_self_param(data, "creator_path")

    def parse_url(self, data):
        """Extract the article link from a list item."""
        return self.parse_param(data, self.url_path)

    def get_content(self, page):
        """Extract the article body via ``content_regex`` or ``content_path``.

        Raises when the subclass defines neither.
        """
        if page is None:
            return None
        if hasattr(self, "content_regex"):
            return self.parser.parse_by_regex(page, getattr(self, "content_regex"))
        if hasattr(self, "content_path"):
            return self.parser.parse_by_xpath(page, getattr(self, "content_path"))
        raise Exception("can't find attr content_regex or content_path!")

    def wash(self, content):
        """Replace double quotes so values embed safely in SQL/HTML attrs."""
        if content is None:
            return None
        return content.replace('"', "'")

    def parse_content(self, path, handle_img=True):
        """Crawl one article page and return (cleaned content, raw page text)."""
        # In debug mode the list fixture maps to a content fixture.
        path = path.replace("list", "content") if "list.txt" in path else path
        content_page = self.crawl(path, text=True)
        self.logger.debug("抓取成功...")
        # Extract the article body.
        content = self.get_content(content_page)
        self.logger.debug("抽取成功...")
        # Clean the article body (one Washer instance is enough).
        washer = Washer()
        content = washer.clean_messy_code(content)
        content = washer.clean_js_tag(content, "script", "style")
        self.logger.debug("清洗成功...")
        # Localize embedded images.
        if handle_img:
            content = self.handle_img(content)
            self.logger.debug("图片处理成功...")
        return content, content_page

    def crawl_img(self, url):
        """Download one image; return its site-relative /images/... path."""
        path = self.get_path(url)
        if not self.debug:
            r = requests.get(url, timeout=self.request_timeout)
            if r.status_code == 200:
                with open(path, "wb") as f:
                    # Stream in sizeable chunks instead of the 128-byte
                    # default of iterating the Response directly.
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
        return "/images" + path.split("images")[-1]

    def get_path(self, url):
        """Build a unique local file path for the image at ``url``.

        Files are named by the current unix timestamp, incremented on
        collision; the extension is taken from the URL (falling back to
        jpg when it does not look like a real extension).
        """
        file_name = int(time.time())
        img_dir = self.img_dir + self.source + "/" + self.category
        if not os.path.isdir(img_dir):
            os.makedirs(img_dir)
        # The suffix does not change across collisions; compute it once.
        suffix = url.split(".")[-1].split("?")[0]
        if len(suffix) > 4:
            suffix = "jpg"
        while True:
            path = img_dir + "/" + str(file_name) + "." + suffix
            if not os.path.exists(path):
                return path
            file_name += 1

    def get_img_urls(self, content):
        """Collect absolute image URLs from <img> tags in raw HTML text."""
        ret_urls = set()
        if content is not None:
            imgs = self.parser.parse_by_regex(content, '<img.*?>', mode="list")
            for img in imgs:
                for urls in img.split():
                    if "src" in urls:
                        try:
                            # src="..."/src='...' -> strip the quotes.
                            url = urls.split('=')[1][1:-1]
                        except Exception:
                            continue
                        if len(url.strip()) > 0:
                            if not url.startswith("http"):
                                url = FileUtil.fuse_path([self.domain, url])
                            ret_urls.add(url)
        return ret_urls

    def handle_img(self, content):
        """Download every embedded image and rewrite its URL to a local path.

        Failures are logged and the original URL is left in place.
        """
        if content is None:
            return None
        for url in self.get_img_urls(content):
            try:
                local_path = self.crawl_img(url)
                content = content.replace(url, local_path)
                time.sleep(2)  # be polite to the image host
            except Exception as e:
                self.logger.error("Exception: " + str(e))
                self.logger.error("handle img error: " + url)
        return content

    def is_content_valid(self, content_page):
        """A page matching any invalid-content xpath is rejected."""
        for xpath in self.invalid_content_xpath_list:
            if self.parser.parse_by_xpath(content_page, xpath) is not None:
                return False
        return True

    def crawl_content(self, path, handle_img=True):
        """Fill in the body for every URL crawled since yesterday."""
        cnt = 0
        yesterday = time.strftime("%Y-%m-%d", time.localtime(time.time() - 3600 * 24))
        for uid, url in self.db.list_uncrawled_url(self.source, self.category, yesterday):
            self.logger.debug("url_id: {0}".format(uid))
            content, content_page = self.parse_content(path if self.debug else url, handle_img)
            if not self.is_content_valid(content_page):
                # Delete the DB record for this URL (was previously passed
                # the local debug ``path``, deleting the wrong record).
                self.db.del_by_url(url)
                continue
            try:
                # Only keep pages that contain at least one CJK character.
                if content is not None and findall(".*[\u4e00-\u9fa5].*", content):
                    cnt += self.db.update_field_by_url("content", content, url)
            except Exception as e:
                self.logger.error(e)
                self.logger.debug("Wrong Page: " + content)
                continue
            self.after_parse(content_page, url)
            if cnt > 0:
                self.logger.info("content of article {0} crawled...".format(cnt))
            if self.debug:
                break
            time.sleep(5)  # throttle between article requests
        self.logger.info("{0} articles saved!".format(cnt))

    def parse_createdate_from_content(self, content):
        """Derive a creation timestamp from the article page itself.

        Uses the subclass ``createdate_regex`` when available: 3 number
        groups give a date, 4-6 give date + time; anything else raises.
        Falls back to the current timestamp.
        """
        if hasattr(self, "createdate_regex"):
            createdate = self.parser.parse_by_regex(content, getattr(self, "createdate_regex"), mode="get")
            if createdate is not None:
                nums = findall(r"\d+", createdate)
                if len(nums) == 3:
                    return "-".join(nums)
                elif 3 < len(nums) < 7:
                    return "-".join(nums[:3]) + " " + ":".join(nums[3:])
                else:
                    # Single formatted message (was a 2-arg Exception tuple).
                    raise Exception("nums length is: {0}".format(len(nums)))
        return time.strftime("%Y-%m-%d %H:%M:%S")

    def after_parse(self, content_page, url):
        """Refine the stored createdate using the article page content."""
        createdate = self.parse_createdate_from_content(content_page)
        if createdate is not None:
            self.db.update_field_by_url("createdate", createdate, url)

    def parse_article(self, data):
        """Build an Article record from one list-item node."""
        title = self.parse_title(data)
        summary = self.parse_summary(data)
        create_date = self.parse_createdate(data)
        creator = self.parse_creator(data)
        # Fall back to the source name when no author is available.
        creator = self.source if (creator is None or creator.strip() == "") else creator
        url = FileUtil.fuse_path([self.domain, self.parse_url(data)])
        article = Article([title, "", create_date, creator, summary, url, self.source, self.category])
        return article

    def is_article_valid(self, article):
        """An article whose title contains a blacklisted key is rejected."""
        for key in self.invalid_title_key_list:
            if key in article.get("title"):
                return False
        return True

    def crawl_list(self, path):
        """Crawl one list page and save every valid article stub."""
        page = self.crawl(path)
        self.logger.info("crawling list " + path + "...")
        cnt = 0
        for data in self.parse_datas(page):
            article = self.parse_article(data)
            if not self.is_article_valid(article):
                continue
            self.logger.debug(article.to_string())
            try:
                cnt += self.db.save_article(article)
            except Exception as e:
                self.logger.error("Exception: " + str(e))
                self.logger.error("article saved error: \n" + article.to_string())
        self.logger.info("{0} articles added...".format(cnt))

    def start(self, path):
        """Entry point: crawl the list page, then fill in article contents."""
        self.crawl_list(path)
        self.crawl_content(path)
        self.logger.info("-" * 30 + self.source + ": " + self.category + "  finished!" + "-" * 30 + "\n\n")


if __name__ == "__main__":
    # Fixed format string: "%Y-%d-%M" was year-day-MINUTE; "%Y-%m-%d"
    # matches the date format used throughout BaseSpider.
    print(time.strftime("%Y-%m-%d"))
