import requests, time, os
from lxml import etree
from xx_spider.parser import Parser
from xx_spider.logger import Logger
from xx_spider.database import Mysql
from xx_spider.util import Washer, FileUtil


class LittleSpider:
    """Small page spider: fetches pages (HTTP or local file), extracts fields
    via regex/xpath attributes, downloads referenced images, and rewrites
    image URLs in content to local paths.

    NOTE(review): Parser/Washer/Logger/Mysql/FileUtil are project types whose
    exact contracts are assumed from usage here — verify against their modules.
    """

    # Class-level helpers shared by all instances.
    parser = Parser()
    washer = Washer()
    img_dir = "d:/image"   # downloaded images land here
    log_dir = "d:/log"     # log files land here
    timeout = 10           # seconds per HTTP request; avoids hanging forever

    def __init__(self, db=None, debug=False):
        """Create a spider.

        db: database handle; defaults to a fresh Mysql() connection.
        debug: when True, crawl() fetches over HTTP and crawl_img() skips
               the actual download (see those methods).
        """
        self.db = db if db is not None else Mysql()
        self.debug = debug
        self.logger = Logger(self.__class__.__name__, log_dir=self.log_dir)
        # exist_ok=True replaces the racy isdir-then-makedirs pattern.
        os.makedirs(self.img_dir, exist_ok=True)
        os.makedirs(self.log_dir, exist_ok=True)

    # Extract first via regex, falling back to xpath if no regex is defined.
    def extract(self, data, param, mode="fuse"):
        """Extract *param* from *data*.

        Tries the attribute "<param>_regex" first, then "<param>_path"
        (xpath).  Raises if neither attribute exists on the spider.
        """
        if hasattr(self, param + "_regex"):
            return self.parser.parse_by_regex(data, param + "_regex", mode)
        # BUG FIX: the original checked `param + "path"` (missing underscore),
        # so the xpath branch tested the wrong attribute name and the
        # parse_by_xpath fallback could never be reached correctly.
        if hasattr(self, param + "_path"):
            return self.parser.parse_by_xpath(data, param + "_path", mode)
        raise Exception("找不到{0}路径".format(param))

    # Populate a model's unset fields directly from the page.
    def extract_model(self, model, page):
        """Fill every still-None field of *model* from *page*; return model."""
        for field in model.list_field:
            if model.get(field) is None:
                model.set(field, self.extract(page, field))
        return model

    # Fetch page content.
    def crawl(self, path, text=False):
        """Fetch *path*: over HTTP when debug is True, else as a local
        UTF-8 file.  Returns raw text when text=True, otherwise an lxml
        element tree.
        """
        if self.debug:
            content = requests.get(path, timeout=self.timeout).text
        else:
            # "utf-8" spelled out (original used the "u8" codec alias).
            with open(path, encoding="utf-8") as f:
                content = f.read()
        return content if text else etree.HTML(content)

    # Fetch an image.
    def crawl_img(self, url):
        """Download *url* into img_dir (skipped in debug mode) and return
        the site-relative "/images/..." path for it.
        """
        path = FileUtil.get_path(self.img_dir, url.split(".")[-1])
        if not self.debug:
            # stream=True + iter_content avoids buffering the whole image
            # before writing (the original iterated the response in tiny
            # 128-byte chunks after a full download).
            r = requests.get(url, stream=True, timeout=self.timeout)
            if r.status_code == 200:
                with open(path, "wb") as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
        return "/images" + path.split("images")[-1]

    # Rewrite image URLs in content to local paths.
    def handle_img(self, content):
        """Download every <img> referenced by *content*, rewriting each URL
        in place to its local path; returns the updated content.
        """
        for url in self.get_img_urls(content):
            try:
                path = self.crawl_img(url)
            except Exception:
                # Log the offending page, then re-raise with the original
                # traceback intact (bare `raise` instead of `raise e`).
                self.logger.info(content)
                raise
            content = content.replace(url, path)
            time.sleep(2)  # be polite to the image host
        return content

    # Collect image source URLs.
    def get_img_urls(self, content):
        """Return the non-empty src URLs of all <img> tags in *content*."""
        ret_urls = []
        if content is not None:
            imgs = self.parser.parse_by_regex(content, '<img.*?>', mode="list")
            for img in imgs:
                for token in img.split():
                    if "src" in token:
                        # token looks like src="..." — take the value and
                        # strip the surrounding quote characters.
                        url = token.split('=')[1][1:-1]
                        if url.strip():
                            ret_urls.append(url)
        return ret_urls
