# -*- coding:utf-8 -*-
import requests, re, json, os, time, datetime, random
from lxml import etree
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from hashlib import md5
from fdfs_client.client import Fdfs_client, get_tracker_conf
from spider import config

# URL prefix prepended to FastDFS remote file ids (used in save_picture).
LOCAL_HOST_PREFIXES = config.LOCAL_HOST_PREFIXES
# Absolute paths under the app root: FastDFS upload helper file, the FastDFS
# client configuration, and the local image staging directory.
fdfs_upload_file = os.path.join(config.APP_ROOT, 'database', 'fdfs_upload_file')
client_config_path = os.path.join(config.APP_ROOT, 'database', 'client.conf')
imgs_path = os.path.join(config.APP_ROOT, 'imgs')

# `today`: epoch seconds of today's midnight plus 21600 s, i.e. 06:00 local —
# presumably the earliest allowed fake publish time.
# NOTE(review): when the script runs before 06:00, `today` > `now`, and the
# random.randrange(today, now) call in get_packaging would raise ValueError.
today = datetime.datetime.now().strftime('%Y-%m-%d')
today = int(time.mktime(time.strptime(today, '%Y-%m-%d'))) + 21600
# `now`: current wall-clock time as integer epoch seconds.
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
now = int(time.mktime(time.strptime(now, '%Y-%m-%d %H:%M:%S')))

# Shared HTTP session so all requests reuse connections and cookies.
s = requests.Session()


class Cnblogs(object):
    """Scrape articles from the cnblogs.com AI category.

    For every listing page: collect article URLs, download each article,
    mirror its jpg/png images to FastDFS, rewrite the image sources in the
    HTML body, and POST the result to the project's content API.
    """

    def __init__(self):
        self.page = 1        # current listing page, 1-based
        self.list_img = []   # FastDFS URLs for the article currently processed
        self.pages_all = 0   # total page count parsed from the pager (int)

    def get_url(self):
        """Collect the article URLs of the current listing page."""
        # NOTE(review): "#p{}" is a URL fragment and is never sent to the
        # server, so every request likely returns page 1 — confirm whether
        # cnblogs pagination actually needs a query/POST parameter here.
        url = "https://www.cnblogs.com/cate/ai/#p{}".format(self.page)
        headers = {'User-Agent': (UserAgent()).random}
        response = s.get(url, headers=headers).text
        print(response)
        soup = etree.HTML(response)
        url_all = soup.xpath('//div[@id="post_list"]//div/a/@href')  # article URLs
        # BUG FIX: the xpath join yields a *string*, but run() compares it
        # against the int page counter (TypeError on Python 3). Parse it to
        # int; fall back to 0 (stop after this page) when the pager is absent.
        pages_text = "".join(soup.xpath('//div[@id="pager_top"]//div/a[last()-1]/text()'))
        self.pages_all = int(pages_text) if pages_text.isdigit() else 0
        print(url_all)
        print(self.pages_all)
        self.get_packaging(url_all)

    def get_packaging(self, url_all):
        """Fetch each article, rewrite its images, and save it.

        :param url_all: list of article URLs; the first entry is skipped
                        (it is not an article link in the listing markup).
        """
        headers = {'User-Agent': (UserAgent()).random}
        for url in url_all[1:]:
            print(url)
            response = s.get(url, headers=headers)
            soup = etree.HTML(response.text)
            bs4_soup = BeautifulSoup(response.text, "lxml")
            iid = md5(url.encode()).hexdigest()  # stable article id: MD5 of the URL
            title = "".join(soup.xpath('//h1[@class="postTitle"]//a/span/text()'))
            if title == "":
                # Fallback selector for the alternative post-detail template.
                title = "".join(soup.xpath('//div[@id="post_detail"]//a/span/text()'))
            # Strip all whitespace characters from the title.
            for ch in ("\n", "\r", "\t", " "):
                title = title.replace(ch, "")
            contents = bs4_soup.select('div[id="cnblogs_post_body"]')
            imgs = soup.xpath('//div[@id="cnblogs_post_body"]//img/@src')  # image sources
            # Mirror only still-image formats FastDFS should host.
            list_imgs = [img for img in imgs
                         if "png" in img or "jpg" in img or "jpeg" in img]
            self.save_picture(list_imgs)
            list_contents = [str(content) for content in contents]
            print("3333", list_contents)
            content = "".join(list_contents)
            content = content.replace("\\", "/")
            # BUG FIX: the original used re.sub(img, ...) with the raw URL as
            # the pattern; regex metacharacters in URLs ('.', '?', '+') make
            # that match the wrong spans. A literal replace is what was meant.
            for src, fdfs_url in zip(list_imgs, self.list_img):
                content = content.replace(src, fdfs_url)
            # Drop gif/webp images we did not mirror. BUG FIX: the original
            # greedy 'src=".*.gif"' (unescaped dot) could wipe everything
            # between the first 'src="' and the last matching quote.
            content = re.sub(r'src="[^"]*\.gif"', "", content)
            content = re.sub(r'src="[^"]*\.webp"', "", content)
            print("$$$$", content)
            print(len(self.list_img), self.list_img)
            itemjson = {"category": "article", "name": title, "url": url, "abstract": "", "content": content,
                        "video_id": iid, "tag": "", "company": 454398, "typeid": ""}
            print("-----", itemjson)
            # BUG FIX: `today` is 06:00 of the current day; before 06:00
            # `now < today` and randrange(today, now) raises ValueError.
            cur_time = random.randrange(today, now) if now > today else now
            bbs_post = {"special": 7, "author_id": 1, "title": title, "type": "thread", "url": url,
                        "cur_time": cur_time, "content": content, "id": iid, "group_num": 1088}
            print("+++++", bbs_post)

            self.save_database_post(itemjson)
            # NOTE(review): this disabled call posts bbs_post to the article
            # endpoint; it probably should be save_bbs_post(bbs_post) — confirm.
            # self.save_database_post(bbs_post)
            print("保存++++++++++++++++++++++")
            time.sleep(5)

    def save_picture(self, imgs):
        """Download each image, upload it to FastDFS, record the new URLs.

        Fills self.list_img with one FastDFS URL per entry of `imgs`,
        in the same order.
        """
        self.list_img.clear()
        if not imgs:
            return
        # Hoisted out of the loop: one FastDFS client serves every upload
        # (the original rebuilt the client per image).
        tracker_path = get_tracker_conf(client_config_path)  # absolute path
        client = Fdfs_client(tracker_path)
        for num, src in enumerate(imgs, start=1):
            image = requests.get(src).content
            # BUG FIX: write and upload now use the same absolute path; the
            # original wrote to CWD-relative './imgs/' but uploaded from
            # imgs_path, which broke whenever CWD != APP_ROOT.
            local_path = os.path.join(imgs_path, str(num) + '.jpg')
            with open(local_path, 'wb') as fp:
                fp.write(image)
            ret_upload = client.upload_by_filename(local_path)
            remote = config.LOCAL_HOST_PREFIXES + str(ret_upload['Remote file_id'], encoding="utf8")
            self.list_img.append(remote)

    def save_database_post(self, itemjson):
        """POST one article payload to the main content API."""
        url = config.main_interface_domain + '/d/article/apipicker'
        headers = {'User-Agent': (UserAgent()).random}
        resp = s.post(url, data=itemjson, headers=headers)
        print(resp.status_code)
        print("---------", resp.text)
        if resp.status_code != 200:
            print("数据传送失败", resp.text)

    def save_bbs_post(self, bbs_post):
        """POST one thread payload to the BBS publish endpoint."""
        url = config.bbs_interface_domain + '/home.php?mod=misc&ac=ajax&op=publish'
        headers = {'User-Agent': (UserAgent()).random}
        resp = s.post(url, data=bbs_post, headers=headers)
        print(resp.status_code)
        print("---------", resp.text)
        if resp.status_code != 200:
            print("bbs_post传送失败", resp.text)

    def run(self):
        """Walk listing pages until the parsed page count is exhausted."""
        while True:
            print("获取第{}页".format(self.page))
            self.get_url()
            self.page += 1
            time.sleep(2)
            # pages_all is an int (see get_url), so this comparison is valid.
            if self.page > self.pages_all:
                break

if __name__ == '__main__':
    # Script entry point: crawl every listing page of the AI category.
    spider = Cnblogs()
    spider.run()
