# -*- coding:utf-8 -*-
import requests, re, json, os, time, datetime, random
from lxml import etree
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from hashlib import md5
from fdfs_client.client import Fdfs_client, get_tracker_conf
from spider import config

# Module-level configuration and shared state for the Ofweek spider.
LOCAL_HOST_PREFIXES = config.LOCAL_HOST_PREFIXES

# Paths under the application root: FastDFS upload helper, client config,
# and the directory where downloaded article images are staged.
fdfs_upload_file = os.path.join(config.APP_ROOT, 'database', 'fdfs_upload_file')
client_config_path = os.path.join(config.APP_ROOT, 'database', 'client.conf')
imgs_path = os.path.join(config.APP_ROOT, 'imgs')

# 'today' = epoch seconds for 06:00 of the current day (midnight + 21600s);
# 'now' = current time truncated to whole seconds. Used to fake post times.
_day_str = datetime.datetime.now().strftime('%Y-%m-%d')
today = int(time.mktime(time.strptime(_day_str, '%Y-%m-%d'))) + 21600
_now_str = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
now = int(time.mktime(time.strptime(_now_str, '%Y-%m-%d %H:%M:%S')))

# One shared HTTP session so connections are reused across all requests.
s = requests.Session()


class Ofweek(object):
    """Spider for the AI category of ofweek.com.

    Walks list pages 1..15, fetches every article (following its "next
    page" links), mirrors the article's images onto FastDFS, rewrites the
    HTML to point at the mirrored copies, and posts the result to the
    site's article API.
    """

    def __init__(self):
        self.page = 1                  # current list-page number
        self.list_imgs_fast = []       # FastDFS URLs of uploaded images (per article)
        self.list_imgs_original = []   # original image URLs (per article)
        self.list_contents = []        # article-body HTML fragments, one per article page
        self.pages_all = 0
        self.proxy_url = "http://proxy.elecfans.net/proxys.php?key=nTAZhs5QxjCNwiZ6&num=90"
        ips = requests.get(self.proxy_url).text
        self.ips = json.loads(ips)['data']
        self.count = 100               # request budget before the proxy pool is refreshed

    def _refresh_proxies_if_needed(self):
        """Refetch the proxy pool once the request budget is exhausted."""
        if self.count <= 0:
            ips = requests.get(self.proxy_url).text
            self.ips = json.loads(ips)['data']
            # BUGFIX: reset the budget. The original never did, so once the
            # counter hit zero the pool was refetched on every single request.
            self.count = 100

    def get_url(self):
        """Fetch one list page and dispatch every article found on it."""
        url = "https://www.ofweek.com/ai/CATList-201700-8300-ai-{}.html".format(self.page)  # 接口
        headers = {'User-Agent': (UserAgent()).random}
        proxy = {'http': random.choice(self.ips)['ip']}
        self.count -= 1
        self._refresh_proxies_if_needed()
        response = s.get(url, proxies=proxy, headers=headers, timeout=10).text
        routelist = json.loads(response)["newsList"]
        for route in routelist:
            title = route.get('title')
            url = route.get('htmlpath')
            self.get_packaging(url, title)

    def get_packaging(self, url, title):
        """Fetch one article page; on the final page, upload images and post.

        Recurses through the article's "下一页" (next page) links,
        accumulating body HTML and image URLs in the instance lists, then
        performs the upload/rewrite/post step once on the last page.

        Args:
            url: absolute URL of the article page.
            title: article title taken from the list page.
        """
        headers = {'User-Agent': (UserAgent()).random}
        print(title)
        print(url)
        proxy = {'http': random.choice(self.ips)['ip']}
        self.count -= 1
        self._refresh_proxies_if_needed()
        # BUGFIX: the proxy was built (and the budget decremented) but never
        # actually passed to the request in the original code.
        response = s.get(url, proxies=proxy, headers=headers, timeout=10)
        time.sleep(0.5)  # be polite between page fetches
        soup = etree.HTML(response.text)
        bs4_soup = BeautifulSoup(response.text, "lxml")
        iid = md5(url.encode()).hexdigest()  # stable article id derived from the URL
        contents = bs4_soup.select('div[id="articleC"]')
        imgs = soup.xpath('//div[@id="articleC"]//img/@src')  # 图片
        # Relative href of the next article page, or "" on the last page.
        next_page = "".join(soup.xpath('//div[@class="page"]//a[text()="下一页>"]/@href'))  # 页数
        print("页数", type(next_page), next_page)
        for img in imgs:
            if "png" in img or "jpg" in img or "jpeg" in img:
                self.list_imgs_original.append(img)
        for fragment in contents:
            self.list_contents.append(str(fragment))
        print("3333", self.list_contents)
        content = "".join(self.list_contents)
        if next_page != "":
            print("第多少页数", next_page)
            url = "".join(re.findall(r"(.*)/.*\.html", url)) + "/" + next_page
            self.get_packaging(url, title)
        else:
            self.save_picture()
            content = content.replace("\\", "/")
            # BUGFIX: plain string replacement instead of re.sub — image URLs
            # contain regex metacharacters ('.', '?') that corrupted or broke
            # the substitution when used as a pattern.
            for original, mirrored in zip(self.list_imgs_original, self.list_imgs_fast):
                content = content.replace(original, mirrored)
            # BUGFIX: the cleanup substitutions below used greedy '.*' (which
            # could wipe everything between two attributes on one line) and
            # ran inside the image loop, so they were applied redundantly and
            # never ran at all for articles without images.
            content = re.sub(r'src="[^"]*\.gif"', "", content)
            content = re.sub(r'src="[^"]*\.webp"', "", content)
            content = re.sub(r'href="http[^"]*ai/"', "", content)
            content = re.sub(r'href="[^"]*\.html"', "", content)
            print("$$$$", content)
            print(len(self.list_imgs_fast), self.list_imgs_fast)
            itemjson = {"category": "article", "name": title, "url": url, "abstract": "", "content": content,
                        "video_id": iid, "tag": "", "company": 454398, "typeid": ""}
            print("-----", itemjson)
            # BUGFIX: randrange(today, now) raised ValueError before 06:00
            # local time, when 'now' has not yet passed 'today' (06:00 mark).
            cur_time = random.randrange(today, now) if now > today else now
            bbs_post = {"special": 7, "author_id": 1, "title": title, "type": "thread", "url": url,
                        "cur_time": cur_time, "content": content, "id": iid, "group_num": 1088}
            print("+++++", bbs_post)
            self.save_database_post(itemjson)
            # self.save_database_post(bbs_post)
            print("保存++++++++++++++++++++++")
            # BUGFIX: reset per-article state here; the original cleared these
            # only once per list page, so each article's body and images leaked
            # into every later article on the same page.
            self.list_contents.clear()
            self.list_imgs_original.clear()
            self.list_imgs_fast.clear()
            time.sleep(5)

    def save_picture(self):
        """Download every collected image and upload it to FastDFS.

        Mirrored URLs are appended to self.list_imgs_fast in the same order
        as self.list_imgs_original so the two lists can be zipped.
        """
        # Hoisted out of the loop: the original rebuilt the tracker config and
        # client for every single image.
        tracker_path = get_tracker_conf(client_config_path)  # 绝对路径
        client = Fdfs_client(tracker_path)
        for num, img in enumerate(self.list_imgs_original, start=1):
            image = requests.get(img, timeout=10).content
            # BUGFIX: write and upload the same absolute path. The original
            # wrote to cwd-relative './imgs/' but uploaded from imgs_path.
            local_path = os.path.join(imgs_path, str(num) + '.jpg')
            with open(local_path, 'wb') as fp:
                fp.write(image)
            # BUGFIX: upload after the 'with' block so the file is flushed and
            # closed; the original uploaded inside it, risking truncated data.
            ret_upload = client.upload_by_filename(local_path)
            mirrored = config.LOCAL_HOST_PREFIXES + str(ret_upload['Remote file_id'], encoding="utf8")
            self.list_imgs_fast.append(mirrored)

    def save_database_post(self, itemjson):
        """POST one article payload to the main site's article API."""
        url = config.main_interface_domain + '/d/article/apipicker'
        headers = {'User-Agent': (UserAgent()).random}
        resp = s.post(url, data=itemjson, headers=headers, timeout=10)
        print(resp.status_code)
        print("---------", resp.text)
        if resp.status_code != 200:
            print("数据传送失败", resp.text)

    def save_bbs_post(self, bbs_post):
        """问答 论坛 — POST one thread payload to the BBS publish endpoint."""
        url = config.bbs_interface_domain + '/home.php?mod=misc&ac=ajax&op=publish'
        headers = {'User-Agent': (UserAgent()).random}
        resp = s.post(url, data=bbs_post, headers=headers, timeout=10)
        print(resp.status_code)
        print("---------", resp.text)
        if resp.status_code != 200:
            print("bbs_post传送失败", resp.text)

    def run(self):
        """Crawl list pages 1 through 15, pausing briefly between pages."""
        while True:
            print("获取第{}页".format(self.page))
            self.get_url()
            self.page += 1
            self.list_imgs_fast.clear()
            self.list_imgs_original.clear()
            self.list_contents.clear()
            time.sleep(2)
            if self.page > 15:
                break


if __name__ == '__main__':
    # Entry point: crawl the first 15 list pages of the AI category.
    spider = Ofweek()
    spider.run()
