# -*- coding:utf-8 -*-
import requests, re, json, os, time, datetime, random
from lxml import etree
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from hashlib import md5
from fdfs_client.client import Fdfs_client, get_tracker_conf
from spider import config

# Shared configuration: URL prefix for mirrored images and filesystem paths
# rooted at the application directory.
LOCAL_HOST_PREFIXES = config.LOCAL_HOST_PREFIXES
fdfs_upload_file = os.path.join(config.APP_ROOT, 'database', 'fdfs_upload_file')
client_config_path = os.path.join(config.APP_ROOT, 'database', 'client.conf')
imgs_path = os.path.join(config.APP_ROOT, 'imgs')

# Time anchors (unix timestamps, local time):
#   today -- 06:00 of the current day (midnight + 21600 s)
#   now   -- the current moment, truncated to whole seconds
_midnight = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
today = int(time.mktime(_midnight.timetuple())) + 21600
now = int(time.time())

# One session so every request reuses connections and cookies.
s = requests.Session()


class Waveshere(object):
    """Scraper for www.waveshare.net study articles.

    Walks a fixed set of category listing pages, fetches every article,
    mirrors its png/jpg images onto FastDFS, rewrites the image URLs inside
    the article body, then posts the article to the main site API and the
    matching thread payload to the BBS publish endpoint.
    """

    def __init__(self):
        self.page = 1        # crawl round counter, driven by run()
        self.list_img = []   # FastDFS URLs of the current article's images

    def get_url(self):
        """Collect article URLs from every category list page (每页的文章的URL)."""
        # catid values of the listing pages to crawl.
        lists_url = ['59', '29', '58', '57', '38', '33', '8', '30', '46', '42']
        for catid in lists_url:
            list_url = "https://www.waveshare.net/study/portal.php?mod=list&catid={}".format(catid)
            headers = {'User-Agent': (UserAgent()).random}
            response = s.get(list_url, headers=headers).text
            soup = etree.HTML(response)
            url_all = soup.xpath('//div[@class="bm_c xld"]//dt/a/@href')  # article urls, page 1
            print(url_all)
            self.get_packaging(url_all)
            # Pagination links, de-duplicated while keeping first-seen order.
            page_url_alls = soup.xpath('//div[@class="pg"]/a//@href')
            page_url_all = list(dict.fromkeys(page_url_alls))
            print(page_url_all)
            for page_url in page_url_all:
                response = s.get(page_url, headers=headers).text
                soup = etree.HTML(response)
                url_all = soup.xpath('//div[@class="bm_c xld"]//dt/a/@href')  # article urls
                print(url_all)
                self.get_packaging(url_all)

    def get_packaging(self, url_all):
        """Fetch each article's detail page and push it to both back-ends.

        The first six hrefs are skipped (presumably pinned/boilerplate
        entries -- TODO confirm against the live listing markup).
        """
        for href in url_all[6:]:
            url = "https://www.waveshare.net/study/" + href
            headers = {'User-Agent': (UserAgent()).random}
            print(url)
            response = s.get(url, headers=headers)
            soup = etree.HTML(response.text)
            bs4_soup = BeautifulSoup(response.text, "lxml")
            iid = md5(url.encode()).hexdigest()  # stable article id derived from its URL
            title = "".join(soup.xpath('//div[@class="h hm"]//h1[@class="ph"]/text()'))
            # Strip newlines, carriage returns, tabs and spaces from the title.
            title = re.sub(r'[\n\r\t ]', '', title)
            contents = bs4_soup.select('td[id="article_content"]')  # article body HTML
            imgs = soup.xpath('//td[@id="article_content"]//a//img/@src')
            # Mirror only png/jpg images; gif decorations are stripped below.
            list_imgs = [img for img in imgs if "png" in img or "jpg" in img]
            self.save_picture(list_imgs)
            list_contents = [str(content) for content in contents]
            print("3333", list_contents)
            content = "".join(list_contents)
            for (img, fdfs_img) in zip(list_imgs, self.list_img):
                # BUGFIX: the original used re.sub(img, ...) -- image URLs
                # contain regex metacharacters ('.', '?', ...), so a literal
                # str.replace is the safe equivalent.
                content = content.replace("\\", "/").replace(img, fdfs_img)
                # BUGFIX: escape the dot and forbid '"' inside the match; the
                # original greedy 'src=".*.gif"' could erase everything between
                # an unrelated src=" and the last .gif" on the page.
                content = re.sub(r'src="[^"]*\.gif"', "", content)
            print("$$$$", content)
            print(len(self.list_img), self.list_img)
            itemjson = {"category": "article", "name": title, "url": url, "abstract": "", "content": content,
                        "video_id": iid, "tag": "", "company": 454398, "typeid": ""}
            print("-----", itemjson)
            # BUGFIX: before 06:00 local time `now` can be <= `today`, which
            # makes randrange() raise ValueError; fall back to `now` itself.
            cur_time = random.randrange(today, now) if now > today else now
            bbs_post = {"special": 7, "author_id": 1, "title": title, "type": "thread", "url": url,
                        "cur_time": cur_time, "content": content, "id": iid, "group_num": 1088}
            print("+++++", bbs_post)
            self.save_database_post(itemjson)
            # BUGFIX: bbs_post was previously sent through save_database_post()
            # (the article endpoint); save_bbs_post existed but was never called.
            self.save_bbs_post(bbs_post)
            print("保存++++++++++++++++++++++")
            time.sleep(5)  # be polite to the origin server

    def save_picture(self, imgs):
        """Download each image, upload it to FastDFS, and record the rewritten
        URL in self.list_img (下载图片保存上传图片)."""
        self.list_img.clear()
        # Hoisted out of the loop: one FastDFS client per call, not per image.
        tracker_path = get_tracker_conf(client_config_path)  # absolute path
        client = Fdfs_client(tracker_path)
        for num, img in enumerate(imgs, start=1):
            try:
                full_url = "https://www.waveshare.net/study/" + img
                image = requests.get(full_url).content
            except Exception as e:
                print("数据错误", e)
                image = requests.get(img).content  # src may already be absolute
            # BUGFIX: save to the same path that gets uploaded (the original
            # wrote to ./imgs/ but uploaded from imgs_path, so a different cwd
            # broke the upload), and close the file before uploading so the
            # bytes are actually flushed to disk.
            local_path = os.path.join(imgs_path, str(num) + '.jpg')
            with open(local_path, 'wb') as fp:
                fp.write(image)
            ret_upload = client.upload_by_filename(local_path)
            remote_id = str(ret_upload['Remote file_id'], encoding="utf8")
            self.list_img.append(config.LOCAL_HOST_PREFIXES + remote_id)

    def save_database_post(self, itemjson):
        """POST an article payload to the main site's picker API."""
        url = config.main_interface_domain + '/d/article/apipicker'
        headers = {'User-Agent': (UserAgent()).random}
        resp = s.post(url, data=itemjson, headers=headers)
        print(resp.status_code)
        print("---------", resp.text)
        if resp.status_code != 200:
            print("数据传送失败", resp.text)

    def save_bbs_post(self, bbs_post):
        """POST a thread payload to the BBS publish endpoint (论坛)."""
        url = config.bbs_interface_domain + '/home.php?mod=misc&ac=ajax&op=publish'
        headers = {'User-Agent': (UserAgent()).random}
        resp = s.post(url, data=bbs_post, headers=headers)
        print(resp.status_code)
        print("---------", resp.text)
        if resp.status_code != 200:
            print("bbs_post传送失败", resp.text)

    def run(self):
        """Crawl all categories for up to 47 rounds, pausing between rounds."""
        while self.page <= 47:
            print("获取第{}页".format(self.page))
            self.get_url()
            self.page += 1
            time.sleep(2)


if __name__ == '__main__':
    # Script entry point: build the spider and start crawling.
    Waveshere().run()
