# -*- coding:utf-8 -*-
import requests, re, json
from lxml import etree
from bs4 import BeautifulSoup
import time
import pymongo
from fake_useragent import UserAgent
from hashlib import md5
from fdfs_client.client import Fdfs_client, get_tracker_conf
from spider import config

s = requests.Session()


class Hownet(object):
    """Scraper for ednchina.com "technical-applications" tag pages.

    For each article it downloads the images, re-uploads them to FastDFS,
    splices the resulting <img> tags back into the article body, and POSTs
    the assembled record to the ingestion API (see save_database).
    """

    def __init__(self):
        # Current listing page, 1-based; run() advances it up to 47.
        self.page = 1
        # <img> tags for the article currently being processed; filled by
        # save_picture(), consumed by get_packaging().
        self.list_img = []

    def get_url(self):
        """Fetch one listing page and scrape every article linked from it."""
        url = "https://www.ednchina.com/tag/technical-applications/index_{}.html".format(self.page)
        headers = {'User-Agent': (UserAgent()).random}
        response = s.get(url, headers=headers).text
        soup = etree.HTML(response)
        url_all = soup.xpath('//div[@class="theword"]/h4/a/@href')  # all article links on the page
        print(url_all)
        self.get_details(url_all)

    def get_details(self, url_all):
        """Scrape each article URL from a listing page.

        The first listing page begins with 5 pinned/featured entries that
        are skipped; subsequent pages are scraped in full.
        """
        start = 5 if self.page == 1 else 0
        for url in url_all[start:]:
            self.get_packaging(url)

    def get_packaging(self, url):
        """Scrape a single article page and send the record to the API.

        Steps: fetch the page, extract title/body/images, upload the images
        (save_picture), replace the in-page image placeholder paragraphs
        with the uploaded <img> tags, then POST via save_database.
        """
        headers = {'User-Agent': (UserAgent()).random}
        print(url)
        list_contents = []
        response = s.get(url, headers=headers)
        response.encoding = response.apparent_encoding
        soup = etree.HTML(response.text)
        iid = md5(url.encode()).hexdigest()  # stable article id derived from the URL
        title = "".join(soup.xpath('//div[@class="cover-text"]/h1/text()'))
        title = (((title.replace("\n", "")).replace("\r", "")).replace("\t", "")).replace(" ", "")
        contents = soup.xpath('//div[@class="article_text"]//text()')  # article body text nodes
        imgs = soup.xpath('//div[@class="article_text"]//img//@src')  # image sources
        # Placeholder text marking where an image sat in the article; the
        # page markup varies, so try increasingly general XPaths in turn.
        span = soup.xpath('//div[@class="article_text"]//p//img/../span/text()')
        if not span:
            span = soup.xpath('//div[@class="article_text"]//p//img/../../span//text()')
            if not span:
                span = soup.xpath('//div[@class="article_text"]//span//text()')
        print(title)
        print(contents)
        print(imgs)
        print(type(span), span)
        self.save_picture(imgs)
        for content in contents:
            content = (((content.replace("\n", "")).replace(" ", "")).replace("\t", "")).replace("\r", "")
            if content != "":
                # Drop copyright / no-reprint boilerplate paragraphs.
                content = re.sub(r'.*版权所有，禁止转载.*', '', "<p>" + content + "</p>")
                list_contents.append(content)
        print("3333", list_contents)
        print(len(self.list_img), self.list_img)
        content = "".join(list_contents)
        # BUGFIX: the original indexed span[0] unconditionally and raised
        # IndexError on articles without a placeholder <span>; only splice
        # images in when a placeholder was actually found.
        if span:
            placeholder = '<p>' + span[0] + '</p>'
            # The placeholder paragraph appears doubled where each image
            # belongs; replace one doubled occurrence per uploaded image.
            # BUGFIX: re.escape — placeholder text from the page may contain
            # regex metacharacters, which the original passed through raw.
            pattern = re.escape(placeholder + placeholder)
            for img_tag in self.list_img:
                content = re.sub(pattern, img_tag, content, count=1)
            # Strip any leftover single placeholder paragraphs (literal match).
            content = content.replace(placeholder, '')
        itemjson = {}
        itemjson.update({"category": "article", "name": title, "url": url, "abstract": "", "content": content,
                         "video_id": iid, "tag": "", "company": 454398, "typeid": ""})
        print("-----", itemjson)
        self.save_database(itemjson)
        print("保存++++++++++++++++++++++")
        time.sleep(5)  # throttle: be polite to the target site

    def save_picture(self, imgs):
        """Download each article image, upload it to FastDFS, and collect
        the resulting <img> tags into self.list_img (cleared first)."""
        self.list_img.clear()
        # BUGFIX: build the FastDFS client once, not once per image.
        tracker_path = get_tracker_conf(r'E:\PycharmProjects\Job_code\spider\database\client.conf')  # absolute path
        client = Fdfs_client(tracker_path)
        for num, src in enumerate(imgs, start=1):
            img = "https:" + src  # page uses protocol-relative URLs
            image = requests.get(img).content
            print(str(num) + '.jpg 正在保存...')
            # BUGFIX: the original uploaded while this handle was still open,
            # so buffered bytes could be missing from the uploaded file; the
            # file is now closed (flushed) before FastDFS reads it.
            with open('./imgs/' + str(num) + '.jpg', 'wb') as fp:
                fp.write(image)
            ret_upload = client.upload_by_filename(r'E:\PycharmProjects\Job_code\spider\imgs/' + str(num) + '.jpg')
            img_tag = ("<img " + "src=" + '"' + config.LOCAL_HOST_PREFIXES
                       + str(ret_upload['Remote file_id'], encoding="utf8") + '"' + ">")
            self.list_img.append(img_tag)
            # Download the file back, then delete it from FastDFS — kept from
            # the original flow. NOTE(review): deleting the remote file looks
            # like it would break the <img> src just built above — confirm
            # whether this delete is intentional.
            ret_download = client.download_to_file(ret_upload['Local file name'], ret_upload['Remote file_id'])
            # note: file_id is of type bytes
            file_id = ret_upload['Remote file_id']
            ret_delete = client.delete_file(file_id)

    def save_database(self, itemjson):
        """POST one assembled article record to the ingestion API.

        Failures are reported on stdout only; no exception is raised.
        """
        url = config.main_interface_domain + '/d/article/apipicker'
        headers = {'User-Agent': (UserAgent()).random}
        resp = s.post(url, data=itemjson, headers=headers)
        print(resp.status_code)
        print("---------", resp.text)
        if resp.status_code != 200:
            print("数据传送失败", resp.text)

    def run(self):
        """Crawl listing pages 1 through 47, one page per iteration."""
        while True:
            print("获取第{}页".format(self.page))
            self.get_url()
            self.page += 1
            time.sleep(2)  # brief pause between listing pages
            if self.page > 47:
                break


if __name__ == '__main__':
    # Script entry point: crawl every listing page of the tag.
    spider = Hownet()
    spider.run()
