import os
import time
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup

"""
猪八戒网分析：

目标：爬取猪八戒动漫影音中四大板块，动画设计，漫画设计，影视制作，图像摄影。获取其页面下所有公司制作说明，
目的将这些说明全部汇集在一起打乱随机排序，来进行模拟聚类分析。


各板块页面分析：
    首先切换到全国
    1.动画设计
        有67页
        第一页：https://www.zbj.com/dhsjs/f.html?fr=zbj.sy.zyyw_1st.lv2
        第二页：https://www.zbj.com/dhsjs/fk70.html?fr=zbj.sy.zyyw_1st.lv2
        第三页：https://www.zbj.com/dhsjs/fk140.html?fr=zbj.sy.zyyw_1st.lv2
        第四页：https://www.zbj.com/dhsjs/fk210.html?fr=zbj.sy.zyyw_1st.lv2
        ...
        所以是 https://www.zbj.com/dhsjs/fk{i*70}.html?fr=zbj.sy.zyyw_1st.lv2 的格式，第一页去掉k{i*70}
        
    2.动漫设计
        共82页
        第一页：https://www.zbj.com/dhmh/f.html?fr=zbj.sy.zyyw_1st.lv2
        第二页：https://www.zbj.com/dhmh/fk70.html?fr=zbj.sy.zyyw_1st.lv2
        第三页：https://www.zbj.com/dhmh/fk140.html?fr=zbj.sy.zyyw_1st.lv2
        ...
        
    3.影视制作
        共100页
        第一页：https://www.zbj.com/video/f.html?fr=zbj.sy.zyyw_1st.lv2
        第二页：https://www.zbj.com/video/fk70.html?fr=zbj.sy.zyyw_1st.lv2
        第三页：https://www.zbj.com/video/fk140.html?fr=zbj.sy.zyyw_1st.lv2
        ...
    
    4.图像摄影
        共92页
        第一页：https://www.zbj.com/search/f/?kw=%E5%9B%BE%E5%83%8F%E6%91%84%E5%BD%B1&fr=zbj.sy.zyyw_1st.lv2
        第二页：https://www.zbj.com/search/f/k70.html?kw=%E5%9B%BE%E5%83%8F%E6%91%84%E5%BD%B1&fr=zbj.sy.zyyw_1st.lv2
        第三页：https://www.zbj.com/search/f/k140.html?kw=%E5%9B%BE%E5%83%8F%E6%91%84%E5%BD%B1&fr=zbj.sy.zyyw_1st.lv2
        ...
    
    
要爬取的标签

    标签 a,class="desc"中的内容，该内容里有可能带<h1></h1>,使用get_text()只获取文本




    
        

"""


class BaJie(object):
    """Scraper for zbj.com (猪八戒网) service-description listings.

    Collects the text of every ``<a class="desc">`` element across the
    paginated listing pages of four categories — animation design (动画设计),
    comic design (漫画设计), video production (影视制作) and photography
    (图像摄影) — and saves the results to CSV via pandas.
    """

    def __init__(self):
        super(BaJie, self).__init__()
        # Plain desktop User-Agent so requests look like a normal browser.
        self.header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}

    @staticmethod
    def _page_suffix(page_index):
        """Return the URL pagination token for a 0-based ``page_index``.

        The site paginates in steps of 70 items: page 0 has no token,
        page i (i > 0) uses ``k{i*70}``.
        """
        return '' if page_index == 0 else 'k{}'.format(page_index * 70)

    def _scrape(self, label, num, make_url):
        """Fetch ``num`` listing pages and collect description texts.

        :param label: category name stored alongside each description
        :param num: number of pages to fetch
        :param make_url: callable mapping a 0-based page index to its URL
        :return: list of ``[label, text]`` pairs
        """
        task_list = []
        for page_index in range(num):
            url = make_url(page_index)
            print(url)
            try:
                res = requests.get(url, headers=self.header)
                # Explicit parser so behavior does not depend on what
                # happens to be installed (lxml vs html.parser).
                soup = BeautifulSoup(res.text, 'html.parser')
                for anchor in soup.find_all("a", "desc"):
                    # get_text() flattens nested tags (e.g. <h1>) to text.
                    text = anchor.get_text()
                    if text:
                        task_list.append([label, text])
                        print('-', end='')
                print('page{}完成'.format(page_index))
            except Exception as err:
                # One bad page should not abort the whole crawl.
                print('{}_ERR: {}'.format(label, err))
        return task_list

    def get_donghua(self, num):
        """Scrape ``num`` pages of the animation-design (动画设计) category."""
        return self._scrape(
            '动画设计', num,
            lambda i: 'https://www.zbj.com/dhsjs/f{}.html?fr=zbj.sy.zyyw_1st.lv2'.format(self._page_suffix(i)))

    def get_manhua(self, num):
        """Scrape ``num`` pages of the comic-design (漫画设计) category."""
        return self._scrape(
            '漫画设计', num,
            lambda i: 'https://www.zbj.com/dhmh/f{}.html?fr=zbj.sy.zyyw_1st.lv2'.format(self._page_suffix(i)))

    def get_yingshi(self, num):
        """Scrape ``num`` pages of the video-production (影视制作) category."""
        return self._scrape(
            '影视制作', num,
            lambda i: 'https://www.zbj.com/video/f{}.html?fr=zbj.sy.zyyw_1st.lv2'.format(self._page_suffix(i)))

    def get_sheying(self, num):
        """Scrape ``num`` pages of the photography (图像摄影) category.

        This category lives under /search/ and places the pagination token
        before the query string with an explicit ``.html`` suffix.
        """
        def make_url(i):
            suffix = self._page_suffix(i)
            if suffix:
                suffix += '.html'
            return 'https://www.zbj.com/search/f/{}?kw=%E5%9B%BE%E5%83%8F%E6%91%84%E5%BD%B1&fr=zbj.sy.zyyw_1st.lv2'.format(suffix)

        return self._scrape('图像摄影', num, make_url)

    def save_csv(self, csv_path, data_list):
        """Save ``data_list`` (rows of ``[module, task]``) to ``csv_path``.

        Rows are indexed starting at 1. An empty ``data_list`` produces a
        header-only CSV instead of crashing.
        :param csv_path: destination path for the CSV file
        :param data_list: list of two-element ``[module, task]`` rows
        """
        # Hand the list straight to pandas: unlike np.array, this keeps
        # working (2 columns, 0 rows) when data_list is empty.
        df = pd.DataFrame(data_list, columns=['module', 'task'],
                          index=range(1, len(data_list) + 1))
        df.to_csv(csv_path)

    def get_all_tasks(self):
        """Scrape all four categories, save each to its own CSV, then save
        the combined list to 猪八戒制作内容.csv.

        Page counts (67/82/100/92) come from the pagination observed on
        the site (see module notes above).
        """
        all_tasks = []
        for fetch, pages, path in (
                (self.get_donghua, 67, '动画设计.csv'),
                (self.get_manhua, 82, '漫画设计.csv'),
                (self.get_yingshi, 100, '影视制作.csv'),
                (self.get_sheying, 92, '图像摄影.csv')):
            tasks = fetch(pages)
            self.save_csv(path, tasks)
            all_tasks.extend(tasks)
        self.save_csv('猪八戒制作内容.csv', all_tasks)

    def save_one_module(self):
        """Scrape only the photography category and save it to 图像摄影.csv."""
        self.save_csv('图像摄影.csv', self.get_sheying(92))


if __name__ == '__main__':
    # Entry point: scrape only the photography category and dump it to CSV.
    # Call bj.get_all_tasks() instead to rebuild every category's CSV plus
    # the combined 猪八戒制作内容.csv.
    bj = BaJie()
    bj.save_one_module()
