# Import required libraries
import requests
import re
import csv
import time
import pandas as pd 
import matplotlib.pyplot as plt
import numpy as np

# Crawler class
class Jobspyder:
    """Crawler for 51job search-result pages.

    Scrapes job title, company, location, salary and posting date for a
    given keyword and appends the rows to a CSV file.
    """

    def __init__(self, job, csv_name, page_num):
        """
        job: the job keyword to crawl (Chinese or English).
        csv_name: base name of the output CSV file (no extension).
        page_num: number of result pages to crawl.
        """
        # URL template: first {} is the keyword, second {} the page number.
        self.url = 'http://search.51job.com/list/000000,000000,0000,00,9,99,{},2,{}.html'
        self.nexturl = self.url
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
        self.job = job
        # Regex locating the five fields inside the result-page HTML.
        self.pos = r'class="t1 ">.*? <a target="_blank" title="(.*?)".*? <span class="t2"><a target="_blank" title="(.*?)".*?<span class="t3">(.*?)</span>.*?<span class="t4">(.*?)</span>.*? <span class="t5">(.*?)</span>'
        self.csv_name = csv_name + ".csv"
        self.page_num = page_num
        self.fieldnames = ["职位","工作单位","工作地点","薪酬","日期"]
        self.data = []  # rows accumulated for the current page, flushed per page

    def getData(self):
        """Fetch the current page and append each job as a dict to self.data."""
        response = requests.get(self.nexturl, headers=self.headers)
        # 51job pages are GBK encoded; ignore undecodable bytes.
        html = response.content.decode('GBK', 'ignore')
        reg = re.compile(self.pos, re.S)
        items = re.findall(reg, html)
        for item in items:
            self.data.append({
                "职位": item[0],
                "工作单位": item[1],
                "工作地点": item[2],
                "薪酬": item[3],
                "日期": item[4],
            })

    def nextUrl(self, num):
        """Build the URL for result page *num* of the configured keyword."""
        self.nexturl = self.url.format(str(self.job), str(num))

    def csv_writer(self, data):
        """Append the collected rows to the CSV file, then reset the buffer."""
        with open(self.csv_name, 'a+', encoding='GBK', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=self.fieldnames)
            # BUGFIX: write a header once for a fresh file. DataCleaning.readFile
            # skips one header row, so headerless output lost the first record.
            if f.tell() == 0:
                writer.writeheader()
            writer.writerows(data)
        self.data = []

    def run(self):
        """Crawl the configured number of pages and persist the results."""
        try:
            # BUGFIX: include the last page — range(1, n) crawled only n-1 pages.
            for num in range(1, int(self.page_num) + 1):
                time.sleep(1)  # be polite: at most one request per second
                self.nextUrl(num)
                self.getData()
                self.csv_writer(self.data)
            print("爬取成功！")

        except requests.exceptions.RequestException:
            # BUGFIX: requests' ConnectionError is not a subclass of the builtin
            # ConnectionError, so the original handler never fired on failures.
            print('链接失败！')




# Data-cleaning class
class DataCleaning:
    """Cleans the raw CSV produced by Jobspyder.

    Normalises salaries to thousand-CNY-per-month and strips the district
    suffix from locations, then rewrites the CSV with the cleaned rows.
    """

    def __init__(self, filename):
        """
        filename: base name of the crawler's CSV file (no extension).
        """
        self.jobName = 0
        self.locality = 0
        self.salary = 0
        self.companyName = 0
        self.releaseTime = 0
        self.data = []          # raw rows read from the CSV
        self.nd_data = 0        # numpy view of self.data for column slicing
        self.minSa = []         # cleaned minimum salaries (千/月)
        self.maxSa = []         # cleaned maximum salaries (千/月)
        self.newLocality = []   # locations with the district part removed
        self.filename = filename + ".csv"

    def readFile(self):
        """Load the CSV (skipping the header row) and split it into columns."""
        with open(self.filename, encoding='gbk') as f:
            csv_reader = csv.reader(f)
            next(csv_reader)  # discard the header row
            for row in csv_reader:
                self.data.append(row)
        self.nd_data = np.array(self.data)  # array form allows column slicing
        self.jobName = self.nd_data[:, 0]
        self.companyName = self.nd_data[:, 1]
        self.locality = self.nd_data[:, 2]
        self.salary = self.nd_data[:, 3]
        self.releaseTime = self.nd_data[:, 4]

    def salaryCleaning(self):
        """Normalise every salary string to the unit 千/月 (thousand CNY/month).

        Ranges like "1-2万/月" yield both bounds; single values leave
        maxSalary empty; an empty salary yields two empty strings.
        """
        # BUGFIX: one compiled pattern for both bounds. The original used
        # '\d?\.?\d+' for the upper bound, which split multi-digit decimals
        # such as "12.5" into "12" and ".5", corrupting the maximum salary.
        number = re.compile(r'(\d*\.?\d+)')
        for sa in self.salary:
            if sa:
                if '-' in sa:  # range, e.g. "1-2万/月" or "10-20万/年"
                    nums = number.findall(sa)
                    minSalary = nums[0]
                    maxSalary = nums[1]
                    if u'万' in sa and u'年' in sa:    # 万/年 -> 千/月
                        minSalary = float(minSalary) / 12 * 10
                        maxSalary = float(maxSalary) / 12 * 10
                    elif u'万' in sa and u'月' in sa:  # 万/月 -> 千/月
                        minSalary = float(minSalary) * 10
                        maxSalary = float(maxSalary) * 10
                else:  # single value, no upper bound
                    minSalary = number.findall(sa)[0]
                    maxSalary = ""
                    if u'万' in sa and u'年' in sa:
                        minSalary = float(minSalary) / 12 * 10
                    elif u'万' in sa and u'月' in sa:
                        minSalary = float(minSalary) * 10
                    elif u'元' in sa and u'天' in sa:  # 元/天 -> 千/月 (21 workdays)
                        minSalary = float(minSalary) / 1000 * 21
            else:  # missing salary
                minSalary = ""
                maxSalary = ""
            self.minSa.append(minSalary)
            self.maxSa.append(maxSalary)

    def locFormat(self):
        """Strip the district from locations like "上海-浦东新区" -> "上海"."""
        district = re.compile(r'(\w*)-')  # compiled once, reused per row
        for loc in self.locality:
            if '-' in loc:
                self.newLocality.append(district.findall(loc)[0])
            else:
                self.newLocality.append(loc)

    def saveNewFile(self):
        """Overwrite the CSV with the cleaned rows, dropping incomplete ones.

        NOTE: this rewrites the same file the raw data was read from.
        """
        # BUGFIX: the original leaked the file handle (never closed) and used
        # while True + broad except as loop termination, hiding real errors.
        with open(self.filename, 'wt', newline='', encoding='GBK', errors='ignore') as new_f:
            writer = csv.writer(new_f)
            writer.writerow(('职位', '地区', '最低薪资(千/月)', '最高薪资(千/月)', '公司名称', '发布时间'))
            for num in range(len(self.newLocality)):
                # Skip rows missing a location, salary bound or company,
                # and remote-hire placeholders ("异地招聘").
                if (self.newLocality[num] and self.minSa[num] and self.maxSa[num]
                        and self.companyName[num] and self.newLocality[num] != "异地招聘"):
                    writer.writerow((self.jobName[num], self.newLocality[num],
                                     self.minSa[num], self.maxSa[num],
                                     self.companyName[num], self.releaseTime[num]))

    def main(self):
        """Read, clean and rewrite the CSV in one pass."""
        self.readFile()        # load the raw data
        self.locFormat()       # clean locations
        self.salaryCleaning()  # clean salaries
        self.saveNewFile()     # persist the cleaned rows
      


class DataVisual:
    """Visualises the cleaned job CSV with matplotlib."""

    def __init__(self, filename):
        """
        filename: base name of the cleaned CSV file (no extension).
        """
        self.filename = filename + ".csv"
        self.data = None  # pandas DataFrame, populated by read_csv()

    def read_csv(self):
        """Load the cleaned CSV into a DataFrame."""
        self.data = pd.read_csv(self.filename, encoding='GBK')

    def jobcity(self):
        """Show a pie chart of the number of job offers per city."""
        # Count occurrences per city; dict.get replaces the original
        # "x not in d.keys()" membership test and manual branch.
        d = {}
        for city in self.data["地区"]:
            d[city] = d.get(city, 0) + 1

        place_name = d.keys()
        place_count = d.values()
        plt.rcParams["font.sans-serif"] = ["SimHei"]  # render CJK labels
        plt.rcParams["axes.unicode_minus"] = False
        plt.figure(figsize=(18, 6), dpi=150)
        plt.pie(place_count, labels=place_name, autopct="%1.2f%%", textprops={"fontsize": 10})
        plt.axis("equal")  # keep the pie circular
        plt.title("工作机会")
        plt.legend()
        plt.show()
    





# Example usage (uncomment to run the full pipeline):
# csv_name = input("请输入保存数据的文件名：")
# page_num = input("您希望爬取多少页？")
# job = input("请输入你想要爬取的职位:")
# python = Jobspyder(job, csv_name, page_num)
# python.run()
# python_clean = DataCleaning(csv_name)
# python_clean.main()