# This is a sample Python script.

# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.

#!/usr/bin/env python

# -*- coding: utf-8 -*-

'''
@Project : doubanTest
@File    : spider.py
@IDE     : PyCharm
@Author  : Simon
@Date    : 2021/5/4 10:15
'''


import bs4
import re
import urllib.request
import urllib.error
import urllib.parse
import chardet
import xlwt
import sqlite3


def main():
    """Entry point: crawl the internship listing pages, then dump the rows to Excel."""
    base_url = "https://www.shixiseng.com/interns?page="

    # Steps 1 & 2: fetch every listing page and parse the postings out of the HTML.
    records = getData(base_url)

    # Step 3: persist the parsed rows to a spreadsheet on disk.
    output_file = "ShiXiSeng.xls"
    saveData(records, output_file)


#########################################################
#########################################################
def find_chinese(file):
    """Return *file* stripped down to only CJK characters (U+4E00..U+9FA5)."""
    # Delete everything that is NOT in the common CJK Unified Ideographs range.
    return re.sub(r'[^\u4e00-\u9fa5]', '', file)


#########################################################
#########################################################
# Global Variable
# Pre-compiled regexes for pulling fields out of each job <div>'s raw HTML.
# NOTE(review): these are tied to the site's current markup (including the
# scoped attribute data-v-98c756d6) and will silently stop matching if the
# page template changes — verify against a live page when debugging.
findname = re.compile(r'<a class="title ellipsis font".*title="(.*?)">')  # job title from the <a> title attribute
findsalary = re.compile(r'<span class="day font" .*>(.*)</span>')  # salary text (digits are obfuscated glyphs, decoded later)
findcity = re.compile(r'span class="city ellipsis" data-v-98c756d6="">(.*)</span>')  # city span contents
findday = re.compile(r'<span class="font" .*>(.*)</span>')  # days-per-week / duration text

#########################################################
#########################################################
# 1. crawler website
def getData(baseurl):
    """Crawl pages 1..10 of *baseurl* and parse every job posting found.

    Returns a list of [name, salary, city, day] rows (all strings).

    Postings whose HTML does not match the expected markup are skipped;
    the original code indexed `re.findall(...)[0]` unconditionally and
    raised IndexError as soon as one regex missed.
    """
    datalist = []

    for page in range(1, 11):
        url = baseurl + str(page)
        html = askURL(url)

        # 2. parsing one by one
        soup = bs4.BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="f-l intern-detail__job"):
            row = _parse_item(str(item))
            if row is not None:
                datalist.append(row)

    return datalist


def _parse_item(item):
    """Extract one [name, salary, city, day] row from a job <div>'s HTML.

    Returns None when any required field fails to match, so the caller can
    skip malformed/partial items instead of crashing.
    """
    names = re.findall(findname, item)
    salaries = re.findall(findsalary, item)
    days = re.findall(findday, item)
    if not (names and salaries and days):
        return None  # markup changed or partial item: skip rather than crash

    name = find_chinese(names[0])
    salary = _decode_salary(salaries[0])

    # findcity may match several spans; stringifying the match list and
    # running find_chinese keeps only the CJK characters from all of them.
    # Duration words ("周", "个", "月", trailing "天") leak into this field
    # and are stripped out afterwards.
    city = find_chinese(str(re.findall(findcity, item)))
    for junk in ("周", "个", "月"):
        city = city.replace(junk, "")
    # Guard the empty string: the original `city[-1]` raised IndexError here.
    if city.endswith("天"):
        city = city.replace("天", "")

    return [name, salary, city, days[0]]


def _decode_salary(salary):
    """Map the site's obfuscated digit glyphs back to ASCII digits.

    The site renders salary digits with a custom font whose glyphs are
    private-use codepoints; each known UTF-8 byte sequence is translated
    back to the real digit.
    NOTE(review): the original had no mapping for the digit '6' — confirm
    against a live page whether that glyph exists.
    """
    raw = str(salary).encode('utf-8')
    glyph_to_digit = {
        b'\xef\xa3\x8c': b'1',
        b'\xee\xbe\xaf': b'2',
        b'\xef\x9e\x80': b'3',
        b'\xee\x93\x81': b'4',
        b'\xee\xbd\xac': b'5',
        b'\xef\x90\xad': b'7',
        b'\xef\xa0\x9d': b'8',
        b'\xee\x85\xa7': b'9',
        b'\xee\x96\xb6': b'0',
    }
    for glyph, digit in glyph_to_digit.items():
        raw = raw.replace(glyph, digit)
    return raw.decode("utf-8")


#########################
# 1.1 crawler single url to get html
def askURL(url):
    """Fetch *url* and return its body decoded as UTF-8 ('' on failure).

    Network errors are printed to stdout (matching the rest of this
    script's style) rather than raised, so the crawl loop keeps going.
    """
    # simulation website header: pretend to be a desktop Chrome browser
    head = {
        'User-Agent': ' Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36'
    }

    request = urllib.request.Request(url, headers=head)

    html = ""

    try:
        # 'with' guarantees the HTTP response is closed even on decode
        # errors (the original leaked the socket until garbage collection).
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)

    return html


#########################################################
# 3. save data
def saveData(datalist, savepath):
    """Write *datalist* ([name, salary, city, day] rows) to an .xls file.

    The original hard-coded 200 data rows, which raised IndexError when
    the crawl returned fewer rows (and silently dropped any beyond 200);
    the actual list length is used now.
    """
    print("save...")
    # Create the workbook object
    book = xlwt.Workbook(encoding="utf-8")

    # Create the worksheet (allow overwriting cells)
    sheet = book.add_sheet('ShiXiSeng200', cell_overwrite_ok=True)

    # Header row: write(row, column, value)
    col = ("Name", "Salary", "City", "Day")
    for j, title in enumerate(col):
        sheet.write(0, j, title)

    # Data rows start at row 1 (row 0 holds the header)
    for i, data in enumerate(datalist):
        print("第%d条" % (i + 1))
        for j in range(len(col)):
            sheet.write(i + 1, j, data[j])

    # Save the workbook to disk
    book.save(savepath)


# Press the green button in the gutter to run the script.
# Standard script entry guard: run the crawl only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
