#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2023/11/16 21:01
# @Author  : yyosong
# @File    : SCJob.py
# @Software: PyCharm

import chardet
import requests
from bs4 import BeautifulSoup
from datetime import date

# Current date, used to stamp the output filename.
TODAY = date.today()

STARTURL = 'http://www.scrsw.net/'       # site root, prepended to relative links
SCURL = 'http://www.scrsw.net/jiaoshi/'  # teacher-recruitment listing page
# NOTE(review): name looks like a typo for FILENAME; kept as-is because the
# scraping functions below reference it by this name.
FILENASME = f'四川-{TODAY}-job.txt'


def add_enter(temp_txt):
    """Prefix a site-relative path with the site root to form an absolute URL."""
    return STARTURL + temp_txt


# Append the collected records to a txt file, one record per line.
def save_to_txt(data, filename):
    """Append each entry of *data* to *filename* (UTF-8), newline-terminated."""
    with open(filename, 'a', encoding='utf-8') as out:
        out.write('\n'.join(data) + '\n')


def decodeChar(txt):
    """Decode raw response bytes using a chardet-detected charset.

    Args:
        txt: bytes payload (e.g. an HTTP response body).

    Returns:
        str: the decoded text. Falls back to UTF-8 when detection fails —
        chardet reports ``encoding=None`` for empty or undetectable input,
        and ``bytes.decode(None)`` would raise TypeError.
    """
    encoding = chardet.detect(txt)['encoding'] or 'utf-8'
    return txt.decode(encoding)


def readURL(url):
    """Scrape a scrsw.net listing page and append job postings to FILENASME.

    For each entry in the page's news list, writes three lines to the
    output file: the region/source text, the announcement title, and the
    absolute URL of the announcement. Entries whose title contains
    '课程' (course advertisements) are skipped.

    Args:
        url: listing-page URL under scrsw.net (e.g. SCURL).

    Raises:
        requests.RequestException: on network failure or non-2xx status.
    """
    # Timeout so a stalled server cannot hang the script forever;
    # raise_for_status surfaces HTTP errors instead of parsing an error page.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    content = decodeChar(response.content)
    soup = BeautifulSoup(content, 'html.parser')

    brf = soup.find('div', class_='brf')
    listing = brf.find('div', class_='nwli') if brf is not None else None
    if listing is None:
        # Page layout changed (or an unexpected page was served) — nothing to do.
        return

    for entry in listing.contents:
        try:
            # Each listing row carries two links: region/source and announcement.
            sf, gg = entry.find_all('a')
            if '课程' in gg.text:
                continue
            save_to_txt([sf.text], FILENASME)
            save_to_txt([gg.text], FILENASME)
            save_to_txt([add_enter(gg.attrs['href'])], FILENASME)
        except (AttributeError, ValueError, KeyError):
            # Best-effort per row: text nodes have no find_all (AttributeError),
            # rows without exactly two links fail to unpack (ValueError), and
            # links without href raise KeyError — skip such rows, keep going.
            continue


if __name__ == '__main__':
    # Entry point: scrape the Sichuan teacher-recruitment listing page
    # and append today's postings to the dated output file.
    readURL(SCURL)

    # Build a standalone executable with:
    # pyinstaller --onefile --windowed SCJob.py
