#导入需要的模块
import requests
import re
import pymysql#连接数据库
from threading import Thread#多线程
from bs4 import BeautifulSoup
from multiprocessing import Pool

header={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:57.0) Gecko/20100101 Firefox/57.0'}

# Fetch the homepage URLs of all colleges.
# http://www.ccu.edu.cn/xxgk/jgsz.htm — Changchun University "organizational structure" page
def src_all():
    """Scrape the university's org-chart page and return (url, name) pairs.

    Returns:
        list[tuple[str, str]]: one ``(link, institution_name)`` tuple per
        institution found on the page.  The original only printed the list;
        returning it is a backward-compatible improvement.
    """
    src_list = []  # collected (url, name) pairs
    url = "http://www.ccu.edu.cn/xxgk/jgsz.htm"
    response = requests.get(url=url, headers=header)
    response.encoding = "utf-8"
    content = response.text
    # The regex captures each institution's link target and display name
    # from the fixed-layout table cells of the page.
    jigou = re.findall('<TD class="style5" width="310" style="text-align: left"><A href="(.*?)" target="_blank">(.*?)</A></TD>', content)
    src_list.extend(jigou)  # add everything that was found
    print(src_list)
    return src_list

# NOTE(review): the text below originally sat inside src_all as a stray,
# indented no-op string literal; it actually describes get_newsContent:
# because individual news pages use two different HTML structures, two
# extraction methods are combined — a regex pass and a DOM-node walk.


# Append scraped text to a per-college .txt file.
def write_txt(content, filename):
    """Append *content* to ``爬取到的新闻数据/<filename>.txt`` (UTF-8).

    content: text to append (caller supplies its own trailing newline).
    filename: base name of the output file, typically the college name.
    """
    import os  # local import keeps this fix self-contained
    folder = '爬取到的新闻数据'  # "scraped news data" output directory
    # BUG FIX: the original assumed the directory already existed and
    # hard-coded a Windows backslash in the path; create the directory on
    # first use and join the path portably.
    os.makedirs(folder, exist_ok=True)
    with open(os.path.join(folder, '{}.txt'.format(filename)), 'a+', encoding="utf-8") as f:
        f.write(content)


def get_newsContent(url):
    """Download one news page and return the article text.

    Two extraction strategies are combined because news pages come in two
    HTML structures:
      1. a regex over ``<span style="font-family: 宋体;">`` fragments;
      2. walking every ``<p>`` under the ``div#vsb_newscontent`` container.

    url: absolute URL of a single news article.
    Returns the concatenation of both strategies' text (str).
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:57.0) Gecko/20100101 Firefox/57.0'}
    # BUG FIX: the original called requests.get(url, header), passing the
    # header dict positionally as the ``params`` argument — so the
    # User-Agent was never sent and the dict was serialized into the URL
    # query string instead.  It must be the ``headers`` keyword.
    response = requests.get(url, headers=header)
    response.encoding = "utf-8"
    # Strategy 1: regex over the raw HTML.
    fragments = re.findall('<span style="font-family: 宋体;">(.*?)</span>', response.text)
    con = "".join(fragments)  # join once instead of quadratic += in a loop
    # Strategy 2: DOM traversal with BeautifulSoup.
    p_content = ""
    soup = BeautifulSoup(response.text, 'lxml')  # 'lxml' parser, as elsewhere in the file
    for container in soup.find_all(name='div', id='vsb_newscontent'):  # parent node(s)
        for p in container.find_all(name='p'):  # child paragraphs
            if p.string is not None:  # skip paragraphs with nested markup
                p_content += p.string
    return con + p_content  # combined article text

#创建数据库连接对象
class Sql(object):
    #数据库相关信息
    conn = pymysql.connect(
        host='127.0.0.1',
        port=3306,
        user='root',
        passwd='mysql',
        db='ccunews',
        charset='utf8')

    #将新闻信息添加到数据库中去
    """
    xueyuan:学院名称
    ls：序号
    newsll：新闻标题
    time：新闻发布时间
    src：新闻的网址，访问可以查看
    content：新闻的内容
    """
    def addnews(self,xueyuan,ls,newsll,time,src,content):
        cur=self.conn.cursor()
        cur.execute("insert into ccunews(xueyuan,ls,newsll,time,src,content) values('%s','%d','%s','%s','%s','%s') "%(xueyuan,ls,newsll,time,src,content))
        lastrowid=cur.lastrowid
        cur.close()#关闭游标
        self.conn.commit()
        return lastrowid

mysql=Sql()


# College of Mechanical and Vehicle Engineering
def jixie():
    """Scrape all news of the College of Mechanical and Vehicle Engineering.

    Collects titles/times/links from 12 listing pages, then fetches each
    article body, writes it to a txt file and stores it in MySQL.
    """
    news_list = []  # news titles
    time_list = []  # publication times
    src_list = []   # relative article URLs
    for p in range(1, 13):  # 12 listing pages
        # listing-page URL of this college
        url = "http://cdjxxy.ccu.edu.cn/list.jsp?a6t=12&a6p=" + str(p) + "&a6c=10&urltype=tree.TreeTempUrl&wbtreeid=1013"
        response = requests.get(url, headers=header)
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, 'lxml')
        for a in soup.find_all('a', class_="c58849"):
            news_list.append(a["title"])  # news title
            src_list.append(a['href'])    # relative link
        for td in soup.find_all('td', class_="timestyle58849"):
            time_list.append(td.string)   # publication time
    # ROBUSTNESS: zip() guards against a title/time count mismatch that
    # would have raised IndexError in the original index-based loop.
    for k, (title, pub_time, rel) in enumerate(zip(news_list, time_list, src_list)):
        src = "http://cdjxxy.ccu.edu.cn/" + rel  # absolute article URL
        p_content = get_newsContent(src)
        print("机械与车辆工程学院", k, title, pub_time, src, p_content)  # show what was scraped
        write_data = title + '' + pub_time + '' + p_content + "\n"
        write_txt(write_data, "机械与车辆工程学院")  # append to txt
        try:
            # persist the record to the database
            mysql.addnews("机械与车辆工程学院", k, title, pub_time, src, p_content)
        except Exception as e:
            # BUG FIX: bare ``except:`` also caught KeyboardInterrupt/SystemExit
            # and hid the real failure reason; report it instead.
            print("存入失败！", e)

# College of Computer Science and Technology
def jisuanji():
    """Scrape all news of the College of Computer Science and Technology."""
    news_list = []  # news titles
    time_list = []  # publication times
    src_list = []   # relative article URLs
    for p in range(8):  # page 0 has a different URL shape
        if p == 0:
            url1 = "http://cst.ccu.edu.cn/index/xwdt.htm"
        else:
            url1 = "http://cst.ccu.edu.cn/index/xwdt/" + str(p) + ".htm"
        response = requests.get(url1, headers=header)
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, 'lxml')
        for a in soup.find_all('a', class_="c57749"):
            news_list.append(a["title"])
            # last 18 chars of href form the site-relative article path
            src_list.append(a['href'][-18:])
        for span in soup.find_all('span', class_="timestyle57749"):
            time_list.append(span.string)
    # fetch each article body
    # ROBUSTNESS: zip() avoids IndexError when the parallel lists differ
    # in length (the original indexed time_list by news_list's length).
    for k, (title, pub_time, rel) in enumerate(zip(news_list, time_list, src_list)):
        src = "http://cst.ccu.edu.cn/" + rel
        p_content = get_newsContent(src)
        print("计算机科学技术学院", k, title, pub_time, src, p_content)  # show what was scraped
        write_data = title + '' + pub_time + '' + p_content + "\n"
        write_txt(write_data, "计算机科学技术学院")  # append to txt
        # persist the record to the database
        try:
            mysql.addnews("计算机科学技术学院", k, title, pub_time, src, p_content)
        except Exception as e:
            # BUG FIX: bare except swallowed the real error; report it.
            print("存入失败！", e)


# College of Electronic Information Engineering
def dianzixinxi():
    """Scrape all news of the College of Electronic Information Engineering."""
    news_list = []  # news titles
    time_list = []  # publication times
    src_list = []   # relative article URLs
    for p in range(1, 15):  # 14 listing pages
        url = "http://cddz.ccu.edu.cn/list.jsp?a8t=14&a8p=" + str(p) + "&a8c=10&urltype=tree.TreeTempUrl&wbtreeid=1011"
        response = requests.get(url, headers=header)
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, 'lxml')
        for a in soup.find_all('a', class_="c59244"):
            news_list.append(a["title"])
            src_list.append(a['href'])
        for td in soup.find_all('td', class_="timestyle59244"):
            time_list.append(td.string)
    # ROBUSTNESS: zip() avoids IndexError on a title/time count mismatch.
    for k, (title, pub_time, rel) in enumerate(zip(news_list, time_list, src_list)):
        src = "http://cddz.ccu.edu.cn/" + rel
        print(src)
        p_content = get_newsContent(src)
        print("电子信息工程学院", k, title, pub_time, src, p_content)  # show what was scraped
        write_data = title + '' + pub_time + '' + p_content + "\n"
        write_txt(write_data, "电子信息工程学院")  # append to txt
        try:
            # persist the record to the database
            mysql.addnews("电子信息工程学院", k, title, pub_time, src, p_content)
        except Exception as e:
            # BUG FIX: bare except swallowed the real error; report it.
            print("存入失败！", e)

# College of Management
def guanlixueyuan():
    """Scrape all news of the College of Management."""
    news_list = []  # news titles
    time_list = []  # publication times
    src_list = []   # relative article URLs
    for p in range(31):  # page 0 has a different URL shape
        if p == 0:
            url1 = "http://glxy.ccu.edu.cn/index/xykx.htm"
        else:
            url1 = "http://glxy.ccu.edu.cn/index/xykx/" + str(p) + ".htm"
        response = requests.get(url1, headers=header)
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, 'lxml')
        for a in soup.find_all('a', class_="c58605"):
            news_list.append(a["title"])
            # last 18 chars of href form the site-relative article path
            src_list.append(a['href'][-18:])
        for span in soup.find_all('span', class_="timestyle58605"):
            time_list.append(span.string)
    # ROBUSTNESS: zip() avoids IndexError on a title/time count mismatch.
    for k, (title, pub_time, rel) in enumerate(zip(news_list, time_list, src_list)):
        src = "http://glxy.ccu.edu.cn/" + rel
        print(src)
        p_content = get_newsContent(src)
        print("管理学院", k, title, pub_time, src, p_content)  # show what was scraped
        write_data = title + '' + pub_time + '' + p_content + "\n"
        write_txt(write_data, "管理学院")  # append to txt
        try:
            mysql.addnews("管理学院", k, title, pub_time, src, p_content)
        except Exception as e:
            # BUG FIX: bare except swallowed the real error; report it.
            print("存入失败！", e)


# College of Foreign Languages
def waiguoyu():
    """Scrape all news of the College of Foreign Languages."""
    news_list = []  # news titles
    time_list = []  # publication times
    src_list = []   # relative article URLs
    for p in range(1, 9):  # 8 listing pages
        url = "http://cdwyxy.ccu.edu.cn/list.jsp?a6t=8&a6p=" + str(p) + "&a6c=10&urltype=tree.TreeTempUrl&wbtreeid=1004"
        response = requests.get(url, headers=header)
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, 'lxml')
        for a in soup.find_all('a', class_="c59025"):
            news_list.append(a["title"])
            src_list.append(a['href'])
        for td in soup.find_all('td', class_="timestyle59025"):
            time_list.append(td.string)
    print(len(news_list))
    # ROBUSTNESS: zip() avoids IndexError on a title/time count mismatch.
    for k, (title, pub_time, rel) in enumerate(zip(news_list, time_list, src_list)):
        src = "http://cdwyxy.ccu.edu.cn/" + rel
        print(src)
        p_content = get_newsContent(src)
        print("外国语学院", k, title, pub_time, src, p_content)  # show what was scraped
        write_data = title + '' + pub_time + '' + p_content + "\n"
        write_txt(write_data, "外国语学院")  # append to txt
        try:
            mysql.addnews("外国语学院", k, title, pub_time, src, p_content)
        except Exception as e:
            # BUG FIX: bare except swallowed the real error; report it.
            print("存入失败！", e)


# College of Special Education
def tejiao():
    """Scrape all news of the College of Special Education."""
    news_list = []  # news titles
    time_list = []  # publication times
    src_list = []   # relative article URLs
    for p in range(5):  # page 0 has a different URL shape
        if p == 0:
            url1 = "http://cdtjxy.ccu.edu.cn/index/rdxw.htm"
        else:
            url1 = "http://cdtjxy.ccu.edu.cn/index/rdxw/" + str(p) + ".htm"
        response = requests.get(url1, headers=header)
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, 'lxml')
        for a in soup.find_all('a', class_="c131815"):
            news_list.append(a["title"])
            # last 18 chars of href form the site-relative article path
            src_list.append(a['href'][-18:])
        for span in soup.find_all('span', class_="timestyle131815"):
            time_list.append(span.string)
    print(len(news_list))
    # ROBUSTNESS: zip() avoids IndexError on a title/time count mismatch.
    for k, (title, pub_time, rel) in enumerate(zip(news_list, time_list, src_list)):
        src = "http://cdtjxy.ccu.edu.cn/" + rel
        p_content = get_newsContent(src)
        print("特殊教育学院", k, title, pub_time, src, p_content)  # show what was scraped
        write_data = title + '' + pub_time + '' + p_content + "\n"
        write_txt(write_data, "特殊教育学院")  # append to txt
        try:
            mysql.addnews("特殊教育学院", k, title, pub_time, src, p_content)
        except Exception as e:
            # BUG FIX: bare except swallowed the real error; report it.
            print("存入失败！", e)


# College of Economics
def jingji():
    """Scrape all news of the College of Economics."""
    news_list = []  # news titles
    time_list = []  # publication times
    src_list = []   # relative article URLs
    # number of listing pages to crawl
    for p in range(1, 4):
        url = "http://jjxy.ccu.edu.cn/list.jsp?a8t=3&a8p=" + str(p) + "&a8c=15&urltype=tree.TreeTempUrl&wbtreeid=1013"
        response = requests.get(url, headers=header)
        response.encoding = "utf-8"
        soup = BeautifulSoup(response.text, 'lxml')
        for a in soup.find_all('a', class_="c58872"):
            news_list.append(a["title"])
            src_list.append(a['href'])
        for td in soup.find_all('td', class_="timestyle58872"):
            time_list.append(td.string)
    # ROBUSTNESS: zip() avoids IndexError on a title/time count mismatch.
    for k, (title, pub_time, rel) in enumerate(zip(news_list, time_list, src_list)):
        src = "http://jjxy.ccu.edu.cn/" + rel
        p_content = get_newsContent(src)
        print("经济学院", k, title, pub_time, src, p_content)  # show what was scraped
        write_data = title + '' + pub_time + '' + p_content + "\n"
        write_txt(write_data, "经济学院")  # append to txt
        try:
            mysql.addnews("经济学院", k, title, pub_time, src, p_content)
        except Exception as e:
            # BUG FIX: bare except swallowed the real error; report it.
            print("存入失败！", e)



if __name__ == '__main__':
    # BUG FIX: the original used ``Thread(target=jingji(), args=("jingji",))``.
    # ``jingji()`` CALLED the scraper immediately — so everything ran
    # sequentially in the main thread — and handed Thread a target of
    # None, so .start() did nothing.  The args tuples were also wrong:
    # these functions take no arguments.  The Thread objects additionally
    # shadowed the function names.  Pass the function objects themselves.
    threads = [
        Thread(target=jingji),
        Thread(target=jixie),
        Thread(target=waiguoyu),
        Thread(target=tejiao),
        Thread(target=guanlixueyuan),
        Thread(target=dianzixinxi),
        Thread(target=jisuanji),
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()  # wait for every scraper to finish