import re
from os import write

import scrapy


class XyinfoSpider(scrapy.Spider):
    """Scrape informational pages from jsu.edu.cn and append each section
    to a single plain-text output file.

    The crawl is a fixed chain: every callback parses one page, appends
    its section to ``OUTPUT_PATH``, then yields the request for the next
    page; ``handleKyjg`` (research institutes) ends the chain.
    """

    name = 'xyinfo'
    allowed_domains = ['www.jsu.edu.cn']
    start_urls = ['https://www.jsu.edu.cn/xygk/xxjj.htm']

    # Single plain-text file every section is appended to.
    OUTPUT_PATH = "./ama/csvhandle/info"

    # Matches any HTML tag; used to strip markup from raw cell HTML.
    _TAG_RE = re.compile(r"</?[^>]+>")

    # ---------------------------------------------------------------- helpers

    def _write_section(self, title, lines):
        """Append a section header plus one output line per entry.

        ``title`` is the section heading (without the trailing ':  ');
        ``lines`` is an iterable of already-formatted strings.
        """
        with open(self.OUTPUT_PATH, "a", encoding='utf-8') as f:
            f.write("\n\n\n" + title + ":  \n\n\n")
            for line in lines:
                f.write(line)
                f.write("\n")

    def _write_link_section(self, response, ul_class, title):
        """Write a section whose entries are the <a> texts of an <ul>
        selected by CSS class ``ul_class``."""
        items = response.xpath(".//ul[has-class('%s')]/li" % ul_class)
        self._write_section(
            title,
            (li.xpath("./a/text()").get("None") for li in items),
        )

    def _follow(self, url, callback):
        """Build the request for the next page in the scraping chain.

        ``dont_filter=True`` also exempts the request from the offsite
        middleware, which is what lets the renshichu.jsu.edu.cn page
        through despite ``allowed_domains``.
        """
        return scrapy.Request(url, dont_filter=True, callback=callback)

    # --------------------------------------------------------------- callbacks

    def parse(self, response):
        """School introduction (学校简介): one line per paragraph."""
        self.logger.info("正在解析学校简介！")
        paragraphs = response.xpath(".//div[@class='v_news_content']/div/p")
        self._write_section(
            "学校简介",
            (p.xpath("./text()").get("None") for p in paragraphs),
        )
        yield self._follow("https://www.jsu.edu.cn/xygk/xrld.htm", self.parseLd)

    def parseLd(self, response):
        """School leadership (学校领导): '<name with spaces as |><title>' per leader."""
        self.logger.info("正在解析学校领导！")
        leaders = response.xpath(".//ul[has-class('leader')]/li")
        self._write_section(
            "学校领导",
            (
                li.xpath("./div[2]/h3/text()").get("None").replace(" ", "|")
                + li.xpath("./div[2]/p/span/text()").get("None")
                for li in leaders
            ),
        )
        yield self._follow("https://www.jsu.edu.cn/jgsz/xyxb.htm", self.handleYX)

    def handleYX(self, response):
        """Colleges and departments (学院系部)."""
        self.logger.info("正在解析学院系部！")
        self._write_link_section(response, "xyxb", "学院系部")
        yield self._follow("https://www.jsu.edu.cn/jgsz/dzgl.htm", self.handleDz)

    def handleDz(self, response):
        """Party/administration offices (党政管理部门)."""
        self.logger.info("正在解析党政管理部门！")
        self._write_link_section(response, "dzjg", "党政管理部门")
        yield self._follow("https://www.jsu.edu.cn/jgsz/jfzs.htm", self.handleJf)

    def handleJf(self, response):
        """Teaching-support / directly-affiliated offices (教辅直属部门).

        NOTE(review): reuses the 'dzjg' <ul> class like the original code;
        confirm this page really uses that class.
        """
        self.logger.info("正在解析教辅直属部门！")
        self._write_link_section(response, "dzjg", "教辅直属部门")
        yield self._follow("https://www.jsu.edu.cn/jgsz/fsdw.htm", self.handleFs)

    def handleFs(self, response):
        """Affiliated units (附属单位)."""
        self.logger.info("正在解析附属单位！")
        self._write_link_section(response, "dzjg", "附属单位部门")
        yield self._follow("https://www.jsu.edu.cn/zsjy/jxjyzs.htm", self.handleJX)

    def handleJX(self, response):
        """Continuing-education admissions intro (继续教育招生简介):
        one line per paragraph, nested span texts joined with spaces."""
        self.logger.info("正在解析继续教育招生信息！")
        paragraphs = response.xpath(".//div[@class='v_news_content']/p")
        self._write_section(
            "继续教育招生简介",
            (
                " ".join(
                    span.xpath("./text()").get("None")
                    for span in p.xpath("./span/span")
                )
                for p in paragraphs
            ),
        )
        yield self._follow(
            "https://renshichu.jsu.edu.cn/info/1023/1435.htm", self.handleYOUX
        )

    def handleYOUX(self, response):
        """Outstanding graduate supervisors (优秀研究生导师): 'name:  detail'.

        The detail cell arrives as raw HTML; strip ALL tags with a regex
        instead of the original brittle chain of literal .replace() calls,
        which silently missed any tag variant not hard-coded (e.g. other
        font colors or span attribute orderings).
        """
        self.logger.info("正在解析优秀研究生导师信息！")
        # First row is the table header — skip it.
        rows = response.xpath(".//table[@class='MsoTableGrid']/tbody/tr")[1:]

        def format_row(tr):
            name = tr.xpath("./td[1]/p/span/text()").get("None")
            raw_html = tr.xpath("./td[2]/p/span").get("None")
            return name + ":  " + self._TAG_RE.sub("", raw_html)

        self._write_section("优秀研究生导师", (format_row(tr) for tr in rows))
        yield self._follow("https://www.jsu.edu.cn/kxyj/zrkx.htm", self.handleZR)

    def handleZR(self, response):
        """Natural-science research overview (自然科学)."""
        self.logger.info("正在解析自然科学！")
        paragraphs = response.xpath('.//div[@class="v_news_content"]/div/p')
        self._write_section(
            "科学研究：自然科学",
            (p.xpath("./text()").get("None") for p in paragraphs),
        )
        yield self._follow("https://www.jsu.edu.cn/kxyj/shkx.htm", self.handleShkx)

    def handleShkx(self, response):
        """Social-science research overview (社会科学)."""
        self.logger.info("正在解析社会科学！")
        paragraphs = response.xpath('.//div[@class="v_news_content"]/div/p')
        self._write_section(
            "科学研究：社会科学",
            (p.xpath("./text()").get("None") for p in paragraphs),
        )
        yield self._follow("https://www.jsu.edu.cn/kxyj/kyjg.htm", self.handleKyjg)

    def handleKyjg(self, response):
        """Research institutes (科研机构): final page of the chain, so no
        follow-up request is yielded."""
        self.logger.info("正在解析科研机构！")
        self._write_link_section(response, "kyjgr", "科学研究：科研机构")
        

    
            


