#爬取信息
import requests
import codecs
import re
from time import sleep
import os
from bs4 import BeautifulSoup
def GetCityURl():
    """Scrape the bendibao.com city index and save every city link.

    Writes one "<city-name>\t<url>" line per city to cityhref.txt.
    Duplicate city names are de-duplicated via a dict (last link wins).
    """
    url = "http://www.bendibao.com/city.htm"  # bendibao city index page
    headers = {
                'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) '
                             'AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
            }
    city_urls = {}  # renamed: the original shadowed the builtin `dict`
    html = requests.get(url=url, headers=headers)
    html.encoding = 'utf-8'
    bs = BeautifulSoup(html.text, 'html.parser')
    paras = bs.find('div', class_='wrap')
    for a_tag in paras.find_all('a'):       # every <a> inside the wrap div is a city link
        city_urls[a_tag.text] = a_tag['href']  # dict assignment overwrites duplicates
        print('已经获取' + a_tag.text + '的url')
    # `with` guarantees the file is closed even if a write fails.
    with codecs.open("cityhref.txt", 'w') as f:
        for key, value in city_urls.items():
            f.write(key + '\t' + value + '\n')
    return

def GetURLSet():
    """Build the talent-settlement (人才落户) search URL for each hot city.

    Reads "<city>\t<url>" lines from cityhref.txt; for cities in the
    hot-city list, fetches the city page, finds its search <form>, and
    writes "<city>\t<form-action>?q=2021年人才落户&p=&s=<input-value>"
    lines to citySethref.txt.
    """
    city_name_list = ["北京", "上海", "广州",
                       "成都", "杭州", "重庆", "西安", "苏州",
                       "武汉", "南京", "天津", "郑州", "长沙",
                       "东莞", "佛山", "青岛", "沈阳"]
    with codecs.open('cityhref.txt', 'r') as f:
        URLs = f.readlines()
    headers = {
                'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) '
                             'AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
            }
    # `with` ensures the output file is closed even if requests raises.
    with codecs.open('citySethref.txt', 'w') as f:
        for line in URLs:
            parts = line.split()            # split once: [city_name, url]
            city_name = parts[0]
            url = parts[1].strip('\n')
            if city_name not in city_name_list:
                continue
            html = requests.get(url=url, headers=headers)
            html.encoding = 'utf-8'
            bs = BeautifulSoup(html.text, 'html.parser')
            try:
                # find('form') may return None, or the form may lack
                # 'action'/<input> — both raise and are reported below.
                pare = bs.find('form')
                f.write(city_name + '\t' + pare['action'] + '?q=2021年人才落户&p=&s=' + pare.input['value'] + '\n')
                print(city_name + '人才落户拼接搜索url已完成')
            except Exception as e:
                # Report which city failed and why instead of swallowing it.
                print(city_name + ' other error: ' + repr(e))
def GetURLSub():
    """Build the talent-subsidy (人才补贴) search URL for each hot city.

    Mirrors GetURLSet but with query 2021年人才补贴; output goes to
    citySubhref.txt.
    """
    city_name_list = ["北京", "上海", "广州",
                      "成都", "杭州", "重庆", "西安", "苏州",
                      "武汉", "南京", "天津", "郑州", "长沙",
                      "东莞", "佛山", "青岛", "沈阳"]       # hot cities
    # Read every "<city>\t<url>" line collected by GetCityURl.
    with codecs.open('cityhref.txt', 'r') as f:
        URLs = f.readlines()
    headers = {
                'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) '
                             'AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
            }
    with codecs.open('citySubhref.txt', 'w') as f:
        for line in URLs:
            parts = line.split()            # split once: [city_name, url]
            city_name = parts[0]
            url = parts[1].strip('\n')
            if city_name not in city_name_list:     # only hot cities
                continue
            html = requests.get(url=url, headers=headers)
            html.encoding = 'utf-8'
            bs = BeautifulSoup(html.text, 'html.parser')
            try:
                pare = bs.find('form')
                print(city_name + '人才补贴拼接搜索url已完成')
                f.write(city_name + '\t' + pare['action'] + '?q=2021年人才补贴&p=&s=' + pare.input['value'] + '\n')
            except Exception as e:
                # Report which city failed and why instead of swallowing it.
                print(city_name + ' other error: ' + repr(e))
def GetInformationSet():
    """Run each city's settlement-policy search and save the result links.

    For each "<city>\t<search-url>" line in citySethref.txt, fetches the
    search-result pages and writes "<title> <href>" lines to
    city/<city>/<city>的人才落户政策.txt.
    """
    # BUGFIX: the original opened 'citySethref.txt ' with a trailing space,
    # which only works on Windows (it strips trailing spaces) and never
    # matches the file written by GetURLSet elsewhere.
    with codecs.open('citySethref.txt', 'r') as f:
        URLs = f.readlines()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
    }
    for line in URLs:
        city_name = line.split()[0]
        raw_url = line.split()[1]
        # Turn "...p=&s=..." into a page template "...p={}&s=..." so the
        # page number can be substituted below.
        segs = raw_url.split('p=')
        url_template = segs[0] + 'p={}' + segs[1]
        # makedirs also creates the parent 'city' dir (os.mkdir would fail
        # if it were missing) and is race-free with exist_ok=True.
        city_dir = os.path.join('city', city_name)
        os.makedirs(city_dir, exist_ok=True)
        txt_name = os.path.join(city_dir, '{}的人才落户政策.txt'.format(city_name))
        page = 1                            # number of result pages to crawl
        with codecs.open(txt_name, 'w') as f:
            for i in range(page):
                page_url = url_template.format(i)
                print(page_url)
                html = requests.get(page_url, headers=headers)
                html.encoding = 'utf-8'
                bs = BeautifulSoup(html.text, 'html.parser')
                # each result's title/link lives in an <h3 class="c-title">
                for data in bs.find_all('h3', class_='c-title'):
                    link = data.find('a')
                    f.write(link.text + ' ' + link['href'] + '\n')
def GetInformationSub():
    """Run each city's subsidy-policy search and save the result links.

    For each "<city>\t<search-url>" line in citySubhref.txt, fetches the
    search-result pages and writes "<title> <href>" lines (with zero-width
    spaces stripped) to city/<city>/<city>的人才补贴政策.txt.
    """
    # BUGFIX: the original opened 'citySubhref.txt ' with a trailing space,
    # which only works on Windows and never matches the writer in GetURLSub.
    with codecs.open('citySubhref.txt', 'r') as f:
        URLs = f.readlines()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
    }
    for line in URLs:
        city_name = line.split()[0]
        raw_url = line.split()[1]
        # Template with a {} page-number slot in place of the empty p= value.
        segs = raw_url.split('p=')
        url_template = segs[0] + 'p={}' + segs[1]
        # Create city/<city>/ if needed (the original assumed it existed).
        city_dir = os.path.join('city', city_name)
        os.makedirs(city_dir, exist_ok=True)
        txt_name = os.path.join(city_dir, '{}的人才补贴政策.txt'.format(city_name))
        page = 1                            # number of result pages to crawl
        with codecs.open(txt_name, 'w') as f:
            for i in range(page):
                page_url = url_template.format(i)
                print(page_url)
                html = requests.get(page_url, headers=headers)
                html.encoding = 'utf-8'
                bs = BeautifulSoup(html.text, 'html.parser')
                for data in bs.find_all('h3', class_='c-title'):
                    link = data.find('a')
                    # strip zero-width spaces the site embeds in titles
                    f.write((link.text + ' ' + link['href'] + '\n').replace(u'\u200b', u' '))

def GetMoreInformationSet(city_name):
    """Download the full article text for a city's settlement-policy links.

    Reads city/<city>/<city>的人才落户政策.txt, keeps entries whose title
    matches 人才…落户, fetches each article, and saves title / publish
    time / lead paragraph / body words (one per line) under
    city/<city>/人才落户政策/<title>.txt.  Articles with an unexpected
    layout are skipped and their names printed at the end.

    :param city_name: city whose index file was produced by GetInformationSet
    """
    target_dir = os.path.join('city', city_name, '人才落户政策')
    os.makedirs(target_dir, exist_ok=True)  # replaces exists()+mkdir (race-free)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
    }
    txt_name = os.path.join('city', city_name, '{}的人才落户政策.txt'.format(city_name))
    with codecs.open(txt_name, 'r') as f:
        Items = f.readlines()
    failed = []  # renamed: the original shadowed the builtin `list`
    for item in Items:
        # only articles whose title mentions 人才…落户 are policy pages
        if not re.findall(r"人才.*落户", item):
            continue
        name = item.split()[0].strip('-')
        url = item.split()[2]
        print(name + ' ' + url + '正在抓取')
        try:
            html = requests.get(url, headers=headers)
            html.encoding = 'UTF-8'
            bs = BeautifulSoup(html.text, 'html.parser')
            title_div = bs.find('div', class_='title')      # header block
            content_div = bs.find('div', class_='content')  # article body
            title = title_div.find('h1')                    # headline
            pub_time = title_div.find('span', class_='time')  # publish time
            # Open the output file only after the layout checks above, so a
            # malformed page no longer leaks a handle / leaves an empty file
            # (the original opened before parsing and never closed on error).
            with codecs.open(os.path.join(target_dir, name + '.txt'), 'w') as out:
                out.write(title.text + '\n')
                out.write(pub_time.text + '\n')
                lead = title_div.find('p')                  # lead paragraph
                if lead is not None:
                    out.write(lead.text + '\n')
                # plain <p> tags only (no class/style) are the article body
                for para in content_div.find_all('p', class_='', style=''):
                    cleaned = para.text.replace(u'\xa0', u' ').replace(u'\u27a4', u' ')
                    if re.findall(r'温馨提示', cleaned + '\n'):
                        continue  # skip "friendly reminder" boilerplate
                    for word in cleaned.split():
                        out.write(word + '\n')
        except AttributeError:
            # a find() returned None: the page layout differs from expected
            print(name + '格式错误，停止抓取')
            failed.append(name)
            continue
        print('抓取完成')
    for n in failed:
        print(n)
def GetMoreInformationSub(city_name):
    """Download the full article text for a city's subsidy-policy links.

    Mirrors GetMoreInformationSet: reads
    city/<city>/<city>的人才补贴政策.txt, keeps entries matching
    人才…补贴, and saves each article under city/<city>/人才补贴政策/.
    Articles with an unexpected layout are skipped and listed at the end.

    :param city_name: city whose index file was produced by GetInformationSub
    """
    target_dir = os.path.join('city', city_name, '人才补贴政策')
    os.makedirs(target_dir, exist_ok=True)  # replaces exists()+mkdir (race-free)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
    }
    txt_name = os.path.join('city', city_name, '{}的人才补贴政策.txt'.format(city_name))
    with codecs.open(txt_name, 'r') as f:
        Items = f.readlines()
    failed = []  # renamed: the original shadowed the builtin `list`
    for item in Items:
        if not re.findall(r"人才.*补贴", item):
            continue
        name = item.split()[0].strip('-')
        url = item.split()[2]
        print(name + ' ' + url + '正在抓取')
        try:
            html = requests.get(url, headers=headers)
            html.encoding = 'UTF-8'
            bs = BeautifulSoup(html.text, 'html.parser')
            title_div = bs.find('div', class_='title')      # header block
            content_div = bs.find('div', class_='content')  # article body
            title = title_div.find('h1')                    # headline
            pub_time = title_div.find('span', class_='time')  # publish time
            # (the unused `form = pares.find('span', id='writer')` lookup
            # from the original was dropped — its result was never used)
            # Open after the layout checks so a malformed page no longer
            # leaks a handle or leaves a stray empty file behind.
            with codecs.open(os.path.join(target_dir, name + '.txt'), 'w') as out:
                out.write(title.text + '\n')
                out.write(pub_time.text + '\n')
                lead = title_div.find('p')                  # lead paragraph
                if lead is not None:
                    out.write(lead.text + '\n')
                for para in content_div.find_all('p', class_='', style=''):
                    cleaned = para.text.replace(u'\xa0', u' ').replace(u'\u27a4', u' ')
                    if re.findall(r'温馨提示', cleaned + '\n'):
                        continue  # skip "friendly reminder" boilerplate
                    for word in cleaned.split():
                        out.write(word + '\n')
        except AttributeError:
            print(name + '格式错误，停止抓取')
            failed.append(name)
            continue
        print('抓取完成')
    for n in failed:
        print(n)
def GetMoreInformationExp():
    """Experimental scraper for the alternate <article> page layout.

    Fetches one hard-coded Wuhan subsidy article whose markup (article /
    public_time / p.dao) differs from the div.title/div.content layout the
    other scrapers handle, and saves it under city/武汉/人才补贴政策/.
    """
    city_name = '武汉'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
    }
    item = '2021年武汉人才补贴政策(不断更新)- 武汉本地宝 http://wh.bendibao.com/live/2007813/113158.shtm'
    name = item.split()[0].strip('-')
    url = item.split()[2]
    print(name + ' ' + url + '正在抓取')
    try:
        html = requests.get(url, headers=headers)
        html.encoding = 'UTF-8'
        bs = BeautifulSoup(html.text, 'html.parser')
        article = bs.find('article')                         # article body
        title = article.find('h1')                           # headline
        pub_time = article.find('span', class_='public_time')  # publish time
        # (the unused `form = bs.find('span', id='writer')` lookup from the
        # original was dropped — its result was never used)
        # Open only after the layout lookups so a malformed page cannot
        # leak the handle or leave an empty file (original never closed on
        # AttributeError).
        out_path = os.path.join('city', city_name, '人才补贴政策', name + '.txt')
        with codecs.open(out_path, 'w') as f:
            f.write(title.text + '\n')
            print(title.text)
            f.write(pub_time.text + '\n')
            print(pub_time.text)
            lead = article.find('p', class_='dao')           # lead paragraph
            if lead is not None:
                f.write(lead.text + '\n')
                print(lead.text)
            for para in article.find_all('p', class_='', style=''):
                cleaned = para.text.replace(u'\xa0', u' ').replace(u'\u27a4', u' ')
                # skip "friendly reminder" boilerplate paragraphs
                if not re.findall(r'温馨提示', cleaned + '\n'):
                    for word in cleaned.split():
                        f.write(word + '\n')
    except AttributeError:
        print(name + '格式错误，停止抓取')
    print('抓取完成')
