# -*- coding:utf-8 -*-
import urllib.request
import re
from sqlalchemy import Column, String,Integer, create_engine  # 导入包
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import datetime
import time
import json
import pymysql
pymysql.install_as_MySQLdb()
from zeep import Client
# Fetch the page source

# Browser-like HTTP request headers (Firefox User-Agent, session cookie,
# referer) prepared for the 51job requests.
# NOTE(review): this dict is defined but never attached to any urlopen()
# call in this file — the requests below go out without these headers.
header={
      'Content-Type': 'application/x-www-form-urlencoded' ,
      'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0' ,
      'Cookie':'ASPSESSIONIDSSDDSRTS=PCHJODPBPMOOGOPKBPHIGENK',
      'Connection':'keep-alive',
      'Referer':'http://www.baidu.com',
      'Upgrade-Insecure-Requests':'1',
      'Origin':'http://search.51job.com'
        }
def get_content(page, keyword='java'):
    """Download one page of 51job search results.

    Parameters:
        page: 1-based results-page number appended to the search URL.
        keyword: search keyword in the URL path (default 'java', which
            reproduces the original hard-coded URL exactly).

    Returns:
        The page HTML decoded from gbk (the site's declared encoding).
    """
    url = ('http://search.51job.com/list/080200,000000,0000,00,9,99,'
           + keyword + ',2,' + str(page) + '.html')
    # Attach the module-level `header` dict — it was prepared for these
    # requests but never used in the original code.
    request = urllib.request.Request(url, headers=header)
    # Context manager closes the HTTP connection (the original leaked it).
    with urllib.request.urlopen(request) as response:
        return response.read().decode('gbk')

def get(html):
    """Extract job postings from a 51job search-results page.

    Returns a list of 7-tuples:
    (job title, job url, company, company url, address, salary, update date).
    """
    pattern = re.compile(
        r'class="t1 ">.*? <a target="_blank" title="(.*?)" href="(.*?)".*? <span class="t2"><a target="_blank" title="(.*?)" href="(.*?)".*?<span class="t3">(.*?)</span>.*?<span class="t4">(.*?)</span>.*? <span class="t5">(.*?)</span>',
        re.S,  # DOTALL: the markup spans multiple lines
    )
    return pattern.findall(html)
# Declarative base class for the ORM models:
Base = declarative_base()


# ORM model mapped to staging table TMP_JOB_51; one row per scraped posting.
class TEMP_JOB51(Base):
	__tablename__ = 'TMP_JOB_51'  # physical table name
	id = Column(Integer, primary_key=True,autoincrement=True)  # surrogate key
	jname = Column(String(100))  # job title (t1 title)
	request_url = Column(String(100))  # job detail page link (t1 href)
	company = Column(String(100))  # company name (t2 title)
	company_detail = Column(String(200))  # company detail page link (t2 href)
	address = Column(String(100))  # work location (t3)
	salary = Column(String(100))  # salary range text (t4)
	update = Column(String(100))  # posting update date (t5)
	detail_address =Column(String(100))  # street address scraped from the detail page

# Initialize the MySQL connection (SQLAlchemy engine, SQL echo enabled).
# NOTE(review): the URL requests charset=utf8 while encoding='gbk' is passed
# to the engine — these look inconsistent; confirm the schema's real encoding.
db_engine = create_engine('mysql+mysqldb://root:mima@*.*.*.*:3306/51job?charset=utf8',encoding='gbk', echo=True)
# Reference dates as 'YYYY-MM-DD' strings: one week ago and today (local time).
s = datetime.datetime.now() - datetime.timedelta(days=7)
b_week_time = s.strftime('%Y-%m-%d')
today = datetime.date.today().strftime('%Y-%m-%d')

# Create the DBSession factory bound to the engine; a single session
# instance drives all the inserts in the crawl loop below.
DBSession = sessionmaker(bind=db_engine)
session = DBSession()
# Crawl each results page and persist every posting to MySQL.
# Only page 1 is crawled for now; widen the range to crawl more pages.
for j in range(1, 2):
    print("正在爬取第"+str(j)+"页数据...")
    html = get_content(j)  # download the search-results page
    for i in get(html):
        # i = (job title, job url, company, company url, address, salary, date)
        # Fetch the company detail page; `with` closes the connection
        # (the original leaked it), and a distinct name avoids shadowing
        # the results-page `html`.
        with urllib.request.urlopen(i[3]) as response:
            detail_html = response.read().decode('gbk')
        # Pull the street address ("公司地址") out of the detail page.
        reg = re.compile(r'class="fp">.*?<span class="label">公司地址：</span>(.*?)</p>', re.S)
        items = re.findall(reg, detail_html)
        if len(items) < 1:
            items = ["无记录"]  # placeholder when no address is found
        jobinfo = TEMP_JOB51(jname=i[0], request_url=i[1],
                             company=i[2], company_detail=i[3],
                             # BUG FIX: store the first match, not the whole
                             # list — the original bound a Python list to a
                             # String(100) column.
                             detail_address=items[0],
                             address=i[4], salary=i[5],
                             update=i[6])
        session.add(jobinfo)
        session.commit()

session.close()

