# coding:utf-8
import urllib2
import json
import base64
import sys
reload(sys)
sys.setdefaultencoding('utf8')

# 1. 获取主页 html (1)
# 这个是西工院就业信息网
# url = "http://jy.51uns.com:8022/Pro_StudentEmploy/StudentJobFair/Zhaoping.aspx?WorkType=0"
def OpenPage(url):
    """Fetch *url* over HTTP and return the raw response body.

    The target site serves UTF-8 (not GBK), so the body is returned
    as-is without any re-encoding.
    """
    request = urllib2.Request(url, headers={})
    response = urllib2.urlopen(request)
    return response.read()

def Test1():
    url = 'http://jy.51uns.com:8022/Pro_StudentEmploy/StudentJobFair/Zhaoping.aspx?WorkType=0'
    print OpenPage(url)
# Test1()

# 2. 获取主页 html (2)
# 抓包发现, 西工院的网站是用 ajax 搞的. 需要用以下请求获取到数据
# url = "http://jy.51uns.com:8022/Frame/Data/jdp.ashx?rnd=1528794488557&fn=GetZhaopinList&StartDate=2000-01-01&SearchKey=&InfoType=-1&CompanyAttr=&CompanyType=&Area=&City=&CompanyProvice=&Post=&Zhuanye=&XLkey=&Age=&start=0&limit=15&DateType=999&InfoState=1&WorkType=0&CompanyKey="
# 请求中包含 start 和 limit 字段用于控制页数
# 并且返回结果是 json 格式的字符串
def Test2():
    url = "http://jy.51uns.com:8022/Frame/Data/jdp.ashx?rnd=1528794488557&fn=GetZhaopinList&StartDate=2000-01-01&SearchKey=&InfoType=-1&CompanyAttr=&CompanyType=&Area=&City=&CompanyProvice=&Post=&Zhuanye=&XLkey=&Age=&start=0&limit=15&DateType=999&InfoState=1&WorkType=0&CompanyKey="
    print OpenPage(url)
# Test2()

# 3. 解析主页内容, 获取到详情页 url
# 使用抓包工具分析详情页 url 的特点, ajax + 前缀 + id
def ParseMainPage(page):
    """Turn the listing JSON into a list of detail-page URLs.

    The listing response carries a 'rows' array; each row's 'Id' is
    appended to the ajax detail-endpoint prefix discovered via packet
    capture (ajax + prefix + id).
    """
    detail_prefix = "http://jy.51uns.com:8022/Frame/Data/jdp.ashx?rnd=1528801284667&fn=GetOneZhaopin&StartDate=2000-01-01&JobId="
    listing = json.loads(page)
    urls = []
    for row in listing['rows']:
        urls.append(detail_prefix + row['Id'])
    return urls

def Test3():
    url = "http://jy.51uns.com:8022/Frame/Data/jdp.ashx?rnd=1528794488557&fn=GetZhaopinList&StartDate=2000-01-01&SearchKey=&InfoType=-1&CompanyAttr=&CompanyType=&Area=&City=&CompanyProvice=&Post=&Zhuanye=&XLkey=&Age=&start=0&limit=15&DateType=999&InfoState=1&WorkType=0&CompanyKey="
    page = OpenPage(url)
    print ParseMainPage(page)
# Test3()

# 4. 解析详情页
# 通过 fiddler 分析返回的 JSON 结果格式
def ParseDetailPage(page):
    """Parse one detail-page JSON response.

    Returns an (id, company, position, content) tuple, or None when the
    response reports failure.  Requires BeautifulSoup4
    (pip install BeautifulSoup4).
    """
    from bs4 import BeautifulSoup
    reply = json.loads(page)
    if not reply['Succeed']:
        print 'crawler error!'
        return
    job = reply['Data']
    # The job description arrives as an HTML fragment in EmployContent;
    # pull the text out of every <p> tag and join with newlines.
    soup = BeautifulSoup(job['EmployContent'], 'html.parser')
    paragraphs = soup.find_all("p")
    content = "\n".join([p.get_text() for p in paragraphs])
    return job['Id'], job['CompanyTitle'], job['WorkPositon'], content

def Test4():
    url = 'http://jy.51uns.com:8022/Frame/Data/jdp.ashx?rnd=1528801284667&fn=GetOneZhaopin&StartDate=2000-01-01&JobId=09768292dfdf448a9162eb1b186e789e'
    page = OpenPage(url)
    jobid, title, position, content = ParseDetailPage(page)
    print jobid
    print title
    print position
    print content
# Test4()

# 5. 结果写入到 mysql
# 需要安装 MySQL(过程略)
# 需要安装 MySQLdb
# yum install python-devel
# pip install mysql-python
# 需要先创建表 sql 语句提前准备好
# 然后在客户端里执行 source table.sql 即可完成导入
#
# 创建 table.sql. 这里创建数据库最好每个班都创建一个.
# create database if not exists TestPy ;
# use TestPy;
# drop table if exists CrawlerSchool;
# CREATE TABLE `CrawlerSchool` (
#       `id` text,
#       `company` text,
#       `work_position` text,
#       `content` text
# ) ENGINE=InnoDB DEFAULT CHARSET=utf8
#
# 创建 table.sh
# mysql -uroot -p < table.sql
def WriteData(data):
    import MySQLdb
    # import base64
    # 此处需要声明 charset, 否则插入时会出现错误. 另一方面, sql
    # 创建表也需要声明 charset
    db = MySQLdb.connect(host='localhost', user='root',
                         passwd='', db='TestPy', charset='utf8')
    cursor = db.cursor()
    # 如果不进行 base64 编码, 插入数据库就会出错(包含了特殊符号, 搞坏了sql)
    # 同时也是防止 SQL 注入攻击的一种方法.
    # content = data[3]
    content = base64.b64encode(data[3])
    sql = "insert into CrawlerSchool values('%s', '%s', '%s', '%s')" % (data[0], data[1], data[2], content)
    print "sql=" + sql
    # 为了保证数据库操作的原子性, 失败需要进行回滚
    try:
        cursor.execute(sql)
        db.commit()
    except Exception, e:
        db.rollback()
        print str(e)
    db.close()

def Test5():
    """Insert a record whose content contains an embedded single quote.

    With naively %-formatted SQL an embedded quote used to break the
    insert; this exercises that path.
    """
    sample = ('123', '比特科技', '班主任小姐姐', "认真负责, '人'美音甜")
    WriteData(sample)
# Test5()

# 6. 把以上流程串到一起
def Main():
    url = "http://jy.51uns.com:8022/Frame/Data/jdp.ashx?rnd=1528794488557&fn=GetZhaopinList&StartDate=2000-01-01&SearchKey=&InfoType=-1&CompanyAttr=&CompanyType=&Area=&City=&CompanyProvice=&Post=&Zhuanye=&XLkey=&Age=&start=0&limit=15&DateType=999&InfoState=1&WorkType=0&CompanyKey="
    main_page = OpenPage(url)
    url_list = ParseMainPage(main_page)
    for url in url_list:
        print "crawler url=" + url
        detail_page = OpenPage(url)
        data = ParseDetailPage(detail_page)
        # print data[1], data[3]
        WriteData(data)
    print "crawler done!"
Main()
