################################################################################
#                                 Spider Part                                  #
################################################################################
snippet ua "User-Agent"
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \\
	AppleWebKit/537.36 (KHTML, like Gecko) \\
	Chrome/70.0.3538.110 Safari/537.36'
endsnippet

snippet wait "wait"
wait = WebDriverWait(browser, 10)
wait.until(EC.presence_of_element_located((${1:By.ID}, ${2:'id'})))$0
endsnippet

snippet bxp "browser.find_element_by_xpath"
browser.find_element_by_xpath('$1')$0
endsnippet

snippet getpage "get page try catch"
try:
	resp = session.post(url, data=data, headers=headers)
	if resp.status_code == 200:
		return resp.${1:json()}
except requests.ConnectionError as e:
	print('Error:', e.args)
$0
endsnippet

################################################################################
#                                Code Templates                                #
################################################################################
snippet spider_simple "简单单线程的 spider"
import requests
from pyquery import PyQuery as pq
from PIL import Image
import time
import pymysql
import os
from hashlib import md5


def download_check_code(session, login_url):
    """Download the captcha image so a human can read and transcribe it.

    :session: requests session used for the login flow
    :login_url: URL of the login page containing the captcha
    :returns: captcha text typed in by the user

    """
    login_page = session.get(login_url)
    # Parse the login page so the captcha image URL can be extracted.
    page_doc = pq(login_page.text)
    #  TODO <18-12-28, Codergege> # extract the captcha URL from page_doc
    check_code_url = ''

    print('验证码图片路径为:', check_code_url)
    img_resp = session.get(check_code_url)
    with open('check_code.png', 'wb') as img_file:
        img_file.write(img_resp.content)
    # Show the saved image and ask the operator to transcribe it.
    Image.open('check_code.png').show()
    return input('输入验证码:')


def get_token():
    """Build a login token: current Unix time in milliseconds, as a string.

    :returns: millisecond timestamp string

    """
    # round() with no ndigits already returns an int.
    millis = round(time.time() * 1000)
    return str(millis)


def login(session, login_url, login_post_url):
    """Log in to the target site.

    :session: requests session that will hold the login cookies
    :login_url: URL of the login page
    :login_post_url: URL the login form POSTs to
    :returns: None

    """
    #  TODO <18-12-28, Codergege> # implement the login flow
    pass


def get_page(session, url):
    """Fetch a page (or its data) from `url`.

    :session: requests session (already logged in, if the site requires it)
    :url: URL to fetch
    :returns: page source (or JSON data)

    """
    #  TODO <18-12-28, Codergege> # use the `getpage` snippet to fetch the page (JSON)
    pass


def parse_page(data):
    """Parse the data returned by get_page(session, url).

    :data: page source or JSON data
    :returns: yields result items

    """
    #  TODO <18-12-28, Codergege> # parse the data
    pass


def save_to_mysql(item, db, table, cursor, print_count=None):
    """Insert one item dict into a MySQL table.

    Column values are bound as parameters (`%s` placeholders), so the
    values themselves are not interpolated into the SQL string.

    :item: dict mapping column names to values
    :db: open database connection (used for commit/rollback)
    :table: target table name
    :cursor: cursor obtained from `db`
    :print_count: when truthy, printed as a running count of saved items
    :returns: None

    """
    keys = ', '.join(item.keys())
    values = ', '.join(['%s'] * len(item))
    sql = 'insert into {table}({keys}) values({values})'.format(
        table=table, keys=keys, values=values)
    try:
        # execute() returns the affected row count; commit only on success.
        if cursor.execute(sql, tuple(item.values())):
            if print_count:
                # Fixed typo: 'sucessfully' -> 'successfully'.
                print(str(print_count), '- Saved to mysql successfully!')
            db.commit()
    except Exception as e:
        print('Failed:', e.args)
        db.rollback()


def save_image(session, item):
    """Save an image under Downloads/<title>/<md5>.jpg.

    The MD5 of the image bytes is used as the file name, so the same
    image is never written twice.

    :session: requests session used to fetch the image
    :item: dict with 'title' (directory name) and 'image_url'
    :returns: None

    """
    dir_path = os.path.join('Downloads', item.get('title'))
    # makedirs(exist_ok=True) also creates the 'Downloads' parent (os.mkdir
    # would raise FileNotFoundError) and is race-free if the directory
    # appears between check and creation.
    os.makedirs(dir_path, exist_ok=True)
    try:
        resp_img = session.get(item.get('image_url'))
        if resp_img.status_code == 200:
            file_name = '{0}.{1}'.format(
                md5(resp_img.content).hexdigest(), 'jpg')
            file_path = os.path.join(dir_path, file_name)
            if not os.path.exists(file_path):
                with open(file_path, 'wb') as f:
                    f.write(resp_img.content)
                print(file_path, '保存成功!')
            else:
                print('Already downloaded', file_path)
    except requests.ConnectionError as e:
        print('Failed to save image:', e.args)


def main():
    """Spider entry point: set up MySQL and an HTTP session, then crawl.

    :returns: None

    """
    # Open the MySQL connection.
    db = pymysql.connect(
        host='localhost',
        user='root',
        password='root',
        port=3306,
        db='demo',
    )
    cursor = db.cursor()
    session = requests.session()
    #  TODO <18-12-28, Codergege> # login, get_page, parse_page, save
    db.close()


if __name__ == "__main__":
    main()
endsnippet
snippet urllibpost "使用 urllib 库的简单爬虫程序模板"
"""
使用 urllib 库的简单爬虫程序. 使用 post: $1
"""
import urllib.request
import urllib.parse

# url
post_url = '${2:todo}'

# 构建 post 表单数据
form_data = {
    # todo
	${3:todo}
}
# 构建 headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
        AppleWebKit/537.36 (KHTML, like Gecko) \
        Chrome/70.0.3538.110 Safari/537.36'
}
# 构建请求 request
request = urllib.request.Request(url = post_url, headers = headers)
# 处理 form_data
form_data = urllib.parse.urlencode(form_data).encode()
# 发送请求
response = urllib.request.urlopen(request, data = form_data)

# 解析
${0:todo}
endsnippet

snippet urllibget "使用 urllib 库的简单爬虫程序模板"
"""
使用 urllib 库的简单爬虫程序. 使用 get: $1
"""
import urllib.request
import urllib.parse

# url
url = '${2:todo}'
# 构建 get 请求参数
# 如果有中文, 处理一下, 例如: 'wd': urllib.parse.quote('美女')
data = {
	${3:todo}
}
# 处理 get 请求参数
query_string = urllib.parse.urlencode(data)
# 拼装 url
url += '?'
url += query_string
# 发送请求
response = urllib.request.urlopen(url = url)

# 解析
${0:todo}
endsnippet
