# -*- coding: utf-8 -*-
import pymysql

class mysqlOper:
    """Thin wrapper around one pymysql connection for the scrapy_paper
    download/import workflow.

    NOTE(review): credentials are hard-coded below — move them to a config
    file or environment variables before this ships anywhere public.
    """

    def __init__(self):
        self.conn = pymysql.connect(host='118.190.159.113', user='root',
                                    passwd='P<ZqshA5*', db='liuwei',
                                    charset='utf8')

    # Fetch a batch of files still to be downloaded from scrapy_paper.
    def getDownUrl(self, num):
        """Return up to *num* rows of (xkwid, downurl, papername).

        :param num: maximum number of rows to fetch (LIMIT value).
        :return: list of dicts, one per row (DictCursor).
        """
        with self.conn.cursor(cursor=pymysql.cursors.DictCursor) as cursor:
            cursor.execute(
                "select xkwid,downurl,papername from scrapy_paper  limit %s",
                (num,))
            rows = list(cursor.fetchall())
        print(rows)
        return rows

    # xkwid     xuekewang id being scraped (one id may map to several docs)
    # npid      newly generated paper id
    # papername paper title, i.e. the name of the doc file
    def insertPaper(self, xkwid, npid, papername, picurls):
        """Copy metadata for *xkwid* into the paper table under the new id.

        NOTE(review): the SQL below looks broken — table 'scr_apper' is
        probably a typo for 'scrapy_paper', and MySQL requires an ON clause
        for LEFT JOIN (here the join condition sits in WHERE). Kept verbatim
        pending confirmation against the real schema.
        """
        with self.conn.cursor(cursor=pymysql.cursors.DictCursor) as cursor:
            cursor.execute(
                "insert into paper(id,papername,paperyear,papertype,paperarea,channel,picurls) "
                "select %s,%s,paperyear,papertype,paperarea,channel,%s from scr_apper as a left join paper as b where a.xkwid=b.xkwid and a.xkwid=%s",
                (npid, papername, picurls, xkwid))
        # pymysql autocommit is off by default; without this the insert is lost.
        self.conn.commit()

    def updateScrapyStats(self, xkwid):
        """Mark the scrapy_paper row for *xkwid* as processed (stats=1)."""
        with self.conn.cursor(cursor=pymysql.cursors.DictCursor) as cursor:
            # BUG FIX: the original called str.format() on a string containing
            # a '%s' placeholder — format() does not substitute '%s', so the
            # literal text 'xkwid=%s' reached the server. Use a parameterized
            # execute (also closes the SQL-injection hole).
            cursor.execute(
                "update scrapy_paper set stats=1 where xkwid=%s", (xkwid,))
        # Persist the update (autocommit is off by default).
        self.conn.commit()

#mysqlOper=mysqlOper()
#mysqlOper.getDownUrl(5);



