#!/usr/bin/env python
# -*- coding:utf-8 -*-


# python2

import pymssql
import urllib, re
import urllib2
from bs4 import BeautifulSoup


class MSSQL:
    """
    Thin wrapper around pymssql: opens a fresh connection per query.

    The pymssql library can be downloaded from
    http://www.lfd.uci.edu/~gohlke/pythonlibs/#pymssql ; the TCP/IP protocol
    must be enabled in Sql Server Configuration Manager for it to connect.

    Usage:
        ms = MSSQL(host="localhost", user="sa", pwd="123456", db="MyDb")
        rows = ms.ExecQuery("SELECT id, name FROM SomeTable")
    """

    def __init__(self, host, user, pwd, db):
        # Connection parameters only; no connection is opened until a query runs.
        self.host = host
        self.user = user
        self.pwd = pwd
        self.db = db

    def __GetConnect(self):
        """
        Open a connection (stored on self.conn) and return a cursor.

        Raises:
            NameError: if no database name was configured, or the connection
                did not yield a cursor.
        """
        if not self.db:
            # BUG FIX: the original `raise(NameError, "msg")` raised a bare
            # tuple (Py2) / TypeError (Py3); raise a real exception instance.
            raise NameError("没有设置数据库信息")
        self.conn = pymssql.connect(host=self.host, user=self.user,
                                    password=self.pwd, database=self.db,
                                    charset="utf8")
        cur = self.conn.cursor()
        if not cur:
            raise NameError("连接数据库失败")
        return cur

    def ExecQuery(self, sql):
        """
        Execute a SELECT statement and return all rows.

        Returns a list of tuples, one tuple per record.

        Example:
            ms = MSSQL(host="localhost", user="sa", pwd="123456",
                       db="PythonWeiboStatistics")
            resList = ms.ExecQuery("SELECT id,NickName FROM WeiBoUser")
            for (id, NickName) in resList:
                print str(id), NickName
        """
        cur = self.__GetConnect()
        try:
            cur.execute(sql)
            resList = cur.fetchall()
        finally:
            # BUG FIX: close the connection even when execute/fetch raises,
            # otherwise a failed query leaked the connection.
            self.conn.close()
        return resList

    def ExecNonQuery(self, sql):
        """
        Execute a non-query statement (INSERT/UPDATE/DELETE) and commit.

        Example:
            ms.ExecNonQuery("insert into WeiBoUser values('2','3')")
        """
        cur = self.__GetConnect()
        try:
            cur.execute(sql)
            self.conn.commit()
        finally:
            # Same leak fix as ExecQuery: always release the connection.
            self.conn.close()

def getContentOrComment(url):
    """Fetch *url* and return its body decoded as GBK, or None on any failure.

    Failures (network errors, bad status, decode errors) are printed and
    swallowed so callers can simply skip the page.
    """
    headers = {
        'User-Agent': r'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      r'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',
        'Referer': r'http://www.lagou.com/zhaopin/Python/?labelWords=label',
        'Connection': 'keep-alive',
        'Cookie':'cnsuuid=b30b4697-9274-c086-45fb-266e2d5d515a2144.412362344692_1516091100440; cn_1263394109_dplus=%7B%22distinct_id%22%3A%20%22160fe1252063aa-009e5e5eb285c3-6b1b1279-1fa400-160fe125207788%22%7D; UM_distinctid=160fe1252063aa-009e5e5eb285c3-6b1b1279-1fa400-160fe125207788; CNZZDATA1263394109=164018367-1516090584-null%7C1516090584; vjuids=-cbcce550.160fe125c88.0.c5086b62efd1d; vjlast=1516091104.1516091104.30',
        'Host':'www.chinanews.com',
        'Pragma':'no-cache'
    }

    page = None
    try:
        request = urllib2.Request(url, headers=headers)
        raw = urllib2.urlopen(request).read()
        page = raw.decode('gbk')
    except Exception as err:
        # Best-effort fetch: report the problem and fall through with None.
        print(err)
    return page

def main():
    """Interactive driver: page through 中国新闻网 scroll-news URLs and, for
    each step, dump the id/user_name rows from the dt_manager table.

    Type "exit" at the prompt to stop; any other input advances one page.
    """
    # BUG FIX: Python 2's input() eval()s the typed text, so typing `exit`
    # raised NameError instead of leaving the loop. raw_input returns the
    # raw string; fall back to input() when running under Python 3.
    try:
        read_line = raw_input  # Python 2
    except NameError:
        read_line = input      # Python 3

    # Scroll-news listing URL template (中国新闻网 rolling news).
    articlurl = "http://www.chinanews.com/scroll-news/news%d.html"

    # MSSQL opens a fresh connection per query, so one instance built
    # outside the loop is sufficient (the original rebuilt it every pass).
    ms = MSSQL(host=".", user="sa", pwd="ok", db="wscms")

    page = 0
    while True:
        raw = read_line("exit out：")
        if raw == "exit":
            break
        page += 1
        Url = articlurl % page
        print(Url)
        resList = ms.ExecQuery("SELECT id,user_name FROM dt_manager")
        print(resList)
        for (id, user_name) in resList:
            # Rows arrive as utf8-encoded bytes (connection charset="utf8").
            print(str(user_name).decode("utf8"))

if __name__ == '__main__':
    main()