import requests
from lxml import etree
from datetime import datetime
from datetime import timedelta
import pymysql
import traceback

# Shared HTTP request headers for every crawler below: a desktop-Chrome
# User-Agent so the target sites serve their normal HTML instead of
# rejecting an obvious bot client.
headers = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
}


# Build the INSERT statement for one article
def makesql(t, f, n, u, time, s):
    """Build an INSERT statement for the behc_zixun table.

    Args:
        t: article type (column ``type``).
        f: source site name (column ``from``).
        n: article title (column ``name``).
        u: article URL (column ``url``).
        time: publish date; '.' and '/' separators are normalized to '-'.
        s: category label (column ``sort``).

    Returns:
        The complete INSERT SQL string.
    """
    def esc(v):
        # Escape backslashes and single quotes (MySQL string-literal rules)
        # so a value containing a quote cannot break out of the SQL literal.
        # Bug fix: previously a title like "It's..." produced invalid SQL.
        return str(v).replace('\\', '\\\\').replace("'", "\\'")

    time = str(time).replace('.', '-').replace('/', '-')
    values = "', '".join(esc(v) for v in (t, f, n, u, time, s))
    return ("INSERT INTO behc_zixun (type, `from`, name, url, `time`, sort)"
            " VALUES ('" + values + "');")


# Execute one write statement against behc_python
def savesql(sql):
    """Connect to the behc_python database, run *sql* and commit.

    The outcome (success message or the caught exception) is printed
    rather than raised, so a failed insert never aborts a crawl.
    """
    connection = None
    cursor = None
    outcome = '执行成功'
    try:
        connection = pymysql.connect(host="192.168.155.141",
                                     port=3306,
                                     user="root",
                                     passwd="fD@LCVby2WJI",
                                     database="behc_python",
                                     charset='utf8')
        if not connection:
            print("链接失败")
        else:
            print("链接成功")
            cursor = connection.cursor()
            cursor.execute(sql)
            connection.commit()
    except Exception as err:
        outcome = err
    finally:
        # Close the cursor before the connection that owns it.
        if connection:
            if cursor:
                cursor.close()
            connection.close()
    print(outcome)


# Duplicate check: has this article already been stored?
def checksql(t, f, n, u, time, s):
    """Return behc_zixun rows matching type/from/name/url/sort.

    Used as a duplicate check before inserting; an empty (falsy) result
    means the article has not been stored yet.  ``time`` is accepted for
    call-site symmetry but is deliberately not part of the lookup.
    """
    # Parameterized query: the driver binds the values, so quotes inside
    # titles/URLs cannot break the statement (and no SQL injection).
    sql = ("SELECT * FROM behc_zixun WHERE type = %s AND `from` = %s"
           " AND name = %s AND url = %s AND sort = %s")
    con = None
    cur = None
    # Bug fix: datas was unbound when the connection failed or an
    # exception fired before fetchall(), making `return datas` a NameError.
    datas = ()
    try:
        con = pymysql.connect(host="192.168.155.141",
                              port=3306,
                              user="root",
                              passwd="fD@LCVby2WJI",
                              database="behc_python",
                              charset='utf8')
        if con:
            print("链接成功")
            cur = con.cursor()
            cur.execute(sql, (t, f, n, u, s))
            datas = cur.fetchall()
        else:
            print("链接失败")
    except Exception as e:
        print(e)
    finally:
        if con:
            if cur:
                cur.close()
            con.close()
    return datas


# Entry point: run every crawler and log the total
def zhanfastart():
    """Run all nine site crawlers and record the total inserted count.

    Each crawler is isolated in its own try/except so one failing site
    cannot stop the others; the grand total is written to behc_log.
    """
    print(datetime.now(), '开始')
    crawlers = [
        (zgbdthyxhzc, '中国半导体行业协会政策'),
        (zgbdthyxhcy1, '中国半导体行业协会产业-国内新闻'),
        (zgbdthyxhcy2, '中国半导体行业协会产业-国际新闻'),
        (cqbdtgc, '全球半导体观察'),
        (bdtcygc, '半导体产业观察'),
        (icdzw, '21ic电子网'),
        (zgbdthychjs1, '中国半导体行业协会技术-新品纵横'),
        (zgbdthychjs2, '中国半导体行业协会技术-技术新闻'),
        (dzfsy, '电子发烧友'),
    ]
    i = 0
    for crawler, label in crawlers:
        try:
            i += crawler()
        except Exception as e:
            print(e, label)
            # Bug fix: traceback.print_exc() prints the traceback itself and
            # returns None; wrapping it in print() added a stray "None" line.
            traceback.print_exc()
    print(datetime.now(), '结束')
    log = "更新" + str(i) + "条入库behc_zixun"
    savesql('INSERT INTO behc_log (name, log, update_time) VALUES ("9家资讯", "' + log + '", "' + datetime.strftime(
        datetime.now(), '%Y-%m-%d %H:%M:%S') + '")')


def zgbdthyxhzc():
    """Crawl CSIA policy articles (ClassID=79) and store unseen ones.

    Pages through the listing until the pager's last page, or until an
    already-stored article is found.  Returns the number of rows inserted.
    """
    inserted = 0
    total = 1
    cur_page = total
    seen = 0
    row_type = '政策类'
    row_from = '中国半导体行业协会'
    row_sort = '政策法规'
    while cur_page <= total:
        list_url = ('http://www.csia.net.cn/Article/ShowClass.asp?ClassID=79&page='
                    + str(cur_page))
        resp = requests.get(url=list_url, headers=headers)
        resp.encoding = 'GBK'
        tree = etree.HTML(resp.text)
        for item in tree.xpath('//*[@id="ArticleBody"]/ul/li/p'):
            try:
                link = 'http://www.csia.net.cn' + item.xpath('./a/@href')[0]
                name = item.xpath('./a/span/text()')[0]
                posted = item.xpath('./span/text()')[1][1:-1]
                if not posted:
                    # Some rows nest the date one <span> deeper.
                    posted = item.xpath('./span/span/text()')[0]
                if checksql(row_type, row_from, name, link, posted, row_sort):
                    print(row_type, row_from, name, link, posted, row_sort,
                          '已存在')
                    # Already stored: jump past the last page to end the crawl.
                    cur_page = cur_page + total
                else:
                    savesql(makesql(row_type, row_from, name, link, posted,
                                    row_sort))
                    inserted += 1
                seen += 1
            except Exception as e:
                print(e)
        # Total page count parsed from the pager (first character stripped).
        total = int(tree.xpath('//div[@class="showpage"]/form/b/text()')[2][1:])
        cur_page += 1
    print(datetime.now(), seen)
    return inserted


def zgbdthyxhcy1():
    """Crawl CSIA domestic industry news (ClassID=7) and store unseen ones.

    Pages through the listing until the pager's last page, or until an
    already-stored article is found.  Returns the number of rows inserted.
    """
    inserted = 0
    total = 1
    cur_page = total
    seen = 0
    row_type = '产业类'
    row_from = '中国半导体行业协会'
    row_sort = '国内新闻'
    while cur_page <= total:
        list_url = ('http://www.csia.net.cn/Article/ShowClass.asp?ClassID=7&page='
                    + str(cur_page))
        resp = requests.get(url=list_url, headers=headers)
        resp.encoding = 'GBK'
        tree = etree.HTML(resp.text)
        for item in tree.xpath('//*[@id="ArticleBody"]/ul/li/p'):
            try:
                link = 'http://www.csia.net.cn' + item.xpath('./a/@href')[0]
                name = item.xpath('./a/span/text()')[0]
                posted = item.xpath('./span/text()')[1][1:-1]
                if not posted:
                    # Some rows nest the date one <span> deeper.
                    posted = item.xpath('./span/span/text()')[0]
                if checksql(row_type, row_from, name, link, posted, row_sort):
                    print(row_type, row_from, name, link, posted, row_sort,
                          '已存在')
                    # Already stored: jump past the last page to end the crawl.
                    cur_page = cur_page + total
                else:
                    savesql(makesql(row_type, row_from, name, link, posted,
                                    row_sort))
                    inserted += 1
                seen += 1
            except Exception as e:
                print(e)
        # Total page count parsed from the pager (first character stripped).
        total = int(tree.xpath('//div[@class="showpage"]/form/b/text()')[2][1:])
        cur_page += 1
    print(datetime.now(), seen)
    return inserted


def zgbdthyxhcy2():
    """Crawl CSIA international industry news (ClassID=8) and store unseen ones.

    Pages through the listing until the pager's last page, or until an
    already-stored article is found.  Returns the number of rows inserted.
    """
    inserted = 0
    total = 1
    cur_page = total
    seen = 0
    row_type = '产业类'
    row_from = '中国半导体行业协会'
    row_sort = '国际新闻'
    while cur_page <= total:
        list_url = ('http://www.csia.net.cn/Article/ShowClass.asp?ClassID=8&page='
                    + str(cur_page))
        resp = requests.get(url=list_url, headers=headers)
        resp.encoding = 'GBK'
        tree = etree.HTML(resp.text)
        for item in tree.xpath('//*[@id="ArticleBody"]/ul/li/p'):
            try:
                link = 'http://www.csia.net.cn' + item.xpath('./a/@href')[0]
                name = item.xpath('./a/span/text()')[0]
                posted = item.xpath('./span/text()')[1][1:-1]
                if not posted:
                    # Some rows nest the date one <span> deeper.
                    posted = item.xpath('./span/span/text()')[0]
                if checksql(row_type, row_from, name, link, posted, row_sort):
                    print(row_type, row_from, name, link, posted, row_sort,
                          '已存在')
                    # Already stored: jump past the last page to end the crawl.
                    cur_page = cur_page + total
                else:
                    savesql(makesql(row_type, row_from, name, link, posted,
                                    row_sort))
                    inserted += 1
                seen += 1
            except Exception as e:
                print(e)
        # Total page count parsed from the pager (first character stripped).
        total = int(tree.xpath('//div[@class="showpage"]/form/b/text()')[2][1:])
        cur_page += 1
    print(datetime.now(), seen)
    return inserted


def cqbdtgc():
    """Crawl industry news from dramx.com (全球半导体观察).

    Pages through /Info/<n>.html until the last page read from the
    pager, or until an already-stored article is found.

    Returns:
        Number of newly inserted rows.
    """
    num = 0
    all = 1
    page = all
    l = 0
    datatype = '产业类'
    datafrom = '全球半导体观察'
    while page <= all:
        url = 'https://www.dramx.com/Info/' + str(page) + '.html'
        r = requests.get(url=url, headers=headers)
        r.encoding = 'utf-8'
        root = etree.HTML(r.text)
        content_list = root.xpath('//*[@id="divArticleList"]/div/div[2]')
        for c in content_list:
            try:
                href = 'https://www.dramx.com' + c.xpath('./h3/a/@href')[0]
                title = c.xpath('./h3/a/text()')[0]
                time = c.xpath('./p/text()')[0]
                # Relative timestamps (text ending in '分享') carry no date:
                # record today's date instead.  (Removed the dead
                # `time = time.split('小时')` — its result was immediately
                # overwritten by the next assignment.)
                if time[-2:] == '分享':
                    time = str(datetime.now())[:10]
                # Merge the secondary tag links into the primary category.
                sort1 = c.xpath('./div/p[1]/a/text()')
                sort2 = c.xpath('./div/p[2]/a/text()')[0]
                for s in sort1:
                    sort2 += '，' + s.replace(' ', '')
                if checksql(datatype, datafrom, title, href, time, sort2):
                    print(datatype, datafrom, title, href, time, sort2, '已存在')
                    # Already stored: jump past the last page to end the crawl.
                    page = all + page
                else:
                    sql = makesql(datatype, datafrom, title, href, time, sort2)
                    savesql(sql)
                    num += 1
                l += 1
            except Exception as e:
                print(e)
        # Page count from the second-to-last pager link.
        all = int(root.xpath('//*[@id="divArticleList"]/div/a/text()')[-2])
        print(page, all)
        page += 1
    print(datetime.now(), l)
    return num


def bdtcygc():
    """Crawl news from semiinsights.com (半导体行业观察).

    The site exposes no reliable page count (the pager parse was
    abandoned), so the crawl is capped at 100 pages and stops early when
    an already-stored article is found.

    Returns:
        Number of newly inserted rows.
    """
    num = 0
    l = 0
    page = 1
    last_page = 100  # hard cap; the pager XPath never worked here
    datatype = '产业类'
    datafrom = '半导体行业观察'
    while page <= last_page:
        url = 'http://www.semiinsights.com/index.php?m=content&c=index&a=showmore&page=' + \
            str(page)
        r = requests.get(url=url, headers=headers)
        r.encoding = 'utf-8'
        root = etree.HTML(r.text)
        content_list = root.xpath('/html/body/li/div[2]')
        for c in content_list:
            try:
                href = c.xpath('./h5/a/@href')[0]
                title = c.xpath('./h5/a/text()')[0]
                time = c.xpath('./div/span/text()')[0]
                datasort = c.xpath('./div/div/span[1]/a/text()')[0]
                time = str(time).replace('.', '-')
                if checksql(datatype, datafrom, title, href, time, datasort):
                    print(datatype, datafrom, title, href, time, datasort,
                          '已存在')
                    # Bug fix: the old `page = page + all` only advanced one
                    # page (all was frozen at 1 here), so the crawl never
                    # stopped early and always refetched all 100 pages.
                    page = last_page
                else:
                    sql = makesql(datatype, datafrom, title, href, time,
                                  datasort)
                    savesql(sql)
                    num += 1
                l += 1
            except Exception as e:
                print(e)
        page += 1
    print(datetime.now(), l)
    return num


def icdzw():
    """Crawl 21ic.com semiconductor news and store unseen articles.

    The listing shows no publish date, so today's date is recorded.
    Returns the number of rows inserted.
    """
    inserted = 0
    total = 1
    cur_page = total
    seen = 0
    row_type = '产业类'
    row_from = '21ic电子网'
    while cur_page <= total:
        list_url = 'https://www.21ic.com/semi/news/index_' + str(cur_page)
        resp = requests.get(url=list_url, headers=headers)
        resp.encoding = 'utf-8'
        tree = etree.HTML(resp.text)
        for item in tree.xpath('//div[@class="newitem revision2020"]/ul/li'):
            try:
                link = 'https://www.21ic.com' + item.xpath('./h2/a/@href')[0]
                # Single quotes in titles break the hand-built SQL; swap them.
                name = item.xpath('./h2/a/text()')[0].replace("'", '"')
                # All tag links joined with a full-width comma.
                row_sort = '，'.join(item.xpath('./p[2]/span[2]/a/text()'))
                posted = str(datetime.now())[:10]
                if checksql(row_type, row_from, name, link, posted, row_sort):
                    print(row_type, row_from, name, link, posted, row_sort,
                          '已存在')
                    # Already stored: jump past the last page to end the crawl.
                    cur_page = cur_page + total
                else:
                    savesql(makesql(row_type, row_from, name, link, posted,
                                    row_sort))
                    inserted += 1
                seen += 1
            except Exception as e:
                print(e)
        # Highest data-page attribute in the pager, plus one.
        total = int(tree.xpath('//div[@class="epage"]/a/@data-page')[-1]) + 1
        print(cur_page, total)
        cur_page += 1
    print(datetime.now(), seen)
    return inserted


def zgbdthychjs1():
    """Crawl CSIA new-product news (ClassID=32) and store unseen ones.

    Pages through the listing until the pager's last page, or until an
    already-stored article is found.  Returns the number of rows inserted.
    """
    inserted = 0
    total = 1
    cur_page = total
    seen = 0
    row_type = '技术类'
    row_from = '中国半导体行业协会'
    row_sort = '新品纵横'
    while cur_page <= total:
        list_url = ('http://www.csia.net.cn/Article/ShowClass.asp?ClassID=32&page='
                    + str(cur_page))
        resp = requests.get(url=list_url, headers=headers)
        resp.encoding = 'GBK'
        tree = etree.HTML(resp.text)
        for item in tree.xpath('//*[@id="ArticleBody"]/ul/li/p'):
            try:
                link = 'http://www.csia.net.cn' + item.xpath('./a/@href')[0]
                name = item.xpath('./a/span/text()')[0]
                posted = item.xpath('./span/text()')[1][1:-1]
                if not posted:
                    # Some rows nest the date one <span> deeper.
                    posted = item.xpath('./span/span/text()')[0]
                if checksql(row_type, row_from, name, link, posted, row_sort):
                    print(row_type, row_from, name, link, posted, row_sort,
                          '已存在')
                    # Already stored: jump past the last page to end the crawl.
                    cur_page = cur_page + total
                else:
                    savesql(makesql(row_type, row_from, name, link, posted,
                                    row_sort))
                    inserted += 1
                seen += 1
            except Exception as e:
                print(e)
        # Total page count parsed from the pager (first character stripped).
        total = int(tree.xpath('//div[@class="showpage"]/form/b/text()')[2][1:])
        cur_page += 1
    print(datetime.now(), seen)
    return inserted


def zgbdthychjs2():
    """Crawl CSIA technology news (ClassID=33) and store unseen ones.

    Pages through the listing until the pager's last page, or until an
    already-stored article is found.  Returns the number of rows inserted.
    """
    inserted = 0
    total = 1
    cur_page = total
    seen = 0
    row_type = '技术类'
    row_from = '中国半导体行业协会'
    row_sort = '技术新闻'
    while cur_page <= total:
        list_url = ('http://www.csia.net.cn/Article/ShowClass.asp?ClassID=33&page='
                    + str(cur_page))
        resp = requests.get(url=list_url, headers=headers)
        resp.encoding = 'GBK'
        tree = etree.HTML(resp.text)
        for item in tree.xpath('//*[@id="ArticleBody"]/ul/li/p'):
            try:
                link = 'http://www.csia.net.cn' + item.xpath('./a/@href')[0]
                name = item.xpath('./a/span/text()')[0]
                posted = item.xpath('./span/text()')[1][1:-1]
                if not posted:
                    # Some rows nest the date one <span> deeper.
                    posted = item.xpath('./span/span/text()')[0]
                if checksql(row_type, row_from, name, link, posted, row_sort):
                    print(row_type, row_from, name, link, posted, row_sort,
                          '已存在')
                    # Already stored: jump past the last page to end the crawl.
                    cur_page = cur_page + total
                else:
                    savesql(makesql(row_type, row_from, name, link, posted,
                                    row_sort))
                    inserted += 1
                seen += 1
            except Exception as e:
                print(e)
        # Total page count parsed from the pager (first character stripped).
        total = int(tree.xpath('//div[@class="showpage"]/form/b/text()')[2][1:])
        cur_page += 1
    print(datetime.now(), seen)
    return inserted


def dzfsy():
    """Crawl elecfans.com tech channel (电子发烧友) and store unseen articles.

    Pages through /tech-148-<n>.html until the last page read from the
    pager, or until an already-stored article is found.  The listing shows
    no publish date, so today's date is recorded.

    Returns:
        Number of newly inserted rows.
    """
    num = 0
    all = 1
    page = all
    l = 0
    datatype = '技术类'
    datafrom = '电子发烧友'
    # Removed unused locals `p = []` and `p2 = len(content_list)` — neither
    # was ever read.
    while page <= all:
        url = 'http://www.elecfans.com/tech-148-' + str(page) + '.html'
        r = requests.get(url=url, headers=headers)
        r.encoding = 'utf-8'
        root = etree.HTML(r.text)
        content_list = root.xpath('/html/body/div[4]/div[2]/div[2]/ul/li')
        for c in content_list:
            try:
                href = c.xpath('./div/h3/a/@href')[0]
                title = c.xpath('./div/h3/a/text()')[0]
                time = str(datetime.now())[:10]
                # All tag links joined with a full-width comma.
                datasort = '，'.join(c.xpath('./div/div/span[2]/a/text()'))
                if checksql(datatype, datafrom, title, href, time, datasort):
                    print(datatype, datafrom, title, href, time, datasort,
                          '已存在')
                    # Already stored: jump past the last page to end the crawl.
                    page = page + all
                else:
                    sql = makesql(datatype, datafrom, title, href, time,
                                  datasort)
                    savesql(sql)
                    num += 1
                l += 1
            except Exception as e:
                print(e)
        # Page count from the last pager href, e.g. '/tech-148-37.html' -> 37.
        all = int(
            root.xpath('//*[@id="user_page_news"]/a/@href')[-1].replace(
                '/tech-148-', '').replace('.html', ''))
        print(page, all)
        page += 1
    print(datetime.now(), l)
    return num


if __name__ == '__main__':
    # Run the full crawl only when executed as a script, not on import.
    zhanfastart()