## shangbiaowang — scrapes trademark records per company from www.tm.cn into MongoDB
import requests
from scrapy import Selector
import urllib.parse
import re
import MySQLdb
import pymongo
import time
import random

from tools import user_agent_list

# MongoDB handles: database "shangbiao", collection "datas" on the local
# default server. Scraped per-company documents are inserted into `datas`.
client = pymongo.MongoClient(host='localhost', port=27017)
shangbiao = client.get_database('shangbiao')
datas = shangbiao.get_collection('datas')


# Load the list of company names to query, one name per line.
# Read in binary and decode with errors='ignore' so undecodable bytes are
# silently dropped (same tolerance as the original code).
file1 = 'F:\\顺企网数据整理\\114名录excel\\名录省市\\江苏\\yixing3'
companys1 = []
# FIX: use a context manager — the original opened the file and never
# closed it, leaking the handle for the lifetime of the script.
with open(file1, 'rb') as f1:
    for raw in f1:
        # Strip CR/LF terminators; keep the rest of the line intact.
        companys1.append(raw.decode('utf8', 'ignore').replace('\r', '').replace('\n', ''))
# url = 'http://www.tm.cn/search?sort=&TmType=&value=%E6%97%A0%E9%94%A1%E6%B1%9F%E5%8D%97%E7%94%B5%E7%BC%86%E6%9C%89%E9%99%90%E5%85%AC%E5%8F%B8&searchType=3&hotype=&status=&words='
def _parse_page(sel):
    """Extract all trademark records from one tm.cn result page.

    `sel` is a scrapy Selector over the page HTML. Returns a list of dicts
    with keys: title / state / type / 申请人 / 注册号 / images. The lists are
    zipped positionally, so a record is emitted only when every field is
    present for that position (same truncation semantics as the original).
    """
    # FIX: the original single-page branch used `li:nth-child(1)` here and
    # so collected only the FIRST image, truncating the zip to one record.
    # The multi-page selector (all <li>) is the correct one for both cases.
    images = ['http://www.tm.cn' + pic for pic in sel.css(
        'body > div.main > div.mb40.clearfix.buss_mark_box.backfff > ul > li > div > a > img::attr(src)').extract()]
    titles = sel.css(
        'ul > li > div > div.left.searanswer_main > h3 > span.thide.left.title::text').extract()
    states = sel.css(
        'ul > li > div > div.left.searanswer_main > h3 > span:nth-child(2)::text').extract()
    types = sel.css(
        'ul > li > div > div.left.searanswer_main > p > span.c_blue::text').extract()
    users = sel.css(
        'ul > li > div > div.left.searanswer_main > ol > li:nth-child(1)::text').extract()
    codes = sel.css(
        'ul > li > div > div.left.searanswer_main > ol > li:nth-child(2)::text').extract()
    return [
        {'title': a, 'state': b, 'type': c, '申请人': d, '注册号': e, 'images': f}
        for a, b, c, d, e, f in zip(titles, states, types, users, codes, images)
    ]


# Query tm.cn for each company and store one document per company in Mongo.
for m in companys1:
    # Randomized delay between companies to avoid hammering the site.
    time.sleep(random.randint(10, 15))
    print('正在查询%s' % m)
    # FIX: the original header VALUE began with the literal text
    # 'User-Agent:', duplicating the header name inside its value.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
    url_0 = ('https://www.tm.cn/search?sort=&TmType=&value='
             + urllib.parse.quote(m) + '&searchType=3&hotype=&status=&words=')
    resp = requests.get(url_0, timeout=300, headers=headers)
    response_0 = Selector(text=resp.text)
    # <em> holding the total hit count for this company name.
    nums = response_0.css(
        'body > div.main > div.mb40.clearfix.buss_mark_box.backfff > div.clearfix.rank_box > span > em:nth-child(2)::text').extract()
    if nums == ['0']:
        print('%s无商标信息' % m)
        continue  # nothing to store for this company

    info_z = {'公司名称': m}
    total_count = response_0.css('span.total_count::text').extract()
    if not total_count:
        # Single page of results: parse the response we already have.
        print('只有1页数据')
        info_z['商标信息'] = _parse_page(response_0)
    else:
        # Page count = the digits inside the last total_count element
        # (the original loop kept only the last element's digits too).
        pages = int(''.join(re.findall(u'[0-9]', total_count[-1])))
        print('有%s页数据' % pages)
        items = []
        for i in range(1, pages + 1):
            print('正在解析第%s页信息' % i)
            url_1 = ('https://www.tm.cn/search?sort=&TmType=&value='
                     + urllib.parse.quote(m) + '&searchType=3&page=%s' % i)
            resp = requests.get(url=url_1, timeout=300, headers=headers)
            items.extend(_parse_page(Selector(text=resp.text)))
        # Assign once after the loop (the original reassigned every page).
        info_z['商标信息'] = items

    # FIX: Collection.insert() was deprecated and removed in pymongo 4;
    # insert_one() is the supported call for a single document.
    datas.insert_one(info_z)
