code (string, lengths 20–13.2k) | label (string, lengths 21–6.26k)
---|---|
1 from django.http import HttpResponse, Http404
2 from django.shortcuts import render
3 import datetime
4 from django.http import HttpResponseRedirect
5 from django.core.mail import send_mail
6 from django.contrib.auth.views import login as loginview
7 from registration.backends.simple import views
8 from django.contrib.auth import authenticate, get_user_model, login
9 from registration import signals
10 from scrapyproject.views import mongodb_user_creation, linux_user_creation
11 from scrapyproject.scrapy_packages import settings
12 try:
13 # Python 3
14 from urllib.parse import urlparse
15 except ImportError:
16 # Python 2
17 from urlparse import urlparse
18
19 try:
20 from urllib.parse import quote
21 except:
22 from urllib import quote
23
24 User = get_user_model()
25
26
27 class MyRegistrationView(views.RegistrationView):
28 def register(self, form):
29 new_user = form.save()
30 new_user = authenticate(
31 username=getattr(new_user, User.USERNAME_FIELD),
32 password=form.cleaned_data['password1']
33 )
34
35 #perform additional account creation here (MongoDB, local Unix accounts, etc.)
36
37 mongodb_user_creation(getattr(new_user, User.USERNAME_FIELD), form.cleaned_data['password1'])
38
39 if settings.LINUX_USER_CREATION_ENABLED:
40 try:
41 linux_user_creation(getattr(new_user, User.USERNAME_FIELD), form.cleaned_data['password1'])
42 except:
43 pass
44
45 login(self.request, new_user)
46 signals.user_registered.send(sender=self.__class__,
47 user=new_user,
48 request=self.request)
49 return new_user
50
51 def get_success_url(self, user):
52 return "/project"
53
54
55 def custom_login(request):
56 if request.user.is_authenticated():
57 return HttpResponseRedirect('/project')
58 else:
59 return loginview(request)
60
61
62 def custom_register(request):
63 if request.user.is_authenticated():
64 return HttpResponseRedirect('/project')
65 else:
66 register = MyRegistrationView.as_view()
67 return register(request)
| 21 - warning: bare-except
42 - warning: bare-except
51 - warning: unused-argument
56 - refactor: no-else-return
63 - refactor: no-else-return
1 - warning: unused-import
1 - warning: unused-import
2 - warning: unused-import
3 - warning: unused-import
5 - warning: unused-import
14 - warning: unused-import
20 - warning: unused-import
|
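The label column flags two bare `except:` handlers in this snippet (lines 21 and 42). The first can simply become `except ImportError:`, like the urlparse import above it; for the Linux-account call, here is a minimal sketch with a named exception and a log line, assuming the helper fails with an OSError (the real exception type depends on how `linux_user_creation` is implemented):

import logging

logger = logging.getLogger(__name__)


def create_linux_account(create_fn, username, password):
    # create_fn stands in for the project's linux_user_creation helper
    try:
        create_fn(username, password)
    except OSError as exc:  # assumed failure mode; adjust to the helper's real errors
        logger.warning("Linux account creation failed for %s: %s", username, exc)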
1 # -*- coding: utf-8 -*-
2 """
3 -------------------------------------------------
4 File Name: models.py
5 Description :
6 Author : JHao
7 date: 2016/11/18
8 -------------------------------------------------
9 Change Activity:
10 2016/11/18:
11 -------------------------------------------------
12 """
13
14 from django.db import models
15 from django.conf import settings
16
17
18 # Create your models here.
19
20 class Tag(models.Model):
21 tag_name = models.CharField('标签名称', max_length=30)
22
23 def __str__(self):
24 return self.tag_name
25
26
27 class Article(models.Model):
28 title = models.CharField(max_length=200) # 博客标题
29 category = models.ForeignKey('Category', verbose_name='文章类型', on_delete=models.CASCADE)
30 date_time = models.DateField(auto_now_add=True) # 博客日期
31 content = models.TextField(blank=True, null=True) # 文章正文
32 digest = models.TextField(blank=True, null=True) # 文章摘要
33 author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='作者', on_delete=models.CASCADE)
34 view = models.BigIntegerField(default=0) # 阅读数
35 comment = models.BigIntegerField(default=0) # 评论数
36 picture = models.CharField(max_length=200) # 标题图片地址
37 tag = models.ManyToManyField(Tag) # 标签
38
39 def __str__(self):
40 return self.title
41
42 def sourceUrl(self):
43 source_url = settings.HOST + '/blog/detail/{id}'.format(id=self.pk)
44 return source_url # 给网易云跟帖使用
45
46 def viewed(self):
47 """
48 增加阅读数
49 :return:
50 """
51 self.view += 1
52 self.save(update_fields=['view'])
53
54 def commenced(self):
55 """
56 增加评论数
57 :return:
58 """
59 self.comment += 1
60 self.save(update_fields=['comment'])
61
62 class Meta: # 按时间降序
63 ordering = ['-date_time']
64
65
66 class Category(models.Model):
67 name = models.CharField('文章类型', max_length=30)
68 created_time = models.DateTimeField('创建时间', auto_now_add=True)
69 last_mod_time = models.DateTimeField('修改时间', auto_now=True)
70
71 class Meta:
72 ordering = ['name']
73 verbose_name = "文章类型"
74 verbose_name_plural = verbose_name
75
76 def __str__(self):
77 return self.name
78
79
80 class Comment(models.Model):
81 title = models.CharField("标题", max_length=100)
82 source_id = models.CharField('文章id或source名称', max_length=25)
83 create_time = models.DateTimeField('评论时间', auto_now=True)
84 user_name = models.CharField('评论用户', max_length=25)
85 url = models.CharField('链接', max_length=100)
86 comment = models.CharField('评论内容', max_length=500)
| 20 - refactor: too-few-public-methods
62 - refactor: too-few-public-methods
71 - refactor: too-few-public-methods
66 - refactor: too-few-public-methods
80 - refactor: too-few-public-methods
|
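`viewed()` and `commenced()` above read the counter into Python, add one, and save, so two concurrent requests can lose an update. A hedged alternative sketch using Django's `F()` expressions to push the increment into the database, written as a standalone helper rather than a model method:

from django.db.models import F


def increment_view_count(article):
    # Let the database evaluate view = view + 1 atomically,
    # then reload the field so the instance stays in sync.
    type(article).objects.filter(pk=article.pk).update(view=F('view') + 1)
    article.refresh_from_db(fields=['view'])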
1 from django.contrib.syndication.views import Feed
2 from django.template.defaultfilters import truncatewords
3 from django.urls import reverse_lazy
4 from .models import Post
5
6
7
8 class LatestPostsFeed(Feed):
9 title ='My Blog'
10 link=reverse_lazy('post_list')
11 description = 'new post of my Blog.'
12
13
14 def items(self):
15 return Post.published.all()[:5]
16
17 def item_title(self, item):
18 return super().item_title(item)
19
20
21 def item_description(self, item):
22 return truncatewords(item.body,30)
| 4 - error: relative-beyond-top-level
17 - warning: useless-parent-delegation
|
1 from django.conf.urls import include, url
2 from . import views
3
4 urlpatterns = [
5 url(r'^$', views.main_page, name="mainpage"),
6 url(r'^create/$', views.create_new, name="newproject"),
7 url(r'^manage/(?P<projectname>[\w]+)/', views.manage_project, name="manageproject"),
8 url(r'^delete/(?P<projectname>[\w]+)/', views.delete_project, name="deleteproject"),
9 url(r'^createitem/(?P<projectname>[\w]+)/', views.create_item, name="newitem"),
10 url(r'^edititems/(?P<projectname>[\w]+)/', views.itemslist, name="listitems"),
11 url(r'^deleteitem/(?P<projectname>[\w]+)/(?P<itemname>[\w]+)/', views.deleteitem, name="deleteitem"),
12 url(r'^edititem/(?P<projectname>[\w]+)/(?P<itemname>[\w]+)/', views.edititem, name="edititem"),
13 url(r'^addpipeline/(?P<projectname>[\w]+)/', views.addpipeline, name="addpipeline"),
14 url(r'^editpipelines/(?P<projectname>[\w]+)/', views.pipelinelist, name="listpipelines"),
15 url(r'^editpipeline/(?P<projectname>[\w]+)/(?P<pipelinename>[\w]+)/', views.editpipeline, name="editpipeline"),
16 url(r'^deletepipeline/(?P<projectname>[\w]+)/(?P<pipelinename>[\w]+)/', views.deletepipeline, name="deletepipeline"),
17 url(r'^linkgenerator/(?P<projectname>[\w]+)/', views.linkgenerator, name="linkgenerator"),
18 url(r'^scraper/(?P<projectname>[\w]+)/', views.scraper, name="scraper"),
19 url(r'^deploy/(?P<projectname>[\w]+)/', views.deploy, name='deploy'),
20 url(r'^changepassword/$', views.change_password, name="changepass"),
21 url(r'^deploystatus/(?P<projectname>[\w]+)/', views.deployment_status, name="deploystatus"),
22 url(r'^startproject/(?P<projectname>[\w]+)/(?P<worker>[\w]+)/', views.start_project, name="startproject"),
23 url(r'^stopproject/(?P<projectname>[\w]+)/(?P<worker>[\w]+)/', views.stop_project, name="stopproject"),
24 url(r'^allworkerstatus/(?P<projectname>[\w]+)/', views.get_project_status_from_all_workers, name="allworkerstatus"),
25 url(r'^getlog/(?P<projectname>[\w]+)/(?P<worker>[\w]+)/', views.see_log_file, name="seelogfile"),
26 url(r'^allprojectstatus/', views.gather_status_for_all_projects, name="allprojectstatus"),
27 url(r'^editsettings/(?P<settingtype>[\w]+)/(?P<projectname>[\w]+)/', views.editsettings, name="editsettings"),
28 url(r'^startonall/(?P<projectname>[\w]+)/', views.start_project_on_all, name="startonall"),
29 url(r'^stoponall/(?P<projectname>[\w]+)/', views.stop_project_on_all, name="stoponall"),
30 url(r'^globalstatus/', views.get_global_system_status, name="globalstatus"),
31 url(r'^sharedb/(?P<projectname>[\w]+)/', views.share_db, name="sharedatabase"),
32 url(r'^shareproject/(?P<projectname>[\w]+)/', views.share_project, name="shareproject"),
33 url(r'^dbpreview/(?P<db>[\w]+)/', views.database_preview, name="dbpreview"),
34 ] | 2 - error: no-name-in-module
1 - warning: unused-import
|
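This URLconf relies on `django.conf.urls.url`, which was deprecated in Django 2.0 and removed in 4.0. A sketch of the first few routes moved to `django.urls.re_path`, keeping the same regexes and names:

from django.urls import re_path

from . import views

urlpatterns = [
    re_path(r'^$', views.main_page, name='mainpage'),
    re_path(r'^create/$', views.create_new, name='newproject'),
    re_path(r'^manage/(?P<projectname>[\w]+)/', views.manage_project, name='manageproject'),
    # ...remaining routes unchanged apart from url -> re_path
]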
1 from django import forms
2 from crispy_forms.helper import FormHelper
3 from crispy_forms.layout import Submit
4 from django.contrib.auth.forms import PasswordChangeForm
5
6
7 class CreateProject(forms.Form):
8 projectname = forms.SlugField(label="Enter project name", max_length=50, required=True)
9 helper = FormHelper()
10 helper.form_method = 'POST'
11 helper.add_input(Submit('submit', 'Create Project'))
12 helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))
13
14
15 class DeleteProject(forms.Form):
16 helper = FormHelper()
17 helper.form_method = 'POST'
18 helper.add_input(Submit('submit', 'Confirm'))
19 helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))
20
21
22 class CreatePipeline(forms.Form):
23 pipelinename = forms.SlugField(label="Pipeline name", max_length=50, required=True)
24 pipelineorder = forms.IntegerField(label="Order", required=True, min_value=1, max_value=900)
25 pipelinefunction = forms.CharField(label="Pipeline function:", required=False, widget=forms.Textarea)
26 helper = FormHelper()
27 helper.form_tag = False
28
29
30 class LinkGenerator(forms.Form):
31 function = forms.CharField(label="Write your link generator function here:", required=False, widget=forms.Textarea)
32 helper = FormHelper()
33 helper.form_tag = False
34
35
36 class Scraper(forms.Form):
37 function = forms.CharField(label="Write your scraper function here:", required=False, widget=forms.Textarea)
38 helper = FormHelper()
39 helper.form_tag = False
40
41
42 class ItemName(forms.Form):
43 itemname = forms.SlugField(label="Enter item name", max_length=50, required=True)
44 helper = FormHelper()
45 helper.form_tag = False
46
47
48 class FieldName(forms.Form):
49 fieldname = forms.SlugField(label="Field 1", max_length=50, required=False)
50 extra_field_count = forms.CharField(widget=forms.HiddenInput())
51 helper = FormHelper()
52 helper.form_tag = False
53
54 def __init__(self, *args, **kwargs):
55 extra_fields = kwargs.pop('extra', 0)
56
57 super(FieldName, self).__init__(*args, **kwargs)
58 self.fields['extra_field_count'].initial = extra_fields
59
60 for index in range(int(extra_fields)):
61 # generate extra fields in the number specified via extra_fields
62 self.fields['field_{index}'.format(index=index+2)] = forms.CharField(required=False)
63
64
65 class ChangePass(PasswordChangeForm):
66 helper = FormHelper()
67 helper.form_method = 'POST'
68 helper.add_input(Submit('submit', 'Change'))
69
70
71 class Settings(forms.Form):
72 settings = forms.CharField(required=False, widget=forms.Textarea)
73 helper = FormHelper()
74 helper.form_tag = False
75
76
77 class ShareDB(forms.Form):
78 username = forms.CharField(label="Enter the account name for the user with whom you want to share the database", max_length=150, required=True)
79 helper = FormHelper()
80 helper.form_method = 'POST'
81 helper.add_input(Submit('submit', 'Share'))
82 helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))
83
84
85 class ShareProject(forms.Form):
86 username = forms.CharField(label="Enter the account name for the user with whom you want to share the project", max_length=150, required=True)
87 helper = FormHelper()
88 helper.form_method = 'POST'
89 helper.add_input(Submit('submit', 'Share'))
90 helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default')) | 7 - refactor: too-few-public-methods
15 - refactor: too-few-public-methods
22 - refactor: too-few-public-methods
30 - refactor: too-few-public-methods
36 - refactor: too-few-public-methods
42 - refactor: too-few-public-methods
57 - refactor: super-with-arguments
48 - refactor: too-few-public-methods
65 - refactor: too-few-public-methods
71 - refactor: too-few-public-methods
77 - refactor: too-few-public-methods
85 - refactor: too-few-public-methods
|
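`FieldName` grows `field_2`, `field_3`, … from the `extra` kwarg, and the label column also flags the old-style `super(FieldName, self).__init__` call. A standalone sketch of the same pattern with a zero-argument `super()` and a quick check of the generated fields; the `settings.configure()` call is only there so the snippet runs outside a project:

import django
from django.conf import settings

if not settings.configured:
    settings.configure(USE_I18N=False)  # minimal config so this runs outside a project
django.setup()

from django import forms


class DynamicFieldForm(forms.Form):
    fieldname = forms.SlugField(label="Field 1", max_length=50, required=False)
    extra_field_count = forms.CharField(widget=forms.HiddenInput())

    def __init__(self, *args, **kwargs):
        extra_fields = kwargs.pop('extra', 0)
        super().__init__(*args, **kwargs)  # zero-argument super()
        self.fields['extra_field_count'].initial = extra_fields
        for index in range(int(extra_fields)):
            # field_2, field_3, ... mirror the numbering used by FieldName above
            self.fields['field_{}'.format(index + 2)] = forms.CharField(required=False)


form = DynamicFieldForm(extra=3)
print(list(form.fields))  # ['fieldname', 'extra_field_count', 'field_2', 'field_3', 'field_4']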
1 # -*- coding: utf-8 -*-
2 BOT_NAME = 'tc_zufang'
3
4 SPIDER_MODULES = ['tc_zufang.spiders']
5 NEWSPIDER_MODULE = 'tc_zufang.spiders'
6
7 # Crawl responsibly by identifying yourself (and your website) on the user-agent
8 #USER_AGENT = 'tc_zufang (+http://www.yourdomain.com)'
9 #item Pipeline同时处理item的最大值为100
10 # CONCURRENT_ITEMS=100
11 #scrapy downloader并发请求最大值为16
12 #CONCURRENT_REQUESTS=4
13 #对单个网站进行并发请求的最大值为8
14 #CONCURRENT_REQUESTS_PER_DOMAIN=2
15 #抓取网站的最大允许的抓取深度值
16 DEPTH_LIMIT=0
17 # Obey robots.txt rules
18 ROBOTSTXT_OBEY = True
19 DOWNLOAD_TIMEOUT=10
20 DNSCACHE_ENABLED=True
21 #避免爬虫被禁的策略1,禁用cookie
22 # Disable cookies (enabled by default)
23 COOKIES_ENABLED = False
24 CONCURRENT_REQUESTS=4
25 #CONCURRENT_REQUESTS_PER_IP=2
26 #CONCURRENT_REQUESTS_PER_DOMAIN=2
27 #设置下载延时,防止爬虫被禁
28 DOWNLOAD_DELAY = 5
29 DOWNLOADER_MIDDLEWARES = {
30 'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 110,
31 "tc_zufang.Proxy_Middleware.ProxyMiddleware":100,
32 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,
33 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 550,
34 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,
35 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,
36 'scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware': 830,
37 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,
38 'tc_zufang.timeout_middleware.Timeout_Middleware':610,
39 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': None,
40 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 300,
41 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
42 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': None,
43 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 400,
44 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,
45 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': None,
46 'tc_zufang.rotate_useragent_dowmloadmiddleware.RotateUserAgentMiddleware':400,
47 'tc_zufang.redirect_middleware.Redirect_Middleware':500,
48
49 }
50 #使用scrapy-redis组件,分布式运行多个爬虫
51
52
53 #配置日志存储目录
54 SCHEDULER = "scrapy_redis.scheduler.Scheduler"
55 DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
56 SCHEDULER_PERSIST = True
57 SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'
58 REDIS_URL = None
59 REDIS_HOST = '127.0.0.1' # 也可以根据情况改成 localhost
60 REDIS_PORT = '6379'
61 #LOG_FILE = "logs/scrapy.log"
62
| Clean Code: No Issues Detected
|
1 import connection
2 import queue
3 from scrapy.utils.misc import load_object
4 from scrapy.utils.job import job_dir
5
6 SCHEDULER_PERSIST = False
7 QUEUE_CLASS = 'queue.SpiderQueue'
8 IDLE_BEFORE_CLOSE = 0
9
10
11 class Scheduler(object):
12
13 def __init__(self, server, persist,
14 queue_key, queue_cls, idle_before_close,
15 stats, *args, **kwargs):
16 self.server = server
17 self.persist = persist
18 self.queue_key = queue_key
19 self.queue_cls = queue_cls
20 self.idle_before_close = idle_before_close
21 self.stats = stats
22
23 def __len__(self):
24 return len(self.queue)
25
26 @classmethod
27 def from_crawler(cls, crawler):
28 if not crawler.spider.islinkgenerator:
29 settings = crawler.settings
30 persist = settings.get('SCHEDULER_PERSIST', SCHEDULER_PERSIST)
31 queue_key = "%s:requests" % crawler.spider.name
32 queue_cls = queue.SpiderQueue
33 idle_before_close = settings.get('SCHEDULER_IDLE_BEFORE_CLOSE', IDLE_BEFORE_CLOSE)
34 server = connection.from_settings(settings, crawler.spider.name)
35 stats = crawler.stats
36 return cls(server, persist, queue_key, queue_cls, idle_before_close, stats)
37 else:
38 settings = crawler.settings
39 dupefilter_cls = load_object(settings['DUPEFILTER_CLASS'])
40 dupefilter = dupefilter_cls.from_settings(settings)
41 pqclass = load_object(settings['SCHEDULER_PRIORITY_QUEUE'])
42 dqclass = load_object(settings['SCHEDULER_DISK_QUEUE'])
43 mqclass = load_object(settings['SCHEDULER_MEMORY_QUEUE'])
44 logunser = settings.getbool('LOG_UNSERIALIZABLE_REQUESTS', settings.getbool('SCHEDULER_DEBUG'))
45 core_scheduler = load_object('scrapy.core.scheduler.Scheduler')
46 return core_scheduler(dupefilter, jobdir=job_dir(settings), logunser=logunser,
47 stats=crawler.stats, pqclass=pqclass, dqclass=dqclass, mqclass=mqclass)
48
49 def open(self, spider):
50 self.spider = spider
51 self.queue = self.queue_cls(self.server, spider, self.queue_key)
52
53 if len(self.queue):
54 spider.log("Resuming crawl (%d requests scheduled)" % len(self.queue))
55
56 def close(self, reason):
57 if not self.persist:
58 self.queue.clear()
59 connection.close(self.server)
60
61 def enqueue_request(self, request):
62 if self.stats:
63 self.stats.inc_value('scheduler/enqueued/rabbitmq', spider=self.spider)
64 self.queue.push(request)
65
66 def next_request(self):
67 request = self.queue.pop()
68 if request and self.stats:
69 self.stats.inc_value('scheduler/dequeued/rabbitmq', spider=self.spider)
70 return request
71
72 def has_pending_requests(self):
73 return len(self) > 0
| 11 - refactor: useless-object-inheritance
11 - refactor: too-many-instance-attributes
13 - refactor: too-many-arguments
13 - refactor: too-many-positional-arguments
13 - warning: unused-argument
13 - warning: unused-argument
27 - refactor: too-many-locals
28 - refactor: no-else-return
32 - error: no-member
56 - warning: unused-argument
50 - warning: attribute-defined-outside-init
51 - warning: attribute-defined-outside-init
|
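Two of the warnings above (`attribute-defined-outside-init` on lines 50-51, plus the unused `*args`/`**kwargs`) can be addressed by declaring the attributes up front. A small sketch of how the constructor could look:

# Sketch of Scheduler.__init__ with the unused *args/**kwargs dropped
# and the attributes that open() fills in declared up front.
def __init__(self, server, persist, queue_key, queue_cls, idle_before_close, stats):
    self.server = server
    self.persist = persist
    self.queue_key = queue_key
    self.queue_cls = queue_cls
    self.idle_before_close = idle_before_close
    self.stats = stats
    self.spider = None  # assigned in open()
    self.queue = None   # assigned in open()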
1 # -*- coding: utf-8 -*-
2
3 # Define your item pipelines here
4 #
5 # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
7
8
9 class PropertiesPipeline(object):
10 def process_item(self, item, spider):
11 return item
12
13
14 ITEM_PIPELINES = {
15
16 'scrapy.pipelines.images.ImagesPipeline': 1,
17 'properties.pipelines.geo.GeoPipeline': 400,
18 }
19 IMAGES_STORE = 'images'
20 IMAGES_THUMBS = { 'small': (30, 30) } | 9 - refactor: useless-object-inheritance
10 - warning: unused-argument
9 - refactor: too-few-public-methods
|
1 from scrapy.loader.processors import MapCompose, Join
2 from scrapy.loader import ItemLoader
3 from properties.items import PropertiesItem
4 import datetime
5 from urllib.parse import urlparse
6 import socket
7
8 import scrapy
9
10 class BasicSpider(scrapy.Spider):
11 name = "basictest"
12 allowed_domains = ["web"]
13 start_urls=(
14 'https://developers.facebook.com/blog/post/2021/01/26/introducing-instagram-content-publishing-api/?utm_source=email&utm_medium=fb4d-newsletter-february21&utm_campaign=organic&utm_offering=business-tools&utm_product=instagram&utm_content=body-button-instagram-graph-API&utm_location=2',
15 )
16
17 def parse (self,response):
18 """ @url https://developers.facebook.com/blog/post/2021/01/26/introducing-instagram-content-publishing-api/?utm_source=email&utm_medium=fb4d-newsletter-february21&utm_campaign=organic&utm_offering=business-tools&utm_product=instagram&utm_content=body-button-instagram-graph-API&utm_location=2
19 @return item 1
20 @scrapes title price
21 @scrapes url project"""
22
23
24 l = ItemLoader(item=PropertiesItem(), response=response)
25 # Load fields using XPath expressions
26 l.add_xpath('title', '/html/body/div[1]/div[5]/div[2]/div/div/div/div[2]/div[2]/div[2]/div[1]/div/div/div[2]/div/div/p[1]/text()',
27 MapCompose(unicode.strip, unicode.title))
28 # l.add_xpath('price', './/*[@itemprop="price"][1]/text()',
29 # MapCompose(lambda i: i.replace(',', ''),
30 # float),
31 # re='[,.0-9]+')
32 # l.add_xpath('description', '//*[@itemprop="description"]'
33 # '[1]/text()',
34 # MapCompose(unicode.strip), Join())
35
36 # Housekeeping fields
37 l.add_value('url', response.url)
38 l.add_value('project', self.settings.get('BOT_NAME'))
39 l.add_value('spider', self.name)
40 l.add_value('server', socket.gethostname())
41 l.add_value('date', datetime.datetime.now())
42 return l.load_item() | 18 - warning: bad-indentation
24 - error: undefined-variable
27 - error: undefined-variable
27 - error: undefined-variable
37 - error: undefined-variable
38 - error: undefined-variable
39 - error: undefined-variable
42 - error: return-outside-function
10 - refactor: too-few-public-methods
1 - warning: unused-import
5 - warning: unused-import
|
1 from scrapy.item import Item, Field
2
3 import datetime
4 import socket
5
6
7 class PropertiesItem(Item):
8 # Primary fields
9 title = PropertiesItem()
10 price = Field()
11 description = Field()
12 address = Field()
13 image_urls = Field()
14 # Calculated fields
15 images = Field()
16 location = Field()
17 # Housekeeping fields
18
19 l.add_value('url', response.url)
20 l.add_value('project', self.settings.get('BOT_NAME'))
21 l.add_value('spider', self.name)
22 l.add_value('server', socket.gethostname())
23 l.add_value('date', datetime.datetime.now())
24
25
26
27 return l.load_item() | 9 - error: undefined-variable
19 - error: undefined-variable
19 - error: undefined-variable
20 - error: undefined-variable
20 - error: undefined-variable
21 - error: undefined-variable
21 - error: undefined-variable
22 - error: undefined-variable
23 - error: undefined-variable
27 - error: return-outside-function
27 - error: undefined-variable
7 - refactor: too-few-public-methods
|
1
2
3 import os
4
5 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
6 SECRET_KEY = 'f!7k7a9k10)fbx7#@y@u9u@v3%b)f%h6xxnxf71(21z1uj^#+e'
7 DEBUG = True
8 ALLOWED_HOSTS = []
9
10 INSTALLED_APPS = [
11 'django.contrib.admin',
12 'django.contrib.auth',
13 'django.contrib.contenttypes',
14 'django.contrib.sessions',
15 'django.contrib.messages',
16 'django.contrib.staticfiles',
17 'users',
18 # 'oauth2_provider',
19 # 'oauth2_provider',
20 'corsheaders',
21 'django.contrib.sites.apps.SitesConfig',
22 'django.contrib.humanize.apps.HumanizeConfig',
23 'django_nyt.apps.DjangoNytConfig',
24 'mptt',
25 'sekizai',
26 'sorl.thumbnail',
27 'wiki.apps.WikiConfig',
28 'wiki.plugins.attachments.apps.AttachmentsConfig',
29 'wiki.plugins.notifications.apps.NotificationsConfig',
30 'wiki.plugins.images.apps.ImagesConfig',
31 'wiki.plugins.macros.apps.MacrosConfig',
32 ]
33
34 # AUTHENTICATION_BACKENDS = (
35 # 'oauth2_provider.backends.OAuth2Backend',
36 # # Uncomment following if you want to access the admin
37 # #'django.contrib.auth.backends.ModelBackend'
38
39 # )
40 MIDDLEWARE = [
41 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
42 'oauth2_provider.middleware.OAuth2TokenMiddleware',
43 'corsheaders.middleware.CorsMiddleware',
44 'django.middleware.security.SecurityMiddleware',
45 'django.contrib.sessions.middleware.SessionMiddleware',
46 'django.middleware.common.CommonMiddleware',
47 'django.middleware.csrf.CsrfViewMiddleware',
48 'django.contrib.auth.middleware.AuthenticationMiddleware',
49 'django.contrib.messages.middleware.MessageMiddleware',
50 'django.middleware.clickjacking.XFrameOptionsMiddleware',
51 ]
52
53 ROOT_URLCONF = 'iam.urls'
54
55 TEMPLATES = [
56 {
57 'BACKEND': 'django.template.backends.django.DjangoTemplates',
58 'DIRS': [],
59 'APP_DIRS': True,
60 'OPTIONS': {
61 'context_processors': [
62 'django.contrib.auth.context_processors.auth',
63 'django.template.context_processors.debug',
64 'django.template.context_processors.i18n',
65 'django.template.context_processors.media',
66 'django.template.context_processors.request',
67 'django.template.context_processors.static',
68 'django.template.context_processors.tz',
69 'django.contrib.messages.context_processors.messages',
70 "sekizai.context_processors.sekizai",
71 ],
72 },
73 },
74 ]
75
76 WSGI_APPLICATION = 'iam.wsgi.application'
77
78
79 DATABASES = {
80 'default': {
81 'ENGINE': 'django.db.backends.sqlite3',
82 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
83 }
84 }
85
86
87 AUTH_PASSWORD_VALIDATORS = [
88 {
89 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
90 },
91 {
92 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
93 },
94 {
95 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
96 },
97 {
98 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
99 },
100 ]
101
102
103 # Internationalization
104 # https://docs.djangoproject.com/en/2.2/topics/i18n/
105
106 LANGUAGE_CODE = 'en-us'
107
108 TIME_ZONE = 'UTC'
109
110 USE_I18N = True
111
112 USE_L10N = True
113
114 USE_TZ = True
115 SITE_ID = 1
116
117 # Static files (CSS, JavaScript, Images)
118 # https://docs.djangoproject.com/en/2.2/howto/static-files/
119
120 STATIC_URL = '/static/'
121 AUTH_USER_MODEL='users.User'
122 LOGIN_URL='/admin/login/'
123
124 CORS_ORIGIN_ALLOW_ALL = True
125
126 WIKI_ACCOUNT_HANDLING = True
127 WIKI_ACCOUNT_SIGNUP_ALLOWED = True
128
129 # export ID =vW1RcAl7Mb0d5gyHNQIAcH110lWoOW2BmWJIero8
130 # export SECRET=DZFpuNjRdt5xUEzxXovAp40bU3lQvoMvF3awEStn61RXWE0Ses4RgzHWKJKTvUCHfRkhcBi3ebsEfSjfEO96vo2Sh6pZlxJ6f7KcUbhvqMMPoVxRwv4vfdWEoWMGPeIO
131 # # | Clean Code: No Issues Detected
|
1 from django.apps import AppConfig
2
3
4 class CorescrapConfig(AppConfig):
5 name = 'corescrap'
| 4 - refactor: too-few-public-methods
|
1 import scrapy
2 from properties.items import PropertiesItem
3 from scrapy.loader import ItemLoader
4 from itemloaders.processors import MapCompose, Join
5 class BasicSpider(scrapy.Spider):
6 name = 'basic'
7 allowed_domains = ['web']
8 start_urls = ['http://web:9312/properties/property_000000.html']
9
10 def parse(self, response):
11 #Cleaning up – item loaders and housekeeping fields
12 l = ItemLoader(item=PropertiesItem(), response=response)
13 l.add_xpath("title", '//*[@itemprop="name"][1]/text()' ,MapCompose(unicode.strip, unicode.title))
14 l.add_xpath("price", '//*[@itemprop="price"][1]/text()',MapCompose(lambda i: i.replace(',', ''), float),re('[0.9]+')
15 l.add_xpath("description", '//*[@itemprop="description"][1]/text()', MapCompose(unicode.strip), Join())
16 l.add_xpath("address ", '//*[@itemtype="http://schema.org/Place"][1]/text()',MapCompose(unicode.strip))
17 l.add_xpath("image_urls", '//*[@itemprop="image"][1]/@src', MapCompose(lambda i: urlparse.urljoin(response.url, i)))
18
19 return l.load_item()
20
21
22
23
24 # def parse(self, response):
25 # item = PropertiesItem()
26 # item['title'] = response.xpath(
27 # '//*[@itemprop="list-group-item"][1]/text()').extract()
28 # item['price'] = response.xpath('//*[@itemprop="price"][1]/text()').re('[.0-9]+')
29 # item['description'] = response.xpath('//*[@itemprop="description"][1]/text()').extract()
30
31
32 # return item
33 # def parse(self, response):
34 # self.log("title:%s"%response.xpath(
35 # '//*[@itemprop="name"][1]/text()').extract()
36 # )
37 # self.log("price:%s" % response.xpath(
38 # '//*[@itemprop="price"][1]/text()').re('[0.9]+'))
39 # self.log("description: %s" % response.xpath(
40 # '//*[@itemprop="description"][1]/text()').extract())
41 # self.log("address: %s" % response.xpath(
42 # '//*[@itemtype="http://schema.org/Place"][1]/text()').extract())
43
44 # self.log("image_urls: %s" % response.xpath('//*[@itemprop="image"][1]/@src').extract())
| 14 - error: syntax-error
|
1 from datetime import datetime
2 from scrapy.spiders import SitemapSpider
3
4 class FilteredSitemapSpider(SitemapSpider):
5 name = 'filtered_sitemap_spider'
6 allowed_domains = ['example.com']
7 sitemap_urls = ['http://example.com/sitemap.xml']
8
9 def sitemap_filter(self, entries):
10 for entry in entries:
11 date_time = datetime.strptime(entry['lastmod'], '%Y-%m-%d')
12 if date_time.year >= 2005:
13 yield entry | 4 - refactor: too-few-public-methods
|
1 from django import template
2 from ..models import Post
3 from django.utils.safestring import mark_safe
4 import markdown
5 from django.db.models import Count
6 register = template.Library()
7
8
9 @register.filter(name='markdown')
10 def markdown_fromat(text):
11 return mark_safe(markdown.markdown(text))
12
13 @register.simple_tag
14 def total_posts():
15 return Post.published.count()
16
17 @register.inclusion_tag('latest_posts.html')
18 def show_latest_posts(count=3):
19 latest_posts = Post.published.order_by('-publish')[:count]
20 return {'latest_posts': latest_posts}
21
22
23
24 @register.simple_tag
25 # In the preceding template tag, you build a QuerySet using the annotate() function
26 # to aggregate the total number of comments for each post. You use the Count
27 # aggregation function to store the number of comments in the computed field total_
28 # comments for each Post object. You order the QuerySet by the computed field in
29 # descending order. You also provide an optional count variable to limit the total
30 def get_most_commented_posts(count=2):
31 return Post.published.annotate(
32 total_comments=Count('comments')
33 ).order_by('-total_comments')[:count] | 2 - error: relative-beyond-top-level
|
1 from types import resolve_bases
2 import scrapy
3 from scrapy.spidermiddlewares.httperror import HttpError
4 from twisted.internet.error import DNSLookupError
5 from twisted.internet.error import TimeoutError,TCPTimedOutError
6
7
8
9 class DemoSpider(scrapy.Spider):
10 name='demo'
11 start_urls=[
12 "http://www.httpbin.org/", # HTTP 200 expected
13 "http://www.httpbin.org/status/404", # Webpage not found
14 "http://www.httpbin.org/status/500", # Internal server error
15 "http://www.httpbin.org:12345/", # timeout expected
16 "http://www.httphttpbinbin.org/",
17 ]
18
19 def start_requests(self):
20 for u in self.start_urls:
21 yield scrapy.Request(u,callback=self.parse_httpbin),
22 dont_filter=True
23
24
25 def parse_httpbin(self, response):
26 self.logger.info('Recieved response from {}'.format(response.url))
27 # ...
28
29
30 def errback_httpbin(self,failure):
31 self.logger.error(repr(failure))
32
33 if failure.check(HttpError):
34 response=failure.value.response
35 self.logger.error('htttp Error occireed on %s',response.url)
36
37 elif failure.check(DNSLookupError) :
38 response=failure.request
39 self.logger.error("DNSLookupError occurred on %s", request.url)
40
41 elif failure.check(TimeoutError,TCPTimedOutError):
42 request =failure.request
43 self.logger.eerror("timeout occured on %s",request.url)
44
45
46
47
| 26 - warning: bad-indentation
21 - refactor: trailing-comma-tuple
5 - warning: redefined-builtin
22 - warning: unused-variable
39 - error: possibly-used-before-assignment
1 - warning: unused-import
|
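In the spider above, `dont_filter=True` is stranded on its own line (the `trailing-comma-tuple` and `unused-variable` warnings), the errback is never attached to the request, and the DNS branch logs an undefined `request`. A hedged sketch of the intended wiring, using `scrapy.Request`'s `errback` and `dont_filter` arguments:

import scrapy


class DemoSpiderSketch(scrapy.Spider):
    name = 'demo_sketch'
    start_urls = [
        "http://www.httpbin.org/",
        "http://www.httpbin.org/status/404",
    ]

    def start_requests(self):
        for u in self.start_urls:
            yield scrapy.Request(
                u,
                callback=self.parse_httpbin,
                errback=self.errback_httpbin,  # attach the error handler
                dont_filter=True,              # keyword belongs inside the call
            )

    def parse_httpbin(self, response):
        self.logger.info('Received response from %s', response.url)

    def errback_httpbin(self, failure):
        # failure.request is set for every failure type, so no branch can hit
        # an undefined name as in the original DNSLookupError branch.
        self.logger.error('Request to %s failed: %r', failure.request.url, failure)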
1 # -*- coding: utf-8 -*-
2 import random
3 import requests
4 def GetIps():
5 li=[]
6 global count
7 url ='http://139.199.182.250:8000/?types=0&count=300'
8 ips=requests.get(url)
9 for ip in eval(ips.content):
10 li.append(ip[0]+':'+ip[1])
11 return li
12
13 GetIps()
| 6 - warning: global-variable-not-assigned
8 - warning: missing-timeout
9 - warning: eval-used
2 - warning: unused-import
|
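The labels flag `eval-used` and a `requests.get` call without a timeout. A hedged rework keeping the same endpoint, parsing the response with `ast.literal_eval` (assuming the service really returns a Python-literal list of `(ip, port, ...)` tuples, as the original `eval` implies) and bounding the request:

import ast
import requests

PROXY_API = 'http://139.199.182.250:8000/?types=0&count=300'


def get_ips(timeout=10):
    response = requests.get(PROXY_API, timeout=timeout)
    proxies = []
    for entry in ast.literal_eval(response.text):
        proxies.append(entry[0] + ':' + entry[1])
    return proxies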
1 from django.contrib import admin
2 from .models import Products,feeds,MyModel,Post
3 # Register your models here.
4
5 admin.site.register(Products)
6 admin.site.register(feeds)
7 admin.site.register(MyModel)
8
9 admin.site.register(Post)
| 2 - error: relative-beyond-top-level
|
1 # import requests
2 # url = "https://proxy-orbit1.p.rapidapi.com/v1/"
3 # headers = {
4 # 'x-rapidapi-key': "b188eee73cmsha4c027c9ee4e2b7p1755ebjsn1e0e0b615bcf",
5 # 'x-rapidapi-host': "proxy-orbit1.p.rapidapi.com"
6 # }
7 # # response = requests.request("GET", url, headers=headers)
8 # print(response.text)
9
10 import requests
11 url= "https://libraries.io/api/"
12 headers={'?api_key':'306cf1684a42e4be5ec0a1c60362c2ef',
13 # 'platform':'NPM/base62/dependent_repositories'
14 }
15 response = requests.request("GET", url, headers=headers)
16 print(response.text)
17
18
19
20
21
22 Example: https://libraries.io/api/NPM/base62/dependent_repositories?api_key=306cf1684a42e4be5ec0a1c60362c2ef
23
24
25
26
27
28
29
30
31
32
33
34 import requests
35
36 url = "https://scrapingant.p.rapidapi.com/post"
37
38 payload = "{\"cookies\": \"cookie_name_1=cookie_value_1;cookie_name_2=cookie_value_2\"\"return_text\": false,\"url\": \"https://example.com\"}"
39 headers = {
40 'content-type': "application/json",
41 'x-rapidapi-key': "b188eee73cmsha4c027c9ee4e2b7p1755ebjsn1e0e0b615bcf",
42 'x-rapidapi-host': "scrapingant.p.rapidapi.com"
43 }
44
45 response = requests.request("POST", url, data=payload, headers=headers)
46
47 print(response.text)
48
49
| 22 - error: syntax-error
|
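The libraries.io block in this scratch file passes the API key as a header named `?api_key` and leaves the example URL sitting in module code (the syntax error at line 22). A sketch of the call that the inline example URL describes, with the key sent as a query parameter and a timeout added:

import requests

API_KEY = '306cf1684a42e4be5ec0a1c60362c2ef'  # key taken verbatim from the snippet above
url = 'https://libraries.io/api/NPM/base62/dependent_repositories'

response = requests.get(url, params={'api_key': API_KEY}, timeout=10)
print(response.status_code)
print(response.text)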
1 from django.db import models
2
3 # Create your models here.
4
5 from datetime import datetime
6 from elasticsearch_dsl import DocType, Date, Nested, Boolean, \
7 analyzer, InnerObjectWrapper, Completion, Keyword, Text, Integer
8
9 from elasticsearch_dsl.analysis import CustomAnalyzer as _CustomAnalyzer
10
11 from elasticsearch_dsl.connections import connections
12 connections.create_connection(hosts=["localhost"])
13
14
15 class CustomAnalyzer(_CustomAnalyzer):
16 def get_analysis_definition(self):
17 return {}
18
19
20 ik_analyzer = CustomAnalyzer("ik_max_word", filter=["lowercase"])
21
22
23 class ArticleType(DocType):
24 """
25 # elasticsearch_dsl安装5.4版本
26 """
27 # 文章类型
28 suggest = Completion(analyzer=ik_analyzer)
29 title = Text(analyzer="ik_max_word")
30 create_date = Date()
31 url = Keyword()
32 view = Integer()
33 category = Text(analyzer="ik_max_word")
34 content = Text(analyzer="ik_max_word")
35
36 class Meta:
37 index = "pm"
38 doc_type = "article"
39
40
41 if __name__ == "__main__":
42 data = ArticleType.init()
43 print(data)
44
| 15 - refactor: too-few-public-methods
36 - refactor: too-few-public-methods
23 - refactor: too-few-public-methods
1 - warning: unused-import
5 - warning: unused-import
6 - warning: unused-import
6 - warning: unused-import
6 - warning: unused-import
6 - warning: unused-import
|
1
2 from haystack import indexes
3 from django . conf import settings
4 from .models import Article ,Category ,Tag
5
6
7 class ArticleIndex ( indexes . SearchIndex , indexes . Indexable ):
8 text = indexes . CharField ( document = True , use_template = True )
9
10 def get_model ( self ):
11 return Article
12
13 def index_queryset ( self , using = None ):
14 return self . get_model (). objects . filter ( status = 'p' ) | 4 - error: relative-beyond-top-level
13 - warning: unused-argument
3 - warning: unused-import
4 - warning: unused-import
4 - warning: unused-import
|
1 # -*- coding: utf-8 -*-
2 from pymongo import MongoClient
3 from scrapy import log
4 import traceback
5 from scrapy.exceptions import DropItem
6
7 class SingleMongodbPipeline(object):
8 MONGODB_SERVER = "101.200.46.191"
9 MONGODB_PORT = 27017
10 MONGODB_DB = "zufang_fs"
11
12 def __init__(self):
13 #初始化mongodb连接
14 try:
15 client = MongoClient(self.MONGODB_SERVER, self.MONGODB_PORT)
16 self.db = client[self.MONGODB_DB]
17 except Exception as e:
18 traceback.print_exc()
19
20 @classmethod
21 def from_crawler(cls, crawler):
22 cls.MONGODB_SERVER = crawler.settings.get('SingleMONGODB_SERVER', '101.200.46.191')
23 cls.MONGODB_PORT = crawler.settings.getint('SingleMONGODB_PORT', 27017)
24 cls.MONGODB_DB = crawler.settings.get('SingleMONGODB_DB', 'zufang_fs')
25 pipe = cls()
26 pipe.crawler = crawler
27 return pipe
28
29 def process_item(self, item, spider):
30 if item['pub_time'] == 0:
31 raise DropItem("Duplicate item found: %s" % item)
32 if item['method'] == 0:
33 raise DropItem("Duplicate item found: %s" % item)
34 if item['community']==0:
35 raise DropItem("Duplicate item found: %s" % item)
36 if item['money']==0:
37 raise DropItem("Duplicate item found: %s" % item)
38 if item['area'] == 0:
39 raise DropItem("Duplicate item found: %s" % item)
40 if item['city'] == 0:
41 raise DropItem("Duplicate item found: %s" % item)
42 # if item['phone'] == 0:
43 # raise DropItem("Duplicate item found: %s" % item)
44 # if item['img1'] == 0:
45 # raise DropItem("Duplicate item found: %s" % item)
46 # if item['img2'] == 0:
47 # raise DropItem("Duplicate item found: %s" % item)
48 zufang_detail = {
49 'title': item.get('title'),
50 'money': item.get('money'),
51 'method': item.get('method'),
52 'area': item.get('area', ''),
53 'community': item.get('community', ''),
54 'targeturl': item.get('targeturl'),
55 'pub_time': item.get('pub_time', ''),
56 'city':item.get('city',''),
57 'phone':item.get('phone',''),
58 'img1':item.get('img1',''),
59 'img2':item.get('img2',''),
60 }
61 result = self.db['zufang_detail'].insert(zufang_detail)
62 print '[success] the '+item['targeturl']+'wrote to MongoDB database'
63 return item | 62 - error: syntax-error
|
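The pipeline above is Python 2 (`print` statement, hence the syntax error under Python 3) and uses pymongo's `Collection.insert`, which was deprecated and later removed. A small Python 3 sketch of the final write, with `insert_one` and `print()` as a function:

def store_detail(db, item, zufang_detail):
    # db is the database handle created in __init__ above
    db['zufang_detail'].insert_one(zufang_detail)
    print('[success] the ' + item['targeturl'] + ' wrote to MongoDB database')
    return item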
1 # Generated by Django 3.1.3 on 2020-11-14 04:52
2
3 from django.db import migrations, models
4 import tinymce.models
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 ('core', '0005_feeds_content'),
11 ]
12
13 operations = [
14 migrations.CreateModel(
15 name='MyModel',
16 fields=[
17 ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
18 ('content', tinymce.models.HTMLField()),
19 ],
20 ),
21 migrations.RemoveField(
22 model_name='feeds',
23 name='content',
24 ),
25 migrations.RemoveField(
26 model_name='feeds',
27 name='description',
28 ),
29 ]
| 7 - refactor: too-few-public-methods
|
1 # Generated by Django 3.1.3 on 2020-11-13 06:20
2
3 from django.db import migrations
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('core', '0002_products'),
10 ]
11
12 operations = [
13 migrations.RenameModel(
14 old_name='Post',
15 new_name='feeds',
16 ),
17 ]
| 6 - refactor: too-few-public-methods
|
1
2 from django.shortcuts import render, get_object_or_404
3 from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
4 from django.contrib.auth.models import User
5 from django.views.generic import (
6 ListView,
7 DetailView,
8 CreateView,
9 UpdateView,
10 DeleteView
11 )
12 from .models import Post, Products,MyModel,feeds
13
14
15
16
17 def home(request):
18
19 context={
20 'posts':Post.objects.all()
21
22 }
23
24 return render (request,'blog/home.html',context)
25
26 class PostListView(ListView):
27 model = Post
28 template_name ='blog/home.html' # <app>/<model>_<viewtype>.html
29 context_object_name ='posts'
30 ordering = ['-date_posted']
31 paginate_by = 5
32
33 class UserPostListView(ListView):
34 model = Post
35 template_name = 'blog/user_posts.html' # <app>/<model>_<viewtype>.html
36 context_object_name = 'posts'
37 paginate_by = 5
38
39 def get_queryset(self):
40 user = get_object_or_404(User, username=self.kwargs.get('username'))
41 return Post.objects.filter(author=user).order_by('-date_posted')
42
43
44 class PostDetailView(DetailView):
45 model=Post
46 template_name = 'blog/post_detail.html'
47
48
49 class PostCreateView(LoginRequiredMixin, CreateView):
50 model = Post
51 fields = ['title', 'content','description']
52 template_name = 'blog/post_form.html' # <app>/<model>_<viewtype>.html
53
54
55 def form_valid(self, form):
56 form.instance.author = self.request.user
57 return super().form_valid(form)
58
59
60
61
62
63 class PostUpdateView(LoginRequiredMixin,UserPassesTestMixin,UpdateView):
64
65 model=Post
66 fields=['title','content','description']
67 template_name='blog/post_form.html'
68
69 def form_valid(self, form):
70 form.instance.author=self.request.user
71 return super().form_valid(form)
72
73 def test_func(self):
74
75 post =self.get_object()
76 if self.request.user==post.author:
77 return True
78 return False
79
80
81
82 class PostDeleteView(LoginRequiredMixin,UserPassesTestMixin,DeleteView):
83
84 model=Post
85 success_url='/'
86 template_name = 'blog/post_confirm_delete.html'
87
88
89
90
91 def test_func(self):
92
93 post =self.get_object()
94 if self.request.user==post.author:
95 return True
96 return False
97
98
99
100
101
102 def index(request):
103 fore=Products.objects.all()
104 feed=feeds.objects.all()
105
106
107
108
109 context={
110 'fore':fore,
111 'feed':feed
112 }
113
114
115
116
117 return render(request, 'index.html',context)
118 def about(request):
119 return render(request, 'about.html')
120 def product(request):
121 form =productForm(request.POST)
122
123 if form.is_valid():
124 form.save()
125 form =productForm()
126
127 context={
128 'form':form
129 }
130
131 return render(request, 'product.html',context)
132
133 def contact(request):
134 feed=feeds.objects.all()
135
136
137
138 return render(request, "contact.html",{'feed':feed}) | 19 - warning: bad-indentation
24 - warning: bad-indentation
45 - warning: bad-indentation
46 - warning: bad-indentation
65 - warning: bad-indentation
66 - warning: bad-indentation
67 - warning: bad-indentation
69 - warning: bad-indentation
70 - warning: bad-indentation
71 - warning: bad-indentation
73 - warning: bad-indentation
75 - warning: bad-indentation
76 - warning: bad-indentation
77 - warning: bad-indentation
78 - warning: bad-indentation
84 - warning: bad-indentation
85 - warning: bad-indentation
86 - warning: bad-indentation
91 - warning: bad-indentation
93 - warning: bad-indentation
94 - warning: bad-indentation
95 - warning: bad-indentation
96 - warning: bad-indentation
103 - warning: bad-indentation
104 - warning: bad-indentation
109 - warning: bad-indentation
117 - warning: bad-indentation
119 - warning: bad-indentation
121 - warning: bad-indentation
123 - warning: bad-indentation
124 - warning: bad-indentation
125 - warning: bad-indentation
127 - warning: bad-indentation
131 - warning: bad-indentation
134 - warning: bad-indentation
138 - warning: bad-indentation
12 - error: relative-beyond-top-level
26 - refactor: too-few-public-methods
33 - refactor: too-few-public-methods
44 - refactor: too-few-public-methods
49 - refactor: too-few-public-methods
82 - refactor: too-few-public-methods
121 - error: undefined-variable
125 - error: undefined-variable
12 - warning: unused-import
|
1 # -*- coding: utf-8 -*-
2 from scrapy_redis.spiders import RedisSpider
3 from scrapy.selector import Selector
4 from tc_zufang.utils.result_parse import list_first_item
5 from scrapy.http import Request
6 from tc_zufang.utils.InsertRedis import inserintotc,inserintota
7 import re
8 defaultencoding = 'utf-8'
9 '''
10 58同城的爬虫
11 '''
12 #继承自RedisSpider,则start_urls可以从redis读取
13 #继承自BaseSpider,则start_urls需要写出来
14 class TczufangSpider(RedisSpider):
15 name='basic'
16 start_urls=(
17 'http://dg.58.com/chuzu/',
18 'http://sw.58.com/chuzu/',
19 'http://sz.58.com/chuzu/',
20 'http://gz.58.com/chuzu/',
21 # 'http://fs.58.com/chuzu/',
22 # 'http://zs.58.com/chuzu/',
23 # 'http://zh.58.com/chuzu/',
24 # 'http://huizhou.58.com/chuzu/',
25 # 'http://jm.58.com/chuzu/',
26 # 'http://st.58.com/chuzu/',
27 # 'http://zhanjiang.58.com/chuzu/',
28 # 'http://zq.58.com/chuzu/',
29 # 'http://mm.58.com/chuzu/',
30 # 'http://jy.58.com/chuzu/',
31 # 'http://mz.58.com/chuzu/',
32 # 'http://qingyuan.58.com/chuzu/',
33 # 'http://yj.58.com/chuzu/',
34 # 'http://sg.58.com/chuzu/',
35 # 'http://heyuan.58.com/chuzu/',
36 # 'http://yf.58.com/chuzu/',
37 # 'http://chaozhou.58.com/chuzu/',
38 # 'http://taishan.58.com/chuzu/',
39 # 'http://yangchun.58.com/chuzu/',
40 # 'http://sd.58.com/chuzu/',
41 # 'http://huidong.58.com/chuzu/',
42 # 'http:// boluo.58.com/chuzu/',
43 # )
44 # redis_key = 'tczufangCrawler:start_urls'
45 #解析从start_urls下载返回的页面
46 #页面页面有两个目的:
47 #第一个:解析获取下一页的地址,将下一页的地址传递给爬虫调度器,以便作为爬虫的下一次请求
48 #第二个:获取详情页地址,再对详情页进行下一步的解析
49 redis_key = 'start_urls'
50 def parse(self, response):
51 #获取所访问的地址
52 response_url=re.findall('^http\:\/\/\w+\.58\.com',response.url)
53 response_selector = Selector(response)
54 next_link=list_first_item(response_selector.xpath(u'//div[contains(@class,"pager")]/a[contains(@class,"next")]/@href').extract())
55 detail_link=response_selector.xpath(u'//div[contains(@class,"listBox")]/ul[contains(@class,"listUl")]/li/@logr').extract()
56
57 if next_link:
58 if detail_link:
59 # print next_link
60 # yield Request(next_link,callback=self.parse)
61 inserintotc(next_link, 1)
62 print '#########[success] the next link ' + next_link + ' is insert into the redis queue#########'
63 for detail_link in response_selector.xpath(u'//div[contains(@class,"listBox")]/ul[contains(@class,"listUl")]/li/@logr').extract():
64 #gz_2_39755299868183_28191154595392_sortid:1486483205000 @ ses:busitime ^ desc @ pubid:5453707因为58同城的详情页做了爬取限制,所以由自己构造详情页id
65 #构造详情页url
66 # detail_link='http://dg.58.com/zufang/'+detail_link.split('_')[3]+'x.shtml'
67 detail_link = response_url[0]+'/zufang/' + detail_link.split('_')[3] + 'x.shtml'
68 #对详情页进行解析cd
69 if detail_link:
70 inserintota(detail_link,2)
71 print '[success] the detail link ' + detail_link + ' is insert into the redis queue' | 16 - error: syntax-error
|
1 import scrapy
2
3
4 class WebiSpider(scrapy.Spider):
5 name = 'webi'
6 allowed_domains = ['web']
7 start_urls = ['http://web/']
8
9 def parse(self, response):
10 pass
| 4 - refactor: too-few-public-methods
|
1 from django.contrib import admin
2 from django.urls import path,include
3 from django.conf import settings
4 from django.conf.urls.static import static
5 from django.contrib.sitemaps.views import sitemap
6 from blog.sitemaps import PostSitemap
7 from django.conf.urls import url, include
8 # from .. import core
9 sitemaps={
10 'posts':PostSitemap,
11 }
12
13 urlpatterns = [
14 path('admin/', admin.site.urls, ),
15 path('',include('blog.urls')),
16 path('core/',include('core.urls')),
17 path('api/',include('api.urls')),
18 # path('oauth/',include('oauth.urls')),
19 path('accounts/', include('allauth.urls')),
20 path('sitemap.xml', sitemap, {'sitemaps': sitemaps},
21 name='django.contrib.sitemaps.views.sitemap')
22
23
24 ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 7 - warning: reimported
7 - warning: unused-import
|
1 import scrapy
2 class DemoSpider(scrapy.Spider):
3 name = 'demo'
4 start_urls = ['http://www.something.com/users/login.php']
5 def parse(self, response):
6 return scrapy.FormRequest.from_response(
7 response,
8 formdata = {'username': 'admin', 'password': 'confidential'},
9 callback = self.after_login
10 )
11
12 def after_login(self, response):
13 if "authentication failed" in response.body:
14 self.logger.error("Login failed")
15 return
16 # You can continue scraping here | 3 - warning: bad-indentation
4 - warning: bad-indentation
5 - warning: bad-indentation
6 - warning: bad-indentation
12 - warning: bad-indentation
13 - warning: bad-indentation
14 - warning: bad-indentation
15 - warning: bad-indentation
12 - refactor: useless-return
|
1 # -*- coding: utf-8 -*-
2 import redis
3 def inserintotc(str,type):
4 try:
5 r = redis.Redis(host='127.0.0.1', port=6379, db=0)
6 except:
7 print '连接redis失败'
8 else:
9 if type == 1:
10 r.lpush('start_urls', str)
11 def inserintota(str,type):
12 try:
13 r = redis.Redis(host='127.0.0.1', port=6379, db=0)
14 except:
15 print '连接redis失败'
16 else:
17 if type == 2:
18 r.lpush('tczufang_tc:requests', str) | 7 - error: syntax-error
|
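These helpers are also Python 2 (`print` statements) and shadow the built-ins `str` and `type`. A Python 3 sketch of the same two functions with renamed parameters and a named exception:

import redis


def insert_into_tc(value, kind):
    try:
        r = redis.Redis(host='127.0.0.1', port=6379, db=0)
    except redis.RedisError as exc:
        print('failed to connect to redis:', exc)
    else:
        if kind == 1:
            r.lpush('start_urls', value)


def insert_into_ta(value, kind):
    try:
        r = redis.Redis(host='127.0.0.1', port=6379, db=0)
    except redis.RedisError as exc:
        print('failed to connect to redis:', exc)
    else:
        if kind == 2:
            r.lpush('tczufang_tc:requests', value)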
1 # -*- coding: utf-8 -*-
2 res=u'\u4e30\u6cf0\u57ce'
3 # rr=res.encode('gbk')
4 print res | 4 - error: syntax-error
|
1
2 import hashlib
3 import datetime
4
5
6 def date_convert(value):
7 # 日期转化
8 try:
9 create_date = datetime.datetime.strptime(value, "%Y/%m/%d").date()
10 except Exception as e:
11 print(e)
12 create_date = datetime.datetime.now().date()
13
14 return create_date
15
16
17 def get_md5(url):
18 # url md5加密
19 if isinstance(url, str):
20 url = url.encode("utf-8")
21 m = hashlib.md5()
22 m.update(url)
23 return m.hexdigest()
24
25
26 if __name__ == '__main__':
27 print(date_convert('2020/02/28'))
28 print(get_md5('http://www.woshipm.com/it/3443027.html'))
| 10 - warning: broad-exception-caught
|
1 # You need to create an Item name 'played' for running this script
2 # item['ack_signal'] = int(response.meta['ack_signal']) - this line is used for sending ack signal to RabbitMQ
3 def parse(self, response):
4 item = played()
5 songs = response.xpath('//li[@class="player-in-playlist-holder"]')
6 indexr = response.url.find('date=')
7 indexr = indexr + 5
8 date = response.url[indexr:indexr + 10]
9
10 for song in songs:
11 item['timeplayed'] = song.xpath('.//span[@class="time"]/text()').extract()[0]
12 item['artist'] = song.xpath('.//div[@class="jp-title"]/strong//span//text()').extract()[0]
13 item['song'] = song.xpath('.//div[@class="jp-title"]/strong//em//text()').extract()[0]
14 item['dateplayed'] = date
15 item['ack_signal'] = int(response.meta['ack_signal'])
16 yield item | 4 - error: undefined-variable
3 - warning: unused-argument
|
1 import scrapy
2
3 class MySpider(scrapy.Spider):
4 name = 'myspider'
5 start_urls = ['http://example.com']
6
7 def parse(self, response):
8 print(f"Existing settings: {self.settings.attributes.keys()}")
9 class MyExtension:
10 def __init__(self, log_is_enabled=False):
11 if log_is_enabled:
12 print("log is enabled!")
13
14 @classmethod
15 def from_crawler(cls, crawler):
16 settings = crawler.settings
17 return cls(settings.getbool('LOG_ENABLED')) | 7 - warning: unused-argument
3 - refactor: too-few-public-methods
9 - refactor: too-few-public-methods
|
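For the extension half of this snippet, the `from_crawler` hook only runs once the class is registered in the project settings. A sketch of that registration; the module path is an assumption about where `MyExtension` lives:

# settings.py sketch: enable the extension and the flag it reads
EXTENSIONS = {
    'myproject.extensions.MyExtension': 500,
}
LOG_ENABLED = True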
1 # -*- coding: utf-8 -*-
2 from scrapy_redis.spiders import RedisSpider
3 from scrapy.selector import Selector
4 from tc_zufang.utils.result_parse import list_first_item
5 from scrapy.http import Request
6 from tc_zufang.items import TcZufangItem
7 import re
8 defaultencoding = 'utf-8'
9 '''
10 58同城的爬虫
11 '''
12 #继承自RedisSpider,则start_urls可以从redis读取
13 #继承自BaseSpider,则start_urls需要写出来
14 class TczufangSpider(RedisSpider):
15 name='tczufang'
16 redis_key = 'tczufang_tc:requests'
17 #解析从start_urls下载返回的页面
18 #页面页面有两个目的:
19 #第一个:解析获取下一页的地址,将下一页的地址传递给爬虫调度器,以便作为爬虫的下一次请求
20 #第二个:获取详情页地址,再对详情页进行下一步的解析
21 #对详情页进行下一步的解析
22 def parse(self, response):
23 tczufangItem=TcZufangItem()
24 response_url = re.findall('^http\:\/\/\w+\.58\.com', response.url)
25 response_selector = Selector(response)
26 # 字段的提取可以使用在终端上scrapy shell进行调试使用
27 # 帖子名称
28 raw_title=list_first_item(response_selector.xpath(u'//div[contains(@class,"house-title")]/h1[contains(@class,"c_333 f20")]/text()').extract())
29 if raw_title:
30 tczufangItem['title'] =raw_title.encode('utf8')
31 #t帖子发布时间,进一步处理
32 raw_time=list_first_item(response_selector.xpath(u'//div[contains(@class,"house-title")]/p[contains(@class,"house-update-info c_888 f12")]/text()').extract())
33 try:
34 tczufangItem['pub_time'] =re.findall(r'\d+\-\d+\-\d+\s+\d+\:\d+\:\d+',raw_time)[0]
35 except:
36 tczufangItem['pub_time']=0
37 #租金
38 tczufangItem['money']=list_first_item(response_selector.xpath(u'//div[contains(@class,"house-pay-way f16")]/span[contains(@class,"c_ff552e")]/b[contains(@class,"f36")]/text()').extract())
39 # 租赁方式
40 raw_method=list_first_item(response_selector.xpath(u'//ul[contains(@class,"f14")]/li[1]/span[2]/text()').extract())
41 try:
42 tczufangItem['method'] =raw_method.encode('utf8')
43 except:
44 tczufangItem['method']=0
45 # 所在区域
46 try:
47 area=response_selector.xpath(u'//ul[contains(@class,"f14")]/li/span/a[contains(@class,"c_333")]/text()').extract()[1]
48 except:
49 area=''
50 if area:
51 area=area
52 try:
53 area2=response_selector.xpath(u'//ul[contains(@class,"f14")]/li/span/a[contains(@class,"c_333")]/text()').extract()[2]
54 except:
55 area2=''
56 raw_area=area+"-"+area2
57 if raw_area:
58 raw_area=raw_area.encode('utf8')
59 tczufangItem['area'] =raw_area if raw_area else None
60 # 所在小区
61 try:
62 raw_community = response_selector.xpath(u'//ul[contains(@class,"f14")]/li/span/a[contains(@class,"c_333")]/text()').extract()[0]
63 if raw_community:
64 raw_community=raw_community.encode('utf8')
65 tczufangItem['community']=raw_community if raw_community else None
66 except:
67 tczufangItem['community']=0
68 # 帖子详情url
69 tczufangItem['targeturl']=response.url
70 #帖子所在城市
71 tczufangItem['city']=response.url.split("//")[1].split('.')[0]
72 #帖子的联系电话
73 try:
74 tczufangItem['phone']=response_selector.xpath(u'//div[contains(@class,"house-fraud-tip")]/span[1]/em[contains(@class,"phone-num")]/text()').extract()[0]
75 except:
76 tczufangItem['phone']=0
77 # 图片1的联系电话
78 try:
79 tczufangItem['img1'] = response_selector.xpath(u'//ul[contains(@class,"pic-list-wrap pa")]/li[1]/@data-src').extract()[0]
80 except:
81 tczufangItem['img1'] = 0
82 # 图片1的联系电话
83 try:
84 tczufangItem['img2'] = response_selector.xpath(u'//ul[contains(@class,"pic-list-wrap pa")]/li[2]/@data-src').extract()[0]
85 except:
86 tczufangItem['img2'] = 0
87 yield tczufangItem | 86 - warning: bad-indentation
24 - warning: anomalous-backslash-in-string
24 - warning: anomalous-backslash-in-string
24 - warning: anomalous-backslash-in-string
24 - warning: anomalous-backslash-in-string
24 - warning: anomalous-backslash-in-string
24 - warning: anomalous-backslash-in-string
28 - warning: redundant-u-string-prefix
32 - warning: redundant-u-string-prefix
35 - warning: bare-except
38 - warning: redundant-u-string-prefix
40 - warning: redundant-u-string-prefix
43 - warning: bare-except
48 - warning: bare-except
47 - warning: redundant-u-string-prefix
51 - warning: self-assigning-variable
54 - warning: bare-except
53 - warning: redundant-u-string-prefix
66 - warning: bare-except
62 - warning: redundant-u-string-prefix
75 - warning: bare-except
74 - warning: redundant-u-string-prefix
80 - warning: bare-except
79 - warning: redundant-u-string-prefix
85 - warning: bare-except
84 - warning: redundant-u-string-prefix
22 - refactor: too-many-statements
24 - warning: unused-variable
14 - refactor: too-few-public-methods
5 - warning: unused-import
|
1 # from __future__ import unicode_literals
2 # from django.utils.encoding import python_2_unicode_compatible
3 # from django.db import models
4 # from django.db.models.signals import pre_delete
5 # from django.dispatch import receiver
6 # from scrapy_djangoitem import DjangoItem
7 # from dynamic_scraper.models import Scraper, SchedulerRuntime
8
9
10 # @python_2_unicode_compatible
11 # class NewsWebsite(models.Model):
12 # name = models.CharField(max_length=200)
13 # url = models.URLField()
14 # scraper = models.ForeignKey(Scraper, blank=True, null=True, on_delete=models.SET_NULL)
15 # scraper_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
16
17 # def __str__(self):
18 # return self.name
19
20
21 # @python_2_unicode_compatible
22 # class Article(models.Model):
23 # title = models.CharField(max_length=200)
24 # news_website = models.ForeignKey(NewsWebsite)
25 # description = models.TextField(blank=True)
26 # url = models.URLField(blank=True)
27 # thumbnail = models.CharField(max_length=200, blank=True)
28 # checker_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
29
30 # def __str__(self):
31 # return self.title
32
33
34 # class ArticleItem(DjangoItem):
35 # django_model = Article
36
37
38 # @receiver(pre_delete)
39 # def pre_delete_handler(sender, instance, using, **kwargs):
40 # if isinstance(instance, NewsWebsite):
41 # if instance.scraper_runtime:
42 # instance.scraper_runtime.delete()
43
44 # if isinstance(instance, Article):
45 # if instance.checker_runtime:
46 # instance.checker_runtime.delete()
47
48 # pre_delete.connect(pre_delete_handler) | Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 import re
3 import json
4 import scrapy
5 import copy
6 from articles.items import PmArticlesItem
7 from articles.utils.common import date_convert
8
9
10 class PmSpiderSpider(scrapy.Spider):
11 name = 'pm_spider'
12 allowed_domains = ['woshipm.com']
13 # start_urls = ['http://www.woshipm.com/__api/v1/stream-list/page/1']
14 base_url = 'http://www.woshipm.com/__api/v1/stream-list/page/{}'
15
16 def start_requests(self):
17 for i in range(1, 10):
18 url = self.base_url.format(i)
19 yield scrapy.Request(url=url, callback=self.parse)
20
21 def parse(self, response):
22 item = PmArticlesItem()
23 # print(response.text)
24 data_set = json.loads(response.text)
25 # print(datas.get('payload'))
26 if data_set:
27 for data in data_set.get('payload'):
28 # print(data)
29 item["title"] = data.get("title", '')
30 item["create_date"] = date_convert(data.get("date", ''))
31 item["url"] = data.get("permalink", '')
32 # item["content"] = data.get("snipper", '').replace('\n', '').replace('\r', '')
33 item["view"] = data.get("view", '')
34 item["tag"] = re.search(r'tag">(.*?)<', data.get("category", '')).group(1)
35 item["url_id"] = data.get('id', '')
36 # print(item)
37 yield scrapy.Request(url=item["url"], callback=self.parse_detail, meta=copy.deepcopy({'item': item}))
38
39 def parse_detail(self, response):
40 item = response.meta['item']
41 content = response.xpath("//div[@class='grap']//text()").re(r'\S+')
42 item["content"] = ''.join(content)
43 # print(item)
44 yield item
45
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2
3 # Define here the models for your scraped items
4 #
5 # See documentation in:
6 # https://docs.scrapy.org/en/latest/topics/items.html
7
8 import redis
9 import scrapy
10 import datetime
11 from scrapy.loader.processors import MapCompose
12 from articles.model.es_types import ArticleType
13
14 from elasticsearch_dsl.connections import connections
15 es = connections.create_connection(ArticleType._doc_type.using)
16
17 redis_cli = redis.StrictRedis()
18
19
20 def gen_suggests(index, info_tuple):
21     # generate the search-suggestion array from the given strings
22 used_words = set()
23 suggests = []
24 for text, weight in info_tuple:
25 if text:
26             # call the ES analyze API to tokenize the string
27 words = es.indices.analyze(index=index, analyzer="ik_max_word", params={'filter': ["lowercase"]}, body=text)
28 anylyzed_words = set([r["token"] for r in words["tokens"] if len(r["token"]) > 1])
29 new_words = anylyzed_words - used_words
30 else:
31 new_words = set()
32
33 if new_words:
34 suggests.append({"input": list(new_words), "weight": weight})
35
36 return suggests
37
38
39 class PmArticlesItem(scrapy.Item):
40 # define the fields for your item here like:
41 title = scrapy.Field()
42 create_date = scrapy.Field()
43 url = scrapy.Field()
44 content = scrapy.Field()
45 view = scrapy.Field()
46 tag = scrapy.Field()
47 url_id = scrapy.Field()
48
49 def save_to_es(self):
50 article = ArticleType()
51 article.title = self['title']
52 article.create_date = self["create_date"]
53 article.content = self["content"]
54 article.url = self["url"]
55 article.view = self["view"]
56 article.tag = self["tag"]
57 article.meta.id = self["url_id"]
58
59 article.suggest = gen_suggests(ArticleType._doc_type.index, ((article.title, 10), (article.tag, 7)))
60
61 article.save()
62
63         redis_cli.incr("pm_count")  # keep a crawl counter in redis
64
65 return
66
| 15 - warning: protected-access
28 - refactor: consider-using-set-comprehension
59 - warning: protected-access
49 - refactor: useless-return
39 - refactor: too-few-public-methods
10 - warning: unused-import
11 - warning: unused-import
|
1 from django.urls import path
2 from . import views
3 from django.conf.urls import include, url
4 from django.views import generic
5 from material.frontend import urls as frontend_urls
6
7 urlpatterns = [
8 path('', views.home, name='home'),
9 path('$/', generic.RedirectView.as_view(url='/workflow/', permanent=False)),
10 path('/', include(frontend_urls)),
11 ]
12
13
14 # Viewflow PRO Feature Set
15
16 # Celery integration
17 # django-guardian integration
18 # Flow graph visualization
19 # Flow BPMN export
20 # Material Frontend
21
22 # Process dashboard view
23 # Flow migration support
24 # Subprocess support
25 # REST API support
26
| 2 - error: no-name-in-module
3 - warning: unused-import
|
1 from django.shortcuts import render
2 from .forms import SearchForm
3 import requests
4 def base(request):
5 # import requests
6
7 # # url = "https://gplaystore.p.rapidapi.com/newFreeApps"
8 # url="https://libraries.io/api/"
9 # querystring = {"platforms":"NPM/base62"}
10
11 # headers = {'x-rapidapi-key': "?api_key=306cf1684a42e4be5ec0a1c60362c2ef'" }
12
13 # response = requests.request("GET", url, headers=headers, params=querystring)
14
15 # print(response.text)
16
17 return render(request, 'base.html'
18 )
19
20 def home(request):
21
22
23
24 # Platforms=(' https://libraries.io/api/platforms?api_key=306cf1684a42e4be5ec0a1c60362c2ef')
25 # Project=('https://libraries.io/api/NPM/base62?api_key=306cf1684a42e4be5ec0a1c60362c2ef')
26
27 # url=requests()
28 # url='https://libraries.io/api/:platform/:name/dependent_repositories?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
29 # url=requests.get('https://libraries.io/api/github/librariesio/repositories?api_key=306cf1684a42e4be5ec0a1c60362c2ef')
30 url=requests.get('https://libraries.io/api/platforms?api_key=306cf1684a42e4be5ec0a1c60362c2ef')
31
32 form=url.json()
33 return render(request, 'index.html',{
34 'form':form
35 }
36 )
37
38
39 def Search(request):
40 # form= SearchForm()
41 # query=None
42 # results=[]
43
44 # # if 'query' in requests.GET:
45 # # form=SearchForm(request.GET)
46 # # if form.is_valid():
47 # # query=form.cleaned_data['query']
48 # # results=Post.published.annotate(
49 # # search =SearchVector('title','body'),
50 # # ).filter(search=query)
51 r=requests.get('https://libraries.io/api/search?q=&api_key=306cf1684a42e4be5ec0a1c60362c2ef')
52
53 dr=r.json()
54 return render(request, 'Search.html',{
55 'search':dr
56 }
57 )
58
59
60
61 # def post_search(request):
62 # form= SearchForm()
63
64 # payload={'key1':'search?q=','key2':['form','&api_key=306cf1684a42e4be5ec0a1c60362c2ef']}
65
66 # url=requests.get=('https://libraries.io/api/get',params=payload)
67 # # results=[]
68 # # if 'query' in request.GET:
69 # # form=SearchForm(
70 # # if form.is_valid():
71 # # query=form.cleaned_data['query']
72 # # results=Post.published.annotate(
73 # # search =SearchVector('title','body'),
74 # # ).filter(search=query)
75 # return render(request,'search.html',{
76 # 'url':url,
77 # # 'query':query,
78 # # 'results':results
79 # })
80
81
| 17 - warning: bad-indentation
2 - error: relative-beyond-top-level
30 - warning: missing-timeout
51 - warning: missing-timeout
2 - warning: unused-import
|
1 from django.http.response import HttpResponse
2 from requests_oauthlib import OAuth2Session
3
4
5 import json
6
7 import requests_oauthlib
8 from django.HttpResponse import request
9 import requests
10 from django.shortcuts import redirect, session,
11
12 # payload={'key1':'search?q=','key2':['form','&api_key=306cf1684a42e4be5ec0a1c60362c2ef']}
13 # client_id = '&api_key=306cf1684a42e4be5ec0a1c60362c2ef'
14 client_id = "<your client key>"
15 client_secret = "<your client secret>"
16 authorization_base_url = 'https://github.com/login/oauth/authorize'
17 token_url = 'https://github.com/login/oauth/access_token'
18
19
20
21 @app.route("/login")
22 def login():
23 github = OAuth2Session(client_id)
24 authorization_url, state = github.authorization_url(authorization_base_url)
25
26 # State is used to prevent CSRF, keep this for later.
27 session['oauth_state'] = state
28 return redirect(authorization_url)
29
30
31
32 @app.route("/callback")
33 def callback():
34 github = OAuth2Session(client_id, state=session['oauth_state'])
35 token = github.fetch_token(token_url, client_secret=client_secret,
36 authorization_response=request.url)
37
38 return json(github.get('https://api.github.com/user').json()) | 10 - error: syntax-error
|
1 #rabbitmq and mongodb settings
2 SCHEDULER = ".rabbitmq.scheduler.Scheduler"
3 SCHEDULER_PERSIST = True
4 RABBITMQ_HOST = 'ip address'
5 RABBITMQ_PORT = 5672
6 RABBITMQ_USERNAME = 'guest'
7 RABBITMQ_PASSWORD = 'guest'
8
9 MONGODB_PUBLIC_ADDRESS = 'ip:port' # This will be shown on the web interface, but won't be used for connecting to DB
10 MONGODB_URI = 'ip:port' # Actual uri to connect to DB
11 MONGODB_USER = ''
12 MONGODB_PASSWORD = ''
13 MONGODB_SHARDED = False
14 MONGODB_BUFFER_DATA = 100
15
16 LINK_GENERATOR = 'http://192.168.0.209:6800' # Set your link generator worker address here
17 SCRAPERS = ['http://192.168.0.210:6800',
18 'http://192.168.0.211:6800', 'http://192.168.0.212:6800'] # Set your scraper worker addresses here
19
20 LINUX_USER_CREATION_ENABLED = False # Set this to True if you want a linux user account created during registration
| Clean Code: No Issues Detected
|
1 from django.urls import path
2 from . import views
3
4
5
6 urlpatterns = [
7 path('', views.api, name='api'),
8 path('t/', views.simple_upload, name='test'),
9
10 ]
| 2 - error: no-name-in-module
|
1 # -*- coding: utf-8 -*-
2
3 try:
4 import pika
5 except ImportError:
6 raise ImportError("Please install pika before running scrapy-rabbitmq.")
7
8
9 RABBITMQ_CONNECTION_TYPE = 'blocking'
10 RABBITMQ_CONNECTION_PARAMETERS = {'host': 'localhost'}
11
12
13 def from_settings(settings, spider_name):
14
15 connection_type = settings.get('RABBITMQ_CONNECTION_TYPE',
16 RABBITMQ_CONNECTION_TYPE)
17 queue_name = "%s:requests" % spider_name
18 connection_host = settings.get('RABBITMQ_HOST')
19 connection_port = settings.get('RABBITMQ_PORT')
20 connection_username = settings.get('RABBITMQ_USERNAME')
21 connection_pass = settings.get('RABBITMQ_PASSWORD')
22
23 connection_attempts = 5
24 retry_delay = 3
25
26 credentials = pika.PlainCredentials(connection_username, connection_pass)
27
28 connection = {
29 'blocking': pika.BlockingConnection,
30 'libev': pika.LibevConnection,
31 'select': pika.SelectConnection,
32 'tornado': pika.TornadoConnection,
33 'twisted': pika.TwistedConnection
34 }[connection_type](pika.ConnectionParameters(host=connection_host,
35 port=connection_port, virtual_host='/',
36 credentials=credentials,
37 connection_attempts=connection_attempts,
38 retry_delay=retry_delay))
39
40 channel = connection.channel()
41 channel.queue_declare(queue=queue_name, durable=True)
42
43 return channel
44
45
46 def close(channel):
47 channel.close()
| 6 - warning: raise-missing-from
|
1 # -*- coding: utf-8 -*-
2
3 # return None if there is no next-page URL
4 list_first_item = lambda x:x[0] if x else None
| Clean Code: No Issues Detected
|
1 from django.conf.urls import url, include
2 import oauth2_provider.views as oauth2_views
3 from django.conf import settings
4 from .views import ApiEndpoint
5 from django.urls import include, path
6
7 # OAuth2 provider endpoints
8 oauth2_endpoint_views = [
9 path('authorize/', oauth2_views.AuthorizationView.as_view(), name="authorize"),
10 path('token/', oauth2_views.TokenView.as_view(), name="token"),
11 path('revoke-token/', oauth2_views.RevokeTokenView.as_view(), name="revoke-token"),
12 ]
13
14 if settings.DEBUG:
15 # OAuth2 Application Management endpoints
16 oauth2_endpoint_views += [
17 path('applications/', oauth2_views.ApplicationList.as_view(), name="list"),
18 path('applications/register/', oauth2_views.ApplicationRegistration.as_view(), name="register"),
19 path('applications/<pk>/', oauth2_views.ApplicationDetail.as_view(), name="detail"),
20 path('applications/<pk>/delete/', oauth2_views.ApplicationDelete.as_view(), name="delete"),
21 path('applications/<pk>/update/', oauth2_views.ApplicationUpdate.as_view(), name="update"),
22 ]
23
24 # OAuth2 Token Management endpoints
25 oauth2_endpoint_views += [
26 path('authorized-tokens/', oauth2_views.AuthorizedTokensListView.as_view(), name="authorized-token-list"),
27 path('authorized-tokens/<pk>/delete/', oauth2_views.AuthorizedTokenDeleteView.as_view(),
28 name="authorized-token-delete"),
29 ]
30
31 urlpatterns = [
32 # OAuth 2 endpoints:
33 path('o/', include(oauth2_endpoint_views, namespace="oauth2_provider")),
34 path('api/hello', ApiEndpoint.as_view()), # an example resource endpoint
35 ]
| 4 - error: relative-beyond-top-level
5 - warning: reimported
1 - warning: unused-import
|
1 # Generated by Django 3.1.3 on 2020-11-13 06:33
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('core', '0003_auto_20201113_0620'),
10 ]
11
12 operations = [
13 migrations.AddField(
14 model_name='feeds',
15 name='description',
16 field=models.TextField(blank=True),
17 ),
18 migrations.AlterField(
19 model_name='feeds',
20 name='overview',
21 field=models.TextField(max_length=20),
22 ),
23 ]
| 6 - refactor: too-few-public-methods
|
1 from django import forms
2
3 from .models import Products
4
5
6
7
8
9
10 class productForm(forms.ModelForm):
11 class Meta:
12 model=Products
13 fields=['title','description','price'] | 11 - warning: bad-indentation
12 - warning: bad-indentation
13 - warning: bad-indentation
3 - error: relative-beyond-top-level
11 - refactor: too-few-public-methods
10 - refactor: too-few-public-methods
|
1 import http.client
2
3 conn = http.client.HTTPSConnection("bloomberg-market-and-financial-news.p.rapidapi.com")
4
5 headers = {
6 'x-rapidapi-key': "bd689f15b2msh55122d4390ca494p17cddcjsn225c43ecc6d4",
7 'x-rapidapi-host': "bloomberg-market-and-financial-news.p.rapidapi.com"
8 }
9
10 conn.request("GET", "/market/get-cross-currencies?id=aed%2Caud%2Cbrl%2Ccad%2Cchf%2Ccnh%2Ccny%2Ccop%2Cczk%2Cdkk%2Ceur%2Cgbp%2Chkd%2Chuf%2Cidr%2Cils%2Cinr%2Cjpy%2Ckrw%2Cmxn%2Cmyr%2Cnok%2Cnzd%2Cphp%2Cpln%2Crub%2Csek%2Csgd%2Cthb%2Ctry%2Ctwd%2Cusd%2Czar", headers=headers)
11
12 res = conn.getresponse()
13 data = res.read()
14
15
16 # print(data.decode("utf-8"))
17 print(data.decode("utf-8")) | Clean Code: No Issues Detected
|
1 from django import forms
2
3
4 #Building a search view
5
6
7
8 class SearchForm(forms.Form):
9 query =forms.CharField()
10
11
12 class uploadForm(forms.ModelForm):
13 images=forms.ImageField()
14
15
16
17 # # from .forms import EmailPostForm, CommentForm , SearchForm
18 # User Repositories='https://libraries.io/api/github/:login/repositories?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
19 # user=' https://libraries.io/api/github/andrew?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
20 # Repository=' https://libraries.io/api/github/:owner/:name?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
21 # =' https://libraries.io/api/github/gruntjs/grunt/projects?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
22 # ProjectSearch=' https://libraries.io/api/search?q=grunt&api_key=306cf1684a42e4be5ec0a1c60362c2ef'
23 # Platforms= ' GET https://libraries.io/api/platforms?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
24 # https://libraries.io/api/NPM/base62?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
25
26 # ProjectDependen https://libraries.io/api/:platform/:name/:version/dependencies?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
27 # ' https://libraries.io/api/NPM/base62/2.0.1/dependencies?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
28 # DependentReposito= https://libraries.io/api/NPM/base62/dependent_repositories?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
29 # ProjectContributo= https://libraries.io/api/NPM/base62/contributors?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
30 # ProjectSourceRank='https://libraries.io/api/NPM/base62/sourcerank?api_key=306cf1684a42e4be5ec0a1c60362c2ef' | 8 - refactor: too-few-public-methods
12 - refactor: too-few-public-methods
|
1 from django.conf.urls import url
2 from . import views
3
4 urlpatterns = [
5 url('api/', views.apiurl, name='index'),
6
7 ] | 2 - error: no-name-in-module
|
1 """mysite URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/1.8/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
12 Including another URLconf
13 1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
14 """
15 from django.conf.urls import include, url
16 from django.contrib import admin
17 from mysite.views import custom_login, custom_register
18 from django.contrib.auth.views import logout
19 import scrapyproject.urls as projecturls
20
21 urlpatterns = [
22 url(r'^admin/', include(admin.site.urls)),
23 url(r'^accounts/login/$', custom_login, name='login'),
24 url(r'^accounts/register/$', custom_register, name='registration_register'),
25 url(r'^accounts/logout/$', logout, {'next_page': '/project'}, name='logout'),
26 url(r'^project/', include(projecturls)),
27 ]
| Clean Code: No Issues Detected
|
1 import collections
2 from scrapy.exceptions import DropItem
3 from scrapy.exceptions import DropItem
4
5 import pymongo
6
7 class TutoPipeline(object):
8 vat=2.55
9
10 def process_item(self, item, spider):
11 if item["price"]:
12 if item['exclues_vat']:
13 item['price']= item['price']*self.vat
14 return item
15
16 else:
17 raise DropItem("missing price in %s"% item)
18
19 return item
20
21
22
23 class MongoPipline(object):
24 collections_name='scrapy_list'
25
26 def __init__(self,mongo_uri,mongo_db):
27 self.mongo_uri= mongo_uri
28 self.mongo_db=mongo_db
29
30 @classmethod
31     def from_crawler(cls,crawler):
32 return cls(
33 mongo_uri=crawler.settings.get('MONGO_URI'),
34
35 mongo_db=crawler.settings.get('MONGO_DB','Lists')
36 )
37
38 def open_spider(self,spider):
39 self.client=pymongo.MongoClient(self.mongo_uri)
40 self.db=self.client[self.mongo_db]
41
42
43 def close_spider(self,spider):
44 self.client.close()
45
46
47 def process_item(self,item,spider):
48 self.db[self.collection_name].insert(dict(item))
49 return item
50
51 # You can specify the MongoDB address and
52 # database name in the Scrapy settings, and the MongoDB
53 # collection can be named after the item class.
54 # The from_crawler() method above shows how to collect
55 # those resources properly (a settings sketch follows this listing).
56
57
58
59 class DuplicatePiline(object):
60 def __init__(self):
61 self.ids_seen=set()
62
63
64 def process_item(self,item,spider):
65 if item['id' ] in self.ids_seen:
66 raise DropItem("Repacted Item Found:%s"%item)
67
68 else:
69 self.ids_seen.add(item['id'])
70
71 return item
72
| 3 - warning: reimported
7 - refactor: useless-object-inheritance
12 - refactor: no-else-return
10 - warning: unused-argument
7 - refactor: too-few-public-methods
23 - refactor: useless-object-inheritance
38 - warning: unused-argument
43 - warning: unused-argument
48 - error: no-member
47 - warning: unused-argument
39 - warning: attribute-defined-outside-init
40 - warning: attribute-defined-outside-init
59 - refactor: useless-object-inheritance
65 - refactor: no-else-raise
64 - warning: unused-argument
59 - refactor: too-few-public-methods
1 - warning: unused-import
|
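A minimal sketch of the Scrapy settings that MongoPipline.from_crawler() above would read. Only the MONGO_URI and MONGO_DB keys come from the pipeline code; the project module path "tuto.pipelines" and the pipeline order value are illustrative assumptions.

# settings.py (sketch, not part of the original project)
MONGO_URI = 'mongodb://localhost:27017'   # read via crawler.settings.get('MONGO_URI')
MONGO_DB = 'Lists'                        # matches the default used in from_crawler()
ITEM_PIPELINES = {
    'tuto.pipelines.MongoPipline': 300,   # assumed module path; 300 is an arbitrary order value
}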
1 import logging
2 import scrapy
3
4 logger = logging.getLogger('mycustomlogger')
5
6 class MySpider(scrapy.Spider):
7
8 name = 'myspider1'
9 start_urls = ['https://scrapinghub.com']
10
11 def parse(self, response):
12 logger.info('Parse function called on %s', response.url) | 6 - refactor: too-few-public-methods
|
1 import scrapy
2
3
4 class PySpider(scrapy.Spider):
5 name = 'quots'
6 # start_urls = [
7 def start_requests(self):
8 urls=['https://pypi.org/']
9
10
11 for url in urls:
12 yield scrapy.Request(url=url, callback=self.parse)
13
14
15 # return super().start_requests()()
16
17 def parse(self, response):
18         page=response.url.split("/")[-2]  # use the second-to-last URL segment as the page name
19 response.xpath('/html/body/main/div[4]/div/text()').get()
20
21
22 filename=f'pyp-{page}.html'
23
24
25 with open (filename,'wb')as f:
26 f.write(response.body)
27 self.log(f'saved file{filename}')
28
29
30 # return super().parse(response) | Clean Code: No Issues Detected
|
1 from django import forms
2
3 from core.models import Comment
4
5 #Building a search view
6 class SearchForm(forms.Form):
7 query =forms.CharField()
8
9
10
11 class EmailPostForm(forms.Form):
12 name = forms.CharField(max_length=25)
13 email = forms.EmailField()
14 to = forms.EmailField()
15 comments = forms.CharField(required=False,
16 widget=forms.Textarea)
17
18
19 class CommentForm(forms.ModelForm):
20 url = forms.URLField(label='网址', required=False)
21 email = forms.EmailField(label='电子邮箱', required=True)
22 name = forms.CharField(
23 label='姓名',
24 widget=forms.TextInput(
25 attrs={
26 'value': "",
27 'size': "30",
28 'maxlength': "245",
29 'aria-required': 'true'}))
30 parent_comment_id = forms.IntegerField(
31 widget=forms.HiddenInput, required=False)
32
33 class Meta:
34 model = Comment
35 fields = ['body'] | 6 - refactor: too-few-public-methods
11 - refactor: too-few-public-methods
33 - refactor: too-few-public-methods
19 - refactor: too-few-public-methods
|
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3 from mongoengine import *
4 from django.db import models
5
6 # Create your models here.
7 class ItemInfo(Document):
8     # post title
9     title = StringField()
10     # rent
11     money = StringField()
12     # rental method
13     method = StringField()
14     # district
15     area = StringField()
16     # residential community
17     community = StringField()
18     # post detail url
19     targeturl = StringField()
20     # post publish time
21     pub_time = StringField()
22     # city
23     city = StringField()
24     phone = StringField()
25     img1= StringField()
26     img2 = StringField()
27     # the MongoDB collection this document maps to
28     meta={'collection':'zufang_detail'}
| 3 - warning: wildcard-import
7 - error: undefined-variable
9 - error: undefined-variable
11 - error: undefined-variable
13 - error: undefined-variable
15 - error: undefined-variable
17 - error: undefined-variable
19 - error: undefined-variable
21 - error: undefined-variable
23 - error: undefined-variable
24 - error: undefined-variable
25 - error: undefined-variable
26 - error: undefined-variable
7 - refactor: too-few-public-methods
4 - warning: unused-import
|
1 from django.shortcuts import render
2 from urllib.request import urlopen
3 from django.shortcuts import render
4 from django.views import View
5 import requests
6
7 # class apiurl(View):
8
9 def apiurl(request):
10 url =requests('https://api.github.com/')
11
12 data=url.requests.json()
13 context ={
14 'data':data
15 }
16
17 return render(request,'index.html', context)
18
19
| 3 - warning: reimported
10 - error: not-callable
2 - warning: unused-import
4 - warning: unused-import
|
1 # -*- coding: utf-8 -*-
2 """
3 -------------------------------------------------
4 File Name: urls.py
5 Description :
6 Author : JHao
7 date: 2017/4/13
8 -------------------------------------------------
9 Change Activity:
10 2017/4/13:
11 -------------------------------------------------
12 """
13 __author__ = 'JHao'
14
15 from blog import views
16 from django.urls import path
17
18 urlpatterns = [
19 path('', views.index, name='index'),
20 path('list/', views.blog_list, name='list'),
21 path('tag/<str:name>/', views.tag, name='tag'),
22 path('category/<str:name>/', views.category, name='category'),
23 path('detail/<int:pk>/', views.detail, name='detail'),
24 path('archive/', views.archive, name='archive'),
25 path('search/', views.search, name='search'),
26 path('message/', views.message, name='message'),
27 path('getComment/', views.get_comment, name='get_comment'),
28
29 ]
30
| Clean Code: No Issues Detected
|
1 from fpdf import FPDF
2 from PIL import Image
3 import you
4 import os
5 pdf = FPDF()
6 imagelist = []  # Contains the list of all images to be converted to PDF.
7 
8 
9 # --------------- USER INPUT -------------------- #
10 
11 folder = "/home/rudi/Documents/Pictures/1.png"  # Folder containing all the images.
12 name = "pdf"  # Name of the output PDF file.
13 
14 
15 # ------------- ADD ALL THE IMAGES IN A LIST ------------- #
16 
17 for dirpath, dirnames, filenames in os.walk(folder):
18     for filename in [f for f in filenames if f.endswith(".jpg")]:
19         full_path = os.path.join(dirpath, filename)
20         imagelist.append(full_path)
21 
22 imagelist.sort()  # Sort the images by name.
23 for i in range(0, len(imagelist)):
24     print(imagelist[i])
25 
26 # --------------- ROTATE ANY LANDSCAPE MODE IMAGE IF PRESENT ----------------- #
27 
28 for i in range(0, len(imagelist)):
29     im1 = Image.open(imagelist[i])  # Open the image.
30     width, height = im1.size  # Get the width and height of that image.
31     if width > height:
32         im2 = im1.transpose(Image.ROTATE_270)  # If width > height, rotate the image.
33         os.remove(imagelist[i])  # Delete the previous image.
34         im2.save(imagelist[i])  # Save the rotated image.
35 # im.save
36 
37 print("\nFound " + str(len(imagelist)) + " image files. Converting to PDF....\n")
38 
39 
40 # -------------- CONVERT TO PDF ------------ #
41 
42 for image in imagelist:
43     pdf.add_page()
44     pdf.image(image, 0, 0, 210, 297)  # 210 and 297 are the dimensions of an A4 size sheet.
45 
46 pdf.output(folder + name, "F")  # Save the PDF.
47 
48 print("PDF generated successfully!") | 32 - error: no-member
3 - warning: unused-import
|
1 # -*- coding: utf-8 -*-
2 # Create your views here.
3
4 import json
5 from django.http import JsonResponse
6 from django_blog.util import PageInfo
7 from blog.models import Article, Comment
8 from django.views.decorators.csrf import csrf_exempt
9 from django.shortcuts import render, get_object_or_404
10
11
12 def get_page(request):
13 page_number = request.GET.get("page")
14 return 1 if not page_number or not page_number.isdigit() else int(page_number)
15
16
17 def index(request):
18 _blog_list = Article.objects.all().order_by('-date_time')[0:5]
19 _blog_hot = Article.objects.all().order_by('-view')[0:6]
20 return render(request, 'blog/index.html', {"blog_list": _blog_list, "blog_hot": _blog_hot})
21
22
23 def blog_list(request):
24 """
25     Article list
26 :param request:
27 :return:
28 """
29 page_number = get_page(request)
30 blog_count = Article.objects.count()
31 page_info = PageInfo(page_number, blog_count)
32 _blog_list = Article.objects.all()[page_info.index_start: page_info.index_end]
33 return render(request, 'blog/list.html', {"blog_list": _blog_list, "page_info": page_info})
34
35
36 def category(request, name):
37 """
38     Category listing
39 :param request:
40 :param name:
41 :return:
42 """
43 page_number = get_page(request)
44 blog_count = Article.objects.filter(category__name=name).count()
45 page_info = PageInfo(page_number, blog_count)
46 _blog_list = Article.objects.filter(category__name=name)[page_info.index_start: page_info.index_end]
47 return render(request, 'blog/category.html', {"blog_list": _blog_list, "page_info": page_info,
48 "category": name})
49
50
51 def tag(request, name):
52 """
53     Tag listing
54 :param request:
55 :param name
56 :return:
57 """
58 page_number = get_page(request)
59 blog_count = Article.objects.filter(tag__tag_name=name).count()
60 page_info = PageInfo(page_number, blog_count)
61 _blog_list = Article.objects.filter(tag__tag_name=name)[page_info.index_start: page_info.index_end]
62 return render(request, 'blog/tag.html', {"blog_list": _blog_list,
63 "tag": name,
64 "page_info": page_info})
65
66
67 def archive(request):
68 """
69     Article archive
70 :param request:
71 :return:
72 """
73 _blog_list = Article.objects.values("id", "title", "date_time").order_by('-date_time')
74 archive_dict = {}
75 for blog in _blog_list:
76 pub_month = blog.get("date_time").strftime("%Y年%m月")
77 if pub_month in archive_dict:
78 archive_dict[pub_month].append(blog)
79 else:
80 archive_dict[pub_month] = [blog]
81 data = sorted([{"date": _[0], "blogs": _[1]} for _ in archive_dict.items()], key=lambda item: item["date"],
82 reverse=True)
83 return render(request, 'blog/archive.html', {"data": data})
84
85
86 def message(request):
87 return render(request, 'blog/message_board.html', {"source_id": "message"})
88
89
90 @csrf_exempt
91 def get_comment(request):
92 """
93     Receive the comment push-back from Changyan, sent via POST
94 :param request:
95 :return:
96 """
97 arg = request.POST
98 data = arg.get('data')
99 data = json.loads(data)
100 title = data.get('title')
101 url = data.get('url')
102 source_id = data.get('sourceid')
103 if source_id not in ['message']:
104 article = Article.objects.get(pk=source_id)
105 article.commenced()
106 comments = data.get('comments')[0]
107 content = comments.get('content')
108 user = comments.get('user').get('nickname')
109 Comment(title=title, source_id=source_id, user_name=user, url=url, comment=content).save()
110 return JsonResponse({"status": "ok"})
111
112
113 def detail(request, pk):
114 """
115     Blog post detail
116 :param request:
117 :param pk:
118 :return:
119 """
120 blog = get_object_or_404(Article, pk=pk)
121 blog.viewed()
122 return render(request, 'blog/detail.html', {"blog": blog})
123
124
125 def search(request):
126 """
127     Search
128 :param request:
129 :return:
130 """
131 key = request.GET['key']
132 page_number = get_page(request)
133 blog_count = Article.objects.filter(title__icontains=key).count()
134 page_info = PageInfo(page_number, blog_count)
135 _blog_list = Article.objects.filter(title__icontains=key)[page_info.index_start: page_info.index_end]
136 return render(request, 'blog/search.html', {"blog_list": _blog_list, "pages": page_info, "key": key})
137
138
139 def page_not_found_error(request, exception):
140 return render(request, "404.html", status=404)
141
142
143 def page_error(request):
144 return render(request, "404.html", status=500)
| 139 - warning: unused-argument
|
1 # -*- coding: utf-8 -*-
2 """
3 -------------------------------------------------
4 File Name: blogroll
5 Description :
6 Author : JHao
7 date: 2020/10/9
8 -------------------------------------------------
9 Change Activity:
10 2020/10/9:
11 -------------------------------------------------
12 """
13 __author__ = 'JHao'
14
15 sites = [
16 {"url": "https://www.zaoshu.io/", "name": "造数", "desc": "智能云爬虫"},
17 {"url": "http://brucedone.com/", "name": "大鱼的鱼塘", "desc": "大鱼的鱼塘 - 一个总会有收获的地方"},
18 {"url": "http://www.songluyi.com/", "name": "灯塔水母", "desc": "灯塔水母"},
19 {"url": "http://blog.topspeedsnail.com/", "name": "斗大的熊猫", "desc": "本博客专注于技术,Linux,编程,Python,C,Ubuntu、开源软件、Github等"},
20 {"url": "https://www.urlteam.org/", "name": "URL-team", "desc": "URL-team"},
21 ] | Clean Code: No Issues Detected
|
1 from django.contrib import admin
2 from .models import Project, Item, Field, Pipeline
3
4 # Register your models here.
5 admin.site.register(Project)
6 admin.site.register(Item)
7 admin.site.register(Field)
8 admin.site.register(Pipeline) | 2 - error: relative-beyond-top-level
|
1 # -*- coding: utf-8 -*-
2 """
3 -------------------------------------------------
4 File Name: context_processors.py
5 Description :
6 Author : JHao
7 date: 2017/4/14
8 -------------------------------------------------
9 Change Activity:
10 2017/4/14:
11 -------------------------------------------------
12 """
13 __author__ = 'JHao'
14
15 import importlib
16 from django_blog import blogroll
17 from blog.models import Category, Article, Tag, Comment
18
19
20 def sidebar(request):
21 category_list = Category.objects.all()
22     # all categories
23 
24     blog_top = Article.objects.all().values("id", "title", "view").order_by('-view')[0:6]
25     # top articles by view count
26 
27     tag_list = Tag.objects.all()
28     # tags
29 
30     comment = Comment.objects.all().order_by('-create_time')[0:6]
31     # latest comments
32 
33     importlib.reload(blogroll)
34     # blogroll links
35
36 return {
37 'category_list': category_list,
38 'blog_top': blog_top,
39 'tag_list': tag_list,
40 'comment_list': comment,
41 'blogroll': blogroll.sites
42
43 }
44
45
46 if __name__ == '__main__':
47 pass
| 20 - warning: unused-argument
|
1 # -*- coding: utf-8 -*-
2
3 # Define your item pipelines here
4 #
5 # Don't forget to add your pipeline to the ITEM_PIPELINES setting
6 # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
7
8 import logging
9 # import MySQLdb
10 # import MySQLdb.cursors
11 import copy
12 import pymysql
13 from twisted.enterprise import adbapi
14
15
16 # class ArticlesPipeline(object):
17 # def process_item(self, item, spider):
18 # return item
19
20
21 class MysqlTwistedPipeline(object):
22 def __init__(self, dbpool):
23 self.dbpool = dbpool
24
25 @classmethod
26     def from_settings(cls, settings):  # fixed method name, called by scrapy; settings values are available directly (an example settings block follows this listing)
27         """
28         Set up the database connection.
29         :param settings: configuration parameters
30         :return: an instance of the pipeline
31         """
32         adbparams = dict(
33             host=settings['MYSQL_HOST'],
34             db=settings['MYSQL_DBNAME'],
35             user=settings['MYSQL_USER'],
36             password=settings['MYSQL_PASSWORD'],
37             cursorclass=pymysql.cursors.DictCursor  # specify the cursor type
38         )
39 
40         # connect to the ConnectionPool, using pymysql (or MySQLdb)
41         dbpool = adbapi.ConnectionPool('pymysql', **adbparams)
42         # return the instantiated pipeline
43         return cls(dbpool)
44 
45     def process_item(self, item, spider):
46         """
47         Use twisted to make the MySQL insert asynchronous: run the concrete SQL operation through the connection pool and return an object.
48         """
49         # deep-copy the item so slow inserts do not cause duplicated data
50         item = copy.deepcopy(item)
51         query = self.dbpool.runInteraction(self.do_insert, item)  # specify the operation and the data
52         # add error handling
53         query.addCallback(self.handle_error)  # handle exceptions
54 
55     def do_insert(self, cursor, item):
56         # perform the insert; no explicit commit is needed, twisted commits automatically
57 insert_sql = """
58 insert into pm_article(title, create_date, url, content, view, tag, url_id) VALUES (%s, %s, %s, %s, %s, %s, %s)
59 """
60 cursor.execute(insert_sql, (item['title'], item['create_date'], item['url'],
61 item['content'], item['view'], item['tag'], item['url_id']))
62
63 def handle_error(self, failure):
64 if failure:
65             # print the error message
66 print(failure)
67
68
69 class ElasticsearchPipeline(object):
70     # write the data into Elasticsearch
71     def process_item(self, item, spider):
72         # convert the item into ES data
73 item.save_to_es()
74
75 return item
| 21 - refactor: useless-object-inheritance
32 - refactor: use-dict-literal
45 - warning: unused-argument
69 - refactor: useless-object-inheritance
71 - warning: unused-argument
69 - refactor: too-few-public-methods
8 - warning: unused-import
|
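For context, a minimal sketch of the settings that MysqlTwistedPipeline.from_settings() above expects. The four MYSQL_* key names are taken from the pipeline code; the concrete values and the ITEM_PIPELINES wiring (module path "articles.pipelines") are illustrative assumptions.

# settings.py (sketch; values are placeholders)
MYSQL_HOST = 'localhost'
MYSQL_DBNAME = 'articles'        # assumed database name
MYSQL_USER = 'root'
MYSQL_PASSWORD = ''
ITEM_PIPELINES = {
    'articles.pipelines.MysqlTwistedPipeline': 300,    # assumed module path; order values are arbitrary
    'articles.pipelines.ElasticsearchPipeline': 400,
}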
1 from django.urls import path
2 from .views import (
3 PostListView,
4 PostDetailView,
5 # PostCreateView,
6 # PostUpdateView,
7 # PostDeleteView,
8 # UserPostListView
9 )
10 from . import views
11
12 from .feeds import LatestPostsFeed
13
14 urlpatterns = [
15 path('', views.home, name='home'),
16 path('blogs/', views.PostListView.as_view(), name='post_list'),
17 path('blog/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
18 path('about/', views.about, name='about'),
19 path('<int:post_id>/share/',views.post_share, name='post_share'),
20 path('feed/', LatestPostsFeed(), name='post_feed'),
21 path('search/', views.post_search, name='post_search'),
22
23 path('api/', views.post_api, name='post_api'),
24
25 path('blog/', views.post_list, name='post_list'),
26 path('<int:year>/<slug:post>/',
27 views.post_detail,
28 name='post_detail'),
29 path('tag/<slug:tag_slug>/',
30 views.post_list, name='post_list_by_tag'),
31 ]
| 2 - error: relative-beyond-top-level
10 - error: no-name-in-module
12 - error: relative-beyond-top-level
2 - warning: unused-import
|
1 # -*- coding: utf-8 -*-
2 import scrapy
3
4 from properties.items import PropertiesItem
5 class BasicSpider(scrapy.Spider):
6 name = 'basic'
7 allowed_domains = ['web']
8 start_urls = (
9 # 'http://web:9312/properties/property_000000.html',
10 # 'https://www.coreapi.org/#examples',
11 # 'https://www.freecodecamp.org/news/git-ssh-how-to',
12 'https://djangopackages.org',
13 )
14 # start_urls = ['https://django-dynamic-scraper.readthedocs.io/en/latest/getting_started.html',]
15
16 def parse(self, response):
17 l.add_xpath('title', '//*[@itemprop="name"][1]/text()',
18 MapCompose(unicode.strip, unicode.title))
19 l.add_xpath('price', './/*[@itemprop="price"][1]/text()',
20 MapCompose(lambda i: i.replace(',', ''), float),
21 re='[,.0-9]+')
22 l.add_xpath('description', '//*[@itemprop="description"]'
23 '[1]/text()', MapCompose(unicode.strip), Join())
24 l.add_xpath('address',
25 '//*[@itemtype="http://schema.org/Place"][1]/text()',
26 MapCompose(unicode.strip))
27 l.add_xpath('image_urls', '//*[@itemprop="image"][1]/@src',
28 MapCompose(
29 lambda i: urlparse.urljoin(response.url, i)))
30
31 # l.add_xpath('title', '//*[@itemprop="name"][1]/text()')
32 # l.add_xpath('price', './/*[@itemprop="price"]'
33 # '[1]/text()', re='[,.0-9]+')
34 # l.add_xpath('description', '//*[@itemprop="description"]'
35 # '[1]/text()')
36 # l.add_xpath('address', '//*[@itemtype='
37 # '"http://schema.org/Place"][1]/text()')
38 # l.add_xpath('image_urls', '//*[@itemprop="image"][1]/@src')
39 return l.load_item()
40
41
42
43
44 # item = PropertiesItem()
45 # item['title'] = response.xpath(
46 # '//*[@id="myrotatingnav"]/div/div[1]').extract()
47 # # item['price'] = response.xpath(
48 # # '//*[@itemprop="price"][1]/text()').re('[.0-9]+')
49 # item['description'] = response.xpath(
50 # '//*[@id="myrotatingnav"]/div/div[1]/a[1]').extract()
51 # # item['address'] = response.xpath(
52 # # '//*[@itemtype="http://schema.org/'
53 # # 'Place"][1]/text()').extract()
54 # # item['image_urls'] = response.xpath(
55 # # '//*[@itemprop="image"][1]/@src').extract()
56 # return item
57
58
59
60
61 # self.log("title: %s" % response.xpath(
62 # '//*[@itemprop="name"][1]/text()').extract())
63 # self.log("price: %s" % response.xpath(
64 # '//*[@itemprop="price"][1]/text()').re('[.0-9]+'))
65 # self.log("description: %s" % response.xpath(
66 # '//*[@itemprop="description"][1]/text()').extract())
67 # self.log("address: %s" % response.xpath(
68 # '//*[@itemtype="http://schema.org/'
69 # 'Place"][1]/text()').extract())
70 # self.log("image_urls: %s" % response.xpath(
71 # '//*[@itemprop="image"][1]/@src').extract())
72
| 22 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 # Generated by Django 1.11.29 on 2021-02-24 08:54
3 from __future__ import unicode_literals
4
5 from django.db import migrations, models
6 import open_news.models
7
8
9 class Migration(migrations.Migration):
10
11 dependencies = [
12 ('open_news', '0001_initial'),
13 ]
14
15 operations = [
16 migrations.CreateModel(
17 name='Document',
18 fields=[
19 ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
20 ('file', models.FileField(upload_to=open_news.models.upload_location)),
21 ],
22 ),
23 ]
| 9 - refactor: too-few-public-methods
|
1 # -*- coding: utf-8 -*-
2 # define the fields to scrape and store in the database
3 from scrapy.item import Item,Field
4 class TcZufangItem(Item):
5     # post title
6     title=Field()
7     # rent
8     money=Field()
9     # rental method
10     method=Field()
11     # district
12     area=Field()
13     # residential community
14     community=Field()
15     # post detail url
16     targeturl=Field()
17     # post publish time
18     pub_time=Field()
19     # city
20     city=Field()
21     # contact phone
22     phone= Field()
23     # image 1
24     img1 = Field()
25     # image 2
26     img2 = Field()
27
28
29
| 4 - refactor: too-few-public-methods
|
1 import requests
2 import json
3
4 url='https://www.scraping-bot.io/rawHtmlPage.html'
5 username = 'yourUsername'
6 apiKey = 'yourApiKey'
7
8 apiUrl = "http://api.scraping-bot.io/scrape/raw-html"
9
10 payload = json.dumps({"url":url})
11 headers = {
12 'Content-Type': "application/json"
13 }
14
15 response = requests.request("POST", apiUrl, data=payload, auth=(username,apiKey), headers=headers)
16
17 print(response.text)
18
19
20
21 import requests
22 import json
23
24 url='https://www.scraping-bot.io/rawHtmlPage.html'
25 username = 'yourUsername'
26 apiKey = 'yourApiKey'
27
28 apiEndPoint = "http://api.scraping-bot.io/scrape/raw-html"
29
30 options = {
31 "useChrome": False,#set to True if you want to use headless chrome for javascript rendering
32 "premiumProxy": False, # set to True if you want to use premium proxies Unblock Amazon,Google,Rakuten
33 "proxyCountry": None, # allows you to choose a country proxy (example: proxyCountry:"FR")
34 "waitForNetworkRequests":False # wait for most ajax requests to finish until returning the Html content (this option can only be used if useChrome is set to true),
35 # this can slowdown or fail your scraping if some requests are never ending only use if really needed to get some price loaded asynchronously for example
36 }
37
38 payload = json.dumps({"url":url,"options":options})
39 headers = {
40 'Content-Type': "application/json"
41 }
42
43 response = requests.request("POST", apiEndPoint, data=payload, auth=(username,apiKey), headers=headers)
44
45 print(response.text)
46
47 https://libraries.io/api/NPM/base62?api_key=306cf1684a42e4be5ec0a1c60362c2ef
48 import requests
49 import json
50
51 url='https://www.scraping-bot.io/example-ebay.html'
52 username = 'yourUsername'
53 apiKey = '306cf1684a42e4be5ec0a1c60362c2ef'
54
55 apiEndPoint = "http://api.scraping-bot.io/scrape/retail"
56
57 payload = json.dumps({"url":url,"options":options})
58 headers = {
59 'Content-Type': "application/json"
60 }
61
62 response = requests.request("POST", apiEndPoint, data=payload, auth=(username,apiKey), headers=headers)
63
64 print(response.text) | 47 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 from django.shortcuts import render
3 from . models import ItemInfo
4 from django.core.paginator import Paginator
5 from mongoengine import connect
6 connect("zufang_fs",host='127.0.0.1')
7 # Create your views here.
8 def document(request):
9 limit=15
10 zufang_info=ItemInfo.objects
11 pageinator=Paginator(zufang_info,limit)
12 page=request.GET.get('page',1)
13 loaded = pageinator.page(page)
14 cities=zufang_info.distinct("city")
15 citycount=len(cities)
16 context={
17 'itemInfo':loaded,
18 'counts':zufang_info.count,
19 'cities':cities,
20 'citycount':citycount
21 }
22 return render(request,'document.html',context)
23 def binzhuantu():
24     ## pie chart data
25 citys = []
26 zufang_info = ItemInfo.objects
27 sums = float(zufang_info.count())
28 cities = zufang_info.distinct("city")
29 for city in cities:
30 length = float(len(zufang_info(city=city)))
31 ocu = round(float(length / sums * 100))
32 item = [city.encode('raw_unicode_escape'), ocu]
33 citys.append(item)
34 return citys
35
36 def chart(request):
37     ## pie chart
38     citys=binzhuantu()
39     # # bar chart
40 # zufang_info = ItemInfo.objects
41 # res = zufang_info.all()
42 # cities = zufang_info.distinct("city")
43 # cc = []
44 # time = []
45 # counts = []
46 # for re in res:
47 # if re.pub_time != None:
48 # if re.pub_time > '2017-03-01':
49 # if re.pub_time < '2017-04-01':
50 # time.append(re.city)
51 # for city in cities:
52 # count = time.count(city)
53 # counts.append(count)
54 # item = city.encode('utf8')
55 # cc.append(item)
56 context ={
57 # 'count': counts,
58 # 'citys': cc,
59 'cities':citys,
60 }
61 return render(request,'chart.html',context)
62 def cloud(request):
63 zufang_info = ItemInfo.objects
64 res = zufang_info.distinct('community')
65 length=len(res)
66 context={
67 'count':length,
68 'wenzi':res
69 }
70 return render(request, 'test.html',context)
71
72 def test(request):
73 zufang_info = ItemInfo.objects
74 rr=[]
75 res = zufang_info.distinct('community')
76 i=0
77 while i<500:
78 item=res[i]
79 rr.append(item)
80 i=i+1
81 length = len(res)
82 context = {
83 'count': length,
84 'wenzi': rr
85 }
86 return render(request,'test.html',context) | 3 - error: relative-beyond-top-level
|
1 # -*- coding: utf-8 -*-
2 from scrapy_redis.spiders import RedisSpider
3 from scrapy.selector import Selector
4 class testSpider(RedisSpider):
5 name = 'testip'
6 redis_key = 'testip'
7 def parse(self,response):
8 response_selector = Selector(response)
9 code=response_selector.xpath(r'//div[contains(@class,"well")]/p[1]/code/text()')
10 print code
11
| 10 - error: syntax-error
|
1 from django.urls import path,include
2 from blog import views
3
4
5 urlpatterns = [
6 # path('', views.index, name='base'),
7 path('', views.list, name='list'),
8
9 # path('home/', views.home, name='home'),
10 # path('search/', views.Search, name='home_search'),
11
12 # path('', views.home, name='home'),
13 ]
| 1 - warning: unused-import
|
1
2 import os
3 from django.urls import reverse_lazy
4
5 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
6 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
7
8
9 SECRET_KEY = 'vsfygxju9)=k8qxmc9!__ng%dooyn-w7il_z+w)grvkz4ks!)u'
10
11 # SECURITY WARNING: don't run with debug turned on in production!
12 DEBUG = True
13
14 ALLOWED_HOSTS = []
15
16
17 # Application definition
18
19 INSTALLED_APPS = [
20 "django.contrib.humanize.apps.HumanizeConfig",
21 "django.contrib.auth.apps.AuthConfig",
22 "django.contrib.contenttypes.apps.ContentTypesConfig",
23 "django.contrib.sessions.apps.SessionsConfig",
24 "django.contrib.sites.apps.SitesConfig",
25 "django.contrib.messages.apps.MessagesConfig",
26 "django.contrib.staticfiles.apps.StaticFilesConfig",
27 "django.contrib.admin.apps.AdminConfig",
28 "django.contrib.admindocs.apps.AdminDocsConfig",
29 "sekizai",
30 "sorl.thumbnail",
31 "django_nyt.apps.DjangoNytConfig",
32 "wiki.apps.WikiConfig",
33 "wiki.plugins.macros.apps.MacrosConfig",
34 "wiki.plugins.help.apps.HelpConfig",
35 "wiki.plugins.links.apps.LinksConfig",
36 "wiki.plugins.images.apps.ImagesConfig",
37 "wiki.plugins.attachments.apps.AttachmentsConfig",
38 "wiki.plugins.notifications.apps.NotificationsConfig",
39 "wiki.plugins.editsection.apps.EditSectionConfig",
40 "wiki.plugins.globalhistory.apps.GlobalHistoryConfig",
41 "mptt",
42 ]
43
44
45 MIDDLEWARE = [
46 "django.contrib.sessions.middleware.SessionMiddleware",
47 "django.middleware.common.CommonMiddleware",
48 "django.middleware.csrf.CsrfViewMiddleware",
49 "django.contrib.auth.middleware.AuthenticationMiddleware",
50 "django.contrib.messages.middleware.MessageMiddleware",
51 "django.middleware.clickjacking.XFrameOptionsMiddleware",
52 "django.middleware.security.SecurityMiddleware",
53 ]
54 SITE_ID=1
55
56 ROOT_URLCONF = 'wikidj.urls'
57
58 TEMPLATES = [
59 {
60 "BACKEND": "django.template.backends.django.DjangoTemplates",
61 "DIRS": [
62 os.path.join(BASE_DIR, "templates"),
63 ],
64 "APP_DIRS": True,
65 "OPTIONS": {
66 "context_processors": [
67 "django.contrib.auth.context_processors.auth",
68 "django.template.context_processors.debug",
69 "django.template.context_processors.i18n",
70 "django.template.context_processors.request",
71 "django.template.context_processors.tz",
72 "django.contrib.messages.context_processors.messages",
73 "sekizai.context_processors.sekizai",
74 ],
75 "debug": DEBUG,
76 },
77 },
78 ]
79 WSGI_APPLICATION = 'wikidj.wsgi.application'
80
81
82 # Database
83 # https://docs.djangoproject.com/en/2.2/ref/settings/#databases
84
85 DATABASES = {
86 'default': {
87 'ENGINE': 'django.db.backends.sqlite3',
88 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
89 }
90 }
91
92
93 # Password validation
94 # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
95
96 AUTH_PASSWORD_VALIDATORS = [
97 {
98 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
99 },
100 {
101 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
105 },
106 {
107 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
108 },
109 ]
110
111
112 # Internationalization
113 # https://docs.djangoproject.com/en/2.2/topics/i18n/
114
115 LANGUAGE_CODE = 'en-us'
116
117 TIME_ZONE = 'UTC'
118
119 USE_I18N = True
120
121 USE_L10N = True
122
123 USE_TZ = True
124
125
126 # Static files (CSS, JavaScript, Images)
127 # https://docs.djangoproject.com/en/2.2/howto/static-files/
128
129 STATIC_URL = "/static/"
130 STATIC_ROOT = os.path.join(BASE_DIR, "static")
131 MEDIA_ROOT = os.path.join(BASE_DIR, "media")
132 MEDIA_URL = "/media/"
133
134
135 WIKI_ANONYMOUS_WRITE = True
136 WIKI_ANONYMOUS_CREATE = False
137 LOGIN_REDIRECT_URL = reverse_lazy('wiki:get', kwargs={'path': ''})
138
139
140
141 # urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | Clean Code: No Issues Detected
|
1
2 from django import template
3 from django.db.models import Q
4 from django.conf import settings
5 from django.template.defaultfilters import stringfilter
6 from django.utils.safestring import mark_safe
7 import random
8 from django.urls import reverse
9 # from blog.models import Article, Category, Tag, Links, SideBar, LinkShowType
10 from django.utils.encoding import force_text
11 from django.shortcuts import get_object_or_404
12 import hashlib
13 import urllib
14 # from comments.models import Comment
15 from DjangoBlog.utils import cache_decorator, cache
16 from django.contrib.auth import get_user_model
17 from oauth.models import OAuthUser
18 from DjangoBlog.utils import get_current_site
19 import logging
20
21
22
23 logger = logging.getLogger(__name__)
24
25 register = template.Library()
26
27
28 @register.simple_tag
29 def timeformat(data):
30 try:
31 return data.strftime(settings.TIME_FORMAT)
32 # print(data.strftime(settings.TIME_FORMAT))
33 # return "ddd"
34 except Exception as e:
35 logger.error(e)
36 return ""
37
38
39 @register.simple_tag
40 def datetimeformat(data):
41 try:
42 return data.strftime(settings.DATE_TIME_FORMAT)
43 except Exception as e:
44 logger.error(e)
45 return ""
46
47
48
49 @register.filter(is_safe=True)
50 @stringfilter
51 def custom_markdown(content):
52 from DjangoBlog.utils import CommonMarkdown
53 return mark_safe(CommonMarkdown.get_markdown(content))
| 34 - warning: broad-exception-caught
43 - warning: broad-exception-caught
3 - warning: unused-import
7 - warning: unused-import
8 - warning: unused-import
10 - warning: unused-import
11 - warning: unused-import
12 - warning: unused-import
13 - warning: unused-import
15 - warning: unused-import
15 - warning: unused-import
16 - warning: unused-import
17 - warning: unused-import
18 - warning: unused-import
|
1 # from core.models import Item
2 from django.shortcuts import render
3 # from django.views.generic import ListView,DetailView
4 from django.shortcuts import render, get_object_or_404
5 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
6 from .models import Post
7 from django.views.generic import (
8 ListView,
9 DetailView,
10 # CreateView,
11 # UpdateView,
12 # DeleteView
13 )
14 from django.core.mail import send_mail
15 from .forms import EmailPostForm
16
17 from core.models import Comment
18 from .forms import EmailPostForm, CommentForm , SearchForm
19 from taggit.models import Tag
20 from django.db.models import Count
21 from django.contrib.postgres.search import SearchVector #Building a search view veter
22
23
24
25 def post_search(request):
26 form= SearchForm()
27 query=None
28 results=[]
29 if 'query' in request.GET:
30 form=SearchForm(request.GET)
31 if form.is_valid():
32 query=form.cleaned_data['query']
33 results=Post.published.annotate(
34 search =SearchVector('title','body'),
35 ).filter(search=query)
36 return render(request,'search.html',{
37 'form':form,
38 'query':query,
39 'results':results
40 })
41
42
43
44
45
46
47
48
49
50 def post_share(request, post_id):
51 # Retrieve post by id
52 post = get_object_or_404(Post, id=post_id, status='published')
53 sent = False
54 if request.method == 'POST':
55
56 # Form was submitted
57 form = EmailPostForm(request.POST)
58 if form.is_valid():
59 # Form fields passed validation
60 cd = form.cleaned_data
61 # ... send email
62 post_url = request.build_absolute_uri(
63 post.get_absolute_url())
64 subject = f"{cd['name']} recommends you read "f"{post.title}"
65 message = f"Read {post.title} at {post_url}\n\n" f"{cd['name']}\'s comments: {cd['comments']}"
66 send_mail(subject, message, 'rp9545416@gmail.com',[cd['to']])
67 sent = True
68
69 else:
70 form=EmailPostForm()
71 return render(request, 'share.html', {'post': post,
72 'form': form,
73 'sent': sent})
74
75
76 class PostDetailView(DetailView):
77 model = Post
78 class PostListView(ListView):
79 queryset=Post.published.all()
80 context_object_name='posts'
81 paginate_by=2
82 template_name='list.html'
83
84
85 def post_list(request , tag_slug=None):
86 object_list=Post.published.all()
87 tag=None
88
89 if tag_slug:
90 tag=get_object_or_404(Tag,slug=tag_slug)
91 object_list=object_list.filter(tags__in=[tag])
92 paginator=Paginator(object_list, 2) # 3 posts in each page
93 page=request.GET.get('page')
94 try:
95 posts=paginator.page(page)
96 except PageNotAnInteger:
97 # If page is not an integer deliver the first page
98 posts=paginator.page(1)
99 except EmptyPage:
100 # If page is out of range deliver last page of results
101 posts=paginator.page(paginator.num_pages)
102
103 return render(request,
104 'list.html',
105 {'posts': posts,
106 'page': page,
107 'tag': tag})
108
109
110 def post_detail(request, year, month, day, post):
111 post=get_object_or_404(Post, slug = post,
112 status = 'published',
113 publish__year = year,
114 publish__month = month,
115 publish__day = day)
116
117 comments=post.comments.filter(active=True)
118 new_comment=None
119
120 # List of similar posts
121 post_tags_ids = post.tags.values_list('id', flat=True)
122 similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
123 similar_posts=similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags','-publish')[:4]
124
125 if request.method== 'POST':
126         # a comment was posted
127         comment_form=CommentForm(data=request.POST)
128         if comment_form.is_valid():
129             # create the new comment object without saving it yet
130             new_comment=comment_form.save(commit=False)
131
132 new_comment.post
133 new_comment.save()
134 else:
135 comment_form=CommentForm()
136
137
138 return render(request,
139 'blog/post_detail.html',
140 {'post': post,
141 'comments': comments,
142 'new_comment': new_comment,
143 'comment_form': comment_form,
144 'similar_posts': similar_posts})
145
146
147 def home(request):
148
149 return render(request, 'base.html')
150
151
152 def about(request):
153 return render(request, 'about.html')
154
155 # def product(request):
156 # return render (request ,'product.html' )
157
158 # class ItemdDetailView(DetailView):
159 # model=Item
160 # template_name="product.html"
161
162
163 # def checkout(request):
164 # return render (request ,'checkout.html')
| 4 - warning: reimported
6 - error: relative-beyond-top-level
15 - error: relative-beyond-top-level
18 - error: relative-beyond-top-level
18 - warning: reimported
64 - error: possibly-used-before-assignment
50 - refactor: inconsistent-return-statements
76 - refactor: too-few-public-methods
78 - refactor: too-few-public-methods
132 - warning: pointless-statement
17 - warning: unused-import
|
1 from django.db import models
2 from tinymce.models import HTMLField
3 from django.utils import timezone
4 from django.contrib.auth.models import User
5 from django.urls import reverse
6
7
8 class Post(models.Model):
9 title = models.CharField(max_length=100)
10 content = models.TextField()
11 description =HTMLField()
12
13 date_posted = models.DateTimeField(default=timezone.now)
14 author = models.ForeignKey(User, on_delete=models.CASCADE)
15
16 def __str__(self):
17 return self.title
18
19 def get_absolute_url(self):
20 return reverse('post-detail', kwargs={'pk': self.pk})
21
22
23
24
25
26
27
28
29 class feeds(models.Model):
30 title = models.CharField(max_length=100)
31 overview = models.TextField(max_length=20)
32 timestamp = models.DateTimeField(auto_now_add=True)
33 description =HTMLField()
34
35 thumbnail = models.ImageField()
36 featured = models.BooleanField()
37 # content = HTMLField()
38
39
40
41 def __str__(self):
42 return self.title
43
44 class Products(models.Model):
45 title =models.CharField(max_length=100)
46 description =models.TextField(blank=True)
47 price =models.DecimalField(decimal_places=2,max_digits=1000)
48 summary =models.TextField(blank=False, null=False)
49 # featured =models.BooleanField()
50
51
52
53
54 class MyModel(models.Model):
55 ...
56 content = HTMLField() | 45 - warning: bad-indentation
46 - warning: bad-indentation
47 - warning: bad-indentation
48 - warning: bad-indentation
29 - refactor: too-few-public-methods
44 - refactor: too-few-public-methods
55 - warning: unnecessary-ellipsis
54 - refactor: too-few-public-methods
|
1 from django.http import response
2 from django.shortcuts import render
3 from .forms import DocumentForm
4 import requests
5
6 from django.shortcuts import render
7 from django.conf import settings
8 from django.core.files.storage import FileSystemStorage
9
10 def simple_upload(request):
11 if request.method == 'POST':
12 myfile = DocumentForm(request.POST, request.FILES)
13
14
15 myfile = request.FILES['file']
16
17 fs = FileSystemStorage()
18
19 filename = fs.save(myfile.name, myfile)
20 uploaded_file_url = fs.url(filename)
21
22 return render(request, 'imple_upload.html', {
23 'uploaded_file_url': uploaded_file_url
24 })
25 return render(request, 'simple_upload.html')
26
27 def model_form_upload(request):
28 if request.method == 'POST':
29 form = DocumentForm(request.POST, request.FILES)
30 if form.is_valid():
31 form.save()
32 return redirect('home')
33 else:
34 form = DocumentForm()
35 return render(request, 'core/model_form_upload.html', {
36 'form': form
37 })
38
39
40 def api(request):
41
42 api_key ='306cf1684a42e4be5ec0a1c60362c2ef'
43 name='npm'
44
45 api_url="https://libraries.io/api/search?q={}&api_key={}".format(name ,api_key)
46 response=requests.get(api_url)
47 response_dict = response.json()
48
49 return render(request, 'api.html',{'api': response_dict, }
50
51
52 )
53
54
55
56
57
58
59
60
61
62
63 # return render(request,'search.html',{
64 # 'url':url,
65 # # 'query':query,
66 # # 'results':results
67 # })
68
69
| 42 - warning: bad-indentation
43 - warning: bad-indentation
45 - warning: bad-indentation
46 - warning: bad-indentation
47 - warning: bad-indentation
49 - warning: bad-indentation
3 - error: relative-beyond-top-level
6 - warning: reimported
32 - error: undefined-variable
46 - warning: redefined-outer-name
46 - warning: missing-timeout
1 - warning: unused-import
7 - warning: unused-import
|
1 # -*- coding: utf-8 -*-
2 import smtplib
3 from email.mime.text import MIMEText
4 from email.header import Header
5 def sendMessage_warning():
6 server = smtplib.SMTP('smtp.163.com', 25)
7 server.login('seven_2016@163.com', 'ssy102009')
8 msg = MIMEText('爬虫slave被封警告!请求解封!', 'plain', 'utf-8')
9 msg['From'] = 'seven_2016@163.com <seven_2016@163.com>'
10 msg['Subject'] = Header(u'爬虫被封禁警告!', 'utf8').encode()
11 msg['To'] = u'seven <751401459@qq.com>'
12 server.sendmail('seven_2016@163.com', ['751401459@qq.com'], msg.as_string())
| 10 - warning: redundant-u-string-prefix
11 - warning: redundant-u-string-prefix
|
1 #Stage 2 Update (Python 3)
2 from __future__ import unicode_literals
3 from django.utils.encoding import python_2_unicode_compatible
4 from django.db import models
5 from django.db.models.signals import pre_delete
6 from django.dispatch import receiver
7 from scrapy_djangoitem import DjangoItem
8 from dynamic_scraper.models import Scraper, SchedulerRuntime
9
10
11 @python_2_unicode_compatible
12 class NewsWebsite(models.Model):
13 name = models.CharField(max_length=200)
14 url = models.URLField()
15 scraper = models.ForeignKey(Scraper, blank=True, null=True, on_delete=models.SET_NULL)
16 scraper_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
17
18 def __str__(self):
19 return self.name
20
21
22 @python_2_unicode_compatible
23 class Article(models.Model):
24 title = models.CharField(max_length=200)
25 news_website = models.ForeignKey(NewsWebsite)
26 description = models.TextField(blank=True)
27 url = models.URLField(blank=True)
28 thumbnail = models.CharField(max_length=200, blank=True)
29 checker_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
30
31 def __str__(self):
32 return self.title
33
34
35 class ArticleItem(DjangoItem):
36 django_model = Article
37
38
39 @receiver(pre_delete)
40 def pre_delete_handler(sender, instance, using, **kwargs):
41 if isinstance(instance, NewsWebsite):
42 if instance.scraper_runtime:
43 instance.scraper_runtime.delete()
44
45 if isinstance(instance, Article):
46 if instance.checker_runtime:
47 instance.checker_runtime.delete()
48
49 pre_delete.connect(pre_delete_handler)
50
51
52 def upload_location(instance, filename):
53 return '%s/documents/%s' % (instance.user.username, filename)
54
55 class Document(models.Model):
56 # user = models.ForeignKey(settings.AUTH_USER_MODEL)
57 # category = models.ForeignKey(Category, on_delete=models.CASCADE)
58 file = models.FileField(upload_to=upload_location)
59
60 def __str__(self):
61 return self.filename()
62
63 def filename(self):
64 return os.path.basename(self.file.name) | 12 - refactor: too-few-public-methods
23 - refactor: too-few-public-methods
35 - refactor: too-few-public-methods
40 - warning: unused-argument
40 - warning: unused-argument
40 - warning: unused-argument
64 - error: undefined-variable
|
1 from django.contrib.sitemaps import Sitemap
2 from . models import Post
3
4
5
6 class PostSitemap(Sitemap):
7 changefreq='weekly' # You create a custom sitemap by inheriting the Sitemap class of the sitemaps
8 priority = 0.9 # module. The changefreq and priority attributes indicate the change frequency
9 # of your post pages and their relevance in your website (the maximum value is 1 ).
10
11
12 def items(self):
13 return Post.published.all()
14
15
16 def lastmod(self,obj):
17 return obj.updated
18
| 2 - error: relative-beyond-top-level
|
1 # Define here the models for your scraped items
2 #
3 # See documentation in:
4 # https://docs.scrapy.org/en/latest/topics/items.html
5
6 import scrapy
7 from scrapy.item import Item,Field
8
9
10 class PropertiesItem():
11
12 title=Field()
13 price=Field()
14 description=Field()
15 address = Field()
16 image_urls = Field()
17
18 #imagescalculaitons
19 images = Field()
20 locations = Field()
21 #housekeeping
22 url=Field()
23 project = Field()
24 spider=Field()
25 server = Field()
26 date=Field()
27
| 10 - refactor: too-few-public-methods
6 - warning: unused-import
7 - warning: unused-import
|
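In the sample above, PropertiesItem does not subclass scrapy.Item, so its Field() attributes are plain class attributes rather than declared item fields. A minimal corrected sketch, with field names taken from the sample (one possible reading, not the project's canonical item definition):

    import scrapy

    class PropertiesItem(scrapy.Item):
        # primary fields
        title = scrapy.Field()
        price = scrapy.Field()
        description = scrapy.Field()
        address = scrapy.Field()
        image_urls = scrapy.Field()
        # housekeeping fields
        url = scrapy.Field()
        spider = scrapy.Field()
        date = scrapy.Field()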
1 # -*- coding: utf-8 -*-
2
3
4 import redis
5 redis_cli = redis.StrictRedis()
6 redis_cli.incr("pm_count")
7
| Clean Code: No Issues Detected
|
1
2 from .settings import *
3 from .dev import *
4 # Test codehilite with pygments
5
6 WIKI_MARKDOWN_KWARGS = {
7 "extensions": [
8 "codehilite",
9 "footnotes",
10 "attr_list",
11 "headerid",
12 "extra",
13 ]
14 } | 2 - error: relative-beyond-top-level
2 - warning: wildcard-import
3 - error: relative-beyond-top-level
3 - warning: wildcard-import
|
1 # -*- coding: utf-8 -*-
2 """
3 -------------------------------------------------
4 File Name: util
5 Description :
6 Author : JHao
7 date: 2020/9/30
8 -------------------------------------------------
9 Change Activity:
10 2020/9/30:
11 -------------------------------------------------
12 """
13 __author__ = 'JHao'
14
15 from math import ceil
16
17
18 class PageInfo(object):
19
20 def __init__(self, page, total, limit=8):
21 """
22
23         :param page: current page number
24         :param total: total number of records
25         :param limit: records per page
26 """
27 self._limit = limit
28 self._total = total
29 self._page = page
30 self._index_start = (int(page) - 1) * int(limit)
31 self._index_end = int(page) * int(limit)
32
33 @property
34 def index_start(self):
35 return self._index_start
36
37 @property
38 def index_end(self):
39 return self._index_end
40
41 @property
42 def current_page(self):
43 return self._page
44
45 @property
46 def total_page(self):
47 return ceil(self._total / self._limit)
48
49 @property
50 def total_number(self):
51 return self._total
| 18 - refactor: useless-object-inheritance
|
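For illustration, a minimal usage sketch of the PageInfo helper above; the records list is a made-up stand-in for whatever list or queryset is being paginated:

    records = list(range(30))                                      # 30 items in total
    info = PageInfo(page=2, total=len(records), limit=8)
    current = records[info.index_start:info.index_end]             # items 8..15 land on page 2
    print(info.current_page, info.total_page, info.total_number)   # 2 4 30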
1 import scrapy
2
3 def authentication_failed(response):
4 pass
5
6
7
8 class LoginSpider(scrapy.Spider):
9 name='ex'
10 start_urls=['https://www.facebook.com/login.php']
11
12 def parse(self,response):
13 return scrapy.FormRequest.from_response(
14 response,formdata={'username':'john','password':'secret'},
15 callback=self.after_login
16 )
17
18 def after_login(self,response):
19 if authentication_failed(response):
20 self.logger.error('Login Failed')
21 return
22
23
24
25
26
27 page = response.url.split("/")[-2]
28 filename = f'quotes-{page}.html'
29 with open(filename, 'wb') as f:
30 f.write(response.body) | 3 - warning: unused-argument
|
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.db import migrations, models
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 ('scrapyproject', '0008_scrapersdeploy'),
11 ]
12
13 operations = [
14 migrations.AddField(
15 model_name='linkgendeploy',
16 name='version',
17 field=models.IntegerField(default=0),
18 ),
19 migrations.AddField(
20 model_name='scrapersdeploy',
21 name='version',
22 field=models.IntegerField(default=0),
23 ),
24 ]
| 7 - refactor: too-few-public-methods
|
1 from . settings import *
2 DEBUG = True
3
4
5 for template_engine in TEMPLATES:
6 template_engine["OPTIONS"]["debug"] = True
7
8
9 EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
10
11
12 try:
13 import debug_toolbar # @UnusedImport
14
15 MIDDLEWARE = list(MIDDLEWARE) + [
16 "debug_toolbar.middleware.DebugToolbarMiddleware",
17 ]
18 INSTALLED_APPS = list(INSTALLED_APPS) + ["debug_toolbar"]
19 INTERNAL_IPS = ("127.0.0.1",)
20 DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False}
21 except ImportError:
22 pass | 1 - error: relative-beyond-top-level
1 - warning: wildcard-import
5 - error: undefined-variable
15 - error: used-before-assignment
18 - error: used-before-assignment
13 - warning: unused-import
|
1 # Define here the models for your scraped items
2 #
3 # See documentation in:
4 # https://docs.scrapy.org/en/latest/topics/items.html
5
6 import scrapy
7
8 from scrapy import Item, Field
9 # define the fields for your item here like:
10 #
11 class SainsburysItem(scrapy.Item):
12 name = scrapy.Field()
13
14
15
16
17 class SainsburysItem(Item):
18 url = Field()
19 product_name = Field()
20 product_image = Field()
21 price_per_unit = Field()
22 unit = Field()
23 rating = Field()
24 product_reviews = Field()
25 item_code = Field()
26 nutritions = Field()
27 product_origin = Field()
28
29
30 class FlatSainsburysItem(Item):
31 url = Field()
32 product_name = Field()
33 product_image = Field()
34 price_per_unit = Field()
35 unit = Field()
36 rating = Field()
37 product_reviews = Field()
38 item_code = Field()
39 product_origin = Field()
40 energy = Field()
41 energy_kj = Field()
42 kcal = Field()
43 fibre_g = Field()
44 carbohydrates_g = Field()
45 of_which_sugars = Field()
| 11 - refactor: too-few-public-methods
17 - error: function-redefined
17 - refactor: too-few-public-methods
30 - refactor: too-few-public-methods
|
1
2 from turtle import Turtle
3
4 STARTING_POSITIONS = [(0, 0), (-20, 0), (-40, 0)]
5 MOVE_DISTANCE = 20
6 UP = 90
7 DOWN = 270
8 RIGHT = 0
9 LEFT = 180
10
11
12 class Snake:
13 # The code here is going to determine what should happen when we initialize a new snake object
14 def __init__(self):
15 # below we create a new attribute for our class
16 self.segments = []
17 # We create a snake:
18 self.create_snake()
19 self.head = self.segments[0]
20
21
22 # CREATING SNAKE (2 functions)
23 def create_snake(self):
24 for position in STARTING_POSITIONS:
25 # we are calling the function and passing there the position that we are looping through
26 self.add_segment(position)
27
28 def add_segment(self, position):
29 new_segment = Turtle("square")
30 new_segment.color("white")
31 new_segment.penup()
32 new_segment.goto(position)
33 self.segments.append(new_segment)
34
35 # Creating a snake extend function
36 def extend(self):
37 # we are using the list of segments and counting from the end of list to get the last one segment of the snake
38 # after we are going to hold segment's position using a method of Turtle class
39 # then we add the new_segment to the same position as the last segment
40 self.add_segment(self.segments[-1].position())
41
42
43
44
45 # Creating another method for snake class
46 def move(self):
47 for seg_num in range(len(self.segments)-1, 0, -1):
48 new_x = self.segments[seg_num - 1].xcor()
49 new_y = self.segments[seg_num - 1].ycor()
50 self.segments[seg_num].goto(new_x, new_y)
51
52 self.head.forward(MOVE_DISTANCE)
53
54 def up(self):
55 # if the current heading is pointed down it can't move up
56 # because the snake can't go backword
57 if self.head.heading() != DOWN:
58 self.head.setheading(UP)
59
60 def down(self):
61 if self.head.heading() != UP:
62 self.head.setheading(DOWN)
63
64 def left(self):
65 if self.head.heading() != RIGHT:
66 self.head.setheading(LEFT)
67
68 def right(self):
69 if self.head.heading() != LEFT:
70 self.head.setheading(RIGHT)
71
72
73
74
75
| Clean Code: No Issues Detected
|
1
2 from turtle import Screen
3 import time
4 from snake import Snake
5 from food import Food
6 from scoreboard import Score
7
8 # SETTING UP THE SCREEN:
9 screen = Screen()
10 screen.setup(width=600, height=600)
11 screen.bgcolor("black")
12 screen.title("My Snake Game")
13 # to turn off the screen tracer
14 screen.tracer(0)
15
16 # CREATING A SNAKE OBJECT:
17 snake = Snake()
18
19 # CREATING A FOOD OBJECT:
20 food = Food()
21
22 # CREATING A SCORE OBJECT:
23 score = Score()
24
25 # CREATING A KEY CONTROL:
26 screen.listen()
27 # these methods snake.up ,,, we have in a snake class (up = 90, down = 270, left = 180, right = 0)
28 screen.onkey(key="Up", fun=snake.up)
29 screen.onkey(key="Down", fun=snake.down)
30 screen.onkey(key="Left", fun=snake.left)
31 screen.onkey(key="Right", fun=snake.right)
32
33 game_is_on = True
34 while game_is_on:
35 # while the game is on the screen is going to be updated every 0.1 second
36 # It is saying delay for 0.1 sec and then update:
37 screen.update()
38 time.sleep(0.1)
39 # every time the screen refreshes we get the snake to move forwards by one step
40 snake.move()
41
42 # DETECT COLLISION WITH THE FOOD
43 # if the snake head is within 15 px of the food or closer they have collided
44 if snake.head.distance(food) < 15:
45 food.refresh()
46 snake.extend()
47 print("nom nom nom")
48 # when the snake collide with the food we increase the score:
49 score.increase_score()
50
51
52 # # DETECT COLLISION WITH THE TAIL METHOD 1:
53 # # we can loop through our list of segments in the snake
54 # for segment in snake.segments:
55 # # if head has distance from any segment in segments list less than 10 px - that a collision
56 # # if the head collides with any segment in the tail: trigger GAME OVER
57 # # the first segment is the head so we should exclude it from the list of segments
58 # if segment == snake.head:
59 # pass
60 # elif snake.head.distance(segment) < 10:
61 # game_is_on = False
62 # score.game_over()
63
64 # DETECT COLLISION WITH THE TAIL METHOD 2 SLICING:
65 # we can loop through our list of segments in the snake using slicing method of python
66 # we are taking all positions inside the list without the first head segment
67 for segment in snake.segments[1:]:
68 # if head has distance from any segment in segments list less than 10 px - that a collision
69 # if the head collides with any segment in the tail: trigger GAME OVER
70
71 if snake.head.distance(segment) < 10:
72 game_is_on = False
73 score.game_over()
74
75
76
77
78
79
80 # DETECT COLLISION WITH THE WALL
81 if snake.head.xcor() >280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:
82 score.game_over()
83 game_is_on = False
84
85
86
87
88
89
90
91
92
93
94
95 screen.exitonclick() | Clean Code: No Issues Detected
|
1
2 from turtle import Turtle
3 ALIGMENT = "center"
4 FONT = ("Arial", 18, "normal")
5
6
7 class Score(Turtle):
8 def __init__(self):
9 super().__init__()
10 self.score = 0
11 self.color("white")
12 self.penup()
13 self.goto(0, 270)
14 self.write(f"Current score: {self.score}", align="center", font=("Arial", 18, "normal"))
15 self.hideturtle()
16 self.update_score()
17
18 def update_score(self):
19 self.write(f"Current score: {self.score}", align="center", font=("Arial", 18, "normal"))
20
21 def game_over(self):
22 self.goto(0, 0)
23 self.write("GAME OVER", align=ALIGMENT, font=FONT)
24
25 def increase_score(self):
26 self.score += 1
27 # to clear the previous score before we update:
28 self.clear()
29 self.update_score()
30
31
| Clean Code: No Issues Detected
|
1
2 from turtle import Turtle
3 import random
4
5 # we want this Food class to inherit from the Turtle class, so it will have all the capapibilities from
6 # the turtle class, but also some specific things that we want
7
8
9 class Food(Turtle):
10 # creating initializer for this class
11 def __init__(self):
12 # we inherit things from the super class:
13 super().__init__()
14 # below we are using methods from Turtle class:
15 self.shape("circle")
16 self.penup()
17 # normal sise is 20x20, we want to stretch the length and the width for 0.5 so we have 10x10
18 self.shapesize(stretch_len=0.5, stretch_wid=0.5)
19 self.color("blue")
20 self.speed("fastest")
21 # call the method refresh so the food goes in random location
22 self.refresh()
23
24 def refresh(self):
25 # our screen is 600x600
26 # we want to place our food from -280 to 280 in coordinates:
27 random_x = random.randint(-280, 280)
28 random_y = random.randint(-280, 280)
29 # telling our food to go to random_y and random_x:
30 self.goto(random_x, random_y)
31
32 # All this methods will happen as soon as we create a new object
33 # This food object we initialize in main.py
34
| Clean Code: No Issues Detected
|
1 import numpy as np
2 import cv2
3 import imutils
4
5 picture = 'puzzle.jpg'
6
7 def load_transform_img(picture):
8 image = cv2.imread(picture)
9 image = imutils.resize(image, height=800)
10 org = image.copy()
11 #cv2.imshow('orginal', image)
12
13 mask = np.zeros(image.shape[:2], dtype = "uint8")
14 cv2.rectangle(mask, (15, 150), (440, 700), 255, -1)
15 #cv2.imshow("Mask", mask)
16
17 image = cv2.bitwise_and(image, image, mask = mask)
18 #cv2.imshow("Applying the Mask", image)
19
20 image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
21 #cv2.imshow('image', image)
22 blurred = cv2.GaussianBlur(image, (5, 5), 0)
23 edged = cv2.Canny(blurred, 140, 230)
24 #cv2.imshow("Canny", edged)
25
26 (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
27
28 print(len(cnts))
29
30 cv2.fillPoly(edged, pts =cnts, color=(255,255,255))
31 #cv2.imshow('filled', edged)
32
33 fedged = cv2.Canny(edged, 140, 230)
34 #cv2.imshow("fedged", fedged)
35
36 (cnts, _) = cv2.findContours(fedged.copy(), cv2.RETR_EXTERNAL,
37 cv2.CHAIN_APPROX_SIMPLE)
38
39
40 boxes = fedged.copy()
41 #cv2.drawContours(boxes, cnts, 10, (100 , 200, 100), 2)
42 #cv2.imshow("Boxes", boxes)
43
44 image = cv2.bitwise_and(org, org, mask = edged)
45 #cv2.imshow("Applying the Mask2", image)
46
47 puzzlelist = []
48 for (i, c) in enumerate(cnts):
49 (x, y, w, h) = cv2.boundingRect(c)
50
51 print("Box #{}".format(i + 1))
52 box = org[y:y + h, x:x + w]
53 cv2.imwrite(f'temp/box{i+1}.jpg',box)
54 #cv2.imshow("Box", box)
55 gray = cv2.cvtColor(box, cv2.COLOR_BGR2GRAY)
56 #cv2.imshow("gray", gray)
57 mask = np.zeros(gray.shape[:2], dtype = "uint8")
58
59 y1,y2 = 35, 50
60 for i in range(4):
61 cv2.rectangle(mask, (15, y1), (37, y2), 255, -1)
62 y1,y2 = y1+40, y2+40
63
64 #cv2.imshow("Mask2 ", mask)
65 masked = cv2.bitwise_and(gray, gray, mask = mask)
66
67 y1,y2 = 35, 50
68 temp = []
69 for i in range(4):
70 value = masked[y1:y2,15:37]
71 #cv2.imshow(f'val{i}',value)
72 max_val = max(value.flatten())
73 if max_val >= 45:
74 temp.append(max_val)
75 y1,y2 = y1+40, y2+40
76 puzzlelist.append(temp[::-1])
77 #cv2.waitKey(0)
78 return puzzlelist[::-1] , len(cnts)
| 7 - refactor: too-many-locals
7 - warning: redefined-outer-name
40 - warning: unused-variable
|
1 from collections import deque
2 import random
3 import copy
4 import sys
5 import loading_pc
6 import os
7
8
9 def move(new_list, from_, to):
10
11 temp = new_list[from_].pop()
12 for _i in range(0,4):
13 if len(new_list[from_])>0 and abs(int(temp) - int(new_list[from_][-1]))<3 and len(new_list[to])<3:
14 temp = new_list[from_].pop()
15 new_list[to].append(temp)
16 new_list[to].append(temp)
17 return new_list
18
19 def possible_moves(table, boxes):
20 pos=[]
21 for i in range(0, boxes):
22 for j in range(0, boxes):
23 pos.append((i,j))
24
25 possible = []
26 for from_, to in pos:
27 if (len(table[from_])>=1 and len(table[to])<4 and to != from_
28 and (len(table[to]) == 0 or (abs(int(table[from_][-1]) - int(table[to][-1]))<3))
29 and not (len(table[from_])==4 and len(set(table[from_]))==1)
30 and not (len(set(table[from_]))==1 and len(table[to]) ==0)):
31 possible.append((from_,to))
32
33 return possible
34
35
36 def check_win(table):
37 temp = []
38 not_full =[]
39 for i in table:
40 temp.append(len(set(i)))
41 if len(i)<4:
42 not_full.append(i)
43 if len(not_full)>2:
44 return False
45 for i in temp:
46 if i>1:
47 return False
48 print(table)
49 return True
50
51
52 def game_loop(agent, picture):
53
54 table, boxes_position, boxes = loading_pc.load_transform_img(picture)
55 print(len(boxes_position))
56
57 answer = agent(table, boxes)
58 return answer, boxes_position
59
60 def random_agent(table, boxes):
61
62 k=5
63 l=0
64 while True:
65 print(l)
66 table_copy = copy.deepcopy(table)
67 if l%1000 == 0:
68 k+=1
69
70 correct_moves = []
71 for i in range(boxes*k):
72 pmove = possible_moves(table_copy, boxes)
73 if len(pmove) == 0:
74 win = check_win(table_copy)
75 if win:
76 return correct_moves
77 else:
78 break
79 x, y = random.choice(pmove)
80 table_copy = move(table_copy, x, y)
81 correct_moves.append((x,y))
82
83 l+=1
84
85
86 if __name__ == '__main__':
87 answer, boxes_position = game_loop(random_agent, 'level/screen.jpg')
88 print('answer', answer) | 27 - refactor: too-many-boolean-expressions
54 - warning: redefined-outer-name
57 - warning: redefined-outer-name
75 - refactor: no-else-return
71 - warning: unused-variable
1 - warning: unused-import
4 - warning: unused-import
6 - warning: unused-import
|
1 import numpy as np
2 import cv2
3 import imutils
4
5 picture = 'puzzle.jpg'
6
7 def load_transform_img(picture):
8 image = cv2.imread(picture)
9 #image = imutils.resize(image, height=800)
10 org = image.copy()
11 #cv2.imshow('orginal', image)
12
13 mask = np.zeros(image.shape[:2], dtype = "uint8")
14 cv2.rectangle(mask, (680, 260), (1160, 910), 255, -1)
15 #cv2.imshow("Mask", mask)
16
17 image = cv2.bitwise_and(image, image, mask = mask)
18 #cv2.imshow("Applying the Mask", image)
19
20 image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
21 #cv2.imshow('image', image)
22 blurred = cv2.GaussianBlur(image, (5, 5), 0)
23 edged = cv2.Canny(blurred, 140, 230)
24 #cv2.imshow("Canny", edged)
25
26 (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
27
28 #print(len(cnts))
29
30 cv2.fillPoly(edged, pts =cnts, color=(255,255,255))
31 #cv2.imshow('filled', edged)
32
33 fedged = cv2.Canny(edged, 140, 230)
34 #cv2.imshow("fedged", fedged)
35
36 (cnts, _) = cv2.findContours(fedged.copy(), cv2.RETR_EXTERNAL,
37 cv2.CHAIN_APPROX_SIMPLE)
38
39
40 # boxes = fedged.copy()
41 # cv2.drawContours(boxes, cnts, 10, (100 , 200, 100), 2)
42 # cv2.imshow("Boxes", boxes)
43
44 image = cv2.bitwise_and(org, org, mask = edged)
45 #cv2.imshow("Applying the Mask2", image)
46
47 puzzlelist = []
48 boxes_positon = []
49 for (i, c) in enumerate(cnts):
50 (x, y, w, h) = cv2.boundingRect(c)
51
52 #print("Box #{}".format(i + 1))
53 box = org[y:y + h, x:x + w]
54 boxes_positon.append( ( (x+x+w)/2, (y+y+h)/2 ) )
55 cv2.imwrite(f'temp/box{i+1}.jpg',box)
56 #cv2.imshow("Box", box)
57 gray = cv2.cvtColor(box, cv2.COLOR_BGR2GRAY)
58 #cv2.imshow("gray", gray)
59 mask = np.zeros(gray.shape[:2], dtype = "uint8")
60
61 y1,y2 = 45, 60
62 for i in range(4):
63 cv2.rectangle(mask, (15, y1), (37, y2), 255, -1)
64 y1,y2 = y1+45, y2+45
65
66 #cv2.imshow("Mask2 ", mask)
67 masked = cv2.bitwise_and(gray, gray, mask = mask)
68 #cv2.imshow('Masked', masked)
69
70 y1,y2 = 45, 60
71 temp = []
72 for i in range(4):
73 value = masked[y1:y2,15:37]
74 #cv2.imshow(f'val{i}',value)
75 max_val = max(value.flatten())
76 if max_val >= 45:
77 temp.append(max_val)
78 y1,y2 = y1+45, y2+45
79 puzzlelist.append(temp[::-1])
80 #cv2.waitKey(0)
81 print(f'Pozycja początkowa: {puzzlelist[::-1]}\n')
82 print(f'Pozycje boksow: {boxes_positon[::-1]}\n')
83 return puzzlelist[::-1], boxes_positon[::-1], len(cnts)
84
85
86 if __name__ == '__main__':
87 answer, boxes_positon[::-1], boxes = load_transform_img('level/screen.jpg')
88 print(answer)
| 7 - refactor: too-many-locals
7 - warning: redefined-outer-name
87 - error: undefined-variable
3 - warning: unused-import
|
1 import pyautogui as pya
2 import solver
3 import time
4 import glob
5 import os
6 import numpy as np
7 import cv2
8 import shutil
9
10
11 path = os.getcwd()
12 path1 = path + r'/temp'
13 path2 = path +r'/level'
14 try:
15 shutil.rmtree(path1)
16 except:
17 pass
18 try:
19 os.mkdir('temp')
20 except:
21 pass
22 try:
23 os.mkdir('level')
24 except:
25 pass
26
27 bluestacks = pya.locateCenterOnScreen('static/bluestacks.jpg', confidence=.9)
28 print(bluestacks)
29 pya.click(bluestacks)
30 time.sleep(3)
31 full = pya.locateCenterOnScreen('static/full.jpg', confidence=.8)
32 pya.click(full)
33 time.sleep(15)
34 mojeGry = pya.locateCenterOnScreen('static/mojegry.jpg', confidence=.8)
35 print(mojeGry)
36 if mojeGry:
37 pya.click(mojeGry)
38 time.sleep(2)
39 game = pya.locateCenterOnScreen('static/watersort.jpg', confidence=.5)
40 print(game)
41 if game:
42 pya.click(game)
43 time.sleep(6)
44
45 record = pya.locateCenterOnScreen('static/record.jpg', confidence=.8)
46
47 for m in range(4):
48 pya.click(record)
49 time.sleep(4.5)
50 for k in range(10):
51
52 screenshoot = pya.screenshot()
53 screenshoot = cv2.cvtColor(np.array(screenshoot), cv2.COLOR_RGB2BGR)
54 cv2.imwrite("level/screen.jpg", screenshoot)
55
56 moves, boxes_position = solver.game_loop("level/screen.jpg")
57 print(f'Steps to solve level: {len(moves)}')
58 print(moves)
59 for i,j in moves:
60 pya.click(boxes_position[i])
61 time.sleep(0.3)
62 pya.click(boxes_position[j])
63 pya.sleep(2.5)
64
65 next_level = pya.locateCenterOnScreen('static/next.jpg', confidence=.7)
66 pya.click(next_level)
67 time.sleep(3)
68 x_location = pya.locateCenterOnScreen('static/x.jpg', confidence=.7)
69 if x_location:
70 pya.click(x_location)
71 time.sleep(2)
72 x_location = pya.locateCenterOnScreen('static/x.jpg', confidence=.7)
73 if x_location:
74 pya.click(x_location)
75 time.sleep(2)
76 pya.click(record)
77 time.sleep(2)
78 | 16 - warning: bare-except
20 - warning: bare-except
24 - warning: bare-except
4 - warning: unused-import
|
1 # from __future__ import unicode_literals
2
3 nlp = spacy.load(lang)
| 3 - error: undefined-variable
3 - error: undefined-variable
|
1 """This converts a cardbundle.pdf (downloaded from Privateer Press) into
2 Tabletop Simulator deck Saved Objects."""
3
4 import os
5 import argparse
6 import json
7 import threading
8 from shutil import copyfile
9 import PIL.ImageOps
10 from PIL import Image
11 import cloudinary.uploader
12 import cloudinary.api
13 from pdf2image import convert_from_path
14
15 def parse_images(fronts, backs, raw_page):
16 """Chop a page from the PP PDF into its constituent card images."""
17 # 400 DPI
18 # fronts.append(raw_page.crop((188, 303, 1185, 1703)))
19 # fronts.append(raw_page.crop((1193, 303, 2190, 1703)))
20 # fronts.append(raw_page.crop((2199, 303, 3196, 1703)))
21 # fronts.append(raw_page.crop((3205, 303, 4201, 1703)))
22 # backs.append(raw_page.crop((188, 1709, 1185, 3106)))
23 # backs.append(raw_page.crop((1193, 1709, 2190, 3106)))
24 # backs.append(raw_page.crop((2199, 1709, 3196, 3106)))
25 # backs.append(raw_page.crop((3205, 1709, 4201, 3106)))
26 # 200 DPI
27 fronts.append(raw_page.crop((94, 151, 592, 852)))
28 fronts.append(raw_page.crop((597, 151, 1095, 852)))
29 fronts.append(raw_page.crop((1099, 151, 1598, 852)))
30 fronts.append(raw_page.crop((1602, 151, 2101, 852)))
31 backs.append(raw_page.crop((94, 855, 592, 1553)))
32 backs.append(raw_page.crop((597, 855, 1095, 1553)))
33 backs.append(raw_page.crop((1099, 855, 1598, 1553)))
34 backs.append(raw_page.crop((1602, 855, 2101, 1553)))
35 # 150 DPI
36 # fronts.append(page.crop((70,114,444,639)))
37 # fronts.append(page.crop((447,114,821,639)))
38 # fronts.append(page.crop((824,114,1198,639)))
39 # fronts.append(page.crop((1202,114,1576,639)))
40 # backs.append(page.crop((70,641,444,1165)))
41 # backs.append(page.crop((447,641,821,1165)))
42 # backs.append(page.crop((824,641,1198,1165)))
43 # backs.append(page.crop((1202,641,1576,1165)))
44
45 def load_config():
46 """Load your config"""
47 with open('config.json') as json_file:
48 data = json.load(json_file)
49 cloudinary.config(
50 cloud_name=data["cloud_name"],
51 api_key=data["api_key"],
52 api_secret=data["api_secret"]
53 )
54 return data["width"], data["height"], data["saved_objects_folder"]
55
56 def image_upload(name, links):
57 """Upload a compiled TTS-compatible deck template image into Cloudinary."""
58
59 res = cloudinary.uploader.upload(name)
60
61 links[name] = res["url"]
62 os.remove(name)
63 print(links[name])
64
65
66 def package_pages(cards_width, cards_height, fronts, backs, page_count, links):
67 """Stitch together card images into a TTS-compatible deck template image"""
68 pixel_width = 4096//cards_width
69 pixel_height = 4096//cards_height
70 for i in range(page_count):
71 fronts_image = Image.new("RGB", (4096, 4096))
72 backs_image = Image.new("RGB", (4096, 4096))
73
74 for j in range(cards_width * cards_height):
75 if len(fronts) <= i * cards_width * cards_height + j:
76 continue
77 front = fronts[i * cards_width * cards_height + j].resize(
78 (pixel_width, pixel_height), Image.BICUBIC)
79 back = backs[i * cards_width * cards_height + j].resize(
80 (pixel_width, pixel_height), Image.BICUBIC).rotate(180)
81 fronts_image.paste(front, (j % cards_width * pixel_width,
82 (j // cards_width) * pixel_height))
83 backs_image.paste(back, (j % cards_width * pixel_width,
84 (j // cards_width) * pixel_height))
85
86 fronts_image.save(f"f-{i}.jpg")
87 backs_image.save(f"b-{i}.jpg")
88 t_1 = threading.Thread(
89 target=image_upload, args=(f"f-{i}.jpg", links)
90 )
91 t_1.start()
92 t_2 = threading.Thread(
93 target=image_upload, args=(f"b-{i}.jpg", links)
94 )
95 t_2.start()
96 t_1.join()
97 t_2.join()
98
99 def write_deck(deck_json, args, saved_objects_folder, links, num):
100 """Craft the JSON for your final TTS deck Saved Object"""
101 name = args.name + str(num)
102 deck_json = deck_json.replace("DeckName", name)
103 deck_json = deck_json.replace("FrontImageURL", links[f"f-{num}.jpg"])
104 deck_json = deck_json.replace("BackImageURL", links[f"b-{num}.jpg"])
105 deck_json = deck_json.replace("ReplaceGUID", f"{name}C")
106 deck_json = deck_json.replace("ReplaceGUID2", f"{name}D")
107 with open(saved_objects_folder + name + ".json", "w") as deck:
108 deck.write(deck_json)
109 copyfile("warmahordes.png", saved_objects_folder + name + ".png")
110
111 def parse_arguments():
112 """Command line arg parse"""
113 parser = argparse.ArgumentParser(
114 description="Convert Privateer Press card pdfs to Tabletop Simulator saved deck objects."
115 )
116 parser.add_argument(
117 "-name",
118 type=str,
119 help="your deck name - possibly the faction you are converting",
120 )
121 return parser.parse_args()
122
123 def convert():
124 """This converts a cardbundle.pdf (downloaded from Privateer Press) into
125 Tabletop Simulator deck Saved Objects."""
126 args = parse_arguments()
127 width, height, saved_objects_folder = load_config()
128 if args.name is None:
129 args.name = "Warmachine"
130 print("Naming decks: " + args.name + "X")
131
132 # Strip out the card images from the Privateer Press pdfs.
133 card_fronts = []
134 card_backs = []
135 infile = "cardbundle.pdf"
136 pages = convert_from_path(infile, 200, output_folder="pdf_parts")
137 for page in pages:
138 parse_images(card_fronts, card_backs, page)
139 print("Parsing cardbundle.pdf complete.")
140
141 # But we don't want the blank white cards.
142 # I'd rather do a .filter, but I'm concerned a stray pixel would put them outta sync.
143 filtered_fronts = []
144 filtered_backs = []
145 for i, card in enumerate(card_fronts):
146 if PIL.ImageOps.invert(card).getbbox():
147 filtered_fronts.append(card)
148 filtered_backs.append(card_backs[i])
149 print("Stripping out blank cards complete.")
150
151 # Collate the cards into the image format Tabletop Simulator requires.
152 links = {}
153 deck_count = len(card_fronts) // (width*height) + 1
154 package_pages(width, height, filtered_fronts, filtered_backs, deck_count, links)
155 print("Packaging cards into TTS deck template images and uploading to Cloudinary complete.")
156
157 # And let's shove em all in your Saved Objects folder :)
158 deck_json = ""
159 with open("decktemplate.json", "r") as deck_template:
160 deck_json = deck_template.read()
161 for i in range(deck_count):
162 write_deck(deck_json, args, saved_objects_folder, links, i)
163 print("Writing deck jsons into Saved Object folder complete.")
164
165
166 if __name__ == "__main__":
167 convert()
| 47 - warning: unspecified-encoding
66 - refactor: too-many-arguments
66 - refactor: too-many-positional-arguments
66 - refactor: too-many-locals
78 - error: no-member
80 - error: no-member
107 - warning: unspecified-encoding
123 - refactor: too-many-locals
159 - warning: unspecified-encoding
|
1 import os
2
3 def vcs_status():
4 from powerline.lib.vcs import guess
5 repo = guess(os.path.abspath(os.getcwd()))
6 if repo and repo.status():
7 return "X"
8 else:
9 return None
| 4 - warning: bad-indentation
5 - warning: bad-indentation
6 - warning: bad-indentation
7 - warning: bad-indentation
8 - warning: bad-indentation
9 - warning: bad-indentation
6 - refactor: no-else-return
|