code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
from __future__ import absolute_import
import os
###############################################################################
## GENERAL SETTINGS.
###############################################################################
# Sets the preferred mobile device detection method. Available options are:
#
# - 'v3-wrapper': Requires '51degrees-mobile-detector-v3-wrapper' package.
# - 'v3-trie-wrapper': Requires '51degrees-mobile-detector-v3-trie-wrapper' package.
#
DETECTION_METHOD = 'v3-wrapper'
# List of case-sensitive property names to be fetched on every device detection. Leave empty to
# fetch all available properties.
PROPERTIES = ''
# Your 51Degrees license key. This is required if you want to set up the automatic
# data file updates.
LICENSE = ''
###############################################################################
## TRIE DETECTOR SETTINGS.
###############################################################################
# Location of the database file. If not specified, the trie-based detection
# method will not be available. Download the latest 51Degrees-LiteV3.4.trie
# file from http://github.com/51Degrees/Device-Detection/data/.
# Compare database options at https://51degrees.com/compare-data-options .
V3_TRIE_WRAPPER_DATABASE = os.path.expanduser('~/51Degrees/51Degrees-LiteV3.4.trie')
###############################################################################
## PATTERN DETECTOR SETTINGS.
###############################################################################
# Location of the database file. If not specified, the trie-based detection
# method will not be available. Download the latest 51Degrees-LiteV3.2.dat
# file from http://github.com/51Degrees/Device-Detection/data/.
# Compare database options at https://51degrees.com/compare-data-options .
V3_WRAPPER_DATABASE = os.path.expanduser('~/51Degrees/51Degrees-LiteV3.2.dat')
# Size of cache allocated
CACHE_SIZE = 10000
#Size of pool allocated
POOL_SIZE = 20
###############################################################################
## USAGE SHARER SETTINGS.
###############################################################################
# Indicates if usage data should be shared with 51Degrees.com. We recommended
# leaving this value unchanged to ensure we're improving the performance and
# accuracy of the solution.
USAGE_SHARER_ENABLED = True
# The detail that should be provided relating to new devices.
# Modification not required for most users.
USAGE_SHARER_MAXIMUM_DETAIL = True
# URL to send new device data to.
# Modification not required for most users.
USAGE_SHARER_SUBMISSION_URL = 'https://devices.51degrees.com/new.ashx'
# Data submission timeout (seconds).
USAGE_SHARER_SUBMISSION_TIMEOUT = 10
# Minimum queue length to launch data submission.
USAGE_SHARER_MINIMUM_QUEUE_LENGTH = 50
# Used to detect local devices.
# Modification not required for most users.
USAGE_SHARER_LOCAL_ADDRESSES = (
'127.0.0.1',
'0:0:0:0:0:0:0:1',
)
# The content of fields in this list should not be included in the
# request information sent to 51Degrees.
# Modification not required for most users.
USAGE_SHARER_IGNORED_HEADER_FIELD_VALUES = (
'Referer',
'cookie',
'AspFilterSessionId',
'Akamai-Origin-Hop',
'Cache-Control',
'Cneonction',
'Connection',
'Content-Filter-Helper',
'Content-Length',
'Cookie',
'Cookie2',
'Date',
'Etag',
'If-Last-Modified',
'If-Match',
'If-Modified-Since',
'If-None-Match',
'If-Range',
'If-Unmodified-Since',
'IMof-dified-Since',
'INof-ne-Match',
'Keep-Alive',
'Max-Forwards',
'mmd5',
'nnCoection',
'Origin',
'ORIGINAL-REQUEST',
'Original-Url',
'Pragma',
'Proxy-Connection',
'Range',
'Referrer',
'Script-Url',
'Unless-Modified-Since',
'URL',
'UrlID',
'URLSCAN-ORIGINAL-URL',
'UVISS-Referer',
'X-ARR-LOG-ID',
'X-Cachebuster',
'X-Discard',
'X-dotDefender-first-line',
'X-DRUTT-REQUEST-ID',
'X-Initial-Url',
'X-Original-URL',
'X-PageView',
'X-REQUEST-URI',
'X-REWRITE-URL',
'x-tag',
'x-up-subno',
'X-Varnish',
) | 51degrees-mobile-detector | /51degrees-mobile-detector-3.2.18.4.tar.gz/51degrees-mobile-detector-3.2.18.4/fiftyone_degrees/mobile_detector/conf/default.py | default.py |
from __future__ import absolute_import
import os
import sys
import imp
import logging
from fiftyone_degrees.mobile_detector.conf import default
class _Settings(object):
VERSION = '3.2'
try:
import pkg_resources
VERSION = pkg_resources.get_distribution('51degrees-mobile-detector').version
except:
pass
def __init__(self, settings_file_or_module):
# Add default settings.
self._add_settings(default)
# Try to load settings from file/module pointed by the
# environment variable.
try:
__import__(settings_file_or_module)
self._add_settings(sys.modules[settings_file_or_module])
except:
try:
self._add_settings(imp.load_source(
'fiftyone_degrees.conf._file',
settings_file_or_module))
except:
pass
# Try to load setting from the Django settings file.
try:
from django.conf import settings
from django.core import exceptions
try:
for name, value in getattr(settings, 'FIFTYONE_DEGREES_MOBILE_DETECTOR_SETTINGS', {}).iteritems():
self._add_setting(name, value)
except exceptions.ImproperlyConfigured:
pass
except ImportError:
pass
# Add logger instance.
self.logger = logging.getLogger('fiftyone_degrees.mobile_detector')
def _add_settings(self, mod):
'''Updates this dict with mod settings (only ALL_CAPS).
'''
for name in dir(mod):
if name == name.upper():
self._add_setting(name, getattr(mod, name))
def _add_setting(self, name, value):
'''Updates this dict with a specific setting.
'''
if name == 'USAGE_SHARER_IGNORED_HEADER_FIELD_VALUES':
value = tuple([item.upper() for item in value])
setattr(self, name, value)
# Singleton settings instance, configured from the file/module named by the
# FIFTYONE_DEGREES_MOBILE_DETECTOR_SETTINGS environment variable, defaulting
# to a settings file in the current working directory.
settings = _Settings(
    os.environ.get(
        'FIFTYONE_DEGREES_MOBILE_DETECTOR_SETTINGS',
        os.path.join(os.getcwd(), '51degrees-mobile-detector.settings.py')))
import requests
import logging

# Configure the root logger's output level and message format.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# Module-level logger shared by HttpSender methods.
logger = logging.getLogger('com.autotest.db.sqlalchemy_util')
class HttpSender(object):
    """Thin wrapper around ``requests`` that keeps a common hostname and
    default headers, and stores the last GET/POST responses on the instance.
    """

    get_response = ""   # last GET response (class-level default)
    post_response = ""  # last POST response (class-level default)
    hostname = ""       # public class attribute
    __cookies = {}      # private class attribute

    # def __init__(self):
    #     print("HttpSender Default Constructor has been called.")

    def __init__(self, hostname, headers=None):
        """Create a sender bound to *hostname* with optional default headers."""
        logger.info("HttpSender Parameter Constructor has been called.")
        self.hostname = hostname
        self.headers = headers  # instance attribute, readable directly from instances
        logger.info("self.headers = {0}".format(self.headers))

    def set_headers(self, headers):
        """Replace the default request headers of this instance."""
        self.headers = headers
        logger.info("成员方法设置请求头:self.headers = {0}".format(self.headers))
        logger.info("self.headers = {0}".format(self.headers))

    # Class method, decorated with @classmethod.
    # NOTE: a class method with the same name as an instance method would
    # shadow it, hence the distinct name.
    @classmethod
    # def set_headers(cls, headers):
    def set_cls_headers(cls, headers):
        """Set default request headers at class level."""
        cls.headers = headers
        logger.info("类方法设置请求头:cls.headers = {0}".format(cls.headers))

    def send_get_request(self, full_get_url):
        """GET *full_get_url*; response is kept in ``self.get_response``."""
        self.get_response = requests.get(full_get_url, headers=self.headers)
        # logger.info("响应:", self.get_response.text)

    def send_get_request_by_suburi(self, sub_uri, input_params):
        """GET ``hostname + sub_uri`` with the given query parameters."""
        full_url = self.hostname + sub_uri
        self.get_response = requests.get(full_url, params=input_params, headers=self.headers)
        logger.info("full_url = %s" % self.get_response.url)

    def send_post_request(self, full_post_url, param_data=None):
        """POST form/body data to *full_post_url*."""
        self.post_response = requests.post(full_post_url, param_data, headers=self.headers)

    def send_json_post_request(self, full_post_url, json_data=None):
        """POST a JSON body to *full_post_url*."""
        self.post_response = requests.post(full_post_url, json=json_data, headers=self.headers)
        logger.info("响应={0}".format(self.post_response.text))

    # Fix: this was declared @staticmethod while still taking ``self``;
    # calling it through an instance shifted every argument by one
    # (full_post_url ended up bound as ``self``). It is a regular instance
    # method now, which matches how the original signature was written.
    def send_json_post_request_with_headers_cookies(self, full_post_url, json_data=None, header_data=None, cookie_data=None):
        """POST JSON with explicit headers/cookies overriding the defaults."""
        self.post_response = requests.post(full_post_url, json=json_data, headers=header_data, cookies=cookie_data)

    def send_json_post_request_by_suburi(self, sub_uri, json_data=None):
        """POST a JSON body to ``hostname + sub_uri``."""
        full_url = self.hostname + sub_uri
        logger.info("full_url={0}".format(full_url))
        logger.info("json_data={0}".format(json_data))
        self.post_response = requests.post(full_url, json=json_data, headers=self.headers)
# *args and **kwargs both stand for "one or more arguments": *args collects
# positional arguments into a tuple, **kwargs collects keyword arguments into
# a dict. In a variable-argument signature, *args must appear before **kwargs,
# because positional arguments must precede keyword arguments.
#
# r = requests.get("http://www.baidu.com")
# print(r.text)
from sqlalchemy import create_engine, and_, desc
# from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, declarative_base
import logging
import traceback
import datetime
import platform
import os
# Configure the root logger's output level and message format.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# Module-level logger shared by AlchemyUtil methods.
logger = logging.getLogger('db.sqlalchemy_util')
class AlchemyUtil(object):
    """SQLAlchemy helpers: engine/session creation plus generic query,
    insert, update and delete classmethods shared by the DB models."""

    # Declarative base class shared by every ORM model of this project.
    Base = declarative_base()

    @classmethod
    def init_engine(cls, db_param_dict, db_type: str = 'mysql'):
        """
        Create a database engine from connection parameters.
        :param db_type: database type ('mysql', 'postgresql' or 'sqlite').
        :param db_param_dict: dict holding the connection-string parameters.
        :return: the created engine.
        """
        host = db_param_dict['db_host']
        user = db_param_dict['db_user']
        passwd = db_param_dict['db_passwd']
        db = db_param_dict['db_db']
        charset = db_param_dict['db_charset']
        port = db_param_dict['db_port']
        logger.info("host = {0}".format(host))
        logger.info("user = {0}".format(user))
        logger.info("passwd = {0}".format(passwd))
        logger.info("db = {0}".format(db))
        logger.info("charset = {0}".format(charset))
        logger.info("port = {0}".format(port))
        if db_type == "postgresql":
            # NOTE(review): '?charset=' is a MySQL-style URL option; PostgreSQL
            # drivers normally use 'client_encoding' -- confirm before use.
            conn_str = "postgresql://" + user + ":" + passwd + "@" + host + ":" + port + "/" + db + "?charset=" + charset
        elif db_type == "sqlite":
            conn_str = None
            # Derive the directory of this file to locate the bundled SQLite DB.
            if str(platform.system().lower()) == 'windows':
                path = __file__.replace(fr"\{os.path.basename(__file__)}", "").replace("\\\\", "\\")
                conn_str = fr'sqlite:///{path}\db\sqlite_recruit.db''?check_same_thread=False'
            else:
                path = __file__.replace(fr"/{os.path.basename(__file__)}", "").replace("//", "/")
                conn_str = fr'sqlite:///{path}/db/sqlite_recruit.db''?check_same_thread=False'
            print(f'数据库路径:{conn_str}')
        else:
            conn_str = "mysql+pymysql://" + user + ":" + passwd + "@" + host + ":" + port + "/" + db + "?charset=" + charset
        db_engine = create_engine(
            conn_str,
            max_overflow=0,   # max connections created beyond the pool size
            pool_size=5,      # connection pool size
            pool_timeout=30,  # max seconds to wait for a pooled connection
            pool_recycle=-1   # seconds before a pooled connection is recycled (-1: never)
        )
        logger.info("[%s] engine has been created successfully." % db_engine.name)
        return db_engine

    @classmethod
    def init_db(cls, mysql_engine):
        """
        Create all tables registered on the declarative Base.
        :return: None
        """
        AlchemyUtil.Base.metadata.create_all(mysql_engine)

    @classmethod
    def drop_db(cls, mysql_engine):
        """
        Drop all tables registered on the declarative Base.
        :return: None
        """
        AlchemyUtil.Base.metadata.drop_all(mysql_engine)

    @classmethod
    def init_db_by_flask(cls, db, bind_key=None):
        """Create tables through Flask-SQLAlchemy, optionally for one bind."""
        if bind_key is None:
            db.create_all()
        else:
            # AlchemyUtil.init_db(db.get_engine(bind="site_reldb")) cannot
            # initialize Flask-SQLAlchemy tables: that path calls
            # MetaData.create_all() from plain SQLAlchemy, not the
            # bind-aware SQLAlchemy.create_all() from flask_sqlalchemy.
            db.create_all(bind=bind_key)

    @classmethod
    def get_session(cls, mysql_engine):
        """Return a new ORM session bound to *mysql_engine*."""
        db_session = sessionmaker(bind=mysql_engine)  # sessionmaker is a session factory
        return db_session()

    @classmethod
    def insert_list_with_flush_only(cls, session, obj_list):
        """Add and flush a list of objects WITHOUT committing."""
        try:
            for obj in obj_list:
                session.add(obj)
                session.flush()
            logger.info("【Success】一共插入 %d 条记录 by [insert_list_with_flush_only] method." % len(obj_list))
        finally:
            logger.info("[insert_list_with_flush_only] method has done, but has not been committed yet.")

    @classmethod
    def insert_obj_with_commit(cls, session, obj):
        """Insert one object and commit; the session is closed afterwards."""
        try:
            session.add(obj)
            session.commit()
            logger.info("【Success】插入一条记录:%s" % obj.__dict__)
        finally:
            session.close()
            logger.info("[insert_obj_with_commit] method has done and session has been closed.")

    @classmethod
    def insert_obj_without_commit(cls, session, obj):
        """Insert one object and flush WITHOUT committing."""
        try:
            session.add(obj)
            session.flush()
            logger.info("【Success】插入一条记录:%s" % obj.__dict__)
        finally:
            logger.info("[insert_obj_without_commit] method has done but not committed yet.")

    @classmethod
    def do_commit_only(cls, session):
        """Commit pending work and close the session."""
        try:
            session.commit()
            logger.info("session has been committed.")
        finally:
            session.close()
            logger.info("do_commit_only method has done and session has been closed.")

    @classmethod
    def query_first(cls, session, clazz, criteria_set=None):
        """Return the single matching record or None (raises on multiple)."""
        try:
            if criteria_set is None or len(criteria_set) == 0:
                sql = session.query(clazz)
                logger.info("执行全量查询SQL = %s" % sql)
            else:
                sql = session.query(clazz).filter(*criteria_set)
                logger.info("执行条件查询SQL = %s" % sql)
            record = sql.one_or_none()  # actually executes the query
            return record
        finally:
            session.close()
            logger.info("[query_first] method has done and session has been closed.")

    @classmethod
    def query_obj_list(cls, session, clazz, criteria_set=None):
        """Return all matching model objects as a list."""
        try:
            if criteria_set is None or len(criteria_set) == 0:
                sql = session.query(clazz)
                logger.info("执行全量查询SQL = %s" % sql)
            else:
                sql = session.query(clazz).filter(*criteria_set)
                logger.info("执行条件查询SQL = %s" % sql)
            record_list = sql.all()  # actually executes the query
            logger.info("查询获取到 %d 条记录。" % len(record_list))
            return record_list
        finally:
            session.close()
            logger.info("[query_obj_list] method has done and session has been closed.")

    @classmethod
    def query_field_list(cls, session, entities, criteria_set=None):
        """Return rows holding only the requested columns/entities."""
        try:
            if criteria_set is None or len(criteria_set) == 0:
                sql = session.query(*entities)
                logger.info("执行全量查询SQL = %s" % sql)
            else:
                sql = session.query(*entities).filter(*criteria_set)
                logger.info("执行条件查询SQL = %s" % sql)
            fields_record_list = sql.all()  # actually executes the query
            logger.info("查询获取到 %d 条记录。" % len(fields_record_list))
            return fields_record_list
        finally:
            session.close()
            # Fix: typo "seesion" corrected in the log message.
            logger.info("[query_field_list] method has done and session has been closed.")

    @classmethod
    def query_field_list_with_distinct(cls, session, entities, criteria_set=None, distinct_columns=None):
        """Like query_field_list, with an optional DISTINCT over columns."""
        try:
            if criteria_set is None or len(criteria_set) == 0:
                if distinct_columns is None or len(distinct_columns) == 0:
                    sql = session.query(*entities)
                else:
                    sql = session.query(*entities).distinct(*distinct_columns)
                logger.info("执行全量查询SQL = %s" % sql)
            else:
                if distinct_columns is None or len(distinct_columns) == 0:
                    sql = session.query(*entities).filter(*criteria_set)
                else:
                    sql = session.query(*entities).filter(*criteria_set).distinct(*distinct_columns)
                logger.info("执行条件查询SQL = %s" % sql)
            fields_record_list = sql.all()  # actually executes the query
            logger.info("查询获取到 %d 条记录。" % len(fields_record_list))
            return fields_record_list
        finally:
            session.close()
            # Fix: typo "seesion" corrected in the log message.
            logger.info("[query_field_list_with_distinct] method has done and session has been closed.")

    @classmethod
    def query_field_list_with_distinct_orderby_limit(cls, session, entities, criteria_set=None, distinct_columns=None,
                                                     order_by_columns=None, sequence: str = 'ASC', limit_val: int = 0):
        """Field query with optional DISTINCT, ORDER BY (ASC/DESC) and LIMIT."""
        try:
            if criteria_set is None or len(criteria_set) == 0:
                if distinct_columns is None or len(distinct_columns) == 0:
                    sql = session.query(*entities)
                else:
                    sql = session.query(*entities).distinct(*distinct_columns)
                logger.info("执行全量查询SQL = %s" % sql)
            else:
                if distinct_columns is None or len(distinct_columns) == 0:
                    if order_by_columns is None and limit_val == 0:
                        sql = session.query(*entities).filter(*criteria_set)
                    elif order_by_columns is not None and sequence == 'DESC' and limit_val > 0:
                        # Fix: the list of .desc() expressions must be unpacked
                        # into and_() (as done in the distinct branch below);
                        # passing the list itself produced an invalid clause.
                        sql = session.query(*entities).filter(*criteria_set).order_by(
                            and_(*[x.desc() for x in order_by_columns])
                        ).limit(limit_val)
                    else:
                        sql = (session.query(*entities).filter(*criteria_set).
                               order_by(and_(*order_by_columns)).limit(limit_val))
                else:
                    if order_by_columns is None and limit_val == 0:
                        sql = session.query(*entities).filter(*criteria_set).distinct(*distinct_columns)
                    elif order_by_columns is not None and sequence == 'DESC' and limit_val > 0:
                        sql = session.query(*entities).filter(*criteria_set).distinct(*distinct_columns).order_by(
                            # Comprehension applies .desc() to every ordering
                            # column, e.g. [gmt_create.desc(), gmt_modify.desc()].
                            and_(*[x.desc() for x in order_by_columns])
                        ).limit(limit_val)
                    else:
                        sql = session.query(*entities).distinct(*distinct_columns).filter(*criteria_set).order_by(
                            and_(*order_by_columns)
                        ).limit(limit_val)
                logger.info("执行条件查询SQL = %s" % sql)
            fields_record_list = sql.all()  # actually executes the query
            logger.info("查询获取到 %d 条记录。" % len(fields_record_list))
            return fields_record_list
        finally:
            session.close()
            # Fix: typo "seesion" corrected in the log message.
            logger.info("[query_field_list_with_distinct] method has done and session has been closed.")

    @classmethod
    def update_for_criteria_with_commit(cls, session, clazz, criteria_set=None, update_dict=None):
        """
        :param session: db_session
        :param clazz: db_model_name
        :param criteria_set: query's criteria
        :param update_dict: update's field-value pairs
        :return: row count of updated records
        """
        # Fix: the default was a mutable dict literal ({}); None is the safe
        # sentinel and is treated the same as an empty mapping below.
        if update_dict is not None and len(update_dict) > 0:
            try:
                if criteria_set is None or len(criteria_set) == 0:
                    sql = session.query(clazz)
                    logger.info("执行全量查询SQL = %s" % sql)
                else:
                    sql = session.query(clazz).filter(*criteria_set)
                    logger.info("执行条件查询SQL = %s" % sql)
                affected_row = sql.update(update_dict)  # executes the update, returns the row count
                session.commit()
                logger.info("【Success】一共更新 %d 行记录。" % affected_row)
                return affected_row
            except Exception:
                # Roll back so the session stays usable after a failed update.
                session.rollback()
                logger.warning("出现异常")
                logger.error(traceback.format_exc())
            finally:
                session.close()
        else:
            logger.warning("依据update_dict参数,传入的需要更新的字段个数为零,无法更新。")

    @classmethod
    def delete_for_criteria_commit(cls, session, clazz, criteria_set=None):
        """
        :param session: db_session
        :param clazz: db_model_name
        :param criteria_set: query's criteria
        :return: row count of deleted records
        """
        try:
            if criteria_set is None or len(criteria_set) == 0:
                # Refuse to delete every record when no criteria are given.
                logger.info("criteria_set 为空,不可删除全部记录,有风险。")
                return 0
            else:
                sql = session.query(clazz).filter(*criteria_set)
                logger.info("执行条件查询SQL = %s" % sql)
                affected_row = sql.delete()  # executes the delete, returns the row count
                session.commit()
                # logger.info("【Success】一共删除 %d 行记录,依据条件:%s" % (affected_row, *criteria_set))
                # The line above raises for IS NULL style criteria such as
                # pytest_execution_record.test_psm IS NULL, hence the plain form.
                logger.info("【Success】一共删除 %d 行记录." % affected_row)
                return affected_row
        except Exception:
            session.rollback()
            logger.warning("出现异常")
            logger.error(traceback.format_exc())
        finally:
            session.close()

    @classmethod
    def gen_unique_key(cls):
        """Build a unique numeric key: wall-clock string + microsecond epoch."""
        dt = datetime.datetime.now()
        dt_str = dt.strftime('%Y%m%d%H%M%S')
        ts = datetime.datetime.timestamp(dt)
        ts_str = str(int(ts * 1000000))
        unique_key = dt_str + ts_str
        return unique_key
# No standalone behaviour; module is used as a library.
if __name__ == "__main__":
    pass
import pymysql
import configparser
import json
import sys
# 定义一个db链接类
# A database-connection helper class.
class DBConn:
    """
    pymysql-based DB connection wrapper; connection parameters are read
    from a JSON value inside the [mysql] section of a config file.
    """

    def __init__(self, db_key, db_conf_relative_path):
        """Read the connection parameters for *db_key* and connect.

        :param db_key: key inside the [mysql] section of the config file.
        :param db_conf_relative_path: path to the .conf file.
        """
        self.db_key = db_key
        # Fix: the config file handle was opened but never closed;
        # a with-block releases it deterministically.
        cf = configparser.ConfigParser()
        with open(db_conf_relative_path) as conf_file:
            cf.read_file(conf_file)
        json_str = cf.get("mysql", db_key)
        print(json_str)
        dict_val = json.loads(json_str)
        host = dict_val['db_host']
        user = dict_val['db_user']
        passwd = dict_val['db_passwd']
        db = dict_val['db_db']
        charset = dict_val['db_charset']
        port = dict_val['db_port']
        print("host = {0}".format(host))
        print("user = {0}".format(user))
        print("passwd = {0}".format(passwd))
        print("db = {0}".format(db))
        print("charset = {0}".format(charset))
        print("port = {0}".format(port))
        self.conn = pymysql.connect(host=host,
                                    user=user,
                                    passwd=passwd,
                                    db=db,
                                    charset=charset,
                                    port=int(port),
                                    )
        print("成功连接{0}数据库。".format(self.db_key))

    def query_db(self, sql_str):
        """Run a SELECT and return all fetched rows (None on error)."""
        cur = self.conn.cursor()
        try:
            affected_row = cur.execute(sql_str)
            print("【{0}】SQL语句返回{1}条数据".format(sql_str, affected_row))
            self.conn.commit()
            return cur.fetchall()
        except Exception as e:
            print(e.with_traceback(sys.exc_info()[2]))
        finally:
            cur.close()

    def update_db(self, sql_str):
        """Run an UPDATE/INSERT/DELETE; return the affected row count."""
        cur = self.conn.cursor()
        try:
            affected_row = cur.execute(sql_str)
            self.conn.commit()
            print("【{0}】SQL语句共影响{1}条数据".format(sql_str, affected_row))
            return affected_row
        except Exception as e:
            print(e.with_traceback(sys.exc_info()[2]))
        finally:
            cur.close()

    def close_db(self):
        """Close the underlying connection."""
        self.conn.close()
        print("与{0}的数据库连接已关闭。".format(self.db_key))
if __name__ == "__main__":
    # Open the connection.
    conn = DBConn("DB_BOE_Site_Reldb", "../../conf/db.conf")
    # Run a SELECT.
    tuple_result = conn.query_db("select * from union_media where id in (45535, 45532, 45507, 259);")
    print(tuple_result)
    for row_result in tuple_result:
        print("当前行元祖为:", row_result)
        print("当前行共{0}个字段".format(len(row_result)))
        print("当前行第一个字段值=", row_result[0])
    # Run an UPDATE.
    affected_row_num = conn.update_db("update union_media_function_rel set function_id = 35 where id = '111287';")
    print("更新了{0}行。".format(affected_row_num))
    # Close the connection.
    conn.close_db()
from rolling_king.autotest.db.sqlalchemy_util import AlchemyUtil
from sqlalchemy import Column, Integer, String, DateTime, Float
import datetime
import json
import re
class CaseRecordModel(AlchemyUtil.Base):
    """ORM model: one registered pytest test case (`pytest_case_record`)."""
    __tablename__ = 'pytest_case_record'
    # uid = Column(Integer, primary_key=True, autoincrement=True)  # unique id of the test case
    uid = Column(String(32), primary_key=True)  # uuid.uuid4().hex
    test_project_name = Column(String(64))  # name of the QA Python test project
    test_psm = Column(String(32))  # PSM under test
    test_interface = Column(String(64))  # interface under test: sub-URI for HTTP, Service.Method for Thrift
    test_inter_type = Column(String(8))  # interface protocol type
    test_class = Column(String(64))  # pytest test class: package.Class
    test_method = Column(String(64))  # pytest test method name
    test_description = Column(String(128))  # test case description
    version = Column(Integer)  # case version number
    gmt_create = Column(DateTime, default=datetime.datetime.now)  # record creation time
    gmt_modify = Column(DateTime, default=datetime.datetime.now)  # record modification time

    def to_json(self):
        """Return the record's fields as a plain dict.

        Fix: operate on a copy so `_sa_instance_state` is not deleted from
        the live instance `__dict__` (which detaches SQLAlchemy state), and
        avoid shadowing the builtin ``dict``.
        """
        json_dict = dict(self.__dict__)
        json_dict.pop("_sa_instance_state", None)
        return json_dict
# 重写JSONEncoder的default方法,object转换成dict
class CaseRecordEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize CaseRecordModel objects."""

    # Overridden default method
    def default(self, obj):
        """
        Convert an Object into a plain dict.
        :param obj:
        :return:
        """
        if isinstance(obj, CaseRecordModel):
            return {
                'uid': obj.uid,
                'test_project_name': obj.test_project_name,
                'test_psm': obj.test_psm,
                'test_interface': obj.test_interface,
                'test_inter_type': obj.test_inter_type,
                'test_class': obj.test_class,
                'test_method': obj.test_method,
                'test_description': obj.test_description,
                'version': obj.version,
                'gmt_create': obj.gmt_create,
                'gmt_modify': obj.gmt_modify
            }
        else:
            return json.JSONEncoder.default(self, obj)

    # Overridden encode method
    def encode(self, obj):
        """
        Convert an Object into a dict, then into its string form.

        Fix: delegate to ``default`` instead of duplicating the whole field
        map here (the two copies could drift apart); this also matches how
        BamInterEncoder.encode is written.
        :param obj:
        :return:
        """
        if isinstance(obj, CaseRecordModel):
            return str(self.default(obj))
        else:
            return json.JSONEncoder.encode(self, obj)
# 重写JSONDecoder的decode方法,dict转换成object
class CaseRecordDecoder(json.JSONDecoder):
    """JSON decoder that turns a dict string into a CaseRecordModel."""

    def decode(self, dict_str):
        """
        Convert a string into a dict, then into an Object.

        Fix: the decoded object was built but never returned; the result of
        ``dict_to_obj`` is now propagated to the caller.
        :param dict_str: string form of a dict
        :return: the decoded CaseRecordModel
        """
        dict_val = super().decode(dict_str)  # str -> dict
        # dict -> object
        return CaseRecordDecoder.dict_to_obj(dict_val)

    @staticmethod
    def dict_to_obj(dict_val):
        """
        Convert a plain dict directly into the corresponding Object.
        :param dict_val:
        :return: a populated CaseRecordModel
        """
        case_record_model = CaseRecordModel()
        if 'uid' in dict_val.keys():
            case_record_model.uid = dict_val['uid']
        else:
            case_record_model.uid = '0'
        case_record_model.test_project_name = dict_val['test_project_name']
        case_record_model.test_psm = dict_val['test_psm']
        case_record_model.test_interface = dict_val['test_interface']
        case_record_model.test_inter_type = dict_val['test_inter_type']
        case_record_model.test_class = dict_val['test_class']
        case_record_model.test_method = dict_val['test_method']
        case_record_model.test_description = dict_val['test_description']
        case_record_model.version = dict_val['version']
        return case_record_model
############################################################################
class ExecutionRecordModel(AlchemyUtil.Base):
    """ORM model: one test case execution (`pytest_execution_record`)."""
    __tablename__ = 'pytest_execution_record'
    # uid = Column(Integer, primary_key=True, autoincrement=True)  # unique id of one executed TestCase
    uid = Column(String(32), primary_key=True)  # uuid.uuid4().hex
    test_unique_tag = Column(String(64))  # unique tag of one whole test run
    test_project_name = Column(String(64))  # name of the QA Python test project
    test_psm = Column(String(32))  # PSM under test
    test_interface = Column(String(64))  # interface under test: sub-URI for HTTP, Service.Method for Thrift
    test_inter_type = Column(String(8))  # interface protocol type
    test_class = Column(String(64))  # pytest test class: package.Class
    test_method = Column(String(64))  # pytest test method name
    test_result = Column(String(8))  # execution result of the test case
    test_params = Column(String(64))  # parameters of the pytest test method
    test_duration = Column(Integer)  # execution duration of the test case
    test_start_time = Column(String(64))  # execution start time
    test_finish_time = Column(String(64))  # execution finish time
    test_assert = Column(String(8))  # whether the test case uses assert
    test_error_msg = Column(String(32))  # failure message of the test case
    gmt_create = Column(DateTime, default=datetime.datetime.now)  # record creation time
    gmt_modify = Column(DateTime, default=datetime.datetime.now)  # record modification time

    def to_json(self):
        """Return the record's fields as a plain dict.

        Fix: operate on a copy so `_sa_instance_state` is not deleted from
        the live instance `__dict__`, and avoid shadowing builtin ``dict``.
        """
        json_dict = dict(self.__dict__)
        json_dict.pop("_sa_instance_state", None)
        return json_dict
# 重写JSONEncoder的default方法,object转换成dict
class ExecutionRecordEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize ExecutionRecordModel objects."""

    # Overridden default method
    def default(self, execution_obj):
        """
        Convert an Object into a plain dict.
        :param execution_obj:
        :return:
        """
        if isinstance(execution_obj, ExecutionRecordModel):
            return {
                'uid': execution_obj.uid,
                'test_unique_tag': execution_obj.test_unique_tag,
                'test_project_name': execution_obj.test_project_name,
                'test_psm': execution_obj.test_psm,
                'test_interface': execution_obj.test_interface,
                'test_inter_type': execution_obj.test_inter_type,
                'test_class': execution_obj.test_class,
                'test_method': execution_obj.test_method,
                'test_result': execution_obj.test_result,
                'test_params': execution_obj.test_params,
                'test_duration': execution_obj.test_duration,
                'test_start_time': execution_obj.test_start_time,
                'test_finish_time': execution_obj.test_finish_time,
                'test_assert': execution_obj.test_assert,
                'test_error_msg': execution_obj.test_error_msg,
                'gmt_create': execution_obj.gmt_create,
                'gmt_modify': execution_obj.gmt_modify
            }
        else:
            return json.JSONEncoder.default(self, execution_obj)

    # Overridden encode method
    def encode(self, execution_obj):
        """
        Convert an Object into a dict, then into its string form.

        Fixes: the isinstance check used the wrong class
        (CaseRecordModel instead of ExecutionRecordModel), and ``default``
        was called unbound on the class without passing ``self``, which
        raised TypeError on every use.
        :param execution_obj:
        :return:
        """
        if isinstance(execution_obj, ExecutionRecordModel):
            return str(self.default(execution_obj))
        else:
            return json.JSONEncoder.encode(self, execution_obj)
# 重写JSONDecoder的decode方法,dict转换成object
class ExecutionRecordDecoder(json.JSONDecoder):
    """JSON decoder that turns a dict string into an ExecutionRecordModel."""

    def decode(self, dict_str):
        """
        Convert a string into a dict, then into an Object.

        Fix: the decoded object was built but never returned; the result of
        ``dict_to_obj`` is now propagated to the caller.
        :param dict_str: string form of a dict
        :return: the decoded ExecutionRecordModel
        """
        dict_val = super().decode(dict_str)  # str -> dict
        # dict -> object
        return ExecutionRecordDecoder.dict_to_obj(dict_val)

    @staticmethod
    def dict_to_obj(dict_val):
        """
        Convert a plain dict directly into the corresponding Object.
        :param dict_val:
        :return: a populated ExecutionRecordModel
        """
        execution_record_model = ExecutionRecordModel()
        if 'uid' in dict_val.keys():
            execution_record_model.uid = dict_val['uid']
        else:
            execution_record_model.uid = '0'
        execution_record_model.test_unique_tag = dict_val['test_unique_tag']
        execution_record_model.test_project_name = dict_val['test_project_name']
        execution_record_model.test_psm = dict_val['test_psm']
        execution_record_model.test_interface = dict_val['test_interface']
        execution_record_model.test_inter_type = dict_val['test_inter_type']
        execution_record_model.test_class = dict_val['test_class']
        execution_record_model.test_method = dict_val['test_method']
        execution_record_model.test_result = dict_val['test_result']
        execution_record_model.test_params = dict_val['test_params']
        execution_record_model.test_duration = dict_val['test_duration']
        execution_record_model.test_start_time = dict_val['test_start_time']
        execution_record_model.test_finish_time = dict_val['test_finish_time']
        execution_record_model.test_assert = dict_val['test_assert']
        execution_record_model.test_error_msg = dict_val['test_error_msg']
        return execution_record_model
class ExecutionStatisticModel(AlchemyUtil.Base):
    """ORM model: aggregate stats of one test run (`pytest_exec_statistic_record`)."""
    __tablename__ = 'pytest_exec_statistic_record'
    uid = Column(String(32), primary_key=True)  # uuid.uuid4().hex
    test_unique_tag = Column(String(16))  # unique tag of one whole test run
    test_project_name = Column(String(64))  # name of the QA Python test project
    test_psm = Column(String(32))  # PSM under test
    test_cases_num = Column(Integer)  # number of test cases in this run
    test_pass_rate = Column(Float)  # pass rate of this run
    test_duration = Column(Integer)  # total execution duration of this run
    test_assert_rate = Column(Float)  # ratio of cases using assert in this run
    test_interface_num = Column(Integer)  # number of interfaces covered by this run
    gmt_create = Column(DateTime, default=datetime.datetime.now)  # record creation time
    gmt_modify = Column(DateTime, default=datetime.datetime.now)  # record modification time

    def to_json(self):
        """Return the record's fields as a plain dict.

        Fix: operate on a copy so `_sa_instance_state` is not deleted from
        the live instance `__dict__`, and avoid shadowing builtin ``dict``.
        """
        json_dict = dict(self.__dict__)
        json_dict.pop("_sa_instance_state", None)
        return json_dict
#################################################################################
###### 下方是全部接口Model:BamInterModel 和 未覆盖接口Model:NonCovInterModel ######
# Regex flags and whitespace matcher used by the JSON decoders below.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)


def gen_unique_key():
    """Build a unique numeric key.

    The key is the current wall-clock time formatted as
    ``YYYYmmddHHMMSS`` followed by the same instant expressed as an
    integer microsecond epoch timestamp.
    """
    now = datetime.datetime.now()
    micros = int(datetime.datetime.timestamp(now) * 1000000)
    return now.strftime('%Y%m%d%H%M%S') + str(micros)
class BamInterModel(AlchemyUtil.Base):
    """ORM model: one interface of a PSM (`psm_inter_info`)."""
    __bind_key__ = "site_reldb"  # falls back to the default database when unset
    __tablename__ = 'psm_inter_info'
    id = Column(String(32), primary_key=True)
    psm = Column(String(64), nullable=False)
    endpoint_id = Column(String(64), nullable=False)
    method = Column(String(8))
    path = Column(String(128))
    level = Column(Integer)
    name = Column(String(64))
    note = Column(String(64))
    rpc_method = Column(String(64))
    creator = Column(String(16))
    updater = Column(String(32))
    modify_time = Column(String(32))
    create_time = Column(String(32))
    publish_status = Column(Integer)
    priority = Column(Integer)
    version = Column(String(8))
    gmt_create = Column(DateTime, default=datetime.datetime.now)

    def to_json(self):
        """Return the record's fields as a plain dict.

        Fix: operate on a copy so `_sa_instance_state` is not deleted from
        the live instance `__dict__`, and avoid shadowing builtin ``dict``.
        """
        json_dict = dict(self.__dict__)
        json_dict.pop("_sa_instance_state", None)
        return json_dict
# 重写JSONEncoder的default方法,object转换成dict
class BamInterEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize BamInterModel objects."""

    # Serialized fields, in output order; names match the model attributes.
    _FIELDS = (
        'id', 'psm', 'endpoint_id', 'method', 'path', 'level', 'name',
        'note', 'rpc_method', 'creator', 'updater', 'create_time',
        'modify_time', 'publish_status', 'priority', 'version', 'gmt_create',
    )

    def default(self, obj):
        """Convert a BamInterModel into a plain dict."""
        if isinstance(obj, BamInterModel):
            return {field: getattr(obj, field) for field in self._FIELDS}
        return json.JSONEncoder.default(self, obj)

    def encode(self, obj):
        """Convert a BamInterModel into a dict, then into its string form."""
        if isinstance(obj, BamInterModel):
            return str(self.default(obj))
        return json.JSONEncoder.encode(self, obj)
# 重写JSONDecoder的decode方法,dict转换成object
class BamInterDecoder(json.JSONDecoder):
    """JSON decoder that turns a dict string into a BamInterModel."""

    def decode(self, dict_str, _w=WHITESPACE.match):
        """
        Convert a string into a dict, then into an Object.

        Fix: the decoded object was built but never returned; the result of
        ``dict_to_obj`` is now propagated to the caller.
        :param dict_str: string form of a dict
        :param _w:
        :return: the decoded BamInterModel
        """
        dict_val = super().decode(dict_str)  # str -> dict
        # dict -> object
        return self.dict_to_obj(dict_val)

    @staticmethod
    def dict_to_obj(dict_val):
        """
        Convert a plain dict directly into the corresponding Object.

        Fix: the presence check used the key 'uid' while the value was read
        from 'id', raising KeyError for dicts that carried 'uid' but no 'id';
        both now use 'id'.
        :param dict_val:
        :return: a populated BamInterModel
        """
        bam_inter_model = BamInterModel()
        if 'id' in dict_val.keys():
            bam_inter_model.id = dict_val['id']
        else:
            bam_inter_model.id = gen_unique_key()
        bam_inter_model.psm = dict_val['psm']
        bam_inter_model.endpoint_id = dict_val['endpoint_id']
        bam_inter_model.method = dict_val['method']
        bam_inter_model.path = dict_val['path']
        bam_inter_model.level = dict_val['level']
        bam_inter_model.name = dict_val['name']
        bam_inter_model.note = dict_val['note']
        bam_inter_model.rpc_method = dict_val['rpc_method']
        bam_inter_model.creator = dict_val['creator']
        bam_inter_model.updater = dict_val['updater']
        bam_inter_model.create_time = dict_val['create_time']
        bam_inter_model.modify_time = dict_val['modify_time']
        bam_inter_model.publish_status = dict_val['publish_status']
        bam_inter_model.priority = dict_val['priority']
        bam_inter_model.version = dict_val['version']
        if 'gmt_create' in dict_val.keys():
            bam_inter_model.gmt_create = dict_val['gmt_create']
        return bam_inter_model
class NonCovInterModel(AlchemyUtil.Base):
__bind_key__ = "site_reldb" # 若不指定,则使用默认数据库。
__tablename__ = 'psm_non_cov_inter'
id = Column(String(32), primary_key=True)
psm = Column(String(64), nullable=False)
endpoint_id = Column(String(64), nullable=False)
method = Column(String(8))
path = Column(String(128))
name = Column(String(64))
note = Column(String(64))
rpc_method = Column(String(64))
version = Column(String(8))
gmt_create = Column(DateTime, default=datetime.datetime.now)
def to_json(self):
dict = self.__dict__
if "_sa_instance_state" in dict:
del dict["_sa_instance_state"]
return dict | 51job-autotest-framework | /51job_autotest_framework-0.3.1-py3-none-any.whl/rolling_king/autotest/db/db_models.py | db_models.py |
import requests
import urllib3
urllib3.disable_warnings()
class HttpSender(object):
get_response = ""
post_response = ""
hostname = "" # 公有的类属性
__cookies = {} # 私有的类属性
# def __init__(self):
# print("HttpSender Default Constructor has been called.")
def __init__(self, hostname, headers=None):
print("HttpSender Parameter Constructor has been called.")
self.hostname = hostname
self.headers = headers # self.headers的这个headers是实例属性,可以用实例直接方法。
print("self.headers = {0}".format(self.headers))
def set_headers(self, headers):
self.headers = headers
print("成员方法设置请求头:self.headers = {0}".format(self.headers))
print("self.headers = {0}".format(self.headers))
# 类方法,用classmethod来进行修饰
# 注:类方法和实例方法同名,则类方法会覆盖实例方法。所以改个名字。
@classmethod
# def set_headers(cls, headers):
def set_cls_headers(cls, headers):
cls.headers = headers
print("类方法设置请求头:cls.headers = {0}".format(cls.headers))
def send_get_request(self, full_get_url):
self.get_response = requests.get(full_get_url, headers=self.headers)
print("响应:", self.get_response.text)
def send_get_request_by_suburi(self, sub_uri, input_params):
full_url = self.hostname + sub_uri
self.get_response = requests.get(full_url, params=input_params, headers=self.headers)
print("full_url = %s" % self.get_response.url)
def send_post_request(self, full_post_url, param_data=None):
self.post_response = requests.post(full_post_url, param_data, headers=self.headers)
def send_json_post_request(self, full_post_url, json_data=None):
self.post_response = requests.post(full_post_url, json=json_data, headers=self.headers)
# 静态方法
@staticmethod
def send_json_post_request_with_headers_cookies(self, full_post_url, json_data=None, header_data=None, cookie_data=None):
# 在静态方法中引用类属性的话,必须通过类实例对象来引用
# print(self.hostname)
self.post_response = requests.post(full_post_url, json=json_data, headers=header_data, cookies=cookie_data)
def send_json_post_request_by_suburi(self, sub_uri, json_data=None):
full_url = self.hostname + sub_uri
self.post_response = requests.post(full_url, json=json_data, headers=self.headers)
# *args 和 **kwargs 都代表 1个 或 多个 参数的意思。*args 传入tuple 类型的无名参数,而 **kwargs 传入的参数是 dict 类型.
# 可变参数 (Variable Argument) 的方法:使用*args和**kwargs语法。# 其中,*args是可变的positional arguments列表,**kwargs是可变的keyword arguments列表。
# 并且,*args必须位于**kwargs之前,因为positional arguments必须位于keyword arguments之前。
r = requests.get("http://www.baidu.com")
print(r.text) | 51job-autotest-framework | /51job_autotest_framework-0.3.1-py3-none-any.whl/rolling_king/jason/requests/http_sender_module.py | http_sender_module.py |
import logging
from rolling_king.autotest.requests.http_sender_module import HttpSender
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger("gitlab_utils")
class GitLab(object):
__ACCESS_SEG_1: str = "glpat"
__ACCESS_SEG_2: str = "Y4AGLX1aWBkvvsyjqEuv"
__access_token: str = None
__host_url: str = None
__http_sender: HttpSender = None
@classmethod
def init(cls, gitlab_host_url: str, personal_access_token: str = None):
if personal_access_token is None:
cls.__access_token = cls.__ACCESS_SEG_1 + "-" + cls.__ACCESS_SEG_2
else:
cls.__access_token = personal_access_token
if gitlab_host_url is not None and len(gitlab_host_url) > 0:
cls.__host_url = gitlab_host_url
cls.__http_sender = HttpSender(hostname=cls.__host_url,
headers={"PRIVATE-TOKEN": cls.__access_token})
else:
logger.error("Please provide gitlab_host_url")
@classmethod
def get_token(cls):
logger.info(f"Personal Access Token = {cls.__access_token}")
@classmethod
def get_host_url(cls):
logger.info(f"GitLab Host URL = {cls.__host_url}")
@classmethod
def get_all_projects(cls) -> dict:
cls.__http_sender.send_get_request_by_suburi(sub_uri="/api/v4/projects",
input_params={
"private_token": cls.__access_token
})
# try:
json_resp = cls.__http_sender.get_response.json()
if len(json_resp) > 0:
logger.info(f"Total {len(json_resp)} projects")
for curr in json_resp:
logger.info(f"id = {curr['id']}, name = {curr['name']}, default_branch = {curr['default_branch']}")
return json_resp
else:
return {}
# except e:
# logger.error("Exception happened...{e.args}")
@classmethod
def get_specific_project(cls, project_name: str) -> dict | None:
# "private_token": cls.__access_token, # 若header中没有PRIVATE-TOKEN则需要参数里写上。
cls.__http_sender.send_get_request_by_suburi("/api/v4/projects",
input_params={
"search": project_name
})
# cls.__http_sender.send_get_request(full_get_url="https://gitdev.51job.com/api/v4/projects?search=maven-jave-project")
json_resp = cls.__http_sender.get_response.json()
if json_resp is not None and len(json_resp) == 1:
logger.info(f"[成功]: 响应为{json_resp}")
return json_resp[0]
else:
return {}
@classmethod
def get_project_branches(cls, project_id: str = None, project_name: str = None) -> list[dict] | None:
if project_id is None or project_id == "":
project_id = cls.get_specific_project(project_name)['id']
cls.__http_sender.send_get_request_by_suburi(
# "private_token": cls.__access_token, # 若header中没有PRIVATE-TOKEN则需要参数里写上。
sub_uri=f"/api/v4/projects/{project_id}/repository/branches",
input_params={
# "private_token": cls.__access_token
}
)
json_resp = cls.__http_sender.get_response.json()
logger.info(json_resp)
return json_resp
if __name__ == '__main__':
GitLab.init(gitlab_host_url="https://gitdev.51job.com")
GitLab.get_token()
GitLab.get_host_url()
# GitLab.get_all_projects()
# GitLab.get_specific_project(project_name="maven-jave-project")
GitLab.get_project_branches(project_name="maven-jave-project") | 51job-autotest-framework | /51job_autotest_framework-0.3.1-py3-none-any.whl/rolling_king/jason/gitlab/gitlab_utils.py | gitlab_utils.py |
import logging
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.common.keys import Keys
from typing import List
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger("my_driver")
class WebDriverCommon(object):
driver = None
action = None
@classmethod
def _browser_options(cls, browser_type='Chrome'):
if browser_type == 'Firefox':
options = webdriver.FirefoxOptions()
elif browser_type == 'IE':
options = webdriver.IeOptions()
else:
options = webdriver.ChromeOptions()
# 像Options中添加实验选项。
options.add_experimental_option(name="excludeSwitches", value=["enable-automation"])
options.add_argument("--headless")
return options
@classmethod
def init_driver(cls, driver_type='Chrome', executable_path=None):
if cls.driver is None:
if executable_path is None:
if driver_type == 'Firefox':
cls.driver = webdriver.Firefox(options=cls._browser_options(browser_type=driver_type))
elif driver_type == 'IE':
cls.driver = webdriver.Ie(options=cls._browser_options(browser_type=driver_type))
else:
cls.driver = webdriver.Chrome(options=cls._browser_options())
else:
service_obj = Service(executable_path=executable_path)
# Chrome类的初始化,executable_path已不建议使用,所以使用Service对象。
if driver_type == 'Firefox':
cls.driver = webdriver.Firefox(service=service_obj,
options=cls._browser_options(browser_type=driver_type))
elif driver_type == 'IE':
cls.driver = webdriver.Ie(service=service_obj,
options=cls._browser_options(browser_type=driver_type))
else:
cls.driver = webdriver.Chrome(service=service_obj, options=cls._browser_options())
logger.info('The driver object of WebDriverCommon class was successfully initialized.')
else:
logger.warning('The driver object of WebDriverCommon class has been already initialized.')
@classmethod
def navigate(cls, url):
cls.driver.get(url)
@classmethod
def refresh(cls):
cls.driver.refresh()
@classmethod
def max_window(cls):
cls.driver.maximize_window()
@classmethod
def min_window(cls):
cls.driver.minimize_window()
@classmethod
def set_action(cls):
if cls.driver is None:
logger.error("Driver is None, so cannot initialize ActionChains.")
else:
cls.action = ActionChains(cls.driver)
logger.info("Initialize ActionChains successfully by Driver.")
@classmethod
def is_ele_exist(cls, by_locator: str, locator_value: str) -> bool:
# e.g: element = driver.find_element(By.ID, 'foo')
try:
web_ele = cls.driver.find_element(by_locator, locator_value)
if web_ele is None:
logger.warning("[失败]:{}={}, 未能定位到WebElement".format(by_locator, locator_value))
return False
else:
logger.info("[成功]:{}={}, 成功定位到WebElement".format(by_locator, locator_value))
return True
except Exception as e:
logger.warning("[异常]:{}={}, 未能定位到WebElement".format(by_locator, locator_value))
logger.warning(e.args)
return False
finally:
logger.info("is_ele_exist class func has been executed.")
@classmethod
def switch_to_new_window(cls):
handles_list = cls.driver.window_handles()
for handle in handles_list:
if handle == cls.driver.current_window_handle:
pass
else:
cls.driver.switch_to.window(handle)
@classmethod
def wait_implicitly(cls, time_in_seconds):
cls.driver.implicitly_wait(time_to_wait=time_in_seconds)
@classmethod
def wait_for_load(cls, tuple_locator: tuple, presence_or_visibility='visibility', time_out=10, frequency=0.5) -> WebElement:
try:
web_driver_wait = WebDriverWait(cls.driver, timeout=time_out, poll_frequency=frequency)
if presence_or_visibility == 'visibility':
result = web_driver_wait.until(method=EC.visibility_of_element_located(tuple_locator),
message="超时未找到")
elif presence_or_visibility == 'presence':
result = web_driver_wait.until(method=EC.presence_of_element_located(tuple_locator),
message="超时未找到")
else:
logger.warning("presence_or_visibility only supports visibility or presence.")
result = None
if isinstance(result, WebElement):
logger.info("Locator={}, 元素已成功加载。".format(tuple_locator))
else:
logger.warning("未等到元素加载。")
logger.info("result={}".format(result))
return result
except Exception as e:
logger.error(e.args)
logger.error(e)
finally:
logger.info("wait_for_load method has been executed.")
@classmethod
def find_element(cls, by_locator: str, locator_value: str, curr_web_ele=None) -> WebElement:
try:
if curr_web_ele is None:
web_ele = cls.driver.find_element(by_locator, locator_value)
logger.info("[成功]:{}={}, 成功定位到WebElement".format(by_locator, locator_value))
elif isinstance(curr_web_ele, WebElement):
web_ele = curr_web_ele.find_element(by_locator, locator_value)
logger.info("[成功]:基于当前Element[{}], 通过 {}={}, 成功定位到WebElement".format(curr_web_ele, by_locator, locator_value))
else:
logger.info("所传参数curr_web_ele类型错误,必须是WebElement类型。")
web_ele = None
except Exception as e:
logger.error(e.args)
web_ele = None
finally:
logger.info("find_element method has been executed.")
return web_ele
@classmethod
def find_element_list(cls, by_locator: str, locator_value: str, curr_web_ele=None) -> List[WebElement]:
try:
if curr_web_ele is None:
web_ele_list = cls.driver.find_elements(by_locator, locator_value)
logger.info("[成功]:{}={}, 成功获取到WebElement List。".format(by_locator, locator_value))
elif isinstance(curr_web_ele, WebElement):
web_ele_list = curr_web_ele.find_elements(by_locator, locator_value)
logger.info("[成功]:基于当前Element[{}], 通过 {}={}, 成功获取到WebElement List。".format(curr_web_ele, by_locator, locator_value))
else:
logger.info("所传参数curr_web_ele类型错误,必须是WebElement类型。")
web_ele_list = []
except Exception as e:
logger.error(e.args)
web_ele_list = []
finally:
logger.info("find_element_list method has been executed.")
return web_ele_list
@classmethod
def switch_to_iframe(cls, frame_id_name_ele):
# driver.switch_to.frame('frame_name')
# driver.switch_to.frame(1)
# driver.switch_to.frame(driver.find_elements(By.TAG_NAME, "iframe")[0])
try:
if isinstance(frame_id_name_ele, int):
cls.driver.switch_to.frame(frame_id_name_ele)
logger.info("通过Integer Index={}, 进入iFrame。".format(frame_id_name_ele))
elif isinstance(frame_id_name_ele, str):
cls.driver.switch_to.frame(frame_id_name_ele)
logger.info("通过iFrame Name={}, 进入iFrame。".format(frame_id_name_ele))
elif isinstance(frame_id_name_ele, WebElement):
cls.driver.switch_to.frame(frame_id_name_ele)
logger.info("通过iFrame WebElement={}, 进入iFrame。".format(frame_id_name_ele))
else:
logger.warning("frame_id_name_ele参数,仅支持int、str、WebElement类型。")
except Exception as e:
logger.error(e.args)
finally:
logger.info("switch_to_iFrame method has been executed.")
@classmethod
def switch_to_default_content(cls):
cls.driver.switch_to.default_content()
@classmethod
def right_click(cls, on_web_ele, int_down_times):
if cls.action is None:
logger.error("尚未未初始化ActionChains对象action.")
else:
cls.action.context_click(on_element=on_web_ele).perform()
for i in range(int_down_times): # 当前点击向下键无反应。
# cls.action.send_keys(Keys.ARROW_DOWN)
cls.action.key_down(Keys.ARROW_DOWN)
cls.wait_implicitly(1)
cls.action.key_up(Keys.ARROW_DOWN)
logger.info("第{}次点击向下键。".format(i))
cls.action.send_keys(Keys.ENTER)
logger.info("回车选中。")
@classmethod
def move_to_ele(cls, web_ele, x_off_set=None, y_off_set=None):
if web_ele is None:
logger.error("给定WebElement is None.")
return None
elif x_off_set is None or y_off_set is None:
return cls.action.move_to_element(web_ele)
else:
return cls.action.move_to_element_with_offset(web_ele, xoffset=x_off_set, yoffset=y_off_set)
@classmethod
def close_driver(cls):
cls.driver.close()
logger.info("成功关闭WebDriver")
if __name__ == '__main__':
WebDriverCommon.init_driver(executable_path='./chromedriver.exe')
WebDriverCommon.navigate("https://www.baidu.com")
WebDriverCommon.refresh()
WebDriverCommon.max_window()
logger.info(WebDriverCommon.is_ele_exist(By.ID, "s-top-left"))
ele = WebDriverCommon.wait_for_load((By.XPATH, "//div[@id='s-top-left']/a[1]"))
logger.info(type(ele))
logger.info(ele)
WebDriverCommon.set_action()
# WebDriverCommon.right_click(ele, 3) # 该功能有Bug
WebDriverCommon.wait_implicitly(3) # 该功能不生效
search_input = (By.ID, 'kw')
search_button = (By.ID, 'su')
WebDriverCommon.find_element(*search_input).send_keys("郑宇")
WebDriverCommon.find_element(*search_button).click()
time.sleep(3)
WebDriverCommon.close_driver() | 51job-autotest-framework | /51job_autotest_framework-0.3.1-py3-none-any.whl/rolling_king/jason/webdriver/webdriver_common.py | webdriver_common.py |
import math
# 自定义函数
# def functionname( parameters ):
# "函数_文档字符串"
# function_suite
# return [expression]
# 可更改(mutable)与不可更改(immutable)对象
# 在 python 中,strings, tuples, 和 numbers 是不可更改的对象,而 list,dict 等则是可以修改的对象。
# 不可变类型:变量赋值 a=5 后再赋值 a=10,这里实际是新生成一个 int 值对象 10,再让 a 指向它,而 5 被丢弃,不是改变a的值,相当于新生成了a。
# 可变类型:变量赋值 la=[1,2,3,4] 后再赋值 la[2]=5 则是将 list la 的第三个元素值更改,本身la没有动,只是其内部的一部分值被修改了。
#
# python 函数的参数传递:
# 不可变类型:类似 c++ 的值传递,如 整数、字符串、元组。如fun(a),传递的只是a的值,没有影响a对象本身。比如在 fun(a)内部修改 a 的值,只是修改另一个复制的对象,不会影响 a 本身。
# 可变类型:类似 c++ 的引用传递,如 列表,字典。如 fun(la),则是将 la 真正的传过去,修改后fun外部的la也会受影响
# 不可变类型 是 值传递;可变类型 是 引用传递。
def ChangeInt(a):
a = 10
b = 2
ChangeInt(b)
print("b = ", b) # 结果是 2,因为不可变类型 是 值传递。
# 可写函数说明
def changeme(mylist):
"""修改传入的列表"""
mylist.append([1, 2, 3, 4])
print("函数内取值: ", mylist)
return
# 调用changeme函数
mylist = [10, 20, 30]
changeme(mylist)
print("函数外取值: ", mylist)
print(len(mylist))
# 参数种类
# 正式参数类型:必选参数、默认参数、可变参数、命名关键字参数、关键字参数 共计5种。(方法定义时,也按此顺序!)
# 必备参数须以正确的顺序传入函数。调用时的数量必须和声明时的一样。
# changeme() # 将会报错,缺少必要参数: changeme() missing 1 required positional argument: 'mylist'
# 使用关键字参数允许函数调用时参数的顺序与声明时不一致,因为 Python 解释器能够用参数名匹配参数值。
def printinfo(name, age):
"打印任何传入的字符串"
print("Name: ", name)
print("Age ", age)
return
# 调用printinfo函数
printinfo(age=50, name="miki")
# 默认参数的值如果没有传入,则被认为是默认值。
def printinfo1(name, age=0):
"""打印任何传入的字符串"""
print("Name: ", name)
print("Age ", age)
return
printinfo1("Jason")
printinfo1(name="Jason")
printinfo1(age=10, name="Jason")
# 加了星号(*)的变量名会存放所有未命名的变量参数。不定长参数, 声明时不会命名。
def printinfo(arg1, *vartuple):
"打印任何传入的参数"
print("输出: ", arg1)
for var in vartuple:
print(var)
printinfo(10)
printinfo(70, 60, 50)
nums = [1,2,3]
printinfo(nums) # 传入一个list,相当于传了一个参数,对应方法的arg1;没有传入后面的可变参数
printinfo(*nums) # 在入参list前添加*,变成可变参数,就是list的各个元素,相当于传入了三个参数。
# *args 和 **kwargs 主要用于函数定义。
# 你可以将不定数量的参数传递给一个函数。不定的意思是:预先并不知道, 函数使用者会传递多少个参数给你, 所以在这个场景下使用这两个关键字。其实并不是必须写成 *args 和 **kwargs。 *(星号) 才是必须的. 你也可以写成 *ar 和 **k 。而写成 *args 和**kwargs 只是一个通俗的命名约定。
# python函数传递参数的方式有两种:位置参数(positional argument)、关键词参数(keyword argument)
#
# *args 与 **kwargs 的区别,两者都是 python 中的可变参数:
#
# *args 表示任何多个无名参数(可变参数),它本质是一个 tuple
# **kwargs 表示关键字参数,它本质上是一个 dict
#
# 如果同时使用 *args 和 **kwargs 时,必须 *args 参数列要在 **kwargs 之前。
def person(name, age, **kw):
print('name:', name, 'age:', age, 'other:', kw)
person("zy", 30, city='Beijing')
extra = {'city': 'Beijing', 'job': 'Engineer'}
person("smm", 28, **extra) # 必须通过**将dict转为关键字参数。
# 命名关键字参数
# 限制关键字参数的名字,就可以用命名关键字参数
# (1)在没有可变参数的情况下,命名关键字参数需要一个特殊分隔符*,*后面的参数被视为命名关键字参数
def person1(name, age, *, city, job):
print(name, age, city, job)
# (2)在存在可变参数的情况下,可变参数后面跟着的命名关键字参数就不再需要一个特殊分隔符*了。
def person2(name, age, *args, city, job):
print(name, age, args, city, job)
# 对于任意函数,都可以通过类似func(*args, **kw)的形式调用它,无论它的参数是如何定义的。
def f1(a, b, c=0, *args, **kw):
print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)
def f2(a, b, c=0, *, d, **kw):
print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)
args = (1, 2, 3, 4)
kw = {'d': 99, 'x': '#'}
f1(*args, **kw) # a = 1 b = 2 c = 3 args = (4,) kw = {'d': 99, 'x': '#'}
args = (1, 2, 3)
kw = {'d': 88, 'x': '#'}
f2(*args, **kw) # a = 1 b = 2 c = 3 d = 88 kw = {'x': '#'}
# 匿名函数:python 使用 lambda 来创建匿名函数。
# lambda [arg1 [,arg2,.....argn]]:expression
# 可写函数说明
sum = lambda arg1, arg2: arg1 + arg2
# 调用sum函数
print("相加后的值为 : ", sum(10, 20))
print("相加后的值为 : ", sum(20, 20))
# 变量作用域:全局变量 和 局部变量
total = 0 # 这是一个全局变量
# 可写函数说明
def sum(arg1, arg2):
# 返回2个参数的和."
total = arg1 + arg2 # total在这里是局部变量, 是一个定义的新变量(局部变量且名为total)
print("函数内是局部变量 : ", total) # 30
return total
# 调用sum函数
sum(10, 20)
print("函数外是全局变量 : ", total) # 0
total = sum(10, 20)
print("函数外是全局变量 : ", total) # 30
# Python 模块
# Python 模块(Module),是一个 Python 文件,以 .py 结尾,包含了 Python 对象定义和Python语句。
# 请注意,每一个包目录下面都会有一个__init__.py的文件,这个文件是必须存在的,否则,Python就把这个目录当成普通目录,而不是一个包。
# __init__.py可以是空文件,也可以有Python代码,因为__init__.py本身就是一个模块,而它的模块名就是mycompany。
# 类似的,可以有多级目录,组成多级层次的包结构。比如如下的目录结构:
# mycompany
# ├─ web
# │ ├─ __init__.py
# │ ├─ utils.py
# │ └─ www.py
# ├─ __init__.py
# ├─ abc.py
# └─ utils.py
# 模块的引入
# 模块定义好后,我们可以使用 import 语句来引入模块,语法如下:
# import module1[, module2[,... moduleN]]
# 当解释器遇到 import 语句,如果模块在当前的搜索路径就会被导入。
# 搜索路径是一个解释器会先进行搜索的所有目录的列表。
# 需要把import命令放在脚本的顶端.
# 引入模块后,通过 模块名.函数名 方式调用模块中的函数。
# from…import 语句
# Python 的 from 语句让你从模块中导入一个指定的部分到当前命名空间中。语法如下:
# from modname import name1[, name2[, ... nameN]]
# from modname import *
# 自己创建模块时要注意命名,不能和Python自带的模块名称冲突。
# 例如,系统自带了sys模块,自己的模块就不可命名为sys.py,否则将无法导入系统自带的sys模块。
# sys模块有一个argv变量,用list存储了命令行的所有参数。argv至少有一个元素,因为第一个参数永远是该.py文件的名称。
# 在命令行运行hello模块文件时,Python解释器把一个特殊变量__name__置为__main__,
# 而如果在其他地方导入该hello模块时,if判断将失效。
# $ python hello.py Michael 的 参数Michael可以被sys.argv这个list获取到。
# 搜索路径
# 当你导入一个模块,Python 解析器对模块位置的搜索顺序是:
# 1、当前目录
# 2、如果不在当前目录,Python 则搜索在 shell 变量 PYTHONPATH 下的每个目录。
# 3、如果都找不到,Python会察看默认路径。UNIX下,默认路径一般为/usr/local/lib/python/。
# 模块搜索路径存储在 system 模块的 sys.path 变量中。变量里包含当前目录,PYTHONPATH和由安装过程决定的默认目录。
# PYTHONPATH 变量
# 作为环境变量,PYTHONPATH 由装在一个列表里的许多目录组成。PYTHONPATH 的语法和 shell 变量 PATH 的一样。
# 命名空间和作用域
# 变量是拥有匹配对象的名字(标识符)。命名空间是一个包含了变量名称们(键)和它们各自相应的对象们(值)的字典。
# 一个 Python 表达式可以访问局部命名空间和全局命名空间里的变量。如果一个局部变量和一个全局变量重名,则局部变量会覆盖全局变量。
# 每个函数都有自己的命名空间。类的方法的作用域规则和通常函数的一样。
# Python 会智能地猜测一个变量是局部的还是全局的,它假设任何在函数内赋值的变量都是局部的。
# 因此,如果要给函数内的全局变量赋值,必须使用 global 语句。
# global VarName 的表达式会告诉 Python, VarName 是一个全局变量,这样 Python 就不会在局部命名空间里寻找这个变量了。
Money = 2000
def AddMoney():
# 想改正代码就取消以下注释:
global Money
Money = Money + 1
print(Money)
AddMoney()
print(Money)
# dir()函数
# dir() 函数一个排好序的字符串列表,内容是一个模块里定义过的名字。
# 返回的列表容纳了在一个模块里定义的所有模块,变量和函数。获得一个对象的所有属性和方法。
content = dir(math)
print(content)
# 特殊字符串变量__name__指向模块的名字:
print(math.__name__)
# __file__指向该模块的导入文件名:
print(math.__file__)
# globals() 和 locals() 函数
# 根据调用地方的不同,globals() 和 locals() 函数可被用来返回全局和局部命名空间里的名字。
# 如果在函数内部调用 locals(),返回的是所有能在该函数里访问的命名。
# 如果在函数内部调用 globals(),返回的是所有在该函数里能访问的全局名字。
# 两个函数的返回类型都是字典。所以名字们能用 keys() 函数摘取。
def func():
a = 1
b = 2
print(globals())
print(globals().keys())
print(locals())
print(locals().keys())
func()
# reload() 函数
# 当一个模块被导入到一个脚本,模块顶层部分的代码只会被执行一次。该函数会重新导入之前导入过的模块。
# 语法:reload(module_name), 入参不是字符串,就是module_name,譬如:reload(math)
# Python中的包
# 包是一个分层次的文件目录结构,它定义了一个由模块及子包,和子包下的子包等组成的 Python 的应用环境。
# 简单来说,包就是文件夹,但该文件夹下必须存在 __init__.py 文件, 该文件的内容可以为空。__init__.py 用于标识当前文件夹是一个包。
# Python 文件I/O
# 读取键盘输入
# raw_input([prompt]) 函数从标准输入读取一个行,并返回一个字符串(去掉结尾的换行符):
# 但是 input 可以接收一个Python表达式作为输入,并将运算结果返回
# 打开和关闭文件
# open 函数: 你必须先用Python内置的open()函数打开一个文件,创建一个file对象,相关的方法才可以调用它进行读写
# file object = open(file_name [, access_mode][, buffering])
fileObj = open("/Users/jasonzheng/Desktop/CAT告警.txt", mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)
print("文件名: ", fileObj.name)
print("是否已关闭 : ", fileObj.closed)
print("访问模式 : ", fileObj.mode)
# print("末尾是否强制加空格 : ", fileObj.softspace)
firstLine = fileObj.readline()
print(firstLine)
# tell()方法告诉你文件内的当前位置, 换句话说,下一次的读写会发生在文件开头这么多字节之后。
# seek(offset [,from])方法改变当前文件的位置。
# 查找当前位置
position = fileObj.tell()
print("当前文件位置 : ", position)
print(fileObj.seek(35, 0))
print(fileObj.read(5))
# 重命名和删除文件
# Python的os模块提供了帮你执行文件处理操作的方法
# import os
# os.renames("oldfilename.txt", "newfilename.txt")
# os.remove("existfilename.txt")
# os.mkdir("newdirectory")
# os.chdir("newdirname")
# print(os.getcwd())
# os.rmdir('newdirname')
if fileObj.closed :
print("File has been already closed.")
else:
fileObj.close()
print("File is closed now.")
print(fileObj.closed)
# Python 异常处理
# 什么是异常?
# 异常即是一个事件,该事件会在程序执行过程中发生,影响了程序的正常执行。
# 一般情况下,在Python无法正常处理程序时就会发生一个异常。
# 异常是Python对象,表示一个错误。
# 当Python脚本发生异常时我们需要捕获处理它,否则程序会终止执行。
# 以下为简单的try....except...else的语法:
# try:
# <语句> #运行别的代码
# except <名字>:
# <语句> #如果在try部份引发了'name'异常
# except <名字>,<数据>:
# <语句> #如果引发了'name'异常,获得附加的数据
# else:
# <语句> #如果没有异常发生
try:
fh = open("testfile", "w")
fh.write("这是一个测试文件,用于测试异常!!")
except IOError:
print("Error: 没有找到文件或读取文件失败")
else:
print("内容写入文件成功")
fh.close()
def exp_func():
print("raise IOError exception")
raise IOError("my io error")
try:
print("try body")
exp_func()
except IOError as err:
print("get IOError exception")
print("OS error: {0}".format(err))
# raise #抛出
else:
print("else block")
finally:
print("finally block") | 51job-autotest-framework | /51job_autotest_framework-0.3.1-py3-none-any.whl/rolling_king/jason/python/PythonTest.py | PythonTest.py |
import math
import cmath
from collections.abc import Iterable
from collections.abc import Iterator
import argparse
# --- Branching, console input, and character/byte conversions (demo). ---
# NOTE(review): tutorial code executed at import time; it blocks on input()
# and writes to stdout.
if True:
    print("真")
else:
    print("假")
strVar = input("Please input a number:")  # input() always returns a str
print("strVar is:"+strVar)
print(type(strVar))
num = int(strVar)  # explicit str -> int conversion (ValueError if not numeric)
print(type(num))
print(num)
# chr() takes an integer code point and returns the corresponding character.
print("ASCII码转字符:"+chr(49))
# ord() is the inverse: character -> integer code point.
print("字符转ASCII码:", ord("A"))
print(len('ABC')) # 3 -- len() of a str counts characters
strEn = 'ABC'.encode('ascii')  # str -> bytes using the ASCII encoding
print(strEn)
print(len(strEn)) # 3 -- one byte per ASCII character
print(len('中文')) # 2 -- two characters
strCn = '中文'.encode('utf-8')  # str -> bytes using the UTF-8 encoding
print(strCn)
print(len(strCn)) # 6 -- each of these CJK characters takes 3 bytes in UTF-8
print("bytes转str:", b'ABC'.decode('ascii'))  # bytes -> str
print("bytes转str:", b'\xe4\xb8\xad\xe6\x96\x87'.decode('utf-8'))
# To make the interpreter read a source file as UTF-8, files conventionally
# start with these two lines:
#   #!/usr/bin/env python3   (tells Linux/macOS this is a Python program; ignored on Windows)
#   # -*- coding: utf-8 -*-  (tells Python to decode the source as UTF-8)
counter = 100  # integer assignment
miles = 1000.0  # float
name = "John"  # string
print(counter, miles, name)  # print() separates its arguments with spaces
counter1, miles1, name1 = 100, 1000.0, "Jason"  # multiple assignment in one statement
print(counter1, miles1, name1)
# Standard data types: Numbers, String, List, Tuple, Dictionary.
var1 = 10
print(var1)
# `del` removes the *name binding* (the reference), not necessarily the object.
del var1
# print(var1)  # would raise NameError: name 'var1' is not defined
var1 = 20
print(var1)
s = "abcdef"
print(s[0:2]) # slice includes the start, excludes the end (like Java's substring)
print(s[-6:-4]) # negative indices count from the end; both lines print 'ab'
# --- Lists (mutable sequences). ---
# NOTE(review): the names `list` and `tuple` below shadow the builtins for the
# rest of the script; kept as-is here because later lines reuse them.
list = ['runoob', 786, 2.23, 'john', 70.2]
tinylist = [123, 'john']
print(list)  # the whole list
print(list[0])  # first element
print(list[1:3])  # second through third element (end index excluded)
print(list[2:])  # from the third element to the end
print(tinylist * 2)  # the list repeated twice
print(list + tinylist)  # concatenation of the two lists
list.append('Google')  # append() adds an element at the end
list.append('Runoob')
print(list)
del(list[-2])  # remove the second-to-last element
print(list)
# --- Tuples: like lists but immutable (read-only); elements cannot be
# reassigned after creation.
tuple = ('runoob', 786, 2.23, 'john', 70.2)
tinytuple = (123, 'john')
print(tuple)  # the whole tuple
print(tuple[0])  # first element
print(tuple[1:3])  # second through third element (end index excluded)
print(tuple[2:])  # from the third element to the end
print(tinytuple * 2)  # the tuple repeated twice
print(tuple + tinytuple)  # concatenation builds a new tuple
#tuple[2] = 1000  # illegal: TypeError: 'tuple' object does not support item assignment
list[2] = 1000  # legal: lists are mutable
tup1 = ()  # empty tuple
tup1 = (50,)  # a single-element tuple requires a trailing comma
# Tuple elements cannot be modified, but tuples can be concatenated:
tup1 = (12, 34.56)
tup2 = ('abc', 'xyz')
tup3 = tup1 + tup2
print("tup3=", tup3)
# Individual tuple elements cannot be deleted, but `del` drops the whole tuple:
del tup3
# print("After deleting tup3:", tup3)  # NameError: name 'tup3' is not defined
# --- Dictionaries (unordered key/value mappings; lists are ordered). ---
# BUGFIX: the original named this variable ``dict``, shadowing the builtin
# for the whole span; renamed to ``demo_dict`` so the builtin stays usable.
# Printed output is unchanged.
demo_dict = {}
demo_dict['one'] = "This is one"
demo_dict[2] = "This is two"
tinydict = {'name': 'runoob', 'code':6734, 'dept': 'sales'}
print(demo_dict['one'])  # value stored under key 'one'
print(demo_dict[2])  # value stored under key 2
print(tinydict)  # the whole dict
print(tinydict.keys())  # all keys
print(tinydict.values())  # all values
demo_dict = {'Name': 'Zara', 'Age': 7, 'Class': 'First'}
demo_dict['Age'] = 8  # update an existing entry
demo_dict['School'] = "RUNOOB"  # add a new entry
del demo_dict['Name']  # remove the entry keyed 'Name'
demo_dict.clear()  # remove every entry
del demo_dict  # delete the dict object's name binding
# Keys must be immutable (numbers, strings, tuples); a list is not allowed:
# dict = {['Name']: 'Zara', 'Age': 7}  # TypeError: unhashable type: 'list'
# Sets: unordered collections of unique, hashable elements.
s = {1, 2, 3}
print("Set集合:", s)
# Duplicates are discarded automatically when the set is built.
s = {1, 1, 2, 2, 3, 3}
print(s)
s.add(5)
print(s)
s.remove(1)
print(s)
# Sets support the usual mathematical operations (intersection, union, ...).
s1 = {2, 5, 6, 8}
print("交集:", s & s1)
print("并集:", s | s1)
# Conditional statements: if / if-else / if-elif-else.
num = 9
if 0 <= num <= 10:  # is the value within 0..10? (chained comparison)
    print('hello')
# prints: hello
num = 10
if not 0 <= num <= 10:  # is the value outside 0..10?
    print('hello')
else:
    print('undefine')
# prints: undefine
num = 8
# is the value within 0..5, or within 10..15?
if 0 <= num <= 5 or 10 <= num <= 15:
    print('hello')
elif num < 0:
    print("负数")
else:
    print('undefine')
# prints: undefine
# --- while loop: repeats while the condition stays true. ---
count = 0
while count < 9:
    print('The count is:', count)
    count = count + 1
print("Good bye!")
# while..else: the else block would run once the condition becomes false
# (i.e. on normal termination, not via break).
# NOTE(review): this example is a plain while; it does not actually use else.
count = 0
while count < 5:
    print(count, "is less than 5")
    count = count + 1
print("Count is ", count, " now.")
# --- for loop, form 1: iterate directly over the items. ---
for letter in 'Python':    # example 1: a string, character by character
    print('当前字母 :', letter)
fruits = ['banana', 'apple', 'mango']
for fruit in fruits:    # example 2: a list
    print('当前水果 :', fruit)
print("Good bye!")
# --- for loop, form 2: iterate by index. ---
# (Idiomatic Python would use enumerate() instead.)
fruits = ['banana', 'apple', 'mango']
for index in range(len(fruits)):
    print("Current is ", fruits[index])
for index in range(0, len(fruits)):  # range(0, n) is equivalent to range(n)
    print("Current is ", fruits[index])
print("Good Bye")
# --- for..else: the else block runs when the loop completes normally,
# i.e. when it was not terminated by break.
fruits = ['banana', 'apple', 'mango']
for index in range(len(fruits)):
    if index == 3:  # never true here (indices run 0..2), so break never fires
        break
    print("index=", index, "'s fruit is ", fruits[index])
else:
    print("Current index = ", index)
# Anything usable in a for loop is an Iterable.
print("是否可迭代:", isinstance('abc', Iterable))  # str is iterable -> True
print("是否可迭代:", isinstance([1,2,3], Iterable))  # list is iterable -> True
print("是否可迭代:", isinstance(123, Iterable))  # an int is not iterable -> False
# enumerate() yields (index, value) pairs:
for i, value in enumerate(['A', 'B', 'C']):
    print(i, value)
# 0 A
# 1 B
# 2 C
# --- List comprehensions. ---
print("列表生成式")
print([x * x for x in range(1, 11)])
# An `if` after the `for` is a filter and cannot take an `else`:
print([x * x for x in range(1, 11) if x % 2 == 0])
# An `if..else` *before* the `for` is an expression and must produce a value:
print([x if x % 2 == 0 else -x for x in range(1, 11)])
print([m + n for m in 'ABC' for n in 'XYZ'])  # nested loops
# A generator computes values lazily, one at a time, as they are requested.
# Method 1: replace the [] of a list comprehension with () to get a generator:
g = (x * x for x in range(1, 11))
print(g)
print("使用next函数打印generator", next(g))  # pull a single value with next()
for i in g:  # the for loop consumes the remaining values
    print("使用for循环打印generator:", i)
# Method 2 (generator function): a function whose body contains `yield` is a
# generator function. Each next() call runs the body until the next yield and
# suspends there; the next call resumes just after that yield.
# The odd() generator below yields the numbers 1, 3, 5 in turn.
def odd():
    """Generator demo: yield 1, 3, 5, printing a marker before each value.

    The ``return`` value is not produced by iteration; it travels as the
    ``value`` attribute of the StopIteration raised once the generator is
    exhausted.
    """
    print('step 1')
    yield 1
    print('step 2')
    yield 3
    print('step 3')
    yield 5
    return "Done"
gen_func = odd()  # "calling" a generator function returns a generator object
# print(next(gen_func))
# print("-------")
# print(next(gen_func))
# print("-------")
# print(next(gen_func))
# print("-------")
# print(next(gen_func))
# A for loop consumes the generator but never sees its return value.
for n in gen_func:
    print(n)
    print("-------")
print("for循环打印generator完成")
# To get the return value, catch StopIteration and read its `value` attribute.
# BUGFIX: the for loop above already exhausted gen_func, so the original
# code's next() raised a StopIteration whose value is None and printed
# "Generator return value:  None" instead of "Done". Start from a fresh
# generator so the demo actually shows the return value.
gen_func = odd()
while True:
    try:
        x = next(gen_func)
        print("g: ", x)
    except StopIteration as e:
        print("Generator return value: ", e.value)
        break
# Iterator: any object that can be fed to next() repeatedly, returning the
# next value each time.
print(isinstance((x for x in range(10)), Iterator))  # True: generators are iterators
print(isinstance([], Iterator))  # False: a list is iterable but not an iterator
# Generators are Iterators, but list/dict/str, while Iterable, are not.
# An Iterator represents a lazily computed stream: its length is unknown in
# advance and the next value is only computed when next() asks for it, so an
# Iterator may even represent an infinite stream.
# iter() turns an Iterable (list, dict, str, ...) into an Iterator:
print(isinstance(iter([]), Iterator))  # True
print(isinstance(iter("abc"), Iterator))  # True
# pass statement: does nothing; commonly used as a placeholder.
for letter in 'Python':
    if letter == 'h':
        pass
        print('这是 pass 块')
    print('当前字母 :', letter)
print("Good bye!")
# --- Numbers. ---
# Number objects are immutable: "changing" a number rebinds the name to a
# newly allocated object.
var = 0
var1 = 1
var2 = 10
# del removes the name bindings (several at once is allowed):
del var
del var1, var2
# --- Number / type conversions ---
# int(x [,base])        convert x to an integer
# long(x [,base])       convert x to a long integer (Python 2 only; removed in 3)
# float(x)              convert x to a float
# complex(real [,imag]) build a complex number
# str(x)                convert object x to a string
# repr(x)               convert object x to an expression string
# eval(str)             evaluate a string as a Python expression, return the result
# tuple(s)              convert sequence s to a tuple
# list(s)               convert sequence s to a list
# chr(x)                convert an integer code point to a character
# unichr(x)             integer -> Unicode char (Python 2 only; use chr in 3)
# ord(x)                convert a character to its integer code point
# hex(x)                convert an integer to a hexadecimal string
# oct(x)                convert an integer to an octal string
intVal = 0
print(type(intVal))  # <class 'int'>
strVal = str(intVal)
print(type(strVal))  # <class 'str'>
print(hex(15))  # 0xf
# --- Math functions ---
# Common numeric functions live in the math module (real numbers) and the
# cmath module (same API, but operating on complex numbers). Both must be
# imported before use: import math / import cmath (done at the top of file).
print(dir(math))  # names defined by the math module
print(dir(cmath))  # names defined by the cmath module
# 字符串
var1 = 'Hello World!'
var2 = "Python Runoob"
print("var1[0]: ", var1[0])
print("var2[1:5]: ", var2[1:5])
# 成员运算符
if ("ll" in "Hello"):
print("Hello 包含 ll")
else:
print("错误")
# 原始字符串
print("反转义")
print(r'\n') # 反转义
# 字符串格式化使用与 C 中 sprintf 函数一样的语法
print("My name is %s and weight is %d kg!" % ('Zara', 21))
# Python 三引号允许一个字符串跨多行,字符串中可以包含换行符、制表符以及其他特殊字符。
# 三引号让程序员从引号和特殊字符串的泥潭里面解脱出来,当你需要一块HTML或者SQL时,这时当用三引号标记。
errHTML = '''
<HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>ERROR</H3>
<B>%s</B><P>
<FORM><INPUT TYPE=button VALUE=Back
ONCLICK="window.history.back()"></FORM>
</BODY></HTML>
'''
print(errHTML)
# Unicode 字符串
# 定义一个 Unicode 字符串
uVar = u'Hello World !'
print(uVar)
# 如果你想加入一个特殊字符,可以使用 Python 的 Unicode-Escape 编码
uVar1 = u'Hello\u0020World !'
print(uVar1)
# Python 日期和时间
import time # 引入time模块
ticks = time.time()
print("当前时间戳为:", ticks) # 当前时间戳为: 1603089755.566846
# 时间元祖:很多Python函数用一个元组装起来的9组数字处理时间,也就是struct_time元组。
localtime = time.localtime(time.time())
print("本地时间为:", localtime) # time.struct_time(tm_year=2020, tm_mon=10, tm_mday=19, tm_hour=14, tm_min=47, tm_sec=46, tm_wday=0, tm_yday=293, tm_isdst=0)
print(time.localtime()) # 等同
asctime = time.asctime(localtime)
print("asc本地时间为:", asctime) # Mon Oct 19 14:47:46 2020
# 使用 time 模块的 strftime 方法来格式化日期
# 格式化成2016-03-20 11:45:39形式
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
# 格式化成Sat Mar 28 22:24:24 2016形式
print(time.strftime("%a %b %d %H:%M:%S %Y", time.localtime()))
# 将格式字符串转换为时间戳
a = "Sat Mar 28 22:24:24 2016"
print(time.mktime(time.strptime(a, "%a %b %d %H:%M:%S %Y")))
# 处理年历和月历
import calendar
cal = calendar.month(2020, 10)
print("2020年10月的日历:\n", cal)
# Time 模块: 内置函数,既有时间处理的,也有转换时间格式
# time.clock() # Python 3.8 已移除 clock() 方法,改用下方:
print(time.process_time())
# ArgumentParser
print("ArgumentParser")
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('int_val', metavar='N', type=int, nargs='+', help='an integer for the accumulator')
parser.add_argument("square", help="display a square of a given number", type=int)
args = parser.parse_args()
print("输入的int_val={0}".format(args.int_val))
print("输入的square={0}".format(args.square))
print(args.square**2)
from functools import reduce
import functools
# ################### 闭包 ###################
# 闭包的定义:在函数嵌套的前提下,内部函数使用了外部函数的变量,并且外部函数返回了内部函数,这种程序结构称为闭包。
# 闭包的构成条件:
# 1、在函数嵌套(函数里面再定义函数)的前提下
# 2、内部函数使用了外部函数的变量(还包括外部函数的参数)
# 3、外部函数返回了内部函数
# 定义一个外部函数
def func_out(num1):
# 定义一个内部函数
def func_inner(num2):
# 内部函数使用了外部函数的变量(num1)
result = num1 + num2
print("结果是:", result)
# 外部函数返回了内部函数,这里返回的内部函数就是闭包
return func_inner
# 创建闭包实例
f = func_out(1)
# 执行闭包
f(2) # 3
f(3) # 4
# 若要修改外部函数的变量,则内部函数中应该:
# nonlocal num1 # 告诉解释器,此处使用的是 外部变量a
# 修改外部变量num1
# num1 = 10
# ################### 装饰器 ###################
# 装饰器的定义:就是给已有函数增加额外功能的函数,它本质上就是一个闭包函数。
# 代码运行期间动态增加功能的方式,称之为“装饰器”(Decorator)。
# 装饰器的功能特点:
# 1、不修改已有函数的源代码
# 2、不修改已有函数的调用方式
# 3、给已有函数增加额外的功能
# 添加一个登录验证的功能
def check(fn):
def inner():
print("请先登录....")
fn()
return inner
def comment():
print("发表评论")
# 使用装饰器来装饰函数
comment = check(comment)
comment()
'''
执行结果
请先登录....
发表评论
'''
# 装饰器的基本雏形
# def decorator(fn): # fn:目标函数.
# def inner():
# '''执行函数之前'''
# fn() # 执行被装饰的函数
# '''执行函数之后'''
# return inner
# 代码说明:
# 闭包函数有且只有一个参数,必须是函数类型,这样定义的函数才是装饰器。
# 写代码要遵循开放封闭原则,它规定已经实现的功能代码不允许被修改,但可以被扩展。
# 装饰器的语法糖写法
# Python给提供了一个装饰函数更加简单的写法,那就是语法糖,语法糖的书写格式是: @装饰器名字,通过语法糖的方式也可以完成对已有函数的装饰
# 使用语法糖方式来装饰函数
@check
def comment():
print("发表评论")
# @check 等价于 comment = check(comment)
# 装饰器的执行时间是加载模块时立即执行。
# ###### 装饰带有参数的函数 ######
def logging(fn):
def inner(num1, num2):
print("--正在努力计算--")
fn(num1, num2)
return inner
# 使用装饰器装饰函数
@logging
def sum_num(a, b):
result = a + b
print(result)
sum_num(1, 2)
'''
运行结果:
--正在努力计算--
3
'''
# ###### 装饰带有返回值的函数 ######
# 添加输出日志的功能
def logging(fn):
def inner(num1, num2):
print("--正在努力计算--")
result = fn(num1, num2)
return result
return inner
# 使用装饰器装饰函数
@logging
def sum_num(a, b):
result = a + b
return result
result = sum_num(1, 2)
print(result)
'''
运行结果:
--正在努力计算--
3
'''
# ###### 装饰带有不定长参数的函数 ######
# 添加输出日志的功能
def logging(func):
def inner(*args, **kwargs):
print("--正在努力计算--")
func(*args, **kwargs)
return inner
# 使用语法糖装饰函数
@logging
def sum_num(*args, **kwargs):
result = 0
for value in args:
result += value
for value in kwargs.values():
result += value
print(result)
sum_num(1, 2, a=10)
'''
运行结果:
--正在努力计算--
13
'''
# ###### 通用装饰器 ######
# 通用装饰器 - 添加输出日志的功能
def logging(func):
def inner(*args, **kwargs):
print("--正在努力计算--")
result = func(*args, **kwargs)
return result
return inner
# 使用语法糖装饰函数
@logging
def sum_num(*args, **kwargs):
result = 0
for value in args:
result += value
for value in kwargs.values():
result += value
return result
@logging
def subtraction(a, b):
result = a - b
print(result)
result = sum_num(1, 2, a=10)
print(result)
subtraction(4, 2)
'''
运行结果:
--正在努力计算--
13
--正在努力计算--
2
'''
# log方法作为装饰器,返回替代func方法的wrapper方法,利用@functools.wraps表示,以便让wrapper.__name__等同于func.__name__。
def log(func):
@functools.wraps(func) #import functools才行
def wrapper(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return wrapper
# 针对带参数的decorator:
def log_with_param(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('%s %s():' % (text, func.__name__))
return func(*args, **kw)
return wrapper
return decorator
# ###### 多个装饰器的使用 ######
# 代码说明:多个装饰器的装饰过程是: 离函数最近的装饰器先装饰,然后外面的装饰器再进行装饰,由内到外的装饰过程。
def make_div(func):
    """Decorator: wrap the decorated function's return value in a <div> tag."""
    def wrapped():
        return "".join(("<div>", func(), "</div>"))
    return wrapped
def make_p(func):
    """Decorator: wrap the decorated function's return value in a <p> tag."""
    def wrapped():
        return "".join(("<p>", func(), "</p>"))
    return wrapped
# 装饰过程: 1 content = make_p(content) 2 content = make_div(content)
# content = make_div(make_p(content))
@make_div
@make_p
def content():
return "人生苦短"
result = content()
print(result) # <div><p>人生苦短</p></div>
# ################### 带有参数的装饰器 ###################
# 代码说明:装饰器只能接收一个参数,并且还是函数类型。
# 正确写法:在装饰器外面再包裹上一个函数,让最外面的函数接收参数,返回的是装饰器,因为@符号后面必须是装饰器实例。
# 添加输出日志的功能
def logging(flag):
    """Decorator factory: announce which arithmetic operation is about to run.

    flag "+" prints the addition banner, "-" the subtraction banner; any
    other flag prints nothing. Wraps two-argument functions only.
    """
    banners = {"+": "--正在努力加法计算--", "-": "--正在努力减法计算--"}

    def decorator(fn):
        def inner(num1, num2):
            if flag in banners:
                print(banners[flag])
            return fn(num1, num2)
        return inner
    # Hand the real decorator back to the @logging("...") call site.
    return decorator
# 使用装饰器装饰函数
@logging("+")
def add(a, b):
result = a + b
return result
@logging("-")
def sub(a, b):
result = a - b
return result
result = add(1, 2)
print(result)
result = sub(1, 2)
print(result)
'''
执行结果:
--正在努力加法计算--
3
--正在努力减法计算--
-1
'''
# ################### 类装饰器 ###################
# 类装饰器的介绍:装饰器还有一种特殊的用法就是类装饰器,就是通过定义一个类来装饰函数。
class Check(object):
def __init__(self, fn):
# 初始化操作在此完成
self.__fn = fn
# 实现__call__方法,表示对象是一个可调用对象,可以像调用函数一样进行调用。
def __call__(self, *args, **kwargs):
# 添加装饰功能
print("请先登陆...")
self.__fn()
@Check
def comment():
print("发表评论")
comment()
'''
执行结果:
请先登陆...
发表评论
'''
# 代码说明:
# 1.1@Check 等价于 comment = Check(comment), 所以需要提供一个init方法,并多增加一个fn参数。
# 1.2要想类的实例对象能够像函数一样调用,需要在类里面使用call方法,把类的实例变成可调用对象(callable),也就是说可以像调用函数一样进行调用。
# 1.3在call方法里进行对fn函数的装饰,可以添加额外的功能。
# 函数式编程
# 其一个特点就是,允许把函数本身作为参数传入另一个函数,还允许返回一个函数!
# 函数本身也可以赋值给变量,即:变量可以指向函数。
f = abs # 变量f现在已经指向了abs函数本身。
print(f(-10))
# 高阶函数
# 一个函数就可以接收另一个函数作为参数,这种函数就称之为高阶函数。
# map函数
def func(x):
return x * x
r = map(func, [1,2,3,4]) # map()函数接收两个参数,一个是函数,一个是Iterable
# map将传入的函数依次作用到序列的每个元素,并把结果作为新的Iterator返回。
# Iterator是惰性序列,因此通过list()函数让它把整个序列都计算出来并返回一个list。
print(list(r))
# reduce函数
# reduce把一个函数作用在一个序列[x1, x2, x3, ...]上,这个函数必须接收两个参数。reduce把结果继续和序列的下一个元素做计算。
def fn(x, y):
return x * 10 + y
res = reduce(fn, [1, 3, 5, 7, 9])
print(res)
# map、reduce函数配合使用将str转为int。
DIGITS = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
def str2int(s):
def fn1(x, y):
return x * 10 + y
def char2num(s):
return DIGITS[s]
return reduce(fn1, map(char2num, s))
# 还可以用lambda函数进一步简化成:
# 无需定义fn1这个函数,直接用lambda表达式替换
def char2num(s):
return DIGITS[s]
def str_to_int(s):
return reduce(lambda x, y: x * 10 + y, map(char2num, s))
# filter() 函数
# Python内建的filter()函数用于过滤序列。
# filter()把传入的函数依次作用于每个元素,然后根据返回值是True还是False决定保留还是丢弃该元素。
# 注意到filter()函数返回的是一个Iterator,也就是一个惰性序列。
def is_odd(n):
return n % 2 == 1
res = filter(is_odd, [1, 2, 3, 4, 5, 6])
print(list(res))
def not_empty(s):
return s and s.strip()
print(list(filter(not_empty, ['A', '', ' B', None, 'C ', ' '])))
# 排序算法
# Python内置的sorted()函数就可以对list进行排序:
print(sorted([36, 5, -12, 9, -21]))
# 此外,sorted()函数也是一个高阶函数,它还可以接收一个key函数来实现自定义的排序,例如按绝对值大小排序:
print(sorted([36, 5, -12, 9, -21], key=abs))
# 给sorted传入key函数,即可实现忽略大小写的排序:
print(sorted(['bob', 'about', 'Zoo', 'Credit'], key=str.lower))
# 要进行反向排序,不必改动key函数,可以传入第三个参数reverse=True:
print(sorted(['bob', 'about', 'Zoo', 'Credit'], key=str.lower, reverse=True))
# 按成绩从高到低排序:
L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]
def by_score(t):
return -t[1]
L2 = sorted(L, key=by_score)
print(L2)
# 偏函数
# functools.partial:作用就是,把一个函数的某些参数给固定住(也就是设置默认值),返回一个新的函数,调用这个新函数会更简单。
int2 = functools.partial(int, base=2) # 接收函数对象、*args和**kw这3个参数。入参1是方法名。
# 相当于:
kw = {'base': 2}
int('10010', **kw)
print(int2('1000000')) # int2方法就是把2进制字符串转为integer,相当于int('char', base=2)
# int2函数,仅仅是把base参数重新设定默认值为2,但也可以在函数调用时传入其他值:
print(int2('1000000', base=10))
# 当传入max2 = functools.partial(max, 10)时,把10作为*args的一部分自动加到左边。
max2 = functools.partial(max, 10)
print(max2(5, 6, 7))
# 相当于:
args = (10, 5, 6, 7)
max(*args)
import builtins
# Python 3 可以使用直接使用 super().xxx 代替 super(Class, self).xxx :
class Employee:
empCount = 0
class Member(object):
def __init__(self):
print("This is {0} Constructor.".format("无参"))
member = Member()
class Parent(object):
def myfunc(self):
print("This is {0}'s myfunc method.".format("Parent"))
class SubA(Parent):
def myfunc(self):
print("This is {0}'s myfunc method.".format("SubA"))
super().myfunc()
super(SubA, self).myfunc()
sub = SubA()
sub.myfunc()
class Upper(object):
def myfunc(self):
print("This is {0}'s myfunc method.".format("Upper"))
# class Child(Parent, SubA): # 多个基类之间不能存在继承关系否则将有如下错误:
# TypeError: Cannot create a consistent method resolution
# order (MRO) for bases Parent, SubA
class Child(Parent, Upper):
def __init__(self):
super(Child, self).__init__() # 首先找到 Child 的父类(就是类 Parent),然后把类 Child 的对象转换为类 Parent 的对象
def myfunc(self):
print("This is {0}'s myfunc method.".format("Child"))
super(Child, self).myfunc()
child = Child()
child.myfunc()
print(issubclass(Child, Parent)) # Child类 是 Parent的子类
print(isinstance(child, Child)) # child 是 Child类的实例
print(isinstance(child, Parent)) # child 是 Parent类子类的实例
print(isinstance(child, Upper)) # child 是 Upper类子类的实例
# 类的私有属性
# __private_attrs:两个下划线开头,在类内部的方法中使用时 self.__private_attrs。
# 类的私有方法
# __private_method:两个下划线开头,在类的内部调用 self.__private_methods。
# 单下划线、双下划线、头尾双下划线说明:
# __foo__: 定义的是特殊方法,一般是系统定义名字 ,类似 __init__() 之类的。
# _foo: 以单下划线开头的表示的是 protected 类型的变量,即保护类型只能允许其本身与子类进行访问,不能用于 from module import *
# __foo: 双下划线的表示的是私有类型(private)的变量, 只能是允许这个类本身进行访问了。
# Python 中只有模块(module),类(class)以及函数(def、lambda)才会引入新的作用域
# 有四种作用域:
# L(Local):最内层,包含局部变量,比如一个函数/方法内部。
# E(Enclosing):包含了非局部(non-local)也非全局(non-global)的变量。比如两个嵌套函数,一个函数(或类) A 里面又包含了一个函数 B ,那么对于 B 中的名称来说 A 中的作用域就为 nonlocal。
# G(Global):当前脚本的最外层,比如当前模块的全局变量。
# B(Built-in): 包含了内建的变量/关键字等。,最后被搜索
# 实例熟悉 与 类属性
# 实例属性属于各个实例所有,互不干扰;
# 类属性属于类所有,所有实例共享一个属性;
# 不要对实例属性和类属性使用相同的名字,否则将产生难以发现的错误。
# 可以给该实例绑定任何属性和方法,这就是动态语言的灵活性
# class Student(object):
# pass
#
# 然后,尝试给实例绑定一个属性:
# >>> s = Student()
# >>> s.name = 'Michael'
# >>> print(s.name)
# Michael
# 还可以尝试给实例绑定一个方法:
# >>> def set_age(self, age):
# ... self.age = age
# ...
# >>> from types import MethodType
# >>> s.set_age = MethodType(set_age, s)
# >>> s.set_age(25)
# >>> s.age
# 25
# 给一个实例绑定的方法,对另一个实例是不起作用的
# 为了给所有实例都绑定方法,可以给class绑定方法:
# >>> def set_score(self, score):
# ... self.score = score
# ...
# >>> Student.set_score = set_score
#
# 给class绑定方法后,所有实例均可调用。
# 使用__slots__ 限制某类型的实例可以添加的属性
# 想要限制实例的属性怎么办?Python允许在定义class的时候,定义一个特殊的__slots__变量,来限制该class实例能添加的属性。
# class Student(object):
# __slots__ = ('name', 'age')
#
# 然后,我们试试:
# >>> s = Student()
# >>> s.name = 'Michael'
# >>> s.age = 25
# >>> s.score = 99 # 试图绑定score将得到AttributeError的错误。
# 使用__slots__要注意,__slots__定义的属性仅对当前类的实例起作用,对继承的子类是不起作用的。
# @property装饰器
# 有没有既能检查参数,又可以用类似属性这样简单的方式来访问类的变量呢?
# Python内置的@property装饰器就是负责把一个方法变成属性调用的。
class Student(object):
    """Holds a validated exam score in the private attribute ``_score``."""

    @property
    def score(self):
        """Current score; settable only with an int between 0 and 100."""
        return self._score

    @score.setter
    def score(self, value):
        # Reject non-integers first, then out-of-range values.
        if not isinstance(value, int):
            raise ValueError('score must be an integer!')
        if not 0 <= value <= 100:
            raise ValueError('score must between 0 ~ 100!')
        self._score = value
# 把一个getter方法变成属性,只需要加上@property就可以了
# @property本身又创建了另一个装饰器@score.setter,负责把一个setter方法变成属性赋值
# >>> s = Student()
# >>> s.score = 60
# >>> s.score
# 60
# 还可以定义只读属性,只定义getter方法,不定义setter方法就是一个只读属性。
# 查看到底预定义了哪些变量:
print(dir(builtins))
# global 关键字
num = 1
def fun1():
global num # 需要使用 global 关键字声明
print(num)
num = 123
print(num)
fun1()
print(num)
# 如果要修改嵌套作用域(enclosing 作用域,外层非全局作用域)中的变量则需要 nonlocal 关键字
def outer():
    """Demonstrate ``nonlocal``: the nested function rebinds the enclosing
    variable, so both prints show 110."""
    num = 10

    def inner():
        # Rebind the enclosing-scope ``num`` rather than creating a local.
        nonlocal num
        num += 100
        print(num)

    inner()
    print(num)
a = 10
def test(a): # a 是 number,不可变对象属于值传递,也就是复制a的值传进来,而不是a本身。
a = a + 1 # 11 = 10 + 1
print(a) # 11
test(a)
print(a) # 10
# 动态语言的“鸭子类型”
# 对于静态语言(例如Java)来说,如果需要传入Animal类型,则传入的对象必须是Animal类型或者它的子类,否则,将无法调用run()方法。
# 对于Python这样的动态语言来说,则不一定需要传入Animal类型。我们只需要保证传入的对象有一个run()方法就可以了
# 获取对象信息。获得一个对象的所有属性和方法
# >>> import types
# >>> def fn():
# ... pass
# ...
# >>> type(fn)==types.FunctionType
# True
# >>> type(abs)==types.BuiltinFunctionType
# True
# >>> type(lambda x: x)==types.LambdaType
# True
# >>> type((x for x in range(10)))==types.GeneratorType
# True
# 获得一个对象的所有属性和方法,可以使用dir()函数,它返回一个包含字符串的list
# 调用len()函数试图获取一个对象的长度,实际上,在len()函数内部,它自动去调用该对象的__len__()方法:
# 自己写的类,如果也想用len(myObj)的话,就自己写一个__len__()方法
# 配合getattr()、setattr()以及hasattr(),我们可以直接操作一个对象的状态
# >>> getattr(obj, 'z', 404) # 获取属性'z',如果不存在,返回默认值404
# 404
# >>> hasattr(obj, 'power') # 有属性'power'吗?
# True
# >>> getattr(obj, 'power') # 获取属性'power'
# <bound method MyObject.power of <__main__.MyObject object at 0x10077a6a0>>
# >>> fn = getattr(obj, 'power') # 获取属性'power'并赋值到变量fn
# >>> fn # fn指向obj.power
# <bound method MyObject.power of <__main__.MyObject object at 0x10077a6a0>>
# >>> fn() # 调用fn()与调用obj.power()是一样的
# 81
# 枚举
from enum import Enum
Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
print(Month.Jan)
# value属性则是自动赋给成员的int常量,默认从1开始计数。
for name, member in Month.__members__.items():
print(name, '=>', member, ',', member.value)
# 更精确地控制枚举类型,可以从Enum派生出自定义类:
from enum import Enum, unique
# @unique装饰器可以帮助我们检查保证没有重复值。
@unique
class Weekday(Enum):
Sun = 0
Mon = 1
Tue = 2
Wed = 3
Thu = 4
Fri = 5
Sat = 6
day1 = Weekday.Mon
print(day1)
print(Weekday['Tue'])
print(Weekday.Sun.value)
print(day1 == Weekday.Mon)
print(Weekday(1))
print(day1 == Weekday(1))
for name, member in Weekday.__members__.items():
print(name, '=>', member)
# 使用元类
# 动态语言和静态语言最大的不同,就是函数和类的定义,不是编译时定义的,而是运行时动态创建的。
print(type(Member))
print(type(member))
# class的定义是运行时动态创建的,而创建class的方法就是使用type()函数。
# type()函数既可以返回一个对象的类型,又可以创建出新的类型。
def fn(self, name='world'): # 先定义函数
print('Hello, %s.' % name)
# 创建一个class对象,type()函数依次传入3个参数:class的名称、继承的父类集合(是一个tuple)、class的方法名称与函数绑定(是一个dict)
Hello = type('Hello', (object,), dict(hello=fn)) # 创建Hello class
h = Hello()
h.hello()
print(type(Hello)) # <class 'type'>
print(type(h)) # <class '__main__.Hello'>
# 除了使用type()动态创建类以外,还可以使用metaclass。
# metaclass,直译为元类。先定义metaclass,就可以创建类,最后创建实例。
# metaclass允许你创建类或者修改类。可以把类看成是metaclass创建出来的“实例”。
# 我们先看一个简单的例子,这个metaclass可以给我们自定义的MyList增加一个add方法:
# 定义ListMetaclass,按照默认习惯,metaclass的类名总是以Metaclass结尾,以便清楚地表示这是一个metaclass:
class ListMetaclass(type):
    """Metaclass that injects an ``add`` method (delegating to list.append)
    into every class it creates."""
    def __new__(cls, name, bases, attrs):
        # cls = this metaclass, name = new class's name, bases = parent
        # tuple, attrs = namespace dict being built. Inject ``add`` before
        # the class object is created.
        attrs['add'] = lambda self, value: self.append(value)
        print("attrs = ", attrs) # e.g. {'__module__': '__main__', '__qualname__': 'MyList', 'add': <function ...>}
        return type.__new__(cls, name, bases, attrs)
# 有了ListMetaclass,我们在定义类的时候还要指示使用ListMetaclass来定制类,传入关键字参数metaclass:
class MyList(list, metaclass=ListMetaclass):
pass
# 当我们传入关键字参数metaclass时,魔术就生效了,它指示Python解释器在创建MyList时,要通过ListMetaclass.__new__()来创建,
# 在此,我们可以修改类的定义,比如,加上新的方法,然后,返回修改后的定义。
# __new__()方法接收到的参数依次是:当前准备创建的类的对象、类的名字、类继承的父类集合、类的方法集合。
L = MyList()
L.add(1) # 普通的list没有add()方法,这个add方法是
L.append(2)
print(L) # [1, 2]
# ### 通过metaclass来实现ORM框架 ###
# class User(Model):
# id = IntegerField('id')
# name = StringField('username')
# email = StringField('email')
# password = StringField('password')
# u = User(id=12345, name='Michael', email='test@orm.org', password='my-pwd')
class Field(object):
    """Base descriptor for an ORM column: holds the column name and SQL type."""

    def __init__(self, name, column_type):
        # Public attributes read by the model layer.
        self.name = name
        self.column_type = column_type

    def __str__(self):
        # Render as <SubclassName:column_name> for debugging.
        return '<%s:%s>' % (type(self).__name__, self.name)
class StringField(Field):
    """Field mapped to a SQL varchar(100) column."""

    def __init__(self, name):
        # Delegate to Field with the SQL type fixed (explicit-form super).
        super(StringField, self).__init__(name, "varchar(100)")
class IntegerField(Field):
    """Field mapped to a SQL bigint column."""

    def __init__(self, name):
        # Delegate to Field with the SQL type fixed (zero-argument super).
        super().__init__(name, "bigint")
# 下一步,就是编写最复杂的ModelMetaclass了:
# 建议使用 "import os" 风格而非 "from os import *"。这样可以保证随操作系统不同而有所变化的 os.open() 不会覆盖内置函数 open()。
import os
print(os.getcwd())
os.chdir("/Users/jasonzheng/PycharmProjects/pythonProject/rolling_king/jason")
print(os.getcwd())
os.system("mkdir today")
os.system("touch temp.txt")
# 针对日常的文件和目录管理任务,:mod:shutil 模块提供了一个易于使用的高级接口:
import shutil
shutil.copyfile("temp.txt", "./today/new.txt")
shutil.copy("temp.txt", "./today")
# 文件通配符
# glob模块提供了一个函数用于从目录通配符搜索中生成文件列表:
import glob
list = glob.glob("*.txt")
print(list)
# 测试模块
def average(values):
    """Computes the arithmetic mean of a list of numbers.

    >>> print(average([20, 30, 70]))
    40.0
    """
    # Empty input raises ZeroDivisionError, which the unittest below relies on.
    total = sum(values)
    return total / len(values)
import doctest
doctest.testmod() # 自动验证嵌入测试
a = [10, ]
print(len(a)) # 1
print('%.2f' % 123.444)
# unittest模块
import unittest
class TestStatisticalFunctions(unittest.TestCase):
    """unittest suite for the module-level average() helper."""
    def test_average(self):
        self.assertEqual(average([20, 30, 70]), 40.0)
        self.assertEqual(round(average([1, 5, 7]), 1), 4.3)
        # average([]) divides by len([]) == 0.
        self.assertRaises(ZeroDivisionError, average, [])
        # average() takes one sequence; extra positional args raise TypeError.
        self.assertRaises(TypeError, average, 20, 30, 70)
unittest.main()  # Calling from the command line invokes all tests
import schedule
import time
import threading
import functools
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s') # logging.basicConfig函数对日志的输出格式及方式做相关配置
logger = logging.getLogger('com.autotest.db.sqlalchemy_util')
# def job():
# print("I'm working...")
#
#
# schedule.every(10).seconds.do(job)
#
# while True:
# schedule.run_pending() # 检测是否执行
# time.sleep(1)
# logger.info("Waiting for 1 second...")
def job():
print("I'm working...")
# 每十分钟执行任务
schedule.every(10).minutes.do(job)
# 每个小时执行任务
schedule.every().hour.do(job)
# 每天的10:30执行任务
schedule.every().day.at("10:30").do(job)
# 每个月执行任务
schedule.every().monday.do(job)
# 每个星期三的13:15分执行任务
schedule.every().wednesday.at("13:15").do(job)
# 每分钟的第17秒执行任务
schedule.every().minute.at(":17").do(job)
while True:
schedule.run_pending()
time.sleep(1)
# 只运行一次
def job_that_executes_once():
# 此处编写的任务只会执行一次...
return schedule.CancelJob
schedule.every().day.at('22:30').do(job_that_executes_once)
while True:
schedule.run_pending()
time.sleep(1)
# 参数传递
def greet(name):
print('Hello', name)
# do() 将额外的参数传递给job函数
schedule.every(2).seconds.do(greet, name='Alice')
schedule.every(4).seconds.do(greet, name='Bob')
# 获取所有作业 and 取消所有作业
def hello():
print('Hello world')
schedule.every().second.do(hello)
all_jobs = schedule.get_jobs() # 获取
schedule.clear() # 取消
# .tag 打标签
schedule.every().day.do(greet, 'Andrea').tag('daily-tasks', 'friend')
schedule.every().hour.do(greet, 'John').tag('hourly-tasks', 'friend')
schedule.every().hour.do(greet, 'Monica').tag('hourly-tasks', 'customer')
schedule.every().day.do(greet, 'Derek').tag('daily-tasks', 'guest')
# get_jobs(标签):可以获取所有该标签的任务
friends = schedule.get_jobs('friend')
# 取消所有 daily-tasks 标签的任务
schedule.clear('daily-tasks')
# 设定截止时间
# 每个小时运行作业,18:30后停止
schedule.every(1).hours.until("18:30").do(job)
# 每个小时运行作业,2030-01-01 18:33 today
schedule.every(1).hours.until("2030-01-01 18:33").do(job)
# 每个小时运行作业,8个小时后停止
schedule.every(1).hours.until(timedelta(hours=8)).do(job)
# 每个小时运行作业,11:32:42后停止
schedule.every(1).hours.until(time(11, 33, 42)).do(job)
# 每个小时运行作业,2020-5-17 11:36:20后停止
schedule.every(1).hours.until(datetime(2020, 5, 17, 11, 36, 20)).do(job)
# 立即运行所有作业,而不管其安排如何
schedule.run_all()
# 立即运行所有作业,每次作业间隔10秒
schedule.run_all(delay_seconds=10)
# 装饰器安排作业
# 此装饰器效果等同于 schedule.every(10).minutes.do(job)
@repeat(every(10).minutes)
def job():
print("I am a scheduled job")
while True:
run_pending()
time.sleep(1)
# 并行执行
# 默认情况下,Schedule 按顺序执行所有作业
# 通过多线程的形式来并行每个作业
def job1():
print("I'm running on thread %s" % threading.current_thread())
def job2():
print("I'm running on thread %s" % threading.current_thread())
def job3():
print("I'm running on thread %s" % threading.current_thread())
def run_threaded(job_func):
job_thread = threading.Thread(target=job_func)
job_thread.start()
schedule.every(10).seconds.do(run_threaded, job1)
schedule.every(10).seconds.do(run_threaded, job2)
schedule.every(10).seconds.do(run_threaded, job3)
while True:
schedule.run_pending()
time.sleep(1)
# 异常处理
# Schedule 不会自动捕捉异常,它遇到异常会直接抛出
def catch_exceptions(cancel_on_failure=False):
    """Decorator factory that keeps a scheduled job from killing the scheduler.

    Any exception raised by the wrapped job is caught and its traceback
    printed. With ``cancel_on_failure=True`` the wrapper returns
    ``schedule.CancelJob`` so schedule unregisters the failing job;
    otherwise the wrapper returns ``None`` on failure.
    """
    def catch_exceptions_decorator(job_func):
        @functools.wraps(job_func)
        def wrapper(*args, **kwargs):
            try:
                return job_func(*args, **kwargs)
            # Fix: catch Exception instead of a bare ``except:`` so that
            # SystemExit / KeyboardInterrupt can still stop the loop.
            except Exception:
                import traceback
                print(traceback.format_exc())
                if cancel_on_failure:
                    return schedule.CancelJob
        return wrapper
    return catch_exceptions_decorator
@catch_exceptions(cancel_on_failure=True)
def bad_task():
return 1 / 0
# 这样,bad_task 在执行时遇到的任何错误,都会被 catch_exceptions 捕获,这点在保证调度任务正常运转的时候非常关键。
schedule.every(5).minutes.do(bad_task)
from openpyxl import Workbook, load_workbook
from openpyxl.worksheet.worksheet import Worksheet
from openpyxl.styles import *
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s') # logging.basicConfig函数对日志的输出格式及方式做相关配置
logger = logging.getLogger("excel_util")
class ExcelUtil(object):
    """Thin convenience wrapper around an openpyxl Workbook/Worksheet.

    Without ``excel_path`` a fresh empty workbook is created; otherwise the
    given file is loaded and, if ``excel_sheet`` is given, that sheet
    becomes the working sheet. All row/col indices are 1-based, matching
    openpyxl.
    """

    def __init__(self, excel_path=None, excel_sheet=None):
        if excel_path is None:
            self.wb: Workbook = Workbook(write_only=False)
            logger.info("默认创建一个空workbook。")
            self.ws: Worksheet = self.wb.active
            logger.info("默认worksheet={0}。".format(self.ws))
        else:
            self.wb: Workbook = load_workbook(filename=excel_path)
            if excel_sheet is not None:
                self.ws: Worksheet = self.wb[excel_sheet]
                logger.info("加载{0}文件的{1}表单。".format(excel_path, excel_sheet))
            else:
                # Fix: the original never set self.ws on this path, so any
                # later access (e.g. .rows) raised AttributeError.
                self.ws: Worksheet = self.wb.active
                logger.info("加载{0}文件。".format(excel_path))

    @property
    def rows(self):
        """Number of rows in the working sheet."""
        return self.ws.max_row

    @property
    def cols(self):
        """Number of columns in the working sheet."""
        return self.ws.max_column

    def get_cell_by_name(self, cell_name):
        """Return the Cell addressed in A1 notation (e.g. 'B3').

        Replaces the original broken ``cell`` properties: a @property getter
        cannot take extra arguments, and the two definitions shadowed each
        other and assigned to the read-only property.
        """
        return self.ws[cell_name]

    def get_cell(self, row, col):
        """Return the Cell object at (row, col)."""
        return self.ws.cell(row, col)

    def set_cell_value_by_cell_name(self, cell_name, content):
        """Write ``content`` into the cell addressed in A1 notation."""
        self.ws[cell_name] = content

    def set_cell_value(self, row, col, content):
        """Write ``content`` into the cell at (row, col).

        Note: the original class defined set_cell_value twice; only this
        (row, col, content) overload was ever reachable.
        """
        self.ws.cell(row, col).value = content

    def get_cell_value_by_cell_name(self, cell_name):
        """Return the value of the cell addressed in A1 notation."""
        return self.ws[cell_name].value

    def get_cell_value(self, row, col):
        """Return the value of the cell at (row, col)."""
        return self.ws.cell(row, col).value

    def change_active_sheet(self, index):
        """Make the sheet at ``index`` the workbook's active sheet."""
        # Use the public setter instead of poking wb._active_sheet_index.
        self.wb.active = index

    def save(self, save_path):
        """Persist the workbook to ``save_path``."""
        self.wb.save(save_path)

    def get_sheet_list(self) -> list:
        """Return the list of sheet names."""
        # wb.sheetnames replaces the deprecated get_sheet_names().
        return self.wb.sheetnames

    def get_sheet(self, sheet_name: str):
        """Switch the working sheet to ``sheet_name``."""
        # wb[name] replaces the deprecated get_sheet_by_name().
        self.ws: Worksheet = self.wb[sheet_name]
if __name__ == '__main__':
excelOperator = ExcelUtil(excel_path="../crawler/Temp.xlsx", excel_sheet="Records")
    logger.info(excelOperator.rows)
import requests
from requests import Response
import re
import datetime
from concurrent.futures import ThreadPoolExecutor, as_completed
from bs4 import BeautifulSoup
from openpyxl import Workbook
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s') # logging.basicConfig函数对日志的输出格式及方式做相关配置
logger = logging.getLogger('rolling_king.jason.crawler.crawler_weibo')
# 微博热搜
class WeiBoCollection(object):
    """Collects Weibo search-result posts and hot-query suggestions for a
    keyword and stores them into an in-memory openpyxl Workbook.

    NOTE(review): requires a valid logged-in cookie; the CSS/attr selectors
    are coupled to the current s.weibo.com HTML layout — confirm before reuse.
    """

    # Browser-like request headers; the cookie entry is filled in by
    # __init__ (note: this mutates the class-level dict, so it is shared
    # across instances).
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36",
        "cookie": ""
    }

    def __init__(self, cookie_val):
        # Store the session cookie and prepare an empty workbook.
        self.headers['cookie'] = cookie_val
        self.host_url = 'https://s.weibo.com/weibo'
        self.content_url = self.host_url
        self.wb = Workbook()

    # Fetch the hot-search ("hot query") suggestions for the keyword.
    def get_hot_query_by_key(self, key: str) -> Response:
        """Call the ajax side-search endpoint and return the raw Response."""
        hot_resp = requests.get(url="https://weibo.com/ajax/side/search?q="+key,
                                headers={
                                    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36"}
                                )
        logger.info(f'微博热搜={hot_resp.json()}')
        return hot_resp

    # Fetch the search-result page for the keyword.
    def get_weibo_html_content_by_key(self, key: str) -> str:
        """Return the HTML text of the first search-result page; also stores
        the search URL in self.content_url for later paging."""
        self.content_url = self.host_url+'?q=' + key + '&nodup=1'  # nodup=1 requests the complete result set
        content_resp = requests.get(url=self.content_url, headers=self.headers)
        print(content_resp.encoding) # ISO-8859-1
        print(content_resp.apparent_encoding) # GB2312
        # content_resp.encoding = content_resp.apparent_encoding
        # print(content_resp.content) # bytes
        # print(content_resp.text) # str
        return content_resp.text # html_doc

    def get_total_page_num(self, html_doc: str = None) -> int:
        """Parse the result page and return the number of result pages,
        taken from the <li> count inside the page-list <ul>."""
        soup = BeautifulSoup(html_doc, "lxml")
        print(type(soup))
        # print(soup.find('a', class_='pagenum').text)
        ul_tag = soup.find('ul', attrs={'action-type': 'feed_list_page_morelist'})
        print(f'ul_tag={ul_tag}')
        page_num: int = len(ul_tag.find_all('li'))
        print('length=', page_num)
        return page_num

    def collect_func(self, curr_page: int) -> dict:
        """Scrape one result page; return {'content': [row dicts], 'count': n}.

        Runs in a worker thread (see save_weibo_content).
        """
        print(f'current page = {curr_page}')
        curr_url = self.content_url + '&page=' + str(curr_page)
        print(f'current url = {curr_url}')
        curr_resp = requests.get(url=curr_url, headers=self.headers)
        curr_html_doc = curr_resp.text
        curr_soup = BeautifulSoup(curr_html_doc, "lxml")
        # from_results = curr_soup.find_all('div', class_='from')
        # print(len(from_results))
        results = curr_soup.find_all('p', class_='txt', attrs={'node-type': 'feed_list_content'})
        # results = curr_soup.find_all('p', class_='txt', attrs={'node-type': 'feed_list_content_full'})
        print(len(results))
        print(type(results))
        print(results)
        count: int = 0
        curr_dict = {
            'content': []
        }
        for item in results:
            count += 1
            print(type(item))
            print(item.name) # p
            print(f"微博名={item['nick-name']}") # poster nickname
            print(f'微博内容={item.text.strip()}') # post text
            # Extract the first #topic# hashtag from the post, if any.
            regex = re.compile(r'#.*?#')
            s = regex.search(item.text.strip())
            topic: str = ''
            if s is not None:
                print(f'话题={s.group()}')
                topic = s.group()
            curr_dict['content'].append({
                '微博名': item['nick-name'],
                '微博话题': topic,
                '微博内容': item.text.strip(),
            })
            print(f'--- 第{curr_page}页的{count}记录已获取 ---')
        curr_dict['count'] = count
        return curr_dict

    def save_weibo_content(self, page_num: int, key: str):
        """Scrape all pages concurrently (one thread per page) and write the
        rows into the active sheet, which is titled with the keyword."""
        thread_pool = ThreadPoolExecutor(page_num)
        thread_task_list = []
        for page in range(1, page_num+1):
            thread_task_list.append(thread_pool.submit(self.collect_func, page))
        print(self.wb.sheetnames)
        print(self.wb.active)
        ws = self.wb.active
        ws.title = key
        ws.cell(1, 1).value = '微博名'
        ws.cell(1, 2).value = '微博话题'
        ws.cell(1, 3).value = '微博内容'
        total_count = 0
        curr_row = 2
        for future in as_completed(thread_task_list):
            print(future.result())
            total_count += future.result()['count']
            # Append this page's rows to the sheet.
            for dict_val in future.result()['content']:
                curr_col = 1
                ws.cell(curr_row, curr_col).value = dict_val['微博名']
                curr_col += 1
                ws.cell(curr_row, curr_col).value = dict_val['微博话题']
                curr_col += 1
                ws.cell(curr_row, curr_col).value = dict_val['微博内容']
                curr_row += 1
            # The next page's rows continue on the following row.
        print(f'{page_num}页,一共{total_count}条记录')

    def save_weibo_hot_query(self, hot_resp, key: str):
        """Write the hot-query suggestions into a new sheet named 热搜_<key>:
        first row = field names, following rows = one suggestion each."""
        ws = self.wb.create_sheet(title='热搜_' + key)
        if hot_resp.json()['ok'] == 1:
            hot_query_json_list = hot_resp.json()['data']['hotquery']
            if len(hot_query_json_list) > 0:
                key_list = hot_query_json_list[0].keys()
                curr_col = 1
                for col_head in key_list:
                    ws.cell(1, curr_col).value = col_head
                    curr_col += 1
                curr_row = 2
                for hot_query_json_item in hot_query_json_list:
                    curr_col = 1
                    for col_key in key_list:
                        ws.cell(curr_row, curr_col).value = hot_query_json_item[col_key]
                        curr_col += 1
                    curr_row += 1
            else:
                print(f'hot_query_json_list is empty.')
        else:
            print(f'hot_resp is not ok.')

    def save_excel_to_disk(self, file_name: str) -> None:
        """Persist the workbook to ``file_name`` (.xlsx)."""
        self.wb.save(file_name)
if __name__ == '__main__':
cookie_value: str = "XSRF-TOKEN=vH8eCkgP-JmRtN2Ia3VIZzNL; _s_tentry=weibo.com; Apache=8524959161901.953.1666270664916; SINAGLOBAL=8524959161901.953.1666270664916; ULV=1666270664920:1:1:1:8524959161901.953.1666270664916:; login_sid_t=b5127687703bbdcf584d351ad19bb4b4; cross_origin_proto=SSL; SSOLoginState=1666324094; SCF=ApUmMbNmgFup8JyPq2IgXMlCgCtSeadR43NF9Z6NG0KDyxJmqoy-q1BssnHP28j1ZKJlwOhyLRZzMNmw1cJ-FiM.; SUB=_2A25OUZ7RDeRhGedJ6VcV-SrLyDyIHXVtJvcZrDV8PUNbmtANLVr5kW9NVlLIFhGf5-a2Sp9qM7dSRByY1wlD_sSP; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WW7wgizECTwskArQ2OMHFNw5JpX5KMhUgL.Fo2Neo-X1KBNe052dJLoIE-LxKnLB.-LB.xWi--4iKn0iK.pi--fi-z7iKysi--4iKn0iK.p; ALF=1698112000; WBPSESS=fbOmJTuMY3c-5Rw73SivynCCuNFzmQGVExuu7n6msq-AjXm4uN--xLuIUTml8RhJDN_nrrqPS1nQ2NIMyMdVyNKkaKtQladJWypSdM_rIwgLWcjOOCCCyt2nzPJT3IGPbG6yCmzbwCeOSpYz_m0h4g=="
search_key = "南通"
obj = WeiBoCollection(cookie_val=cookie_value)
obj.save_weibo_content(obj.get_total_page_num(obj.get_weibo_html_content_by_key(key=search_key)), key=search_key)
obj.save_weibo_hot_query(obj.get_hot_query_by_key(key=search_key), key=search_key)
    obj.save_excel_to_disk(file_name='WeiBo_'+datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')+'.xlsx')
import requests
from bs4 import BeautifulSoup
# One-off scraping experiment:
#  1) parse a locally saved Baidu results page and show the first content div;
#  2) fetch a live Baidu news-search page using a captured session cookie.
# NOTE(review): the desktop path and cookie are machine/session specific.
with open("/Users/admin/Desktop/baidu.html", mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None) as fileObj:
    print("文件名: ", fileObj.name)
    print("是否已关闭 : ", fileObj.closed)
    print("访问模式 : ", fileObj.mode)
    # Fix: the original readlines()+string-concat loop built the same value
    # quadratically and never closed the file; read() returns it in one call
    # and the with-block guarantees the handle is closed.
    val = fileObj.read()
# print(val)
soup = BeautifulSoup(val, "lxml")
print(type(soup))
# print(soup.find('a', class_='pagenum').text)
div_tag = soup.find('div', attrs={'class': ['c-row', 'content-wrapper_1SuJ0']})
print('-------------------')
print(f'div_tag={div_tag}')
# page_num: int = len(ul_tag.find_all('li'))
cookie = "BIDUPSID=4A1485563FA4A8F48BBA72A0DE6C86DD; PSTM=1666270645; BAIDUID=4A1485563FA4A8F4BC48518904109E08:FG=1; BD_UPN=123253; MCITY=-75:; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_PS_PSSID=36548_37358_37299_36885_37628_36807_36789_37540_37499_26350; BAIDUID_BFESS=4A1485563FA4A8F4BC48518904109E08:FG=1; delPer=0; BD_CK_SAM=1; PSINO=2; BA_HECTOR=0g85000la50k252l0424dqlu1hlcni51b; ZFY=6deFW77nFLKhW:A5JxO6akg7YzaDrDvStePnOta1Ka3U:C; H_PS_645EC=c8e7bAhaJW/MO9zWkp/H2nIXr8Xy3k5JAZTecHXru40trcMBk/SJguwj7SY; COOKIE_SESSION=3_0_8_9_5_17_0_1_7_6_1_3_28_0_2_0_1666604643_0_1666604641|9#0_0_1666604641|1; BDSVRTM=0; WWW_ST=1666605811625"
url_str = "https://www.baidu.com/s?rtt=1&bsst=1&cl=2&tn=news&ie=utf-8&word=%E5%8D%97%E9%80%9A&x_bfe_rqs=03E80&x_bfe_tjscore=0.100000&tngroupname=organic_news&newVideo=12&goods_entry_switch=1&rsv_dl=news_b_pn&pn=10"
# url_str = "https://www.baidu.com/s?ie=utf-8&medium=0&rtt=1&bsst=1&rsv_dl=news_t_sk&cl=2&wd=%E5%8D%97%E9%80%9A&tn=news&rsv_bp=1&rsv_sug3=1&rsv_sug1=2&rsv_sug7=100&rsv_sug2=0&oq=&rsv_btype=t&f=8&rsv_sug4=918&rsv_sug=1"
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36",
    "cookie": "",
    # "Host": "www.baidu.com"
    # "Referer": "https://www.baidu.com/s?rtt=1&bsst=1&cl=2&tn=news&ie=utf-8&word=南通&x_bfe_rqs=03E80&x_bfe_tjscore=0.100000&tngroupname=organic_news&newVideo=12&goods_entry_switch=1&rsv_dl=news_b_pn&pn=10"
}
headers['cookie'] = cookie  # inject the captured session cookie
resp = requests.get(url=url_str, headers=headers)
print(resp.text)
import pymysql
import time
import os
import subprocess
import logging
__all__ = ["PyMysqlDB"]
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s [%(levelname)s] %(funcName)s: %(message)s',
datefmt="%d %b %Y %H:%M:%S")
class PyMysqlDB:
def __init__(self, host=None, user=None, pwd=None, port=3306, base_path=None, backup_path='/data/LocalBackup'):
self.host = host
self.user = user
self.pwd = pwd
self.port = int(port)
self.base_path = base_path
self.backup_path = backup_path
def select_database(self):
db_list = []
con = pymysql.connect(host=self.host, user=self.user, password=self.pwd, db='information_schema',
port=self.port)
cur = con.cursor()
cur.execute('select SCHEMA_NAME from SCHEMATA')
for (db,) in cur.fetchall():
db_list.append(db)
return db_list
def backup_by_database(self, database):
logging.info('backup database: {}'.format(database))
today = time.strftime("%Y%m%d", time.localtime())
backup_dir = '{}/{}'.format(self.backup_path, today)
if not os.path.isdir(backup_dir):
os.makedirs(backup_dir)
os.chdir(backup_dir)
start_time = int(time.time())
cmd = "{}/bin/mysqldump --opt -h{} -P{} -u{} -p{} {} | gzip > {}/{}/{}-{}-{}.sql.gz".format(self.base_path,
self.host,
self.port,
self.user, self.pwd,
database,
self.backup_path,
today, today,
self.host,
database)
result = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
content = result.stdout.read()
if content and not content.decode().startswith("Warning:"):
subject = "{} - {} backup error, reason: {}".format(self.host, database, content.decode())
logging.error(subject)
end_time = int(time.time())
use_time = end_time - start_time
logging.info('{} - {} backup finished, use time: {}s'.format(self.host, database, float('%.2f' % use_time)))
def backup_by_table(self):
pass
def backup_all(self, **kwargs):
exclude_db = kwargs.get('exclude_db', [])
db_list = [val for val in self.select_database() if val not in exclude_db]
logging.info('db_list: {}'.format(db_list))
for db in db_list:
self.backup_by_database(db)
logging.info('{} backup all finished'.format(self.host)) | 51pub_pymodules | /51pub_pymodules-0.0.1.tar.gz/51pub_pymodules-0.0.1/opmysql/mysqldb.py | mysqldb.py |
import urllib.request
import xlwt
import re
import urllib.parse
import time
header={
'Host':'search.51job.com',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
}
def getfront(page,item): #page是页数,item是输入的字符串
result = urllib.parse.quote(item) #先把字符串转成十六进制编码
ur1 = result+',2,'+ str(page)+'.html'
ur2 = 'https://search.51job.com/list/000000,000000,0000,00,9,99,'
res = ur2+ur1 #拼接网址
a = urllib.request.urlopen(res)
html = a.read().decode('gbk') # 读取源代码并转为unicode
html = html.replace('\\','') # 将用于转义的"\"替换为空
html = html.replace('[', '')
html = html.replace(']', '')
#print(html)
return html
def getInformation(html):
reg = re.compile(r'"type":"engine_jds".*?"job_href":"(.*?)","job_name":"(.*?)".*?"company_href":"(.*?)","company_name":"(.*?)","providesalary_text":"(.*?)".*?"updatedate":"(.*?)".*?,'
r'"companytype_text":"(.*?)".*?"jobwelf":"(.*?)".*?"attribute_text":"(.*?)","(.*?)","(.*?)","(.*?)","companysize_text":"(.*?)","companyind_text":"(.*?)"',re.S)#匹配换行符
items=re.findall(reg,html)
print(items)
return items
def main():
#新建表格空间
excel1 = xlwt.Workbook()
# 设置单元格格式
sheet1 = excel1.add_sheet('Job', cell_overwrite_ok=True)
sheet1.write(0, 0, '序号')
sheet1.write(0, 1, '职位')
sheet1.write(0, 2, '公司名称')
sheet1.write(0, 3, '公司地点')
sheet1.write(0, 4, '公司性质')
sheet1.write(0, 5, '薪资')
sheet1.write(0, 6, '学历要求')
sheet1.write(0, 7, '工作经验')
sheet1.write(0, 8, '公司规模')
#sheet1.write(0, 9, '公司类型')
sheet1.write(0, 9,'公司福利')
sheet1.write(0, 10,'发布时间')
number = 1
item = input("请输入需要搜索的职位:") #输入想搜索的职位关键字
for j in range(1,33): #页数自己随便改
try:
print("正在爬取第"+str(j)+"页数据...")
html = getfront(j,item) #调用获取网页原码
for i in getInformation(html):
try:
sheet1.write(number,0,number)
sheet1.write(number,1,i[1])
sheet1.write(number,2,i[3])
sheet1.write(number,3,i[8])
sheet1.write(number,4,i[6])
sheet1.write(number,5,i[4])
sheet1.write(number,6,i[10])
sheet1.write(number,7,i[9])
sheet1.write(number,8,i[12])
#sheet1.write(number,9,i[7])
sheet1.write(number,9,i[7])
sheet1.write(number,10,i[5])
number+=1
excel1.save("51job.xls")
time.sleep(0.3) #休息间隔,避免爬取海量数据时被误判为攻击,IP遭到封禁
except:
pass
except:
pass
if __name__ == '__main__':
main()
###################################数据清洗#######################################
#coding:utf-8
import pandas as pd
import re
#读取表格内容到data
data = pd.read_excel(r'51job.xls',sheet_name='Job')
result = pd.DataFrame(data)
a = result.dropna(axis=0,how='any')
pd.set_option('display.max_rows',None) #输出全部行,不省略
#清洗职位中的异常数据
b = u'数据'
number = 1
li = a['职位']
for i in range(0,len(li)):
try:
if b in li[i]:
#print(number,li[i])
number+=1
else:
a = a.drop(i,axis=0) #删除整行
except:
pass
#清洗学历要求的异常数据
b2 = '人'
li2 = a['学历要求']
for i in range(0,len(li2)):
try:
if b2 in li2[i]:
# print(number,li2[i])
number += 1
a = a.drop(i, axis=0)
except:
pass
#转换薪资单位
b3 =u'万/年'
b4 =u'千/月'
li3 = a['薪资']
#注释部分的print都是为了调试用的
for i in range(0,len(li3)):
try:
if b3 in li3[i]:
x = re.findall(r'\d*\.?\d+',li3[i])
#print(x)
min_ = format(float(x[0])/12,'.2f') #转换成浮点型并保留两位小数
max_ = format(float(x[1])/12,'.2f')
li3[i][1] = min_+'-'+max_+u'万/月'
if b4 in li3[i]:
x = re.findall(r'\d*\.?\d+',li3[i])
#print(x)
#input()
min_ = format(float(x[0])/10,'.2f')
max_ = format(float(x[1])/10,'.2f')
li3[i][1] = str(min_+'-'+max_+'万/月')
print(i,li3[i])
except:
pass
#保存成另一个excel文件
a.to_excel('51job2.xlsx', sheet_name='Job', index=False)
########################################数据可视化################################################
import pandas as pd
import re
from pyecharts.charts import Funnel,Pie,Geo
import matplotlib.pyplot as plt
from pyecharts import options as opts
from pyecharts.datasets import register_url
file = pd.read_excel(r'51job2.xlsx',sheet_name='Job')
f = pd.DataFrame(file)
pd.set_option('display.max_rows',None)
add = f['公司地点']
sly = f['薪资']
edu = f['学历要求']
exp = f['工作经验']
address =[]
salary = []
education = []
experience = []
for i in range(0,len(f)):
try:
a = add[i].split('-')
address.append(a[0])
#print(address[i])
s = re.findall(r'\d*\.?\d+',sly[i])
s1= float(s[0])
s2 =float(s[1])
salary.append([s1,s2])
#print(salary[i])
education.append(edu[i])
#print(education[i])
experience.append(exp[i])
#print(experience[i])
except:
pass
min_s=[] #定义存放最低薪资的列表
max_s=[] #定义存放最高薪资的列表
for i in range(0,len(experience)):
min_s.append(salary[i][0])
max_s.append(salary[i][0])
plt.rcParams['font.sans-serif'] = ['KaiTi'] # 指定默认字体
plt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题
my_df = pd.DataFrame({'experience':experience, 'min_salay' : min_s, 'max_salay' : max_s}) #关联工作经验与薪资
data1 = my_df.groupby('experience').mean()['min_salay'].plot(kind='line')
plt.show()
my_df2 = pd.DataFrame({'education':education, 'min_salay' : min_s, 'max_salay' : max_s}) #关联学历与薪资
data2 = my_df2.groupby('education').mean()['min_salay'].plot(kind='line')
plt.show()
def get_edu(list):
education2 = {}
for i in set(list):
education2[i] = list.count(i)
return education2
dir1 = get_edu(education)
attr= dir1.keys()
value = dir1.values()
c = (
Pie()
.add(
"",
[list(z) for z in zip(attr, value)],
radius=["40%", "75%"],
)
.set_global_opts(
title_opts=opts.TitleOpts(title="Pie-Radius"),
legend_opts=opts.LegendOpts(orient="vertical", pos_top="15%", pos_left="2%"),
)
.set_series_opts(label_opts=opts.LabelOpts(formatter="{b}: {c}"))
.render("学历要求玫瑰图.html")
)
def get_address(list):
address2 = {}
for i in set(list):
address2[i] = list.count(i)
try:
address2.pop('异地招聘')
except:
pass
return address2
dir2 = get_address(address)
attr2 = dir2.keys()
value2 = dir2.values()
c = (
Geo()
.add_schema(maptype="china")
.add("geo", [list(z) for z in zip(attr2, value2)])
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(), title_opts=opts.TitleOpts(title="Geo-基本示例")
)
.render("大数据城市需求分布图.html")
)
def get_experience(list):
experience2 = {}
for i in set(list):
experience2[i] = list.count(i)
return experience2
dir3 = get_experience(experience)
attr3= dir3.keys()
value3 = dir3.values()
c = (
Funnel()
.add(
"",
[list(z) for z in zip(attr3, value3)],
label_opts=opts.LabelOpts(position="inside"),
)
.set_global_opts(title_opts=opts.TitleOpts(title="Funnel-Label(inside)"))
.render("工作经验要求漏斗图.html")
) | 51spiders | /51spiders-0.0.1.tar.gz/51spiders-0.0.1/51/51.py | 51.py |
from __future__ import annotations
import dataclasses
import inspect
import sys
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Coroutine,
Dict,
List,
Mapping,
Optional,
Sequence,
Type,
TypeVar,
Union,
overload,
)
from strawberry.annotation import StrawberryAnnotation
from strawberry.exceptions import InvalidArgumentTypeError, InvalidDefaultFactoryError
from strawberry.type import StrawberryType
from strawberry.union import StrawberryUnion
from strawberry.utils.cached_property import cached_property
from .types.fields.resolver import StrawberryResolver
if TYPE_CHECKING:
import builtins
from typing_extensions import Literal
from strawberry.arguments import StrawberryArgument
from strawberry.extensions.field_extension import FieldExtension
from strawberry.types.info import Info
from .object_type import TypeDefinition
from .permission import BasePermission
T = TypeVar("T")
_RESOLVER_TYPE = Union[
StrawberryResolver[T],
Callable[..., T],
# we initially used Awaitable, but that was triggering the following mypy bug:
# https://github.com/python/mypy/issues/14669
Callable[..., Coroutine[T, Any, Any]],
"staticmethod[Any, T]", # type: ignore
"classmethod[Any, Any, T]", # type: ignore
]
UNRESOLVED = object()
def _is_generic(resolver_type: Union[StrawberryType, type]) -> bool:
"""Returns True if `resolver_type` is generic else False"""
if isinstance(resolver_type, StrawberryType):
return resolver_type.is_generic
# solves the Generic subclass case
if hasattr(resolver_type, "_type_definition"):
return resolver_type._type_definition.is_generic
return False
class StrawberryField(dataclasses.Field):
type_annotation: Optional[StrawberryAnnotation]
default_resolver: Callable[[Any, str], object] = getattr
def __init__(
self,
python_name: Optional[str] = None,
graphql_name: Optional[str] = None,
type_annotation: Optional[StrawberryAnnotation] = None,
origin: Optional[Union[Type, Callable, staticmethod, classmethod]] = None,
is_subscription: bool = False,
description: Optional[str] = None,
base_resolver: Optional[StrawberryResolver] = None,
permission_classes: List[Type[BasePermission]] = (), # type: ignore
default: object = dataclasses.MISSING,
default_factory: Union[Callable[[], Any], object] = dataclasses.MISSING,
metadata: Optional[Mapping[Any, Any]] = None,
deprecation_reason: Optional[str] = None,
directives: Sequence[object] = (),
extensions: List[FieldExtension] = (), # type: ignore
):
# basic fields are fields with no provided resolver
is_basic_field = not base_resolver
kwargs: Dict[str, Any] = {}
# kw_only was added to python 3.10 and it is required
if sys.version_info >= (3, 10):
kwargs["kw_only"] = dataclasses.MISSING
super().__init__(
default=default,
default_factory=default_factory, # type: ignore
init=is_basic_field,
repr=is_basic_field,
compare=is_basic_field,
hash=None,
metadata=metadata or {},
**kwargs,
)
self.graphql_name = graphql_name
if python_name is not None:
self.python_name = python_name
self.type_annotation = type_annotation
self.description: Optional[str] = description
self.origin = origin
self._base_resolver: Optional[StrawberryResolver] = None
if base_resolver is not None:
self.base_resolver = base_resolver
# Note: StrawberryField.default is the same as
# StrawberryField.default_value except that `.default` uses
# `dataclasses.MISSING` to represent an "undefined" value and
# `.default_value` uses `UNSET`
self.default_value = default
if callable(default_factory):
try:
self.default_value = default_factory()
except TypeError as exc:
raise InvalidDefaultFactoryError() from exc
self.is_subscription = is_subscription
self.permission_classes: List[Type[BasePermission]] = list(permission_classes)
self.directives = list(directives)
self.extensions: List[FieldExtension] = list(extensions)
self.deprecation_reason = deprecation_reason
def __call__(self, resolver: _RESOLVER_TYPE) -> StrawberryField:
"""Add a resolver to the field"""
# Allow for StrawberryResolvers or bare functions to be provided
if not isinstance(resolver, StrawberryResolver):
resolver = StrawberryResolver(resolver)
for argument in resolver.arguments:
if isinstance(argument.type_annotation.annotation, str):
continue
elif isinstance(argument.type, StrawberryUnion):
raise InvalidArgumentTypeError(
resolver,
argument,
)
elif getattr(argument.type, "_type_definition", False):
if argument.type._type_definition.is_interface: # type: ignore
raise InvalidArgumentTypeError(
resolver,
argument,
)
self.base_resolver = resolver
return self
def get_result(
self, source: Any, info: Optional[Info], args: List[Any], kwargs: Dict[str, Any]
) -> Union[Awaitable[Any], Any]:
"""
Calls the resolver defined for the StrawberryField.
If the field doesn't have a resolver defined we default
to using the default resolver specified in StrawberryConfig.
"""
if self.base_resolver:
return self.base_resolver(*args, **kwargs)
return self.default_resolver(source, self.python_name)
@property
def is_basic_field(self) -> bool:
"""
Flag indicating if this is a "basic" field that has no resolver or
permission classes, i.e. it just returns the relevant attribute from
the source object. If it is a basic field we can avoid constructing
an `Info` object and running any permission checks in the resolver
which improves performance.
"""
return (
not self.base_resolver
and not self.permission_classes
and not self.extensions
)
@property
def arguments(self) -> List[StrawberryArgument]:
if not self.base_resolver:
return []
return self.base_resolver.arguments
def _python_name(self) -> Optional[str]:
if self.name:
return self.name
if self.base_resolver:
return self.base_resolver.name
return None
def _set_python_name(self, name: str) -> None:
self.name = name
python_name: str = property(_python_name, _set_python_name) # type: ignore[assignment] # noqa: E501
@property
def base_resolver(self) -> Optional[StrawberryResolver]:
return self._base_resolver
@base_resolver.setter
def base_resolver(self, resolver: StrawberryResolver) -> None:
self._base_resolver = resolver
# Don't add field to __init__, __repr__ and __eq__ once it has a resolver
self.init = False
self.compare = False
self.repr = False
# TODO: See test_resolvers.test_raises_error_when_argument_annotation_missing
# (https://github.com/strawberry-graphql/strawberry/blob/8e102d3/tests/types/test_resolvers.py#L89-L98)
#
# Currently we expect the exception to be thrown when the StrawberryField
# is constructed, but this only happens if we explicitly retrieve the
# arguments.
#
# If we want to change when the exception is thrown, this line can be
# removed.
_ = resolver.arguments
@property # type: ignore
def type(self) -> Union[StrawberryType, type, Literal[UNRESOLVED]]: # type: ignore
# We are catching NameError because dataclasses tries to fetch the type
# of the field from the class before the class is fully defined.
# This triggers a NameError error when using forward references because
# our `type` property tries to find the field type from the global namespace
# but it is not yet defined.
try:
# Prioritise the field type over the resolver return type
if self.type_annotation is not None:
return self.type_annotation.resolve()
if self.base_resolver is not None:
# Handle unannotated functions (such as lambdas)
if self.base_resolver.type is not None:
# Generics will raise MissingTypesForGenericError later
# on if we let it be returned. So use `type_annotation` instead
# which is the same behaviour as having no type information.
if not _is_generic(self.base_resolver.type):
return self.base_resolver.type
# If we get this far it means that we don't have a field type and
# the resolver doesn't have a return type so all we can do is return
# UNRESOLVED here.
# This case will raise a MissingReturnAnnotationError exception in the
# _check_field_annotations function:
# https://github.com/strawberry-graphql/strawberry/blob/846f060a63cb568b3cdc0deb26c308a8d0718190/strawberry/object_type.py#L76-L80
return UNRESOLVED
except NameError:
return UNRESOLVED
@type.setter
def type(self, type_: Any) -> None:
# Note: we aren't setting a namespace here for the annotation. That
# happens in the `_get_fields` function in `types/type_resolver` so
# that we have access to the correct namespace for the object type
# the field is attached to.
self.type_annotation = StrawberryAnnotation.from_annotation(
type_, namespace=None
)
# TODO: add this to arguments (and/or move it to StrawberryType)
@property
def type_params(self) -> List[TypeVar]:
if hasattr(self.type, "_type_definition"):
parameters = getattr(self.type, "__parameters__", None)
return list(parameters) if parameters else []
# TODO: Consider making leaf types always StrawberryTypes, maybe a
# StrawberryBaseType or something
if isinstance(self.type, StrawberryType):
return self.type.type_params
return []
def copy_with(
self, type_var_map: Mapping[TypeVar, Union[StrawberryType, builtins.type]]
) -> StrawberryField:
new_type: Union[StrawberryType, type] = self.type
# TODO: Remove with creation of StrawberryObject. Will act same as other
# StrawberryTypes
if hasattr(self.type, "_type_definition"):
type_definition: TypeDefinition = self.type._type_definition
if type_definition.is_generic:
type_ = type_definition
new_type = type_.copy_with(type_var_map)
elif isinstance(self.type, StrawberryType):
new_type = self.type.copy_with(type_var_map)
new_resolver = (
self.base_resolver.copy_with(type_var_map)
if self.base_resolver is not None
else None
)
return StrawberryField(
python_name=self.python_name,
graphql_name=self.graphql_name,
# TODO: do we need to wrap this in `StrawberryAnnotation`?
# see comment related to dataclasses above
type_annotation=StrawberryAnnotation(new_type),
origin=self.origin,
is_subscription=self.is_subscription,
description=self.description,
base_resolver=new_resolver,
permission_classes=self.permission_classes,
default=self.default_value,
# ignored because of https://github.com/python/mypy/issues/6910
default_factory=self.default_factory,
deprecation_reason=self.deprecation_reason,
)
@property
def _has_async_permission_classes(self) -> bool:
for permission_class in self.permission_classes:
if inspect.iscoroutinefunction(permission_class.has_permission):
return True
return False
@property
def _has_async_base_resolver(self) -> bool:
return self.base_resolver is not None and self.base_resolver.is_async
@cached_property
def is_async(self) -> bool:
return self._has_async_permission_classes or self._has_async_base_resolver
@overload
def field(
*,
resolver: _RESOLVER_TYPE[T],
name: Optional[str] = None,
is_subscription: bool = False,
description: Optional[str] = None,
init: Literal[False] = False,
permission_classes: Optional[List[Type[BasePermission]]] = None,
deprecation_reason: Optional[str] = None,
default: Any = dataclasses.MISSING,
default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,
metadata: Optional[Mapping[Any, Any]] = None,
directives: Optional[Sequence[object]] = (),
extensions: Optional[List[FieldExtension]] = None,
graphql_type: Optional[Any] = None,
) -> T:
...
@overload
def field(
*,
name: Optional[str] = None,
is_subscription: bool = False,
description: Optional[str] = None,
init: Literal[True] = True,
permission_classes: Optional[List[Type[BasePermission]]] = None,
deprecation_reason: Optional[str] = None,
default: Any = dataclasses.MISSING,
default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,
metadata: Optional[Mapping[Any, Any]] = None,
directives: Optional[Sequence[object]] = (),
extensions: Optional[List[FieldExtension]] = None,
graphql_type: Optional[Any] = None,
) -> Any:
...
@overload
def field(
resolver: _RESOLVER_TYPE[T],
*,
name: Optional[str] = None,
is_subscription: bool = False,
description: Optional[str] = None,
permission_classes: Optional[List[Type[BasePermission]]] = None,
deprecation_reason: Optional[str] = None,
default: Any = dataclasses.MISSING,
default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,
metadata: Optional[Mapping[Any, Any]] = None,
directives: Optional[Sequence[object]] = (),
extensions: Optional[List[FieldExtension]] = None,
graphql_type: Optional[Any] = None,
) -> StrawberryField:
...
def field(
resolver: Optional[_RESOLVER_TYPE[Any]] = None,
*,
name: Optional[str] = None,
is_subscription: bool = False,
description: Optional[str] = None,
permission_classes: Optional[List[Type[BasePermission]]] = None,
deprecation_reason: Optional[str] = None,
default: Any = dataclasses.MISSING,
default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,
metadata: Optional[Mapping[Any, Any]] = None,
directives: Optional[Sequence[object]] = (),
extensions: Optional[List[FieldExtension]] = None,
graphql_type: Optional[Any] = None,
# This init parameter is used by PyRight to determine whether this field
# is added in the constructor or not. It is not used to change
# any behavior at the moment.
init: Literal[True, False, None] = None,
) -> Any:
"""Annotates a method or property as a GraphQL field.
This is normally used inside a type declaration:
>>> @strawberry.type:
>>> class X:
>>> field_abc: str = strawberry.field(description="ABC")
>>> @strawberry.field(description="ABC")
>>> def field_with_resolver(self) -> str:
>>> return "abc"
it can be used both as decorator and as a normal function.
"""
type_annotation = StrawberryAnnotation.from_annotation(graphql_type)
field_ = StrawberryField(
python_name=None,
graphql_name=name,
type_annotation=type_annotation,
description=description,
is_subscription=is_subscription,
permission_classes=permission_classes or [],
deprecation_reason=deprecation_reason,
default=default,
default_factory=default_factory,
metadata=metadata,
directives=directives or (),
extensions=extensions or [],
)
if resolver:
assert init is not True, "Can't set init as True when passing a resolver."
return field_(resolver)
return field_
__all__ = ["StrawberryField", "field"] | 564bff00ff-strawberry-graphql | /564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/field.py | field.py |
import importlib
import inspect
import sys
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import ForwardRef, Generic, Optional, Type, TypeVar, cast
TypeName = TypeVar("TypeName")
Module = TypeVar("Module")
@dataclass(frozen=True)
class LazyType(Generic[TypeName, Module]):
type_name: str
module: str
package: Optional[str] = None
def __class_getitem__(cls, params):
warnings.warn(
(
"LazyType is deprecated, use "
"Annotated[YourType, strawberry.lazy(path)] instead"
),
DeprecationWarning,
stacklevel=2,
)
type_name, module = params
package = None
if module.startswith("."):
current_frame = inspect.currentframe()
assert current_frame is not None
assert current_frame.f_back is not None
package = current_frame.f_back.f_globals["__package__"]
return cls(type_name, module, package)
def resolve_type(self) -> Type:
module = importlib.import_module(self.module, self.package)
main_module = sys.modules.get("__main__", None)
if main_module:
# If lazy type points to the main module, use it instead of the imported
# module. Otherwise duplication checks during schema-conversion might fail.
# Refer to: https://github.com/strawberry-graphql/strawberry/issues/2397
if main_module.__spec__ and main_module.__spec__.name == self.module:
module = main_module
elif hasattr(main_module, "__file__") and hasattr(module, "__file__"):
main_file = main_module.__file__
module_file = module.__file__
if main_file and module_file:
try:
is_samefile = Path(main_file).samefile(module_file)
except FileNotFoundError:
# Can be raised when run through the CLI as the __main__ file
# path contains `strawberry.exe`
is_samefile = False
module = main_module if is_samefile else module
return module.__dict__[self.type_name]
# this empty call method allows LazyTypes to be used in generic types
# for example: List[LazyType["A", "module"]]
def __call__(self): # pragma: no cover
return None
class StrawberryLazyReference:
def __init__(self, module: str) -> None:
self.module = module
self.package = None
if module.startswith("."):
frame = inspect.stack()[2][0]
# TODO: raise a nice error if frame is None
assert frame is not None
self.package = cast(str, frame.f_globals["__package__"])
def resolve_forward_ref(self, forward_ref: ForwardRef) -> LazyType:
return LazyType(forward_ref.__forward_arg__, self.module, self.package)
def lazy(module_path: str) -> StrawberryLazyReference:
return StrawberryLazyReference(module_path) | 564bff00ff-strawberry-graphql | /564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/lazy_type.py | lazy_type.py |
from __future__ import annotations
import dataclasses
from typing import TYPE_CHECKING, Any, Callable, List, Optional, TypeVar
from typing_extensions import Annotated
from graphql import DirectiveLocation
from strawberry.field import StrawberryField
from strawberry.types.fields.resolver import (
INFO_PARAMSPEC,
ReservedType,
StrawberryResolver,
)
from strawberry.unset import UNSET
from strawberry.utils.cached_property import cached_property
if TYPE_CHECKING:
import inspect
from strawberry.arguments import StrawberryArgument
def directive_field(name: str, default: object = UNSET) -> Any:
return StrawberryField(
python_name=None,
graphql_name=name,
default=default,
)
T = TypeVar("T")
class StrawberryDirectiveValue:
...
DirectiveValue = Annotated[T, StrawberryDirectiveValue()]
DirectiveValue.__doc__ = (
"""Represents the ``value`` argument for a GraphQL query directive."""
)
# Registers `DirectiveValue[...]` annotated arguments as reserved
VALUE_PARAMSPEC = ReservedType(name="value", type=StrawberryDirectiveValue)
class StrawberryDirectiveResolver(StrawberryResolver[T]):
RESERVED_PARAMSPEC = (
INFO_PARAMSPEC,
VALUE_PARAMSPEC,
)
@cached_property
def value_parameter(self) -> Optional[inspect.Parameter]:
return self.reserved_parameters.get(VALUE_PARAMSPEC)
@dataclasses.dataclass
class StrawberryDirective:
python_name: str
graphql_name: Optional[str]
resolver: StrawberryDirectiveResolver
locations: List[DirectiveLocation]
description: Optional[str] = None
@cached_property
def arguments(self) -> List[StrawberryArgument]:
return self.resolver.arguments
def directive(
*,
locations: List[DirectiveLocation],
description: Optional[str] = None,
name: Optional[str] = None,
) -> Callable[[Callable[..., T]], T]:
def _wrap(f: Callable[..., T]) -> T:
return StrawberryDirective( # type: ignore
python_name=f.__name__,
graphql_name=name,
locations=locations,
description=description,
resolver=StrawberryDirectiveResolver(f),
)
return _wrap
__all__ = ["DirectiveLocation", "StrawberryDirective", "directive"] | 564bff00ff-strawberry-graphql | /564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/directive.py | directive.py |
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List, Mapping, TypeVar, Union
if TYPE_CHECKING:
from .types.types import TypeDefinition
class StrawberryType(ABC):
@property
def type_params(self) -> List[TypeVar]:
return []
@abstractmethod
def copy_with(
self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
) -> Union[StrawberryType, type]:
raise NotImplementedError()
@property
@abstractmethod
def is_generic(self) -> bool:
raise NotImplementedError()
def has_generic(self, type_var: TypeVar) -> bool:
return False
def __eq__(self, other: object) -> bool:
from strawberry.annotation import StrawberryAnnotation
if isinstance(other, StrawberryType):
return self is other
elif isinstance(other, StrawberryAnnotation):
return self == other.resolve()
else:
# This could be simplified if StrawberryAnnotation.resolve() always returned
# a StrawberryType
resolved = StrawberryAnnotation(other).resolve()
if isinstance(resolved, StrawberryType):
return self == resolved
else:
return NotImplemented
def __hash__(self) -> int:
# TODO: Is this a bad idea? __eq__ objects are supposed to have the same hash
return id(self)
class StrawberryContainer(StrawberryType):
def __init__(self, of_type: Union[StrawberryType, type]):
self.of_type = of_type
def __hash__(self) -> int:
return hash((self.__class__, self.of_type))
def __eq__(self, other: object) -> bool:
if isinstance(other, StrawberryType):
if isinstance(other, StrawberryContainer):
return self.of_type == other.of_type
else:
return False
return super().__eq__(other)
@property
def type_params(self) -> List[TypeVar]:
if hasattr(self.of_type, "_type_definition"):
parameters = getattr(self.of_type, "__parameters__", None)
return list(parameters) if parameters else []
elif isinstance(self.of_type, StrawberryType):
return self.of_type.type_params
else:
return []
def copy_with(
self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
) -> StrawberryType:
of_type_copy: Union[StrawberryType, type] = self.of_type
# TODO: Obsolete with StrawberryObject
if hasattr(self.of_type, "_type_definition"):
type_definition: TypeDefinition = self.of_type._type_definition
if type_definition.is_generic:
of_type_copy = type_definition.copy_with(type_var_map)
elif isinstance(self.of_type, StrawberryType) and self.of_type.is_generic:
of_type_copy = self.of_type.copy_with(type_var_map)
return type(self)(of_type_copy)
@property
def is_generic(self) -> bool:
# TODO: Obsolete with StrawberryObject
type_ = self.of_type
if hasattr(self.of_type, "_type_definition"):
type_ = self.of_type._type_definition
if isinstance(type_, StrawberryType):
return type_.is_generic
return False
def has_generic(self, type_var: TypeVar) -> bool:
if isinstance(self.of_type, StrawberryType):
return self.of_type.has_generic(type_var)
return False
class StrawberryList(StrawberryContainer):
...
class StrawberryOptional(StrawberryContainer):
...
class StrawberryTypeVar(StrawberryType):
def __init__(self, type_var: TypeVar):
self.type_var = type_var
def copy_with(
self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
) -> Union[StrawberryType, type]:
return type_var_map[self.type_var]
@property
def is_generic(self) -> bool:
return True
def has_generic(self, type_var: TypeVar) -> bool:
return self.type_var == type_var
@property
def type_params(self) -> List[TypeVar]:
return [self.type_var]
def __eq__(self, other: object) -> bool:
if isinstance(other, StrawberryTypeVar):
return self.type_var == other.type_var
if isinstance(other, TypeVar):
return self.type_var == other
return super().__eq__(other) | 564bff00ff-strawberry-graphql | /564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/type.py | type.py |
import dataclasses
from enum import EnumMeta
from typing import (
Any,
Callable,
Iterable,
List,
Mapping,
Optional,
TypeVar,
Union,
overload,
)
from strawberry.type import StrawberryType
from .exceptions import ObjectIsNotAnEnumError
@dataclasses.dataclass
class EnumValue:
    """Internal representation of a single member of a GraphQL enum."""

    # GraphQL name of the member (the Python enum member's name).
    name: str
    # Underlying Python value of the member.
    value: Any
    # If set, the member is exposed as deprecated in the schema with this reason.
    deprecation_reason: Optional[str] = None
    # Schema directives attached to this enum value.
    directives: Iterable[object] = ()
    # Optional GraphQL description for this value.
    description: Optional[str] = None
@dataclasses.dataclass
class EnumDefinition(StrawberryType):
    """Strawberry's internal definition of a GraphQL enum type.

    Bundles the wrapped Python ``enum.Enum`` class with the metadata
    (name, values, description, directives) needed to emit the type in
    the GraphQL schema.
    """

    # The original Python enum class this definition was built from.
    wrapped_cls: EnumMeta
    # GraphQL type name.
    name: str
    # One EnumValue per member of the wrapped enum.
    values: List[EnumValue]
    # Optional GraphQL description for the enum type.
    description: Optional[str]
    # Schema directives attached to the enum type.
    directives: Iterable[object] = ()

    def __hash__(self) -> int:
        # TODO: Is this enough for unique-ness?
        return hash(self.name)

    def copy_with(
        self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
    ) -> Union[StrawberryType, type]:
        # enum don't support type parameters, so we can safely return self
        return self

    @property
    def is_generic(self) -> bool:
        # Enums are never generic types.
        return False
# TODO: remove duplication of EnumValueDefinition and EnumValue
@dataclasses.dataclass
class EnumValueDefinition:
    """User-facing wrapper produced by `enum_value` to customise one member.

    `_process_enum` unwraps instances of this class when building the
    enum's `EnumDefinition`.
    """

    # The actual Python value the enum member should hold.
    value: Any
    # If set, the member is exposed as deprecated with this reason.
    deprecation_reason: Optional[str] = None
    # Schema directives attached to this enum value.
    directives: Iterable[object] = ()
    # Optional GraphQL description for this value.
    description: Optional[str] = None
def enum_value(
value: Any,
deprecation_reason: Optional[str] = None,
directives: Iterable[object] = (),
description: Optional[str] = None,
) -> EnumValueDefinition:
return EnumValueDefinition(
value=value,
deprecation_reason=deprecation_reason,
directives=directives,
description=description,
)
EnumType = TypeVar("EnumType", bound=EnumMeta)


def _process_enum(
    cls: EnumType,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Iterable[object] = (),
) -> EnumType:
    """Attach an ``EnumDefinition`` to *cls* so it can be used in a schema.

    Raises ``ObjectIsNotAnEnumError`` when *cls* is not an Enum class.
    Members whose value is an ``EnumValueDefinition`` are unwrapped: their
    metadata is copied onto the generated ``EnumValue`` and the member's value
    is replaced by the wrapped one.
    """
    if not isinstance(cls, EnumMeta):
        raise ObjectIsNotAnEnumError(cls)

    if not name:
        name = cls.__name__

    # NOTE: the original code had a no-op `description = description` here;
    # removed as dead code.

    values = []
    for item in cls:  # type: ignore
        item_value = item.value
        item_name = item.name
        deprecation_reason = None
        item_directives: Iterable[object] = ()
        enum_value_description = None

        if isinstance(item_value, EnumValueDefinition):
            item_directives = item_value.directives
            enum_value_description = item_value.description
            deprecation_reason = item_value.deprecation_reason
            item_value = item_value.value

            # update _value2member_map_ so that doing `MyEnum.MY_VALUE` and
            # `MyEnum['MY_VALUE']` both work
            cls._value2member_map_[item_value] = item
            cls._member_map_[item_name]._value_ = item_value

        value = EnumValue(
            item_name,
            item_value,
            deprecation_reason=deprecation_reason,
            directives=item_directives,
            description=enum_value_description,
        )
        values.append(value)

    cls._enum_definition = EnumDefinition(  # type: ignore
        wrapped_cls=cls,
        name=name,
        values=values,
        description=description,
        directives=directives,
    )

    return cls
# Overload: bare usage — `@strawberry.enum` applied directly to the class.
@overload
def enum(
    _cls: EnumType,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Iterable[object] = ()
) -> EnumType:
    ...
# Overload: parenthesised usage — `@strawberry.enum(...)` returns a decorator.
@overload
def enum(
    _cls: None = None,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Iterable[object] = ()
) -> Callable[[EnumType], EnumType]:
    ...
def enum(
    _cls: Optional[EnumType] = None,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Iterable[object] = ()
) -> Union[EnumType, Callable[[EnumType], EnumType]]:
    """Registers the enum in the GraphQL type system.

    If name is passed, the name of the GraphQL type will be
    the value passed of name instead of the Enum class name.
    """

    def wrap(cls: EnumType) -> EnumType:
        return _process_enum(cls, name, description, directives=directives)

    # Bare usage passes the class directly; parenthesised usage returns the
    # decorator itself. (Truthiness check kept as in the original.)
    return wrap(_cls) if _cls else wrap
from __future__ import annotations
import inspect
import warnings
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Union,
cast,
)
from typing_extensions import Annotated, get_args, get_origin
from strawberry.annotation import StrawberryAnnotation
from strawberry.enum import EnumDefinition
from strawberry.lazy_type import LazyType, StrawberryLazyReference
from strawberry.type import StrawberryList, StrawberryOptional
from .exceptions import MultipleStrawberryArgumentsError, UnsupportedTypeError
from .scalars import is_scalar
from .unset import UNSET as _deprecated_UNSET
from .unset import _deprecated_is_unset # noqa
if TYPE_CHECKING:
from strawberry.custom_scalar import ScalarDefinition, ScalarWrapper
from strawberry.schema.config import StrawberryConfig
from strawberry.type import StrawberryType
from .types.types import TypeDefinition
# Maps deprecated module attribute names to their deprecation messages; the
# module-level `__getattr__` below warns with these and forwards access to
# the corresponding `_deprecated_*` objects.
DEPRECATED_NAMES: Dict[str, str] = {
    "UNSET": (
        "importing `UNSET` from `strawberry.arguments` is deprecated, "
        "import instead from `strawberry` or from `strawberry.unset`"
    ),
    "is_unset": "`is_unset` is deprecated use `value is UNSET` instead",
}
class StrawberryArgumentAnnotation:
    """GraphQL metadata for a resolver argument, attached via ``Annotated``."""

    description: Optional[str]
    name: Optional[str]
    deprecation_reason: Optional[str]
    directives: Iterable[object]

    def __init__(
        self,
        description: Optional[str] = None,
        name: Optional[str] = None,
        deprecation_reason: Optional[str] = None,
        directives: Iterable[object] = (),
    ):
        # Plain value container: store everything as-is.
        self.name = name
        self.description = description
        self.directives = directives
        self.deprecation_reason = deprecation_reason
class StrawberryArgument:
    """Internal representation of a single GraphQL field argument."""

    def __init__(
        self,
        python_name: str,
        graphql_name: Optional[str],
        type_annotation: StrawberryAnnotation,
        is_subscription: bool = False,
        description: Optional[str] = None,
        default: object = _deprecated_UNSET,
        deprecation_reason: Optional[str] = None,
        directives: Iterable[object] = (),
    ) -> None:
        self.python_name = python_name
        self.graphql_name = graphql_name
        self.is_subscription = is_subscription
        self.description = description
        self._type: Optional[StrawberryType] = None
        self.type_annotation = type_annotation
        self.deprecation_reason = deprecation_reason
        self.directives = directives

        # TODO: Consider moving this logic to a function
        # Normalise "no default" (inspect.Parameter.empty) to UNSET.
        self.default = (
            _deprecated_UNSET if default is inspect.Parameter.empty else default
        )

        # `Annotated[...]` annotations may carry extra Strawberry metadata.
        if self._annotation_is_annotated(type_annotation):
            self._parse_annotated()

    @property
    def type(self) -> Union[StrawberryType, type]:
        # Resolved on access; the annotation may contain forward references.
        return self.type_annotation.resolve()

    @classmethod
    def _annotation_is_annotated(cls, annotation: StrawberryAnnotation) -> bool:
        return get_origin(annotation.annotation) is Annotated

    def _parse_annotated(self):
        """Extract Strawberry metadata from an ``Annotated[...]`` annotation.

        Raises MultipleStrawberryArgumentsError if more than one
        StrawberryArgumentAnnotation is present.
        """
        annotated_args = get_args(self.type_annotation.annotation)

        # The first argument to Annotated is always the underlying type
        self.type_annotation = StrawberryAnnotation(annotated_args[0])

        # Find any instances of StrawberryArgumentAnnotation
        # in the other Annotated args, raising an exception if there
        # are multiple StrawberryArgumentAnnotations
        argument_annotation_seen = False
        for arg in annotated_args[1:]:
            if isinstance(arg, StrawberryArgumentAnnotation):
                if argument_annotation_seen:
                    raise MultipleStrawberryArgumentsError(
                        argument_name=self.python_name
                    )

                argument_annotation_seen = True

                self.description = arg.description
                self.graphql_name = arg.name
                self.deprecation_reason = arg.deprecation_reason
                self.directives = arg.directives

            # A lazy reference replaces the underlying type annotation.
            if isinstance(arg, StrawberryLazyReference):
                self.type_annotation = StrawberryAnnotation(
                    arg.resolve_forward_ref(annotated_args[0])
                )
def convert_argument(
    value: object,
    type_: Union[StrawberryType, type],
    scalar_registry: Dict[object, Union[ScalarWrapper, ScalarDefinition]],
    config: StrawberryConfig,
) -> object:
    """Convert a raw GraphQL argument *value* into the Python value expected
    by the resolver, recursing through optionals, lists and input objects.

    NOTE: the order of the checks below is significant — optional/list
    wrappers are unwrapped before scalar/enum/lazy/input handling.
    """
    # TODO: move this somewhere else and make it first class

    if value is None:
        return None

    if value is _deprecated_UNSET:
        return _deprecated_UNSET

    if isinstance(type_, StrawberryOptional):
        return convert_argument(value, type_.of_type, scalar_registry, config)

    if isinstance(type_, StrawberryList):
        value_list = cast(Iterable, value)
        return [
            convert_argument(x, type_.of_type, scalar_registry, config)
            for x in value_list
        ]

    if is_scalar(type_, scalar_registry):
        return value

    if isinstance(type_, EnumDefinition):
        return value

    if isinstance(type_, LazyType):
        return convert_argument(value, type_.resolve_type(), scalar_registry, config)

    if hasattr(type_, "_enum_definition"):
        enum_definition: EnumDefinition = type_._enum_definition
        return convert_argument(value, enum_definition, scalar_registry, config)

    if hasattr(type_, "_type_definition"):  # TODO: Replace with StrawberryInputObject
        type_definition: TypeDefinition = type_._type_definition

        kwargs = {}

        for field in type_definition.fields:
            value = cast(Mapping, value)
            graphql_name = config.name_converter.from_field(field)

            # Only convert keys that were actually provided.
            if graphql_name in value:
                kwargs[field.python_name] = convert_argument(
                    value[graphql_name], field.type, scalar_registry, config
                )

        type_ = cast(type, type_)
        return type_(**kwargs)

    raise UnsupportedTypeError(type_)
def convert_arguments(
    value: Dict[str, Any],
    arguments: List[StrawberryArgument],
    scalar_registry: Dict[object, Union[ScalarWrapper, ScalarDefinition]],
    config: StrawberryConfig,
) -> Dict[str, Any]:
    """Convert a nested dictionary of raw GraphQL values into resolver kwargs.

    Input types become their dataclass instances; arguments absent from
    *value* are simply omitted from the result.
    """
    if not arguments:
        return {}

    converted: Dict[str, Any] = {}

    for arg in arguments:
        assert arg.python_name

        graphql_name = config.name_converter.from_argument(arg)
        if graphql_name not in value:
            continue

        converted[arg.python_name] = convert_argument(
            value=value[graphql_name],
            type_=arg.type,
            scalar_registry=scalar_registry,
            config=config,
        )

    return converted
def argument(
    description: Optional[str] = None,
    name: Optional[str] = None,
    deprecation_reason: Optional[str] = None,
    directives: Iterable[object] = (),
) -> StrawberryArgumentAnnotation:
    """Build the annotation object placed inside ``Annotated[...]`` to attach
    GraphQL metadata to a resolver argument."""
    # Positional order matches StrawberryArgumentAnnotation.__init__.
    annotation = StrawberryArgumentAnnotation(
        description,
        name,
        deprecation_reason,
        directives,
    )
    return annotation
def __getattr__(name: str) -> Any:
    """Module-level fallback that serves deprecated names with a warning."""
    message = DEPRECATED_NAMES.get(name)
    if message is None:
        raise AttributeError(f"module {__name__} has no attribute {name}")
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    # Deprecated names are stored with a `_deprecated_` prefix in this module.
    return globals()[f"_deprecated_{name}"]
# TODO: check exports
# `UNSET` and `is_unset` are served dynamically by `__getattr__` above,
# hence the `noqa: F822` (they are not statically defined in this module).
__all__ = [  # noqa: F822
    "StrawberryArgument",
    "StrawberryArgumentAnnotation",
    "UNSET",  # for backwards compatibility
    "argument",
    "is_unset",  # for backwards compatibility
]
from __future__ import annotations
import sys
import typing
from collections import abc
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Dict,
ForwardRef,
Optional,
TypeVar,
Union,
cast,
)
from typing_extensions import Annotated, Self, get_args, get_origin
from strawberry.custom_scalar import ScalarDefinition
from strawberry.enum import EnumDefinition
from strawberry.exceptions.not_a_strawberry_enum import NotAStrawberryEnumError
from strawberry.lazy_type import LazyType
from strawberry.private import is_private
from strawberry.type import StrawberryList, StrawberryOptional, StrawberryTypeVar
from strawberry.types.types import TypeDefinition
from strawberry.unset import UNSET
from strawberry.utils.typing import (
eval_type,
is_generic,
is_type_var,
)
if TYPE_CHECKING:
from strawberry.field import StrawberryField
from strawberry.type import StrawberryType
from strawberry.union import StrawberryUnion
# Async wrapper types that may appear around an annotation;
# `StrawberryAnnotation.resolve` strips one of these to get the inner type.
ASYNC_TYPES = (
    abc.AsyncGenerator,
    abc.AsyncIterable,
    abc.AsyncIterator,
    typing.AsyncContextManager,
    typing.AsyncGenerator,
    typing.AsyncIterable,
    typing.AsyncIterator,
)
class StrawberryAnnotation:
    """Wraps a raw Python annotation (possibly a string forward reference)
    and resolves it to a StrawberryType or plain type on demand."""

    def __init__(
        self, annotation: Union[object, str], *, namespace: Optional[Dict] = None
    ):
        self.annotation = annotation
        # Namespace used to evaluate string/forward-reference annotations.
        self.namespace = namespace

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, StrawberryAnnotation):
            return NotImplemented

        # Two annotations are equal when they resolve to the same type.
        return self.resolve() == other.resolve()

    @staticmethod
    def from_annotation(
        annotation: object, namespace: Optional[Dict] = None
    ) -> Optional[StrawberryAnnotation]:
        """Coerce *annotation* into a StrawberryAnnotation.

        ``None`` stays ``None``; an existing StrawberryAnnotation is returned
        unchanged.
        """
        if annotation is None:
            return None

        if not isinstance(annotation, StrawberryAnnotation):
            return StrawberryAnnotation(annotation, namespace=namespace)
        return annotation

    def resolve(self) -> Union[StrawberryType, type]:
        """Evaluate the stored annotation and convert it to a StrawberryType
        (or return the evaluated type unchanged when no conversion applies).

        NOTE: the order of the checks below is significant — private types
        short-circuit, async wrappers are stripped first, then lazy/generic/
        already-strawberry types are handled before the raw conversions.
        """
        annotation = self.annotation

        if isinstance(annotation, str):
            annotation = ForwardRef(annotation)

        evaled_type = eval_type(annotation, self.namespace, None)

        if is_private(evaled_type):
            return evaled_type
        if get_origin(evaled_type) is Annotated:
            # The first Annotated argument is the underlying type.
            evaled_type = get_args(evaled_type)[0]

        if self._is_async_type(evaled_type):
            evaled_type = self._strip_async_type(evaled_type)
        if self._is_lazy_type(evaled_type):
            return evaled_type

        if self._is_generic(evaled_type):
            # Unbound type vars mean the generic is still abstract.
            if any(is_type_var(type_) for type_ in evaled_type.__args__):
                return evaled_type
            return self.create_concrete_type(evaled_type)

        # Simply return objects that are already StrawberryTypes
        if self._is_strawberry_type(evaled_type):
            return evaled_type

        # Everything remaining should be a raw annotation that needs to be turned into
        # a StrawberryType
        if self._is_enum(evaled_type):
            return self.create_enum(evaled_type)
        if self._is_list(evaled_type):
            return self.create_list(evaled_type)
        elif self._is_optional(evaled_type):
            return self.create_optional(evaled_type)
        elif self._is_union(evaled_type):
            return self.create_union(evaled_type)
        elif is_type_var(evaled_type) or evaled_type is Self:
            return self.create_type_var(cast(TypeVar, evaled_type))

        # TODO: Raise exception now, or later?
        # ... raise NotImplementedError(f"Unknown type {evaled_type}")
        return evaled_type

    def set_namespace_from_field(self, field: StrawberryField) -> None:
        # Use the globals of the module that defined the field's origin class.
        module = sys.modules[field.origin.__module__]
        self.namespace = module.__dict__

    def create_concrete_type(self, evaled_type: type) -> type:
        if _is_object_type(evaled_type):
            type_definition: TypeDefinition
            type_definition = evaled_type._type_definition  # type: ignore
            return type_definition.resolve_generic(evaled_type)
        raise ValueError(f"Not supported {evaled_type}")

    def create_enum(self, evaled_type: Any) -> EnumDefinition:
        try:
            return evaled_type._enum_definition
        except AttributeError:
            raise NotAStrawberryEnumError(evaled_type)

    def create_list(self, evaled_type: Any) -> StrawberryList:
        # Resolve the element type recursively, keeping our namespace.
        of_type = StrawberryAnnotation(
            annotation=evaled_type.__args__[0],
            namespace=self.namespace,
        ).resolve()

        return StrawberryList(of_type)

    def create_optional(self, evaled_type: Any) -> StrawberryOptional:
        types = evaled_type.__args__
        # Drop NoneType and UNSET's type from the union's members.
        non_optional_types = tuple(
            filter(
                lambda x: x is not type(None) and x is not type(UNSET),
                types,
            )
        )

        # Note that passing a single type to `Union` is equivalent to not using `Union`
        # at all. This allows us to not do any checks for how many types have been
        # passed as we can safely use `Union` for both optional types
        # (e.g. `Optional[str]`) and optional unions (e.g.
        # `Optional[Union[TypeA, TypeB]]`)
        child_type = Union[non_optional_types]  # type: ignore

        of_type = StrawberryAnnotation(
            annotation=child_type,
            namespace=self.namespace,
        ).resolve()

        return StrawberryOptional(of_type)

    def create_type_var(self, evaled_type: TypeVar) -> StrawberryTypeVar:
        return StrawberryTypeVar(evaled_type)

    def create_union(self, evaled_type) -> StrawberryUnion:
        # Prevent import cycles
        from strawberry.union import StrawberryUnion

        # TODO: Deal with Forward References/origin
        if isinstance(evaled_type, StrawberryUnion):
            return evaled_type

        types = evaled_type.__args__
        union = StrawberryUnion(
            type_annotations=tuple(StrawberryAnnotation(type_) for type_ in types),
        )
        return union

    @classmethod
    def _is_async_type(cls, annotation: type) -> bool:
        origin = getattr(annotation, "__origin__", None)
        return origin in ASYNC_TYPES

    @classmethod
    def _is_enum(cls, annotation: Any) -> bool:
        # Type aliases are not types so we need to make sure annotation can go into
        # issubclass
        if not isinstance(annotation, type):
            return False
        return issubclass(annotation, Enum)

    @classmethod
    def _is_generic(cls, annotation: Any) -> bool:
        if hasattr(annotation, "__origin__"):
            return is_generic(annotation.__origin__)
        return False

    @classmethod
    def _is_lazy_type(cls, annotation: Any) -> bool:
        return isinstance(annotation, LazyType)

    @classmethod
    def _is_optional(cls, annotation: Any) -> bool:
        """Returns True if the annotation is Optional[SomeType]"""

        # Optionals are represented as unions
        if not cls._is_union(annotation):
            return False

        types = annotation.__args__

        # A Union to be optional needs to have at least one None type
        return any(x is type(None) for x in types)

    @classmethod
    def _is_list(cls, annotation: Any) -> bool:
        """Returns True if annotation is a List"""

        annotation_origin = getattr(annotation, "__origin__", None)

        return (annotation_origin in (list, tuple)) or annotation_origin is abc.Sequence

    @classmethod
    def _is_strawberry_type(cls, evaled_type: Any) -> bool:
        # Prevent import cycles
        from strawberry.union import StrawberryUnion

        if isinstance(evaled_type, EnumDefinition):
            return True
        elif _is_input_type(evaled_type):  # TODO: Replace with StrawberryInputObject
            return True
        # TODO: add support for StrawberryInterface when implemented
        elif isinstance(evaled_type, StrawberryList):
            return True
        elif _is_object_type(evaled_type):  # TODO: Replace with StrawberryObject
            return True
        elif isinstance(evaled_type, TypeDefinition):
            return True
        elif isinstance(evaled_type, StrawberryOptional):
            return True
        elif isinstance(
            evaled_type, ScalarDefinition
        ):  # TODO: Replace with StrawberryScalar
            return True
        elif isinstance(evaled_type, StrawberryUnion):
            return True

        return False

    @classmethod
    def _is_union(cls, annotation: Any) -> bool:
        """Returns True if annotation is a Union"""

        # this check is needed because unions declared with the new syntax `A | B`
        # don't have a `__origin__` property on them, but they are instances of
        # `UnionType`, which is only available in Python 3.10+
        if sys.version_info >= (3, 10):
            from types import UnionType

            if isinstance(annotation, UnionType):
                return True

        # unions declared as Union[A, B] fall through to this check
        # even on python 3.10+
        annotation_origin = getattr(annotation, "__origin__", None)

        return annotation_origin is typing.Union

    @classmethod
    def _strip_async_type(cls, annotation) -> type:
        # The async wrappers carry the interesting type as their first argument.
        return annotation.__args__[0]

    @classmethod
    def _strip_lazy_type(cls, annotation: LazyType) -> type:
        return annotation.resolve_type()
################################################################################
# Temporary functions to be removed with new types
################################################################################
def _is_input_type(type_: Any) -> bool:
if not _is_object_type(type_):
return False
return type_._type_definition.is_input
def _is_object_type(type_: Any) -> bool:
return hasattr(type_, "_type_definition") | 564bff00ff-strawberry-graphql | /564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/annotation.py | annotation.py |
from __future__ import annotations
import dataclasses
from abc import ABC, abstractmethod
from asyncio import create_task, gather, get_event_loop
from asyncio.futures import Future
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Generic,
Hashable,
Iterable,
List,
Mapping,
Optional,
Sequence,
TypeVar,
Union,
overload,
)
from .exceptions import WrongNumberOfResultsReturned
if TYPE_CHECKING:
from asyncio.events import AbstractEventLoop
T = TypeVar("T")
K = TypeVar("K")


@dataclass
class LoaderTask(Generic[K, T]):
    """A single pending load: the requested key and the future to resolve."""

    key: K
    future: Future


@dataclass
class Batch(Generic[K, T]):
    """A group of pending loads fetched together with one `load_fn` call."""

    tasks: List[LoaderTask] = dataclasses.field(default_factory=list)
    dispatched: bool = False

    def add_task(self, key: Any, future: Future) -> None:
        self.tasks.append(LoaderTask[K, T](key, future))

    def __len__(self) -> int:
        return len(self.tasks)


class AbstractCache(Generic[K, T], ABC):
    """Cache interface used by DataLoader to memoise futures per key."""

    @abstractmethod
    def get(self, key: K) -> Union[Future[T], None]:
        pass

    @abstractmethod
    def set(self, key: K, value: Future[T]) -> None:
        pass

    @abstractmethod
    def delete(self, key: K) -> None:
        pass

    @abstractmethod
    def clear(self) -> None:
        pass


class DefaultCache(AbstractCache[K, T]):
    """Dict-backed cache; keys are transformed by an optional key function."""

    def __init__(self, cache_key_fn: Optional[Callable[[K], Hashable]] = None) -> None:
        # Fall back to the identity function when no key function is given.
        identity: Callable[[K], Hashable] = lambda x: x
        self.cache_key_fn = cache_key_fn if cache_key_fn is not None else identity
        self.cache_map: Dict[Hashable, Future[T]] = {}

    def get(self, key: K) -> Union[Future[T], None]:
        return self.cache_map.get(self.cache_key_fn(key))

    def set(self, key: K, value: Future[T]) -> None:
        self.cache_map[self.cache_key_fn(key)] = value

    def delete(self, key: K) -> None:
        del self.cache_map[self.cache_key_fn(key)]

    def clear(self) -> None:
        self.cache_map.clear()
class DataLoader(Generic[K, T]):
    """Batches and caches calls to an async `load_fn`.

    Individual `load(key)` calls are collected into a `Batch` and fetched
    together via `load_fn(keys)`. When `cache` is enabled, results are
    memoised as futures keyed by the load key.
    """

    batch: Optional[Batch[K, T]] = None
    cache: bool = False
    cache_map: AbstractCache[K, T]

    @overload
    def __init__(
        self,
        # any BaseException is rethrown in 'load', so should be excluded from the T type
        load_fn: Callable[[List[K]], Awaitable[Sequence[Union[T, BaseException]]]],
        max_batch_size: Optional[int] = None,
        cache: bool = True,
        loop: Optional[AbstractEventLoop] = None,
        cache_map: Optional[AbstractCache[K, T]] = None,
        cache_key_fn: Optional[Callable[[K], Hashable]] = None,
    ) -> None:
        ...

    # fallback if load_fn is untyped and there's no other info for inference
    @overload
    def __init__(
        self: DataLoader[K, Any],
        load_fn: Callable[[List[K]], Awaitable[List[Any]]],
        max_batch_size: Optional[int] = None,
        cache: bool = True,
        loop: Optional[AbstractEventLoop] = None,
        cache_map: Optional[AbstractCache[K, T]] = None,
        cache_key_fn: Optional[Callable[[K], Hashable]] = None,
    ) -> None:
        ...

    def __init__(
        self,
        load_fn: Callable[[List[K]], Awaitable[Sequence[Union[T, BaseException]]]],
        max_batch_size: Optional[int] = None,
        cache: bool = True,
        loop: Optional[AbstractEventLoop] = None,
        cache_map: Optional[AbstractCache[K, T]] = None,
        cache_key_fn: Optional[Callable[[K], Hashable]] = None,
    ):
        self.load_fn = load_fn
        self.max_batch_size = max_batch_size
        self._loop = loop
        self.cache = cache

        if self.cache:
            # `cache_key_fn` is only honoured by the default cache.
            self.cache_map = (
                DefaultCache(cache_key_fn) if cache_map is None else cache_map
            )

    @property
    def loop(self) -> AbstractEventLoop:
        # Resolved lazily so the loader can be constructed before a loop exists.
        if self._loop is None:
            self._loop = get_event_loop()

        return self._loop

    def load(self, key: K) -> Awaitable[T]:
        """Return a future for *key*, reusing the cache and joining the
        current batch when the key is not cached."""
        if self.cache:
            future = self.cache_map.get(key)

            # Reuse the cached future unless it was cancelled.
            if future and not future.cancelled():
                return future

        future = self.loop.create_future()

        if self.cache:
            self.cache_map.set(key, future)

        batch = get_current_batch(self)
        batch.add_task(key, future)

        return future

    def load_many(self, keys: Iterable[K]) -> Awaitable[List[T]]:
        """Load several keys concurrently; results keep the input order."""
        return gather(*map(self.load, keys))

    def clear(self, key: K) -> None:
        if self.cache:
            self.cache_map.delete(key)

    def clear_many(self, keys: Iterable[K]) -> None:
        if self.cache:
            for key in keys:
                self.cache_map.delete(key)

    def clear_all(self) -> None:
        if self.cache:
            self.cache_map.clear()

    def prime(self, key: K, value: T, force: bool = False) -> None:
        """Pre-populate the cache for a single key (see `prime_many`)."""
        self.prime_many({key: value}, force)

    def prime_many(self, data: Mapping[K, T], force: bool = False) -> None:
        """Seed the cache with known values; with `force`, overwrite entries."""
        # Populate the cache with the specified values
        if self.cache:
            for key, value in data.items():
                if not self.cache_map.get(key) or force:
                    future: Future = Future(loop=self.loop)
                    future.set_result(value)
                    self.cache_map.set(key, future)

        # For keys that are pending on the current batch, but the
        # batch hasn't started fetching yet: Remove it from the
        # batch and set to the specified value
        if self.batch is not None and not self.batch.dispatched:
            batch_updated = False
            for task in self.batch.tasks:
                if task.key in data:
                    batch_updated = True
                    task.future.set_result(data[task.key])
            if batch_updated:
                self.batch.tasks = [
                    task for task in self.batch.tasks if not task.future.done()
                ]
def should_create_new_batch(loader: DataLoader, batch: Batch) -> bool:
    """Whether *batch* can no longer accept tasks and a new one is needed."""
    if batch.dispatched:
        return True

    max_size = loader.max_batch_size
    # Only enforce a size limit when one is configured (truthy).
    if max_size and len(batch) >= max_size:
        return True

    return False
def get_current_batch(loader: DataLoader) -> Batch:
    """Return the batch new tasks should join, creating one when needed."""
    current = loader.batch
    # NOTE: `Batch` defines `__len__`, so an empty batch is falsy here and a
    # fresh batch is created — preserve the truthiness check.
    if current and not should_create_new_batch(loader, current):
        return current

    new_batch = Batch()
    loader.batch = new_batch
    dispatch(loader, new_batch)

    return new_batch
def dispatch(loader: DataLoader, batch: Batch) -> None:
    # Defer the fetch via call_soon so loads registered before the event loop
    # runs again can still join this batch.
    loader.loop.call_soon(create_task, dispatch_batch(loader, batch))
async def dispatch_batch(loader: DataLoader, batch: Batch) -> None:
    """Fetch every key in *batch* via `load_fn` and resolve the futures."""
    batch.dispatched = True

    keys = [task.key for task in batch.tasks]
    if len(keys) == 0:
        # Ensure batch is not empty
        # Unlikely, but could happen if the tasks are
        # overridden with preset values
        return

    # TODO: check if load_fn return an awaitable and it is a list

    try:
        values = await loader.load_fn(keys)
        values = list(values)
        if len(values) != len(batch):
            raise WrongNumberOfResultsReturned(
                expected=len(batch), received=len(values)
            )

        for task, value in zip(batch.tasks, values):
            # Trying to set_result in a cancelled future would raise
            # asyncio.exceptions.InvalidStateError
            if task.future.cancelled():
                continue
            # A BaseException in the results marks that single key as failed.
            if isinstance(value, BaseException):
                task.future.set_exception(value)
            else:
                task.future.set_result(value)
    except Exception as e:
        # A failure of the whole fetch fails every pending task in the batch.
        for task in batch.tasks:
            task.future.set_exception(e)
from __future__ import annotations
from typing import Any, Optional, Union, cast
from typing_extensions import Annotated, get_args, get_origin
from strawberry.type import StrawberryType
from .annotation import StrawberryAnnotation
class StrawberryAutoMeta(type):
    """Metaclass for StrawberryAuto.

    This is used to make sure StrawberryAuto is a singleton and also to
    override the behavior of `isinstance` so that it consider the following
    cases:

        >> isinstance(StrawberryAuto(), StrawberryAuto)
        True
        >> isinstance(StrawberryAnnotation(StrawberryAuto()), StrawberryAuto)
        True
        >> isinstance(Annotated[StrawberryAuto(), object()], StrawberryAuto)
        True

    """

    def __init__(self, *args, **kwargs):
        self._instance: Optional[StrawberryAuto] = None
        super().__init__(*args, **kwargs)

    def __call__(cls, *args, **kwargs):
        # Singleton: create at most one instance per class.
        if cls._instance is None:
            cls._instance = super().__call__(*args, **kwargs)

        return cls._instance

    def __instancecheck__(
        self,
        instance: Union[StrawberryAuto, StrawberryAnnotation, StrawberryType, type],
    ):
        if isinstance(instance, StrawberryAnnotation):
            resolved = instance.annotation
            if isinstance(resolved, str):
                # Try to resolve a string annotation via the annotation's
                # namespace, when one is available.
                namespace = instance.namespace
                resolved = namespace and namespace.get(resolved)

            if resolved is not None:
                instance = cast(type, resolved)

        if instance is auto:
            return True

        # Support uses of Annotated[auto, something()]
        if get_origin(instance) is Annotated:
            args = get_args(instance)
            if args[0] is Any:
                return any(isinstance(arg, StrawberryAuto) for arg in args[1:])

        # StrawberryType's `__eq__` tries to find the string passed in the global
        # namespace, which will fail with a `NameError` if "strawberry.auto" hasn't
        # been imported. So we can't use `instance == "strawberry.auto"` here.
        # Instead, we'll use `isinstance(instance, str)` to check if the instance
        # is a StrawberryType, in that case we can return False since we know it
        # won't be a StrawberryAuto.
        if isinstance(instance, StrawberryType):
            return False

        return instance == "strawberry.auto"
class StrawberryAuto(metaclass=StrawberryAutoMeta):
    """Singleton marker type (see StrawberryAutoMeta) used inside `auto`."""

    def __str__(self):
        return "auto"

    def __repr__(self):
        return "<auto>"


# Public sentinel annotation; `isinstance(..., StrawberryAuto)` recognises it
# through the metaclass' custom `__instancecheck__`.
auto = Annotated[Any, StrawberryAuto()]
import dataclasses
import inspect
import sys
import types
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
overload,
)
from .exceptions import (
MissingFieldAnnotationError,
MissingReturnAnnotationError,
ObjectIsNotClassError,
)
from .field import StrawberryField, field
from .types.type_resolver import _get_fields
from .types.types import TypeDefinition
from .utils.dataclasses import add_custom_init_fn
from .utils.str_converters import to_camel_case
from .utils.typing import __dataclass_transform__
T = TypeVar("T", bound=Type)


def _get_interfaces(cls: Type[Any]) -> List[TypeDefinition]:
    """Collect the TypeDefinitions of all interface bases in *cls*'s MRO."""
    bases = cls.__mro__[1:]  # Exclude current class
    definitions = (
        cast(Optional[TypeDefinition], getattr(base, "_type_definition", None))
        for base in bases
    )
    # Keep only bases that are Strawberry interfaces.
    return [defn for defn in definitions if defn and defn.is_interface]
def _check_field_annotations(cls: Type[Any]):
    """Are any of the dataclass Fields missing type annotations?

    This is similar to the check that dataclasses do during creation, but allows us to
    manually add fields to cls' __annotations__ or raise proper Strawberry exceptions if
    necessary

    https://github.com/python/cpython/blob/6fed3c85402c5ca704eb3f3189ca3f5c67a08d19/Lib/dataclasses.py#L881-L884
    """
    cls_annotations = cls.__dict__.get("__annotations__", {})
    cls.__annotations__ = cls_annotations

    for field_name, field_ in cls.__dict__.items():
        if not isinstance(field_, (StrawberryField, dataclasses.Field)):
            # Not a dataclasses.Field, nor a StrawberryField. Ignore
            continue

        # If the field is a StrawberryField we need to do a bit of extra work
        # to make sure dataclasses.dataclass is ready for it
        if isinstance(field_, StrawberryField):
            # If the field has a type override then use that instead of using
            # the class annotations or resolver annotation
            if field_.type_annotation is not None:
                cls_annotations[field_name] = field_.type_annotation.annotation
                continue

            # Make sure the cls has an annotation
            if field_name not in cls_annotations:
                # If the field uses the default resolver, the field _must_ be
                # annotated
                if not field_.base_resolver:
                    raise MissingFieldAnnotationError(field_name, cls)

                # The resolver _must_ have a return type annotation
                # TODO: Maybe check this immediately when adding resolver to
                # field
                if field_.base_resolver.type_annotation is None:
                    raise MissingReturnAnnotationError(
                        field_name, resolver=field_.base_resolver
                    )

                # Borrow the resolver's return annotation for the class field.
                cls_annotations[field_name] = field_.base_resolver.type_annotation

            # TODO: Make sure the cls annotation agrees with the field's type
            # >>> if cls_annotations[field_name] != field.base_resolver.type:
            # >>>     # TODO: Proper error
            # >>>     raise Exception

        # If somehow a non-StrawberryField field is added to the cls without annotations
        # it raises an exception. This would occur if someone manually uses
        # dataclasses.field
        if field_name not in cls_annotations:
            # Field object exists but did not get an annotation
            raise MissingFieldAnnotationError(field_name, cls)
def _wrap_dataclass(cls: Type[Any]):
    """Wrap a strawberry.type class with a dataclass and check for any issues
    before doing so."""
    # Ensure all Fields have been properly type-annotated
    _check_field_annotations(cls)

    kwargs: Dict[str, bool] = {}

    # Python 3.10 introduces the kw_only param. If we're on an older version
    # then generate our own custom init function
    if sys.version_info >= (3, 10):
        kwargs["kw_only"] = True
    else:
        kwargs["init"] = False

    wrapped = dataclasses.dataclass(cls, **kwargs)

    if sys.version_info < (3, 10):
        add_custom_init_fn(wrapped)

    return wrapped
def _process_type(
    cls,
    *,
    name: Optional[str] = None,
    is_input: bool = False,
    is_interface: bool = False,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
    extend: bool = False,
):
    """Attach a ``TypeDefinition`` to *cls* and re-bind resolver functions.

    *name* defaults to the camel-cased class name.
    """
    name = name or to_camel_case(cls.__name__)

    interfaces = _get_interfaces(cls)
    fields = _get_fields(cls)
    is_type_of = getattr(cls, "is_type_of", None)

    cls._type_definition = TypeDefinition(
        name=name,
        is_input=is_input,
        is_interface=is_interface,
        interfaces=interfaces,
        description=description,
        directives=directives,
        origin=cls,
        extend=extend,
        _fields=fields,
        is_type_of=is_type_of,
    )

    # dataclasses removes attributes from the class here:
    # https://github.com/python/cpython/blob/577d7c4e/Lib/dataclasses.py#L873-L880
    # so we need to restore them, this will change in future, but for now this
    # solution should suffice
    for field_ in fields:
        if field_.base_resolver and field_.python_name:
            wrapped_func = field_.base_resolver.wrapped_func

            # Bind the functions to the class object. This is necessary because when
            # the @strawberry.field decorator is used on @staticmethod/@classmethods,
            # we get the raw staticmethod/classmethod objects before class evaluation
            # binds them to the class. We need to do this manually.
            if isinstance(wrapped_func, staticmethod):
                bound_method = wrapped_func.__get__(cls)
                field_.base_resolver.wrapped_func = bound_method
            elif isinstance(wrapped_func, classmethod):
                bound_method = types.MethodType(wrapped_func.__func__, cls)
                field_.base_resolver.wrapped_func = bound_method

            setattr(cls, field_.python_name, wrapped_func)

    return cls
# Overload: bare usage — `@strawberry.type` applied directly to a class.
@overload
@__dataclass_transform__(
    order_default=True, kw_only_default=True, field_descriptors=(field, StrawberryField)
)
def type(
    cls: T,
    *,
    name: Optional[str] = None,
    is_input: bool = False,
    is_interface: bool = False,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
    extend: bool = False,
) -> T:
    ...
# Overload: parenthesised usage — `@strawberry.type(...)` returns a decorator.
@overload
@__dataclass_transform__(
    order_default=True, kw_only_default=True, field_descriptors=(field, StrawberryField)
)
def type(
    *,
    name: Optional[str] = None,
    is_input: bool = False,
    is_interface: bool = False,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
    extend: bool = False,
) -> Callable[[T], T]:
    ...
def type(
cls: Optional[T] = None,
*,
name: Optional[str] = None,
is_input: bool = False,
is_interface: bool = False,
description: Optional[str] = None,
directives: Optional[Sequence[object]] = (),
extend: bool = False,
) -> Union[T, Callable[[T], T]]:
"""Annotates a class as a GraphQL type.
Example usage:
>>> @strawberry.type:
>>> class X:
>>> field_abc: str = "ABC"
"""
def wrap(cls):
if not inspect.isclass(cls):
if is_input:
exc = ObjectIsNotClassError.input
elif is_interface:
exc = ObjectIsNotClassError.interface
else:
exc = ObjectIsNotClassError.type
raise exc(cls)
wrapped = _wrap_dataclass(cls)
return _process_type(
wrapped,
name=name,
is_input=is_input,
is_interface=is_interface,
description=description,
directives=directives,
extend=extend,
)
if cls is None:
return wrap
return wrap(cls)
# Overload: bare-decorator usage — `@strawberry.input` applied directly to a class.
@overload
@__dataclass_transform__(
    order_default=True, kw_only_default=True, field_descriptors=(field, StrawberryField)
)
def input(
    cls: T,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
) -> T:
    ...
# Overload: decorator-factory usage — `@strawberry.input(...)` with keyword arguments.
@overload
@__dataclass_transform__(
    order_default=True, kw_only_default=True, field_descriptors=(field, StrawberryField)
)
def input(
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
) -> Callable[[T], T]:
    ...
def input(
    cls: Optional[T] = None,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
):
    """Annotates a class as a GraphQL Input type.

    Thin wrapper that delegates to ``type`` with ``is_input=True``.

    Example usage:
    >>> @strawberry.input
    >>> class X:
    >>>     field_abc: str = "ABC"
    """
    return type(  # type: ignore # not sure why mypy complains here
        cls,
        name=name,
        description=description,
        directives=directives,
        is_input=True,
    )
# Overload: bare-decorator usage — `@strawberry.interface` applied directly to a class.
@overload
@__dataclass_transform__(
    order_default=True, kw_only_default=True, field_descriptors=(field, StrawberryField)
)
def interface(
    cls: T,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
) -> T:
    ...
# Overload: decorator-factory usage — `@strawberry.interface(...)` with keyword arguments.
@overload
@__dataclass_transform__(
    order_default=True, kw_only_default=True, field_descriptors=(field, StrawberryField)
)
def interface(
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
) -> Callable[[T], T]:
    ...
# NOTE: unlike `type`/`input` above, the implementation itself also carries
# the __dataclass_transform__ marker here.
@__dataclass_transform__(
    order_default=True, kw_only_default=True, field_descriptors=(field, StrawberryField)
)
def interface(
    cls: Optional[T] = None,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
):
    """Annotates a class as a GraphQL Interface.

    Thin wrapper that delegates to ``type`` with ``is_interface=True``.

    Example usage:
    >>> @strawberry.interface
    >>> class X:
    >>>     field_abc: str
    """
    return type(  # type: ignore # not sure why mypy complains here
        cls,
        name=name,
        description=description,
        directives=directives,
        is_interface=True,
    )
def asdict(obj: object) -> Dict[str, object]:
    """Recursively convert a strawberry object into a plain dictionary.

    Strawberry types are regular dataclasses under the hood, so this is a
    thin wrapper around :func:`dataclasses.asdict`.

    Example usage:
    >>> @strawberry.type
    >>> class User:
    >>>     name: str
    >>>     age: int
    >>> # evaluates to {"name": "Lorem", "age": 25}
    >>> user_dict = strawberry.asdict(User(name="Lorem", age=25))
    """
    return dataclasses.asdict(obj)  # type: ignore
# Public API of this module.
__all__ = [
    "TypeDefinition",
    "input",
    "interface",
    "type",
    "asdict",
]
from __future__ import annotations
import sys
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Any,
Callable,
Iterable,
Mapping,
NewType,
Optional,
Type,
TypeVar,
Union,
overload,
)
from strawberry.exceptions import InvalidUnionTypeError
from strawberry.type import StrawberryOptional, StrawberryType
from .utils.str_converters import to_camel_case
if TYPE_CHECKING:
from graphql import GraphQLScalarType
# in python 3.10+ NewType is a class
if sys.version_info >= (3, 10):
    # On 3.10+ a NewType instance is a valid TypeVar bound, so accept both
    # plain classes and NewTypes as scalar targets.
    _T = TypeVar("_T", bound=Union[type, NewType])
else:
    _T = TypeVar("_T", bound=type)
def identity(x: _T) -> _T:
    """Return ``x`` unchanged; used as the default ``serialize`` for scalars."""
    return x
@dataclass
class ScalarDefinition(StrawberryType):
    """All the information needed to build a GraphQL custom scalar type."""
    name: str
    description: Optional[str]
    specified_by_url: Optional[str]
    serialize: Optional[Callable]
    parse_value: Optional[Callable]
    parse_literal: Optional[Callable]
    directives: Iterable[object] = ()
    # Optionally store the GraphQLScalarType instance so that we don't get
    # duplicates
    implementation: Optional[GraphQLScalarType] = None
    # used for better error messages
    _source_file: Optional[str] = None
    _source_line: Optional[int] = None
    def copy_with(
        self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
    ) -> Union[StrawberryType, type]:
        # Scalars carry no type parameters; defer to the base class.
        return super().copy_with(type_var_map)  # type: ignore[safe-super]
    @property
    def is_generic(self) -> bool:
        # A scalar can never be generic.
        return False
class ScalarWrapper:
    """Callable wrapper that carries a ``ScalarDefinition`` for a type.

    Calling the wrapper delegates to the wrapped type/callable, so the
    wrapped object keeps behaving like the original (e.g. a constructor).
    """

    _scalar_definition: ScalarDefinition

    def __init__(self, wrap: Callable[[Any], Any]):
        self.wrap = wrap

    def __call__(self, *args, **kwargs):
        return self.wrap(*args, **kwargs)

    def __or__(self, other: Union[StrawberryType, type]) -> StrawberryType:
        # Only `Scalar | None` is supported for now; it is sugar for an
        # optional scalar. Anything else is rejected.
        # There is Work in progress to deal with more merging cases, see:
        # https://github.com/strawberry-graphql/strawberry/pull/1455
        if other is not None:
            raise InvalidUnionTypeError(str(other), self.wrap)
        return StrawberryOptional(of_type=self)
def _process_scalar(
    cls: Type[_T],
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Optional[Callable] = None,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
):
    """Wrap ``cls`` in a ``ScalarWrapper`` carrying its ``ScalarDefinition``."""
    from strawberry.exceptions.handler import should_use_rich_exceptions
    # The GraphQL name defaults to the CamelCase form of the Python name.
    name = name or to_camel_case(cls.__name__)
    _source_file = None
    _source_line = None
    if should_use_rich_exceptions():
        # Record where the user declared the scalar, for richer error output.
        # _getframe(3) skips _process_scalar -> wrap -> scalar to reach the
        # user's call site (see `scalar` below).
        frame = sys._getframe(3)
        _source_file = frame.f_code.co_filename
        _source_line = frame.f_lineno
    wrapper = ScalarWrapper(cls)
    wrapper._scalar_definition = ScalarDefinition(
        name=name,
        description=description,
        specified_by_url=specified_by_url,
        serialize=serialize,
        parse_literal=parse_literal,
        parse_value=parse_value,
        directives=directives,
        _source_file=_source_file,
        _source_line=_source_line,
    )
    return wrapper
# Overload: decorator-factory usage — `@strawberry.scalar(...)`.
@overload
def scalar(
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Callable = identity,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
) -> Callable[[_T], _T]:
    ...
# Overload: direct call with the type as first argument.
@overload
def scalar(
    cls: _T,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Callable = identity,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
) -> _T:
    ...
# FIXME: We are tricking pyright into thinking that we are returning the given type
# here or else it won't let us use any custom scalar to annotate attributes in
# dataclasses/types. This should be properly solved when implementing StrawberryScalar
def scalar(
    cls=None,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Callable = identity,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
) -> Any:
    """Annotates a class or type as a GraphQL custom scalar.

    Usable either as a direct call with the type as first argument, or as
    a decorator factory when ``cls`` is omitted.

    Example usages:
    >>> strawberry.scalar(
    >>>     datetime.date,
    >>>     serialize=lambda value: value.isoformat(),
    >>>     parse_value=datetime.parse_date
    >>> )
    >>> Base64Encoded = strawberry.scalar(
    >>>     NewType("Base64Encoded", bytes),
    >>>     serialize=base64.b64encode,
    >>>     parse_value=base64.b64decode
    >>> )
    >>> @strawberry.scalar(
    >>>     serialize=lambda value: ",".join(value.items),
    >>>     parse_value=lambda value: CustomList(value.split(","))
    >>> )
    >>> class CustomList:
    >>>     def __init__(self, items):
    >>>         self.items = items
    """
    # By default parse with the wrapped type itself (a class / NewType is
    # itself a callable that builds the Python value).
    if parse_value is None:
        parse_value = cls

    def wrap(wrapped_type):
        return _process_scalar(
            wrapped_type,
            name=name,
            description=description,
            specified_by_url=specified_by_url,
            serialize=serialize,
            parse_value=parse_value,
            parse_literal=parse_literal,
            directives=directives,
        )

    return wrap if cls is None else wrap(cls)
from __future__ import annotations
import base64
from typing import TYPE_CHECKING, Any, Dict, NewType, Union
from .custom_scalar import scalar
if TYPE_CHECKING:
from .custom_scalar import ScalarDefinition, ScalarWrapper
# The built-in GraphQL `ID` scalar, represented as a string in Python.
ID = NewType("ID", str)
# Pass-through scalar for arbitrary JSON values; no transformation is done
# on serialize/parse.
JSON = scalar(
    NewType("JSON", object),  # mypy doesn't like `NewType("name", Any)`
    description=(
        "The `JSON` scalar type represents JSON values as specified by "
        "[ECMA-404]"
        "(http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf)."
    ),
    specified_by_url=(
        "http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf"
    ),
    serialize=lambda v: v,
    parse_value=lambda v: v,
)
# Binary scalars: Python `bytes` on the wire as RFC 4648 encoded strings.
# `casefold=True` makes parsing accept lower-case input where the RFC
# alphabet is upper-case.
Base16 = scalar(
    NewType("Base16", bytes),
    description="Represents binary data as Base16-encoded (hexadecimal) strings.",
    specified_by_url="https://datatracker.ietf.org/doc/html/rfc4648.html#section-8",
    serialize=lambda v: base64.b16encode(v).decode("utf-8"),
    parse_value=lambda v: base64.b16decode(v.encode("utf-8"), casefold=True),
)
Base32 = scalar(
    NewType("Base32", bytes),
    description=(
        "Represents binary data as Base32-encoded strings, using the standard alphabet."
    ),
    specified_by_url=("https://datatracker.ietf.org/doc/html/rfc4648.html#section-6"),
    serialize=lambda v: base64.b32encode(v).decode("utf-8"),
    parse_value=lambda v: base64.b32decode(v.encode("utf-8"), casefold=True),
)
Base64 = scalar(
    NewType("Base64", bytes),
    description=(
        "Represents binary data as Base64-encoded strings, using the standard alphabet."
    ),
    specified_by_url="https://datatracker.ietf.org/doc/html/rfc4648.html#section-4",
    serialize=lambda v: base64.b64encode(v).decode("utf-8"),
    parse_value=lambda v: base64.b64decode(v.encode("utf-8")),
)
def is_scalar(
    annotation: Any,
    scalar_registry: Dict[object, Union[ScalarWrapper, ScalarDefinition]],
) -> bool:
    """Return True if ``annotation`` can be used as a GraphQL scalar.

    A type qualifies when it was explicitly registered in
    ``scalar_registry`` or when it carries a ``_scalar_definition``
    attribute (i.e. it was produced by ``strawberry.scalar``).
    """
    registered = annotation in scalar_registry
    return registered or hasattr(annotation, "_scalar_definition")
from __future__ import annotations
import itertools
from itertools import chain
from typing import (
TYPE_CHECKING,
Any,
Collection,
Iterable,
List,
Mapping,
NoReturn,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from typing_extensions import Annotated, get_origin
from graphql import GraphQLNamedType, GraphQLUnionType
from strawberry.annotation import StrawberryAnnotation
from strawberry.exceptions import (
InvalidTypeForUnionMergeError,
InvalidUnionTypeError,
UnallowedReturnTypeForUnion,
WrongReturnTypeForUnion,
)
from strawberry.lazy_type import LazyType
from strawberry.type import StrawberryOptional, StrawberryType
if TYPE_CHECKING:
from graphql import (
GraphQLAbstractType,
GraphQLResolveInfo,
GraphQLType,
GraphQLTypeResolver,
)
from strawberry.schema.types.concrete_type import TypeMap
from strawberry.types.types import TypeDefinition
class StrawberryUnion(StrawberryType):
    """Runtime representation of a (named) GraphQL union type."""
    def __init__(
        self,
        name: Optional[str] = None,
        type_annotations: Tuple[StrawberryAnnotation, ...] = tuple(),
        description: Optional[str] = None,
        directives: Iterable[object] = (),
    ):
        # Name exposed in the GraphQL schema; may be None for anonymous unions.
        self.graphql_name = name
        self.type_annotations = type_annotations
        self.description = description
        self.directives = directives
    def __eq__(self, other: object) -> bool:
        # Two unions are equal when name, member annotations and description
        # all match; comparison against non-StrawberryType falls back to the
        # base implementation.
        if isinstance(other, StrawberryType):
            if isinstance(other, StrawberryUnion):
                return (
                    self.graphql_name == other.graphql_name
                    and self.type_annotations == other.type_annotations
                    and self.description == other.description
                )
            return False
        return super().__eq__(other)
    def __hash__(self) -> int:
        # TODO: Is this a bad idea? __eq__ objects are supposed to have the same hash
        return id(self)
    def __or__(self, other: Union[StrawberryType, type]) -> StrawberryType:
        if other is None:
            # Return the correct notation when using `StrawberryUnion | None`.
            return StrawberryOptional(of_type=self)
        # Raise an error in any other case.
        # There is Work in progress to deal with more merging cases, see:
        # https://github.com/strawberry-graphql/strawberry/pull/1455
        raise InvalidTypeForUnionMergeError(self, other)
    @property
    def types(self) -> Tuple[StrawberryType, ...]:
        # Resolve each member annotation to its concrete Strawberry type.
        return tuple(
            cast(StrawberryType, annotation.resolve())
            for annotation in self.type_annotations
        )
    @property
    def type_params(self) -> List[TypeVar]:
        # Collect the TypeVars used by the union's member types (deduplicated).
        def _get_type_params(type_: StrawberryType):
            if isinstance(type_, LazyType):
                type_ = cast("StrawberryType", type_.resolve_type())
            if hasattr(type_, "_type_definition"):
                parameters = getattr(type_, "__parameters__", None)
                return list(parameters) if parameters else []
            return type_.type_params
        # TODO: check if order is important:
        # https://github.com/strawberry-graphql/strawberry/issues/445
        return list(
            set(itertools.chain(*(_get_type_params(type_) for type_ in self.types)))
        )
    @property
    def is_generic(self) -> bool:
        # A union is generic if any of its member types is generic.
        def _is_generic(type_: object) -> bool:
            if hasattr(type_, "_type_definition"):
                type_ = type_._type_definition
            if isinstance(type_, StrawberryType):
                return type_.is_generic
            return False
        return any(map(_is_generic, self.types))
    def copy_with(
        self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
    ) -> StrawberryType:
        """Return a copy of the union with type vars substituted per the map."""
        if not self.is_generic:
            return self
        new_types = []
        for type_ in self.types:
            new_type: Union[StrawberryType, type]
            if hasattr(type_, "_type_definition"):
                type_definition: TypeDefinition = type_._type_definition
                if type_definition.is_generic:
                    new_type = type_definition.copy_with(type_var_map)
            # NOTE(review): this second `if` is not an `elif`, so when the
            # branch above assigned a copy, a plain class (not a
            # StrawberryType instance) falls into the `else` below and
            # `new_type` is overwritten with the original `type_` — confirm
            # whether discarding that copy is intentional.
            if isinstance(type_, StrawberryType) and type_.is_generic:
                new_type = type_.copy_with(type_var_map)
            else:
                new_type = type_
            new_types.append(new_type)
        return StrawberryUnion(
            type_annotations=tuple(map(StrawberryAnnotation, new_types)),
            description=self.description,
        )
    def __call__(self, *_args, **_kwargs) -> NoReturn:
        """Do not use.
        Used to bypass
        https://github.com/python/cpython/blob/5efb1a77e75648012f8b52960c8637fc296a5c6d/Lib/typing.py#L148-L149
        """
        raise ValueError("Cannot use union type directly")
    def get_type_resolver(self, type_map: TypeMap) -> GraphQLTypeResolver:
        """Build the GraphQL type resolver that maps a root value to a member type name."""
        def _resolve_union_type(
            root: Any, info: GraphQLResolveInfo, type_: GraphQLAbstractType
        ) -> str:
            assert isinstance(type_, GraphQLUnionType)
            from strawberry.types.types import TypeDefinition
            # If the type given is not an Object type, try resolving using `is_type_of`
            # defined on the union's inner types
            if not hasattr(root, "_type_definition"):
                for inner_type in type_.types:
                    if inner_type.is_type_of is not None and inner_type.is_type_of(
                        root, info
                    ):
                        return inner_type.name
                # Couldn't resolve using `is_type_of`
                raise WrongReturnTypeForUnion(info.field_name, str(type(root)))
            return_type: Optional[GraphQLType]
            # Iterate over all of our known types and find the first concrete
            # type that implements the type. We prioritise checking types named in the
            # Union in case a nested generic object matches against more than one type.
            concrete_types_for_union = (type_map[x.name] for x in type_.types)
            # TODO: do we still need to iterate over all types in `type_map`?
            for possible_concrete_type in chain(
                concrete_types_for_union, type_map.values()
            ):
                possible_type = possible_concrete_type.definition
                if not isinstance(possible_type, TypeDefinition):
                    continue
                if possible_type.is_implemented_by(root):
                    return_type = possible_concrete_type.implementation
                    break
            else:
                return_type = None
            # Make sure the found type is expected by the Union
            if return_type is None or return_type not in type_.types:
                raise UnallowedReturnTypeForUnion(
                    info.field_name, str(type(root)), set(type_.types)
                )
            # Return the name of the type. Returning the actual type is now deprecated
            if isinstance(return_type, GraphQLNamedType):
                # TODO: Can return_type ever _not_ be a GraphQLNamedType?
                return return_type.name
            else:
                # todo: check if this is correct
                return return_type.__name__  # type: ignore
        return _resolve_union_type
    @staticmethod
    def is_valid_union_type(type_: object) -> bool:
        """Best-effort check that ``type_`` may appear inside a union."""
        # Usual case: Union made of @strawberry.types
        if hasattr(type_, "_type_definition"):
            return True
        # Can't confidently assert that these types are valid/invalid within Unions
        # until full type resolving stage is complete
        ignored_types = (LazyType, TypeVar)
        if isinstance(type_, ignored_types):
            return True
        if get_origin(type_) is Annotated:
            return True
        return False
# TypeVar that lets `union(...)` claim to return its member types (see below).
Types = TypeVar("Types", bound=Type)
# We return a Union type here in order to allow to use the union type as type
# annotation.
# For the `types` argument we'd ideally use a TypeVarTuple, but that's not
# yet supported in any python implementation (or in typing_extensions).
# See https://www.python.org/dev/peps/pep-0646/ for more information
def union(
    name: str,
    types: Collection[Types],
    *,
    description: Optional[str] = None,
    directives: Iterable[object] = (),
) -> Union[Types]:
    """Creates a new named Union type.

    Validates the member types first, then builds a ``StrawberryUnion``
    holding one ``StrawberryAnnotation`` per member.

    Example usages:
    >>> @strawberry.type
    ... class A: ...
    >>> @strawberry.type
    ... class B: ...
    >>> strawberry.union("Name", (A, Optional[B]))
    """
    # An empty union is never valid.
    if not types:
        raise TypeError("No types passed to `union`")
    for candidate in types:
        # Due to TypeVars, Annotations, LazyTypes, etc., this does not perfectly detect
        # issues. This check also occurs in the Schema conversion stage as a backup.
        if not StrawberryUnion.is_valid_union_type(candidate):
            raise InvalidUnionTypeError(union_name=name, invalid_type=candidate)
    annotations = tuple(StrawberryAnnotation(candidate) for candidate in types)
    return StrawberryUnion(  # type: ignore
        name=name,
        type_annotations=annotations,
        description=description,
        directives=directives,
    )
from __future__ import annotations
import asyncio
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Iterable
from aiohttp import web
from strawberry.aiohttp.handlers import (
GraphQLTransportWSHandler,
GraphQLWSHandler,
HTTPHandler,
)
from strawberry.http import process_result
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL, GRAPHQL_WS_PROTOCOL
if TYPE_CHECKING:
from strawberry.http import GraphQLHTTPResponse
from strawberry.schema import BaseSchema
from strawberry.types import ExecutionResult
class GraphQLView:
    """aiohttp request handler serving a strawberry schema over HTTP and WebSocket."""
    # Mark the view as coroutine so that AIOHTTP does not confuse it with a deprecated
    # bare handler function.
    _is_coroutine = asyncio.coroutines._is_coroutine  # type: ignore[attr-defined]
    # Handler classes are attributes so subclasses can swap in custom ones.
    graphql_transport_ws_handler_class = GraphQLTransportWSHandler
    graphql_ws_handler_class = GraphQLWSHandler
    http_handler_class = HTTPHandler
    def __init__(
        self,
        schema: BaseSchema,
        graphiql: bool = True,
        allow_queries_via_get: bool = True,
        keep_alive: bool = True,
        keep_alive_interval: float = 1,
        debug: bool = False,
        subscription_protocols: Iterable[str] = (
            GRAPHQL_TRANSPORT_WS_PROTOCOL,
            GRAPHQL_WS_PROTOCOL,
        ),
        connection_init_wait_timeout: timedelta = timedelta(minutes=1),
    ):
        self.schema = schema
        self.graphiql = graphiql
        self.allow_queries_via_get = allow_queries_via_get
        self.keep_alive = keep_alive
        self.keep_alive_interval = keep_alive_interval
        self.debug = debug
        self.subscription_protocols = subscription_protocols
        self.connection_init_wait_timeout = connection_init_wait_timeout
    async def __call__(self, request: web.Request) -> web.StreamResponse:
        """Dispatch to a WebSocket subscription handler or the HTTP handler.

        WebSocket upgrade requests are routed by negotiated subprotocol;
        unknown subprotocols are closed with code 4406. Everything else is
        handled as plain HTTP.
        """
        ws = web.WebSocketResponse(protocols=self.subscription_protocols)
        ws_test = ws.can_prepare(request)
        if ws_test.ok:
            if ws_test.protocol == GRAPHQL_TRANSPORT_WS_PROTOCOL:
                return await self.graphql_transport_ws_handler_class(
                    schema=self.schema,
                    debug=self.debug,
                    connection_init_wait_timeout=self.connection_init_wait_timeout,
                    get_context=self.get_context,  # type: ignore
                    get_root_value=self.get_root_value,
                    request=request,
                ).handle()
            elif ws_test.protocol == GRAPHQL_WS_PROTOCOL:
                return await self.graphql_ws_handler_class(
                    schema=self.schema,
                    debug=self.debug,
                    keep_alive=self.keep_alive,
                    keep_alive_interval=self.keep_alive_interval,
                    get_context=self.get_context,
                    get_root_value=self.get_root_value,
                    request=request,
                ).handle()
            else:
                # Client requested a WebSocket but with no acceptable subprotocol.
                await ws.prepare(request)
                await ws.close(code=4406, message=b"Subprotocol not acceptable")
                return ws
        else:
            return await self.http_handler_class(
                schema=self.schema,
                graphiql=self.graphiql,
                allow_queries_via_get=self.allow_queries_via_get,
                get_context=self.get_context,
                get_root_value=self.get_root_value,
                encode_json=self.encode_json,
                process_result=self.process_result,
                request=request,
            ).handle()
    async def get_root_value(self, request: web.Request) -> object:
        # Hook: override to supply a root value for execution.
        return None
    async def get_context(
        self, request: web.Request, response: web.StreamResponse
    ) -> object:
        # Hook: override to customise the execution context.
        return {"request": request, "response": response}
    async def process_result(
        self, request: web.Request, result: ExecutionResult
    ) -> GraphQLHTTPResponse:
        # Hook: override to post-process the execution result.
        return process_result(result)
    def encode_json(self, response_data: GraphQLHTTPResponse) -> str:
        # Hook: override to customise JSON serialization of the response.
        return json.dumps(response_data)
from __future__ import annotations
from contextlib import suppress
from typing import TYPE_CHECKING, Any, Optional
from aiohttp import http, web
from strawberry.subscriptions import GRAPHQL_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_ws.handlers import BaseGraphQLWSHandler
if TYPE_CHECKING:
from strawberry.schema import BaseSchema
from strawberry.subscriptions.protocols.graphql_ws.types import OperationMessage
class GraphQLWSHandler(BaseGraphQLWSHandler):
    """aiohttp transport for the legacy `graphql-ws` subscription protocol."""
    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        keep_alive: bool,
        keep_alive_interval: float,
        get_context,
        get_root_value,
        request: web.Request,
    ):
        super().__init__(schema, debug, keep_alive, keep_alive_interval)
        self._get_context = get_context
        self._get_root_value = get_root_value
        self._request = request
        self._ws = web.WebSocketResponse(protocols=[GRAPHQL_WS_PROTOCOL])
    async def get_context(self) -> Any:
        return await self._get_context(request=self._request, response=self._ws)
    async def get_root_value(self) -> Any:
        return await self._get_root_value(request=self._request)
    async def send_json(self, data: OperationMessage) -> None:
        await self._ws.send_json(data)
    async def close(self, code: int = 1000, reason: Optional[str] = None) -> None:
        # aiohttp expects the close reason as bytes.
        message = reason.encode() if reason else b""
        await self._ws.close(code=code, message=message)
    async def handle_request(self) -> Any:
        """Run the WebSocket message loop, cleaning up subscriptions on exit."""
        await self._ws.prepare(self._request)
        try:
            # Only text frames carry protocol messages; other frame types are ignored.
            async for ws_message in self._ws:  # type: http.WSMessage
                if ws_message.type == http.WSMsgType.TEXT:
                    message: OperationMessage = ws_message.json()
                    await self.handle_message(message)
        finally:
            # Stop the keep-alive ping task (suppressing its cancellation error)
            # and tear down any still-running subscriptions.
            if self.keep_alive_task:
                self.keep_alive_task.cancel()
                with suppress(BaseException):
                    await self.keep_alive_task
            for operation_id in list(self.subscriptions.keys()):
                await self.cleanup_operation(operation_id)
        return self._ws
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict
from aiohttp import http, web
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_transport_ws.handlers import (
BaseGraphQLTransportWSHandler,
)
if TYPE_CHECKING:
from datetime import timedelta
from strawberry.schema import BaseSchema
class GraphQLTransportWSHandler(BaseGraphQLTransportWSHandler):
    """aiohttp transport for the `graphql-transport-ws` subscription protocol."""
    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        connection_init_wait_timeout: timedelta,
        get_context: Callable[..., Dict[str, Any]],
        get_root_value: Any,
        request: web.Request,
    ):
        super().__init__(schema, debug, connection_init_wait_timeout)
        self._get_context = get_context
        self._get_root_value = get_root_value
        self._request = request
        self._ws = web.WebSocketResponse(protocols=[GRAPHQL_TRANSPORT_WS_PROTOCOL])
    async def get_context(self) -> Any:
        return await self._get_context(request=self._request, response=self._ws)  # type: ignore # noqa: E501
    async def get_root_value(self) -> Any:
        return await self._get_root_value(request=self._request)
    async def send_json(self, data: dict) -> None:
        await self._ws.send_json(data)
    async def close(self, code: int, reason: str) -> None:
        # aiohttp expects the close reason as bytes.
        await self._ws.close(code=code, message=reason.encode())
    async def handle_request(self) -> web.StreamResponse:
        """Run the WebSocket message loop, cleaning up subscriptions on exit."""
        await self._ws.prepare(self._request)
        try:
            async for ws_message in self._ws:  # type: http.WSMessage
                if ws_message.type == http.WSMsgType.TEXT:
                    await self.handle_message(ws_message.json())
                else:
                    # This protocol only allows text frames.
                    error_message = "WebSocket message type must be text"
                    await self.handle_invalid_message(error_message)
        finally:
            for operation_id in list(self.subscriptions.keys()):
                await self.cleanup_operation(operation_id)
            await self.reap_completed_tasks()
        return self._ws
from __future__ import annotations
import json
from io import BytesIO
from typing import TYPE_CHECKING, Any, Dict, Union
from aiohttp import web
from strawberry.exceptions import MissingQueryError
from strawberry.file_uploads.utils import replace_placeholders_with_files
from strawberry.http import parse_query_params, parse_request_data
from strawberry.schema.exceptions import InvalidOperationTypeError
from strawberry.types.graphql import OperationType
from strawberry.utils.graphiql import get_graphiql_html
if TYPE_CHECKING:
from typing_extensions import Literal
from strawberry.http import GraphQLRequestData
from strawberry.schema import BaseSchema
class HTTPHandler:
    """Handles a single GraphQL-over-HTTP request (GET/POST) for aiohttp."""
    def __init__(
        self,
        schema: BaseSchema,
        graphiql: bool,
        allow_queries_via_get: bool,
        get_context,
        get_root_value,
        encode_json,
        process_result,
        request: web.Request,
    ):
        self.schema = schema
        self.graphiql = graphiql
        self.allow_queries_via_get = allow_queries_via_get
        self.get_context = get_context
        self.get_root_value = get_root_value
        self.encode_json = encode_json
        self.process_result = process_result
        self.request = request
    async def handle(self) -> web.StreamResponse:
        # Only GET and POST are supported.
        if self.request.method == "GET":
            return await self.get(self.request)
        if self.request.method == "POST":
            return await self.post(self.request)
        raise web.HTTPMethodNotAllowed(self.request.method, ["GET", "POST"])
    async def get(self, request: web.Request) -> web.StreamResponse:
        """Serve a GET request: execute query params, or render GraphiQL, or 404."""
        if request.query:
            try:
                # Collapse the multidict to one value per key before parsing.
                query_params = {
                    key: request.query.getone(key) for key in set(request.query.keys())
                }
                query_data = parse_query_params(query_params)
                request_data = parse_request_data(query_data)
            except json.JSONDecodeError:
                raise web.HTTPBadRequest(reason="Unable to parse request body as JSON")
            return await self.execute_request(
                request=request, request_data=request_data, method="GET"
            )
        elif self.should_render_graphiql(request):
            return self.render_graphiql()
        raise web.HTTPNotFound()
    async def post(self, request: web.Request) -> web.StreamResponse:
        request_data = await self.get_request_data(request)
        return await self.execute_request(
            request=request, request_data=request_data, method="POST"
        )
    async def execute_request(
        self,
        request: web.Request,
        request_data: GraphQLRequestData,
        method: Union[Literal["GET"], Literal["POST"]],
    ) -> web.StreamResponse:
        """Execute the parsed request against the schema and build the JSON response."""
        response = web.Response()
        context = await self.get_context(request, response)
        root_value = await self.get_root_value(request)
        allowed_operation_types = OperationType.from_http(method)
        # Optionally forbid queries over GET (e.g. to enforce POST-only APIs).
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}
        try:
            result = await self.schema.execute(
                query=request_data.query,
                root_value=root_value,
                variable_values=request_data.variables,
                context_value=context,
                operation_name=request_data.operation_name,
                allowed_operation_types=allowed_operation_types,
            )
        except InvalidOperationTypeError as e:
            raise web.HTTPBadRequest(
                reason=e.as_http_error_reason(method=method)
            ) from e
        except MissingQueryError:
            raise web.HTTPBadRequest(reason="No GraphQL query found in the request")
        response_data = await self.process_result(request, result)
        response.text = self.encode_json(response_data)
        response.content_type = "application/json"
        return response
    async def get_request_data(self, request: web.Request) -> GraphQLRequestData:
        data = await self.parse_body(request)
        return parse_request_data(data)
    async def parse_body(self, request: web.Request) -> dict:
        """Parse the POST body, supporting both JSON and multipart uploads."""
        if request.content_type.startswith("multipart/form-data"):
            return await self.parse_multipart_body(request)
        try:
            return await request.json()
        except json.JSONDecodeError as e:
            raise web.HTTPBadRequest(
                reason="Unable to parse request body as JSON"
            ) from e
    async def parse_multipart_body(self, request: web.Request) -> dict:
        """Parse a GraphQL multipart (file upload) request body.

        Follows the GraphQL multipart request convention: an `operations`
        JSON part, a `map` part linking placeholders to files, and the
        file parts themselves.
        """
        reader = await request.multipart()
        operations: Dict[str, Any] = {}
        files_map: Dict[str, Any] = {}
        files: Dict[str, Any] = {}
        try:
            async for field in reader:
                if field.name == "operations":
                    operations = (await field.json()) or {}
                elif field.name == "map":
                    files_map = (await field.json()) or {}
                elif field.filename:
                    assert field.name
                    files[field.name] = BytesIO(await field.read(decode=False))
        except ValueError:
            raise web.HTTPBadRequest(reason="Unable to parse the multipart body")
        try:
            return replace_placeholders_with_files(operations, files_map, files)
        except KeyError:
            # `map` referenced a file part that was not uploaded.
            raise web.HTTPBadRequest(reason="File(s) missing in form data")
    def render_graphiql(self) -> web.StreamResponse:
        html_string = get_graphiql_html()
        return web.Response(text=html_string, content_type="text/html")
    def should_render_graphiql(self, request: web.Request) -> bool:
        # GraphiQL is served only when enabled and the client accepts HTML.
        if not self.graphiql:
            return False
        return any(
            supported_header in request.headers.get("Accept", "")
            for supported_header in ("text/html", "*/*")
        )
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Optional, Type, Union
if TYPE_CHECKING:
from enum import EnumMeta
from typing_extensions import Literal
# ---- Type representations used by the code generator ----
@dataclass
class GraphQLOptional:
    # Nullable wrapper around another type.
    of_type: GraphQLType
@dataclass
class GraphQLList:
    # List wrapper around another type.
    of_type: GraphQLType
@dataclass
class GraphQLUnion:
    name: str
    types: List[GraphQLObjectType]
@dataclass
class GraphQLField:
    name: str
    alias: Optional[str]
    type: GraphQLType
@dataclass
class GraphQLObjectType:
    name: str
    fields: List[GraphQLField]
@dataclass
class GraphQLEnum:
    name: str
    values: List[str]
    # The Python Enum class this GraphQL enum maps to.
    python_type: EnumMeta
@dataclass
class GraphQLScalar:
    name: str
    # The Python type backing this scalar, if known.
    python_type: Optional[Type]
# Any type reference appearing in a generated operation.
GraphQLType = Union[
    GraphQLObjectType,
    GraphQLEnum,
    GraphQLScalar,
    GraphQLOptional,
    GraphQLList,
    GraphQLUnion,
]
# ---- Selection-set representations ----
@dataclass
class GraphQLFieldSelection:
    field: str
    alias: Optional[str]
    selections: List[GraphQLSelection]
    directives: List[GraphQLDirective]
    arguments: List[GraphQLArgument]
@dataclass
class GraphQLInlineFragment:
    type_condition: str
    selections: List[GraphQLSelection]
GraphQLSelection = Union[GraphQLFieldSelection, GraphQLInlineFragment]
# ---- Argument-value representations ----
@dataclass
class GraphQLStringValue:
    value: str
@dataclass
class GraphQLIntValue:
    value: int
@dataclass
class GraphQLEnumValue:
    name: str
@dataclass
class GraphQLBoolValue:
    value: bool
@dataclass
class GraphQLListValue:
    values: List[GraphQLArgumentValue]
@dataclass
class GraphQLVariableReference:
    # A `$variable` reference inside an argument position.
    value: str
GraphQLArgumentValue = Union[
    GraphQLStringValue,
    GraphQLIntValue,
    GraphQLVariableReference,
    GraphQLListValue,
    GraphQLEnumValue,
    GraphQLBoolValue,
]
@dataclass
class GraphQLArgument:
    name: str
    value: GraphQLArgumentValue
@dataclass
class GraphQLDirective:
    name: str
    arguments: List[GraphQLArgument]
@dataclass
class GraphQLVariable:
    name: str
    type: GraphQLType
# ---- A full operation (query/mutation/subscription) ----
@dataclass
class GraphQLOperation:
    name: str
    kind: Literal["query", "mutation", "subscription"]
    selections: List[GraphQLSelection]
    directives: List[GraphQLDirective]
    variables: List[GraphQLVariable]
    # The operation's result type.
    type: GraphQLObjectType
    # Synthetic object type describing the variables, if the operation has any.
    variables_type: Optional[GraphQLObjectType]
from __future__ import annotations
from dataclasses import dataclass
from typing import (
TYPE_CHECKING,
Callable,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
from typing_extensions import Literal, Protocol
from graphql import (
BooleanValueNode,
EnumValueNode,
FieldNode,
InlineFragmentNode,
IntValueNode,
ListTypeNode,
ListValueNode,
NamedTypeNode,
NonNullTypeNode,
OperationDefinitionNode,
StringValueNode,
VariableNode,
parse,
)
from strawberry.custom_scalar import ScalarDefinition, ScalarWrapper
from strawberry.enum import EnumDefinition
from strawberry.lazy_type import LazyType
from strawberry.type import StrawberryList, StrawberryOptional, StrawberryType
from strawberry.types.types import TypeDefinition
from strawberry.union import StrawberryUnion
from strawberry.utils.str_converters import capitalize_first, to_camel_case
from .exceptions import (
MultipleOperationsProvidedError,
NoOperationNameProvidedError,
NoOperationProvidedError,
)
from .types import (
GraphQLArgument,
GraphQLBoolValue,
GraphQLDirective,
GraphQLEnum,
GraphQLEnumValue,
GraphQLField,
GraphQLFieldSelection,
GraphQLInlineFragment,
GraphQLIntValue,
GraphQLList,
GraphQLListValue,
GraphQLObjectType,
GraphQLOperation,
GraphQLOptional,
GraphQLScalar,
GraphQLStringValue,
GraphQLUnion,
GraphQLVariable,
GraphQLVariableReference,
)
if TYPE_CHECKING:
from pathlib import Path
from graphql import (
ArgumentNode,
DirectiveNode,
DocumentNode,
SelectionNode,
SelectionSetNode,
TypeNode,
ValueNode,
VariableDefinitionNode,
)
from strawberry.schema import Schema
from .types import GraphQLArgumentValue, GraphQLSelection, GraphQLType
@dataclass
class CodegenFile:
    """A single generated output file: relative path and full text content."""
    path: str
    content: str
@dataclass
class CodegenResult:
    """Aggregate of all files produced by the codegen plugins."""

    files: List[CodegenFile]

    def to_string(self) -> str:
        """Return every file's content joined by newlines, newline-terminated."""
        contents = [generated.content for generated in self.files]
        return "\n".join(contents) + "\n"

    def write(self, folder: Path) -> None:
        """Write each generated file under *folder*, using its relative path."""
        for generated in self.files:
            (folder / generated.path).write_text(generated.content)
class HasSelectionSet(Protocol):
    """Structural type for AST nodes that may carry a selection set."""
    selection_set: Optional[SelectionSetNode]
class QueryCodegenPlugin:
    """Base class for codegen plugins; every hook is a no-op by default."""
    def on_start(self) -> None:
        """Called once before code generation begins."""
        ...
    def on_end(self, result: CodegenResult) -> None:
        """Called once with the final result after generation finishes."""
        ...
    def generate_code(
        self, types: List[GraphQLType], operation: GraphQLOperation
    ) -> List[CodegenFile]:
        """Return generated files for the collected types and operation."""
        return []
class QueryCodegenPluginManager:
    """Fans the codegen lifecycle hooks out to every registered plugin."""
    def __init__(self, plugins: List[QueryCodegenPlugin]) -> None:
        self.plugins = plugins
    def generate_code(
        self, types: List[GraphQLType], operation: GraphQLOperation
    ) -> CodegenResult:
        """Collect the files produced by every plugin into one CodegenResult."""
        result = CodegenResult(files=[])
        for plugin in self.plugins:
            files = plugin.generate_code(types, operation)
            result.files.extend(files)
        return result
    def on_start(self) -> None:
        """Notify every plugin that generation is starting."""
        for plugin in self.plugins:
            plugin.on_start()
    def on_end(self, result: CodegenResult) -> None:
        """Notify every plugin with the finished result."""
        for plugin in self.plugins:
            plugin.on_end(result)
class QueryCodegen:
    """Converts a GraphQL query document into typed structures for plugins.

    Walks the parsed query against the schema, collecting every GraphQL type
    the operation touches (``self.types``, in first-seen order) and building a
    ``GraphQLOperation`` description that plugins turn into generated code.
    """

    def __init__(self, schema: Schema, plugins: List[QueryCodegenPlugin]):
        self.schema = schema
        self.plugin_manager = QueryCodegenPluginManager(plugins)
        self.types: List[GraphQLType] = []

    def run(self, query: str) -> CodegenResult:
        """Parse *query*, require exactly one named operation, and generate code.

        Raises:
            NoOperationProvidedError: the document contains no operation.
            MultipleOperationsProvidedError: more than one operation found.
            NoOperationNameProvidedError: the single operation is anonymous.
        """
        self.plugin_manager.on_start()
        ast = parse(query)
        operations = self._get_operations(ast)
        if not operations:
            raise NoOperationProvidedError()
        if len(operations) > 1:
            raise MultipleOperationsProvidedError()
        operation = operations[0]
        if operation.name is None:
            raise NoOperationNameProvidedError()
        self.operation = self._convert_operation(operation)
        result = self.generate_code()
        self.plugin_manager.on_end(result)
        return result

    def _collect_type(self, type_: GraphQLType) -> None:
        """Record *type_* once, preserving first-seen order."""
        if type_ in self.types:
            return
        self.types.append(type_)

    def _convert_selection(self, selection: SelectionNode) -> GraphQLSelection:
        """Convert a field or inline-fragment AST node to our selection type."""
        if isinstance(selection, FieldNode):
            return GraphQLFieldSelection(
                selection.name.value,
                selection.alias.value if selection.alias else None,
                selections=self._convert_selection_set(selection.selection_set),
                directives=self._convert_directives(selection.directives),
                arguments=self._convert_arguments(selection.arguments),
            )
        if isinstance(selection, InlineFragmentNode):
            return GraphQLInlineFragment(
                selection.type_condition.name.value,
                self._convert_selection_set(selection.selection_set),
            )
        raise ValueError(f"Unsupported type: {type(selection)}")  # pragma: no cover

    def _convert_selection_set(
        self, selection_set: Optional[SelectionSetNode]
    ) -> List[GraphQLSelection]:
        """Convert an optional selection set; ``None`` yields an empty list."""
        if selection_set is None:
            return []
        return [
            self._convert_selection(selection) for selection in selection_set.selections
        ]

    def _convert_value(self, value: ValueNode) -> GraphQLArgumentValue:
        """Convert a literal/variable argument value from the AST."""
        if isinstance(value, StringValueNode):
            return GraphQLStringValue(value.value)
        if isinstance(value, IntValueNode):
            # graphql-core stores int literals as strings.
            return GraphQLIntValue(int(value.value))
        if isinstance(value, VariableNode):
            return GraphQLVariableReference(value.name.value)
        if isinstance(value, ListValueNode):
            return GraphQLListValue(
                [self._convert_value(item) for item in value.values]
            )
        if isinstance(value, EnumValueNode):
            return GraphQLEnumValue(value.value)
        if isinstance(value, BooleanValueNode):
            return GraphQLBoolValue(value.value)
        raise ValueError(f"Unsupported type: {type(value)}")  # pragma: no cover

    def _convert_arguments(
        self, arguments: Iterable[ArgumentNode]
    ) -> List[GraphQLArgument]:
        """Convert every argument AST node to a GraphQLArgument."""
        return [
            GraphQLArgument(argument.name.value, self._convert_value(argument.value))
            for argument in arguments
        ]

    def _convert_directives(
        self, directives: Iterable[DirectiveNode]
    ) -> List[GraphQLDirective]:
        """Convert every directive AST node to a GraphQLDirective."""
        return [
            GraphQLDirective(
                directive.name.value,
                self._convert_arguments(directive.arguments),
            )
            for directive in directives
        ]

    def _convert_operation(
        self, operation_definition: OperationDefinitionNode
    ) -> GraphQLOperation:
        """Build the full GraphQLOperation, collecting result and variable types."""
        # NOTE(review): the root is always looked up as "Query", even for
        # mutations/subscriptions — confirm this is intended upstream.
        query_type = self.schema.get_type_by_name("Query")
        assert isinstance(query_type, TypeDefinition)
        assert operation_definition.name is not None
        operation_name = operation_definition.name.value
        result_class_name = f"{operation_name}Result"
        operation_type = self._collect_types(
            cast(HasSelectionSet, operation_definition),
            parent_type=query_type,
            class_name=result_class_name,
        )
        operation_kind = cast(
            Literal["query", "mutation", "subscription"],
            operation_definition.operation.value,
        )
        variables, variables_type = self._convert_variable_definitions(
            operation_definition.variable_definitions, operation_name=operation_name
        )
        return GraphQLOperation(
            operation_definition.name.value,
            kind=operation_kind,
            selections=self._convert_selection_set(operation_definition.selection_set),
            directives=self._convert_directives(operation_definition.directives),
            variables=variables,
            type=cast("GraphQLObjectType", operation_type),
            variables_type=variables_type,
        )

    def _convert_variable_definitions(
        self,
        variable_definitions: Optional[Iterable[VariableDefinitionNode]],
        operation_name: str,
    ) -> Tuple[List[GraphQLVariable], Optional[GraphQLObjectType]]:
        """Convert variable definitions, synthesizing a ``<Op>Variables`` type.

        Returns ``([], None)`` when the operation declares no variables.
        """
        if not variable_definitions:
            return [], None
        type_ = GraphQLObjectType(f"{operation_name}Variables", [])
        self._collect_type(type_)
        variables: List[GraphQLVariable] = []
        for variable_definition in variable_definitions:
            variable_type = self._collect_type_from_variable(variable_definition.type)
            variable = GraphQLVariable(
                variable_definition.variable.name.value,
                variable_type,
            )
            type_.fields.append(GraphQLField(variable.name, None, variable_type))
            variables.append(variable)
        return variables, type_

    def _get_operations(self, ast: DocumentNode) -> List[OperationDefinitionNode]:
        """Return the operation definitions contained in the document."""
        return [
            definition
            for definition in ast.definitions
            if isinstance(definition, OperationDefinitionNode)
        ]

    def _get_field_type(
        self,
        field_type: Union[StrawberryType, type],
    ) -> GraphQLType:
        """Map a strawberry field type (scalar/enum/wrapper) to a GraphQL type."""
        if isinstance(field_type, StrawberryOptional):
            return GraphQLOptional(self._get_field_type(field_type.of_type))
        if isinstance(field_type, StrawberryList):
            return GraphQLList(self._get_field_type(field_type.of_type))
        if (
            not isinstance(field_type, StrawberryType)
            and field_type in self.schema.schema_converter.scalar_registry
        ):
            field_type = self.schema.schema_converter.scalar_registry[field_type]  # type: ignore # noqa: E501
        if isinstance(field_type, ScalarWrapper):
            python_type = field_type.wrap
            # NewType wraps its supertype; unwrap for the real Python type.
            if hasattr(python_type, "__supertype__"):
                python_type = python_type.__supertype__
            return self._collect_scalar(field_type._scalar_definition, python_type)  # type: ignore # noqa: E501
        if isinstance(field_type, ScalarDefinition):
            return self._collect_scalar(field_type, None)
        elif isinstance(field_type, EnumDefinition):
            return self._collect_enum(field_type)
        raise ValueError(f"Unsupported type: {field_type}")  # pragma: no cover

    def _collect_type_from_strawberry_type(
        self, strawberry_type: Union[type, StrawberryType]
    ) -> GraphQLType:
        """Recursively convert a strawberry type, collecting object types."""
        type_: GraphQLType
        if isinstance(strawberry_type, StrawberryOptional):
            return GraphQLOptional(
                self._collect_type_from_strawberry_type(strawberry_type.of_type)
            )
        if isinstance(strawberry_type, StrawberryList):
            # Bug fix: this previously wrapped lists in GraphQLOptional,
            # contradicting _get_field_type which maps lists to GraphQLList.
            return GraphQLList(
                self._collect_type_from_strawberry_type(strawberry_type.of_type)
            )
        if hasattr(strawberry_type, "_type_definition"):
            strawberry_type = strawberry_type._type_definition
        if isinstance(strawberry_type, TypeDefinition):
            type_ = GraphQLObjectType(
                strawberry_type.name,
                [],
            )
            for field in strawberry_type.fields:
                field_type = self._collect_type_from_strawberry_type(field.type)
                type_.fields.append(GraphQLField(field.name, None, field_type))
            self._collect_type(type_)
        else:
            type_ = self._get_field_type(strawberry_type)
        return type_

    def _collect_type_from_variable(
        self, variable_type: TypeNode, parent_type: Optional[TypeNode] = None
    ) -> GraphQLType:
        """Convert a variable's AST type; GraphQL types are nullable by default."""
        type_: Optional[GraphQLType] = None
        if isinstance(variable_type, ListTypeNode):
            type_ = GraphQLList(
                self._collect_type_from_variable(variable_type.type, variable_type)
            )
        elif isinstance(variable_type, NonNullTypeNode):
            return self._collect_type_from_variable(variable_type.type, variable_type)
        elif isinstance(variable_type, NamedTypeNode):
            strawberry_type = self.schema.get_type_by_name(variable_type.name.value)
            assert strawberry_type
            type_ = self._collect_type_from_strawberry_type(strawberry_type)
        assert type_
        # Only a NonNull parent suppresses the default nullability.
        if parent_type is not None and isinstance(parent_type, NonNullTypeNode):
            return type_
        return GraphQLOptional(type_)

    def _field_from_selection(
        self, selection: FieldNode, parent_type: TypeDefinition
    ) -> GraphQLField:
        """Build a GraphQLField for a leaf selection (no sub-selections)."""
        field = self.schema.get_field_for_type(selection.name.value, parent_type.name)
        assert field
        field_type = self._get_field_type(field.type)
        return GraphQLField(
            field.name, selection.alias.value if selection.alias else None, field_type
        )

    def _unwrap_type(
        self, type_: Union[type, StrawberryType]
    ) -> Tuple[
        Union[type, StrawberryType], Optional[Callable[[GraphQLType], GraphQLType]]
    ]:
        """Strip Optional/List/Lazy wrappers, returning the inner type plus a
        callable that re-applies the stripped wrappers to a GraphQLType."""
        wrapper = None
        if isinstance(type_, StrawberryOptional):
            type_, wrapper = self._unwrap_type(type_.of_type)
            wrapper = (
                GraphQLOptional
                if wrapper is None
                # Bug fix: bind the current wrapper as a default argument.
                # Referencing the name directly would capture this very lambda
                # (late binding) and recurse forever for nested wrappers.
                else lambda t, _inner=wrapper: GraphQLOptional(_inner(t))  # type: ignore[misc]  # noqa: E501
            )
        elif isinstance(type_, StrawberryList):
            type_, wrapper = self._unwrap_type(type_.of_type)
            wrapper = (
                GraphQLList
                if wrapper is None
                # Same late-binding fix as above.
                else lambda t, _inner=wrapper: GraphQLList(_inner(t))  # type: ignore[misc]  # noqa: E501
            )
        elif isinstance(type_, LazyType):
            return self._unwrap_type(type_.resolve_type())
        return type_, wrapper

    def _field_from_selection_set(
        self, selection: FieldNode, class_name: str, parent_type: TypeDefinition
    ) -> GraphQLField:
        """Build a GraphQLField for a selection that has sub-selections,
        synthesizing a nested result class named after the selection path."""
        assert selection.selection_set is not None
        selected_field = self.schema.get_field_for_type(
            selection.name.value, parent_type.name
        )
        assert selected_field
        selected_field_type, wrapper = self._unwrap_type(selected_field.type)
        name = capitalize_first(to_camel_case(selection.name.value))
        class_name = f"{class_name}{(name)}"
        field_type: GraphQLType
        if isinstance(selected_field_type, StrawberryUnion):
            field_type = self._collect_types_with_inline_fragments(
                selection, parent_type, class_name
            )
        else:
            parent_type = cast(
                TypeDefinition, selected_field_type._type_definition  # type: ignore
            )
            field_type = self._collect_types(selection, parent_type, class_name)
        if wrapper:
            # Re-apply the Optional/List wrappers stripped by _unwrap_type.
            field_type = wrapper(field_type)
        return GraphQLField(
            selected_field.name,
            selection.alias.value if selection.alias else None,
            field_type,
        )

    def _get_field(
        self, selection: FieldNode, class_name: str, parent_type: TypeDefinition
    ) -> GraphQLField:
        """Dispatch between leaf and nested selection handling."""
        if selection.selection_set:
            return self._field_from_selection_set(selection, class_name, parent_type)
        return self._field_from_selection(selection, parent_type)

    def _collect_types_with_inline_fragments(
        self,
        selection: HasSelectionSet,
        parent_type: TypeDefinition,
        class_name: str,
    ) -> Union[GraphQLObjectType, GraphQLUnion]:
        """Collect fragment sub-types; a single sub-type is returned directly,
        multiple sub-types are wrapped in a union named *class_name*."""
        sub_types = self._collect_types_using_fragments(
            selection, parent_type, class_name
        )
        if len(sub_types) == 1:
            return sub_types[0]
        union = GraphQLUnion(class_name, sub_types)
        self._collect_type(union)
        return union

    def _collect_types(
        self,
        selection: HasSelectionSet,
        parent_type: TypeDefinition,
        class_name: str,
    ) -> GraphQLType:
        """Build (and collect) the object type for a selection set."""
        assert selection.selection_set is not None
        selection_set = selection.selection_set
        if any(
            isinstance(selection, InlineFragmentNode)
            for selection in selection_set.selections
        ):
            return self._collect_types_with_inline_fragments(
                selection, parent_type, class_name
            )
        current_type = GraphQLObjectType(class_name, [])
        for sub_selection in selection_set.selections:
            assert isinstance(sub_selection, FieldNode)
            field = self._get_field(sub_selection, class_name, parent_type)
            current_type.fields.append(field)
        self._collect_type(current_type)
        return current_type

    def generate_code(self) -> CodegenResult:
        """Hand the collected types and operation to the plugin manager."""
        return self.plugin_manager.generate_code(
            types=self.types, operation=self.operation
        )

    def _collect_types_using_fragments(
        self,
        selection: HasSelectionSet,
        parent_type: TypeDefinition,
        class_name: str,
    ) -> List[GraphQLObjectType]:
        """Build one object type per inline fragment, each containing the
        fields common to every fragment plus its own fragment fields."""
        assert selection.selection_set
        common_fields: List[GraphQLField] = []
        fragments: List[InlineFragmentNode] = []
        sub_types: List[GraphQLObjectType] = []
        for sub_selection in selection.selection_set.selections:
            if isinstance(sub_selection, FieldNode):
                common_fields.append(
                    self._get_field(sub_selection, class_name, parent_type)
                )
            if isinstance(sub_selection, InlineFragmentNode):
                fragments.append(sub_selection)
        for fragment in fragments:
            fragment_class_name = class_name + fragment.type_condition.name.value
            current_type = GraphQLObjectType(fragment_class_name, [])
            # Bug fix: the reset to the common fields used to happen inside the
            # loop below, discarding all but the last fragment field.
            current_type.fields = list(common_fields)
            parent_type = cast(
                TypeDefinition,
                self.schema.get_type_by_name(fragment.type_condition.name.value),
            )
            assert parent_type
            for sub_selection in fragment.selection_set.selections:
                # TODO: recurse, use existing method ?
                assert isinstance(sub_selection, FieldNode)
                current_type.fields.append(
                    self._get_field(
                        selection=sub_selection,
                        class_name=fragment_class_name,
                        parent_type=parent_type,
                    )
                )
            sub_types.append(current_type)
        self.types.extend(sub_types)
        return sub_types

    def _collect_scalar(
        self, scalar_definition: ScalarDefinition, python_type: Optional[Type]
    ) -> GraphQLScalar:
        """Create, collect, and return a GraphQLScalar."""
        graphql_scalar = GraphQLScalar(scalar_definition.name, python_type=python_type)
        self._collect_type(graphql_scalar)
        return graphql_scalar

    def _collect_enum(self, enum: EnumDefinition) -> GraphQLEnum:
        """Create, collect, and return a GraphQLEnum.

        Fixed: the original last line carried dataset-concatenation residue,
        which is a syntax error.
        """
        graphql_enum = GraphQLEnum(
            enum.name,
            [value.name for value in enum.values],
            python_type=enum.wrapped_cls,
        )
        self._collect_type(graphql_enum)
        return graphql_enum
from __future__ import annotations
import textwrap
from typing import TYPE_CHECKING, List
from strawberry.codegen import CodegenFile, QueryCodegenPlugin
from strawberry.codegen.types import (
GraphQLEnum,
GraphQLList,
GraphQLObjectType,
GraphQLOptional,
GraphQLScalar,
GraphQLUnion,
)
if TYPE_CHECKING:
from strawberry.codegen.types import GraphQLField, GraphQLOperation, GraphQLType
class TypeScriptPlugin(QueryCodegenPlugin):
    """Codegen plugin that emits TypeScript type declarations."""

    # GraphQL scalar name (or Python type, for custom scalar python_types)
    # -> TypeScript type name.
    SCALARS_TO_TS_TYPE = {
        "ID": "string",
        "Int": "number",
        "String": "string",
        "Float": "number",
        "Boolean": "boolean",
        "UUID": "string",
        "Date": "string",
        "DateTime": "string",
        "Time": "string",
        "Decimal": "string",
        str: "string",
        float: "number",
    }

    def generate_code(
        self, types: List[GraphQLType], operation: GraphQLOperation
    ) -> List[CodegenFile]:
        """Render every collected type into a single ``types.ts`` file."""
        printed_types = list(filter(None, (self._print_type(type) for type in types)))
        return [CodegenFile("types.ts", "\n\n".join(printed_types))]

    def _get_type_name(self, type_: GraphQLType) -> str:
        """Return the TypeScript spelling for a GraphQL type reference."""
        if isinstance(type_, GraphQLOptional):
            return f"{self._get_type_name(type_.of_type)} | undefined"
        if isinstance(type_, GraphQLList):
            child_type = self._get_type_name(type_.of_type)
            # Parenthesize unions so "A | B[]" doesn't change meaning.
            if "|" in child_type:
                child_type = f"({child_type})"
            return f"{child_type}[]"
        if isinstance(type_, GraphQLUnion):
            return type_.name
        if isinstance(type_, (GraphQLObjectType, GraphQLEnum)):
            return type_.name
        if isinstance(type_, GraphQLScalar) and type_.name in self.SCALARS_TO_TS_TYPE:
            return self.SCALARS_TO_TS_TYPE[type_.name]
        return type_.name

    def _print_field(self, field: GraphQLField) -> str:
        """Print one object field, noting aliases in a comment."""
        name = field.name
        if field.alias:
            name = f"// alias for {field.name}\n{field.alias}"
        return f"{name}: {self._get_type_name(field.type)}"

    def _print_enum_value(self, value: str) -> str:
        """Print one enum member line."""
        return f'{value} = "{value}",'

    def _print_object_type(self, type_: GraphQLObjectType) -> str:
        """Print a ``type X = { ... }`` declaration."""
        fields = "\n".join(self._print_field(field) for field in type_.fields)
        return "\n".join(
            [f"type {type_.name} = {{", textwrap.indent(fields, " " * 4), "}"],
        )

    def _print_enum_type(self, type_: GraphQLEnum) -> str:
        """Print an ``enum X { ... }`` declaration."""
        values = "\n".join(self._print_enum_value(value) for value in type_.values)
        return "\n".join(
            [
                f"enum {type_.name} {{",
                textwrap.indent(values, " " * 4),
                "}",
            ]
        )

    def _print_scalar_type(self, type_: GraphQLScalar) -> str:
        """Print a type alias for a custom scalar; builtins need no output."""
        if type_.name in self.SCALARS_TO_TS_TYPE:
            return ""
        # NOTE(review): assumes python_type is a key of SCALARS_TO_TS_TYPE for
        # custom scalars — raises KeyError otherwise; confirm upstream guarantee.
        return f"type {type_.name} = {self.SCALARS_TO_TS_TYPE[type_.python_type]}"

    def _print_union_type(self, type_: GraphQLUnion) -> str:
        """Print a ``type U = A | B`` union alias."""
        return f"type {type_.name} = {' | '.join([t.name for t in type_.types])}"

    def _print_type(self, type_: GraphQLType) -> str:
        """Dispatch to the printer for the concrete GraphQL type."""
        if isinstance(type_, GraphQLUnion):
            return self._print_union_type(type_)
        if isinstance(type_, GraphQLObjectType):
            return self._print_object_type(type_)
        if isinstance(type_, GraphQLEnum):
            return self._print_enum_type(type_)
        if isinstance(type_, GraphQLScalar):
            return self._print_scalar_type(type_)
        # Bug fix: previously interpolated the builtin ``type`` (printing
        # "<class 'type'>") instead of the offending value; also stripped the
        # dataset-concatenation residue fused onto this line.
        raise ValueError(f"Unknown type: {type_}")  # pragma: no cover
from __future__ import annotations
import textwrap
from collections import defaultdict
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, List, Optional, Set
from strawberry.codegen import CodegenFile, QueryCodegenPlugin
from strawberry.codegen.types import (
GraphQLEnum,
GraphQLList,
GraphQLObjectType,
GraphQLOptional,
GraphQLScalar,
GraphQLUnion,
)
if TYPE_CHECKING:
from strawberry.codegen.types import GraphQLField, GraphQLOperation, GraphQLType
@dataclass
class PythonType:
    """A Python type name plus the module it must be imported from (if any)."""
    type: str
    module: Optional[str] = None
class PythonPlugin(QueryCodegenPlugin):
    """Codegen plugin that emits Python (dataclass-style) type declarations."""

    # GraphQL scalar name -> Python type (with its import module, if needed).
    SCALARS_TO_PYTHON_TYPES = {
        "ID": PythonType("str"),
        "Int": PythonType("int"),
        "String": PythonType("str"),
        "Float": PythonType("float"),
        "Boolean": PythonType("bool"),
        "UUID": PythonType("UUID", "uuid"),
        "Date": PythonType("date", "datetime"),
        "DateTime": PythonType("datetime", "datetime"),
        "Time": PythonType("time", "datetime"),
        "Decimal": PythonType("Decimal", "decimal"),
    }

    def __init__(self) -> None:
        # module name -> set of names to import; filled as types are printed.
        self.imports: Dict[str, Set[str]] = defaultdict(set)

    def generate_code(
        self, types: List[GraphQLType], operation: GraphQLOperation
    ) -> List[CodegenFile]:
        """Render every collected type (plus required imports) into ``types.py``."""
        printed_types = list(filter(None, (self._print_type(type) for type in types)))
        imports = self._print_imports()
        code = imports + "\n\n" + "\n\n".join(printed_types)
        return [CodegenFile("types.py", code.strip())]

    def _print_imports(self) -> str:
        """Print the accumulated ``from X import a, b`` lines."""
        imports = [
            f'from {import_} import {", ".join(sorted(types))}'
            for import_, types in self.imports.items()
        ]
        return "\n".join(imports)

    def _get_type_name(self, type_: GraphQLType) -> str:
        """Return the Python spelling for a GraphQL type, recording imports."""
        if isinstance(type_, GraphQLOptional):
            self.imports["typing"].add("Optional")
            return f"Optional[{self._get_type_name(type_.of_type)}]"
        if isinstance(type_, GraphQLList):
            self.imports["typing"].add("List")
            return f"List[{self._get_type_name(type_.of_type)}]"
        if isinstance(type_, GraphQLUnion):
            # TODO: wrong place for this
            self.imports["typing"].add("Union")
            return type_.name
        if isinstance(type_, (GraphQLObjectType, GraphQLEnum)):
            if isinstance(type_, GraphQLEnum):
                self.imports["enum"].add("Enum")
            return type_.name
        if (
            isinstance(type_, GraphQLScalar)
            and type_.name in self.SCALARS_TO_PYTHON_TYPES
        ):
            python_type = self.SCALARS_TO_PYTHON_TYPES[type_.name]
            if python_type.module is not None:
                self.imports[python_type.module].add(python_type.type)
            return python_type.type
        # Custom scalars are declared via NewType (see _print_scalar_type).
        self.imports["typing"].add("NewType")
        return type_.name

    def _print_field(self, field: GraphQLField) -> str:
        """Print one field, noting aliases in a comment."""
        name = field.name
        if field.alias:
            name = f"# alias for {field.name}\n{field.alias}"
        return f"{name}: {self._get_type_name(field.type)}"

    def _print_enum_value(self, value: str) -> str:
        """Print one enum member line."""
        return f'{value} = "{value}"'

    def _print_object_type(self, type_: GraphQLObjectType) -> str:
        """Print a plain class with annotated fields."""
        fields = "\n".join(self._print_field(field) for field in type_.fields)
        return "\n".join(
            [
                f"class {type_.name}:",
                textwrap.indent(fields, " " * 4),
            ]
        )

    def _print_enum_type(self, type_: GraphQLEnum) -> str:
        """Print an ``Enum`` subclass."""
        values = "\n".join(self._print_enum_value(value) for value in type_.values)
        return "\n".join(
            [
                f"class {type_.name}(Enum):",
                textwrap.indent(values, " " * 4),
            ]
        )

    def _print_scalar_type(self, type_: GraphQLScalar) -> str:
        """Print a ``NewType`` alias for a custom scalar; builtins need none."""
        if type_.name in self.SCALARS_TO_PYTHON_TYPES:
            return ""
        assert (
            type_.python_type is not None
        ), f"Scalar type must have a python type: {type_.name}"
        return f'{type_.name} = NewType("{type_.name}", {type_.python_type.__name__})'

    def _print_union_type(self, type_: GraphQLUnion) -> str:
        """Print a ``Union[...]`` alias."""
        return f"{type_.name} = Union[{', '.join([t.name for t in type_.types])}]"

    def _print_type(self, type_: GraphQLType) -> str:
        """Dispatch to the printer for the concrete GraphQL type."""
        if isinstance(type_, GraphQLUnion):
            return self._print_union_type(type_)
        if isinstance(type_, GraphQLObjectType):
            return self._print_object_type(type_)
        if isinstance(type_, GraphQLEnum):
            return self._print_enum_type(type_)
        if isinstance(type_, GraphQLScalar):
            return self._print_scalar_type(type_)
        # Bug fix: previously interpolated the builtin ``type`` (printing
        # "<class 'type'>") instead of the offending value.
        raise ValueError(f"Unknown type: {type_}")  # pragma: no cover
__all__ = ["PythonPlugin"] | 564bff00ff-strawberry-graphql | /564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/codegen/plugins/python.py | python.py |
from __future__ import annotations
import textwrap
from typing import TYPE_CHECKING, List, Optional
from strawberry.codegen import CodegenFile, QueryCodegenPlugin
from strawberry.codegen.types import (
GraphQLBoolValue,
GraphQLEnumValue,
GraphQLFieldSelection,
GraphQLInlineFragment,
GraphQLIntValue,
GraphQLList,
GraphQLListValue,
GraphQLOptional,
GraphQLStringValue,
GraphQLVariableReference,
)
if TYPE_CHECKING:
from strawberry.codegen.types import (
GraphQLArgument,
GraphQLArgumentValue,
GraphQLDirective,
GraphQLOperation,
GraphQLSelection,
GraphQLType,
)
class PrintOperationPlugin(QueryCodegenPlugin):
    """Codegen plugin that prints the operation back out as GraphQL text."""

    def generate_code(
        self, types: List[GraphQLType], operation: GraphQLOperation
    ) -> List[CodegenFile]:
        """Render the whole operation into a single ``query.graphql`` file."""
        code = "\n".join(
            [
                (
                    f"{operation.kind} {operation.name}"
                    f"{self._print_operation_variables(operation)}"
                    f"{self._print_directives(operation.directives)} {{"
                ),
                self._print_selections(operation.selections),
                "}",
            ]
        )
        return [CodegenFile("query.graphql", code)]

    def _print_operation_variables(self, operation: GraphQLOperation) -> str:
        """Print the ``($a: A!, $b: B)`` variable list, or "" if none."""
        if not operation.variables:
            return ""
        variables = ", ".join(
            f"${v.name}: {self._print_graphql_type(v.type)}"
            for v in operation.variables
        )
        return f"({variables})"

    def _print_graphql_type(
        self, type: GraphQLType, parent_type: Optional[GraphQLType] = None
    ) -> str:
        """Print a type reference; appends ``!`` unless wrapped in Optional."""
        if isinstance(type, GraphQLOptional):
            return self._print_graphql_type(type.of_type, type)
        if isinstance(type, GraphQLList):
            type_name = f"[{self._print_graphql_type(type.of_type, type)}]"
        else:
            type_name = type.name
        if parent_type and isinstance(parent_type, GraphQLOptional):
            return type_name
        return f"{type_name}!"

    def _print_argument_value(self, value: GraphQLArgumentValue) -> str:
        """Print a literal, enum, list, or variable-reference argument value."""
        if isinstance(value, GraphQLStringValue):
            return f'"{value.value}"'
        if isinstance(value, GraphQLIntValue):
            return str(value.value)
        if isinstance(value, GraphQLVariableReference):
            return f"${value.value}"
        if isinstance(value, GraphQLListValue):
            return f"[{', '.join(self._print_argument_value(v) for v in value.values)}]"
        if isinstance(value, GraphQLEnumValue):
            return value.name
        if isinstance(value, GraphQLBoolValue):
            # GraphQL booleans are lowercase (true/false).
            return str(value.value).lower()
        raise ValueError(f"not supported: {type(value)}")  # pragma: no cover

    def _print_arguments(self, arguments: List[GraphQLArgument]) -> str:
        """Print ``(name: value, ...)``, or "" when there are no arguments."""
        if not arguments:
            return ""
        return (
            "("
            + ", ".join(
                [
                    f"{argument.name}: {self._print_argument_value(argument.value)}"
                    for argument in arguments
                ]
            )
            + ")"
        )

    def _print_directives(self, directives: List[GraphQLDirective]) -> str:
        """Print `` @dir(...) ...``, or "" when there are no directives."""
        if not directives:
            return ""
        return " " + " ".join(
            [
                f"@{directive.name}{self._print_arguments(directive.arguments)}"
                for directive in directives
            ]
        )

    def _print_field_selection(self, selection: GraphQLFieldSelection) -> str:
        """Print one field selection with alias, args, directives, sub-selections."""
        field = (
            f"{selection.field}"
            f"{self._print_arguments(selection.arguments)}"
            f"{self._print_directives(selection.directives)}"
        )
        if selection.alias:
            field = f"{selection.alias}: {field}"
        if selection.selections:
            return field + f" {{\n{self._print_selections(selection.selections)}\n}}"
        return field

    def _print_inline_fragment(self, fragment: GraphQLInlineFragment) -> str:
        """Print an ``... on TypeName { ... }`` fragment."""
        return "\n".join(
            [
                f"... on {fragment.type_condition} {{",
                self._print_selections(fragment.selections),
                "}",
            ]
        )

    def _print_selection(self, selection: GraphQLSelection) -> str:
        """Dispatch between field and inline-fragment printing."""
        if isinstance(selection, GraphQLFieldSelection):
            return self._print_field_selection(selection)
        if isinstance(selection, GraphQLInlineFragment):
            return self._print_inline_fragment(selection)
        raise ValueError(f"Unsupported selection: {selection}")  # pragma: no cover

    def _print_selections(self, selections: List[GraphQLSelection]) -> str:
        """Print a selection list, indented two spaces.

        Fixed: the original last line carried dataset-concatenation residue,
        which is a syntax error.
        """
        selections_text = "\n".join(
            [self._print_selection(selection) for selection in selections]
        )
        return textwrap.indent(selections_text, " " * 2)
from __future__ import annotations
import dataclasses
import warnings
from typing import (
TYPE_CHECKING,
Any,
Callable,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from pydantic import BaseModel
from pydantic.utils import lenient_issubclass
from strawberry.auto import StrawberryAuto
from strawberry.experimental.pydantic.utils import (
get_private_fields,
get_strawberry_type_from_model,
normalize_type,
)
from strawberry.object_type import _process_type, _wrap_dataclass
from strawberry.types.type_resolver import _get_fields
from strawberry.utils.typing import get_list_annotation, is_list
from .exceptions import MissingFieldsListError
if TYPE_CHECKING:
from pydantic.fields import ModelField
def get_type_for_field(field: ModelField) -> Union[Any, Type[None], Type[List]]:
    """Map a pydantic field to the equivalent strawberry *error* type.

    Normalizes the field's outer type first, then delegates the actual
    error-shape conversion to ``field_type_to_type``.
    """
    type_ = field.outer_type_
    type_ = normalize_type(type_)
    return field_type_to_type(type_)
def field_type_to_type(type_) -> Union[Any, List[Any], None]:
    """Build the error-type shape for a (possibly nested list) pydantic type.

    Scalars map to an optional list of error strings; nested lists recurse so
    the error structure mirrors the data structure; pydantic models map to
    their registered strawberry error type.
    """
    # str is the per-item error representation (one error message per value).
    error_class: Any = str
    strawberry_type: Any = error_class
    if is_list(type_):
        child_type = get_list_annotation(type_)
        if is_list(child_type):
            # nested list: recurse so errors nest the same way as the data
            strawberry_type = field_type_to_type(child_type)
        elif lenient_issubclass(child_type, BaseModel):
            strawberry_type = get_strawberry_type_from_model(child_type)
        else:
            strawberry_type = List[error_class]
        strawberry_type = Optional[strawberry_type]
    elif lenient_issubclass(type_, BaseModel):
        strawberry_type = get_strawberry_type_from_model(type_)
        # model errors are returned directly (no extra List wrapper)
        return Optional[strawberry_type]
    # NOTE(review): the list branch falls through here too, so its result gets
    # wrapped again as Optional[List[...]] — presumably intentional; confirm.
    return Optional[List[strawberry_type]]
def error_type(
    model: Type[BaseModel],
    *,
    fields: Optional[List[str]] = None,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
    all_fields: bool = False,
) -> Callable[..., Type]:
    """Class decorator creating a strawberry error type from a pydantic model.

    The decorated class's ``auto`` annotations (or the deprecated *fields*
    list, or *all_fields*) select which model fields to mirror; each becomes
    an optional error field (see ``get_type_for_field``). Extra non-``auto``
    fields declared on the class are kept as-is.

    Raises:
        MissingFieldsListError: no fields were selected at all.
    """

    def wrap(cls):
        model_fields = model.__fields__
        fields_set = set(fields) if fields else set()
        if fields:
            warnings.warn(
                "`fields` is deprecated, use `auto` type annotations instead",
                DeprecationWarning,
                stacklevel=2,
            )
        # Fields annotated with strawberry.auto on the decorated class are
        # selected in addition to any explicit `fields` list.
        existing_fields = getattr(cls, "__annotations__", {})
        fields_set = fields_set.union(
            {
                name
                for name, type_ in existing_fields.items()
                if isinstance(type_, StrawberryAuto)
            }
        )
        if all_fields:
            if fields_set:
                warnings.warn(
                    "Using all_fields overrides any explicitly defined fields "
                    "in the model, using both is likely a bug",
                    stacklevel=2,
                )
            fields_set = set(model_fields.keys())
        if not fields_set:
            raise MissingFieldsListError(cls)
        # Every mirrored field defaults to None (meaning: no error).
        all_model_fields: List[Tuple[str, Any, dataclasses.Field]] = [
            (
                name,
                get_type_for_field(field),
                dataclasses.field(default=None),  # type: ignore[arg-type]
            )
            for name, field in model_fields.items()
            if name in fields_set
        ]
        wrapped = _wrap_dataclass(cls)
        extra_fields = cast(List[dataclasses.Field], _get_fields(wrapped))
        private_fields = get_private_fields(wrapped)
        # Keep any additional non-auto fields declared directly on the class.
        all_model_fields.extend(
            (
                field.name,
                field.type,
                field,
            )
            for field in extra_fields + private_fields
            if not isinstance(field.type, StrawberryAuto)
        )
        cls = dataclasses.make_dataclass(
            cls.__name__,
            all_model_fields,
            bases=cls.__bases__,
        )
        _process_type(
            cls,
            name=name,
            is_input=False,
            is_interface=False,
            description=description,
            directives=directives,
        )
        # Link the pydantic model and the generated strawberry type both ways.
        model._strawberry_type = cls  # type: ignore[attr-defined]
        cls._pydantic_type = model
        return cls

    # Fixed: the original `return wrap` line carried dataset-concatenation
    # residue, which is a syntax error.
    return wrap
from __future__ import annotations
import dataclasses
import sys
import warnings
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Set,
Type,
cast,
)
from strawberry.annotation import StrawberryAnnotation
from strawberry.auto import StrawberryAuto
from strawberry.experimental.pydantic.conversion import (
convert_pydantic_model_to_strawberry_class,
convert_strawberry_class_to_pydantic_model,
)
from strawberry.experimental.pydantic.exceptions import MissingFieldsListError
from strawberry.experimental.pydantic.fields import replace_types_recursively
from strawberry.experimental.pydantic.utils import (
DataclassCreationFields,
ensure_all_auto_fields_in_pydantic,
get_default_factory_for_field,
get_private_fields,
)
from strawberry.field import StrawberryField
from strawberry.object_type import _process_type, _wrap_dataclass
from strawberry.types.type_resolver import _get_fields
from strawberry.utils.dataclasses import add_custom_init_fn
if TYPE_CHECKING:
from graphql import GraphQLResolveInfo
from pydantic.fields import ModelField
def get_type_for_field(field: ModelField, is_input: bool):  # noqa: ANN201
    """Return the strawberry annotation for a pydantic field.

    The field's outer type has its pydantic/strawberry substitutions applied;
    fields that are neither required nor defaulted become Optional.
    """
    base_type = replace_types_recursively(field.outer_type_, is_input)
    has_default: bool = field.default is not None or field.default_factory is not None
    if field.required or has_default:
        return base_type
    return Optional[base_type]
def _build_dataclass_creation_fields(
    field: ModelField,
    is_input: bool,
    existing_fields: Dict[str, StrawberryField],
    auto_fields_set: Set[str],
    use_pydantic_alias: bool,
) -> DataclassCreationFields:
    """Build the ``(name, type, field)`` triple for one pydantic model field.

    Fields marked with ``strawberry.auto`` copy their type from the pydantic
    model; all others keep the type declared on the strawberry class.
    """
    if field.name in auto_fields_set:
        field_type = get_type_for_field(field, is_input)
    else:
        field_type = existing_fields[field.name].type

    existing = existing_fields.get(field.name)
    if existing is not None and existing.base_resolver is not None:
        # if the user has defined a resolver for this field, always use it
        strawberry_field = existing
    else:
        # otherwise we build an appropriate strawberry field that resolves it
        if existing and existing.graphql_name:
            graphql_name = existing.graphql_name
        elif field.has_alias and use_pydantic_alias:
            graphql_name = field.alias
        else:
            graphql_name = None

        strawberry_field = StrawberryField(
            python_name=field.name,
            graphql_name=graphql_name,
            # always unset because we use default_factory instead
            default=dataclasses.MISSING,
            default_factory=get_default_factory_for_field(field),
            type_annotation=StrawberryAnnotation.from_annotation(field_type),
            description=field.field_info.description,
            deprecation_reason=(
                existing.deprecation_reason if existing else None
            ),
            permission_classes=(
                existing.permission_classes if existing else []
            ),
            directives=existing.directives if existing else (),
            metadata=existing.metadata if existing else {},
        )
    return DataclassCreationFields(
        name=field.name,
        field_type=field_type,
        field=strawberry_field,
    )
if TYPE_CHECKING:
from strawberry.experimental.pydantic.conversion_types import (
PydanticModel,
StrawberryTypeFromPydantic,
)
def type(
    model: Type[PydanticModel],
    *,
    fields: Optional[List[str]] = None,
    name: Optional[str] = None,
    is_input: bool = False,
    is_interface: bool = False,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
    all_fields: bool = False,
    use_pydantic_alias: bool = True,
) -> Callable[..., Type[StrawberryTypeFromPydantic[PydanticModel]]]:
    """Decorator factory that turns a class into a strawberry type backed by
    the pydantic ``model``.

    Fields annotated with ``strawberry.auto`` on the decorated class copy
    their type from the pydantic model; ``all_fields=True`` exposes every
    model field.  The generated class gains ``from_pydantic``/``to_pydantic``
    conversion helpers (unless the user already defined them) and is
    registered on the model via ``_strawberry_type`` or
    ``_strawberry_input_type``.
    """

    def wrap(cls: Any) -> Type[StrawberryTypeFromPydantic[PydanticModel]]:
        """Build and return the dataclass-based strawberry type for ``cls``."""
        model_fields = model.__fields__
        original_fields_set = set(fields) if fields else set()

        if fields:
            warnings.warn(
                "`fields` is deprecated, use `auto` type annotations instead",
                DeprecationWarning,
                stacklevel=2,
            )

        existing_fields = getattr(cls, "__annotations__", {})
        # these are the fields that matched a field name in the pydantic model
        # and should copy their alias from the pydantic model
        fields_set = original_fields_set.union(
            {name for name, _ in existing_fields.items() if name in model_fields}
        )
        # these are the fields that were marked with strawberry.auto and
        # should copy their type from the pydantic model
        auto_fields_set = original_fields_set.union(
            {
                name
                for name, type_ in existing_fields.items()
                if isinstance(type_, StrawberryAuto)
            }
        )

        if all_fields:
            if fields_set:
                warnings.warn(
                    "Using all_fields overrides any explicitly defined fields "
                    "in the model, using both is likely a bug",
                    stacklevel=2,
                )
            fields_set = set(model_fields.keys())
            auto_fields_set = set(model_fields.keys())

        if not fields_set:
            raise MissingFieldsListError(cls)

        ensure_all_auto_fields_in_pydantic(
            model=model, auto_fields=auto_fields_set, cls_name=cls.__name__
        )

        wrapped = _wrap_dataclass(cls)
        extra_strawberry_fields = _get_fields(wrapped)
        extra_fields = cast(List[dataclasses.Field], extra_strawberry_fields)
        private_fields = get_private_fields(wrapped)

        extra_fields_dict = {field.name: field for field in extra_strawberry_fields}

        # Convert each selected pydantic field into a dataclass field triple.
        all_model_fields: List[DataclassCreationFields] = [
            _build_dataclass_creation_fields(
                field, is_input, extra_fields_dict, auto_fields_set, use_pydantic_alias
            )
            for field_name, field in model_fields.items()
            if field_name in fields_set
        ]

        # Extra (non-model) and private fields are prepended so they keep
        # their declared position ahead of the generated model fields.
        all_model_fields = [
            DataclassCreationFields(
                name=field.name,
                field_type=field.type,
                field=field,
            )
            for field in extra_fields + private_fields
            if field.name not in fields_set
        ] + all_model_fields

        # Implicitly define `is_type_of` to support interfaces/unions that use
        # pydantic objects (not the corresponding strawberry type)
        @classmethod  # type: ignore
        def is_type_of(cls: Type, obj: Any, _info: GraphQLResolveInfo) -> bool:
            return isinstance(obj, (cls, model))

        namespace = {"is_type_of": is_type_of}
        # We need to tell the difference between a from_pydantic method that is
        # inherited from a base class and one that is defined by the user in the
        # decorated class. We want to override the method only if it is
        # inherited. To tell the difference, we compare the class name to the
        # fully qualified name of the method, which will end in <class>.from_pydantic
        has_custom_from_pydantic = hasattr(
            cls, "from_pydantic"
        ) and cls.from_pydantic.__qualname__.endswith(f"{cls.__name__}.from_pydantic")
        has_custom_to_pydantic = hasattr(
            cls, "to_pydantic"
        ) and cls.to_pydantic.__qualname__.endswith(f"{cls.__name__}.to_pydantic")

        if has_custom_from_pydantic:
            namespace["from_pydantic"] = cls.from_pydantic
        if has_custom_to_pydantic:
            namespace["to_pydantic"] = cls.to_pydantic

        if hasattr(cls, "resolve_reference"):
            namespace["resolve_reference"] = cls.resolve_reference

        kwargs: Dict[str, object] = {}

        # Python 3.10.1 introduces the kw_only param to `make_dataclass`.
        # If we're on an older version then generate our own custom init function
        # Note: Python 3.10.0 added the `kw_only` param to dataclasses, it was
        # just missed from the `make_dataclass` function:
        # https://github.com/python/cpython/issues/89961
        if sys.version_info >= (3, 10, 1):
            kwargs["kw_only"] = dataclasses.MISSING
        else:
            kwargs["init"] = False

        cls = dataclasses.make_dataclass(
            cls.__name__,
            [field.to_tuple() for field in all_model_fields],
            bases=cls.__bases__,
            namespace=namespace,
            **kwargs,  # type: ignore
        )

        if sys.version_info < (3, 10, 1):
            add_custom_init_fn(cls)

        _process_type(
            cls,
            name=name,
            is_input=is_input,
            is_interface=is_interface,
            description=description,
            directives=directives,
        )

        # Register the generated strawberry type on the pydantic model so the
        # two sides can find each other at conversion time.
        if is_input:
            model._strawberry_input_type = cls  # type: ignore
        else:
            model._strawberry_type = cls  # type: ignore
        cls._pydantic_type = model

        def from_pydantic_default(
            instance: PydanticModel, extra: Optional[Dict[str, Any]] = None
        ) -> StrawberryTypeFromPydantic[PydanticModel]:
            # Default converter: copy model fields (plus `extra`) onto cls.
            return convert_pydantic_model_to_strawberry_class(
                cls=cls, model_instance=instance, extra=extra
            )

        def to_pydantic_default(self, **kwargs) -> PydanticModel:
            # Default converter: rebuild the pydantic model from our fields,
            # letting explicit keyword arguments win over converted values.
            instance_kwargs = {
                f.name: convert_strawberry_class_to_pydantic_model(
                    getattr(self, f.name)
                )
                for f in dataclasses.fields(self)
            }
            instance_kwargs.update(kwargs)
            return model(**instance_kwargs)

        if not has_custom_from_pydantic:
            cls.from_pydantic = staticmethod(from_pydantic_default)
        if not has_custom_to_pydantic:
            cls.to_pydantic = to_pydantic_default

        return cls

    return wrap
def input(
    model: Type[PydanticModel],
    *,
    fields: Optional[List[str]] = None,
    name: Optional[str] = None,
    is_interface: bool = False,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
    all_fields: bool = False,
    use_pydantic_alias: bool = True,
) -> Callable[..., Type[StrawberryTypeFromPydantic[PydanticModel]]]:
    """Convenience decorator for creating an input type from a Pydantic model.
    Equal to partial(type, is_input=True)
    See https://github.com/strawberry-graphql/strawberry/issues/1830
    """
    shared_kwargs = {
        "fields": fields,
        "name": name,
        "is_interface": is_interface,
        "description": description,
        "directives": directives,
        "all_fields": all_fields,
        "use_pydantic_alias": use_pydantic_alias,
    }
    return type(model=model, is_input=True, **shared_kwargs)
def interface(
    model: Type[PydanticModel],
    *,
    fields: Optional[List[str]] = None,
    name: Optional[str] = None,
    is_input: bool = False,
    description: Optional[str] = None,
    directives: Optional[Sequence[object]] = (),
    all_fields: bool = False,
    use_pydantic_alias: bool = True,
) -> Callable[..., Type[StrawberryTypeFromPydantic[PydanticModel]]]:
    """Convenience decorator for creating an interface type from a Pydantic model.
    Equal to partial(type, is_interface=True)
    See https://github.com/strawberry-graphql/strawberry/issues/1830
    """
    shared_kwargs = {
        "fields": fields,
        "name": name,
        "is_input": is_input,
        "description": description,
        "directives": directives,
        "all_fields": all_fields,
        "use_pydantic_alias": use_pydantic_alias,
    }
    return type(model=model, is_interface=True, **shared_kwargs)
from __future__ import annotations
import dataclasses
from typing import (
TYPE_CHECKING,
Any,
List,
NamedTuple,
NoReturn,
Set,
Tuple,
Type,
Union,
cast,
)
from pydantic.utils import smart_deepcopy
from strawberry.experimental.pydantic.exceptions import (
AutoFieldsNotInBaseModelError,
BothDefaultAndDefaultFactoryDefinedError,
UnregisteredTypeException,
)
from strawberry.private import is_private
from strawberry.unset import UNSET
from strawberry.utils.typing import (
get_list_annotation,
get_optional_annotation,
is_list,
is_optional,
)
if TYPE_CHECKING:
from pydantic import BaseModel
from pydantic.fields import ModelField
from pydantic.typing import NoArgAnyCallable
def normalize_type(type_) -> Any:
    """Strip ``Optional`` wrappers, recursing into list annotations."""
    if is_list(type_):
        inner = normalize_type(get_list_annotation(type_))
        return List[inner]  # type: ignore
    return get_optional_annotation(type_) if is_optional(type_) else type_
def get_strawberry_type_from_model(type_: Any) -> Any:
    """Return the strawberry type registered for a pydantic model.

    Raises ``UnregisteredTypeException`` when the model was never decorated
    with a strawberry pydantic type decorator.
    """
    if not hasattr(type_, "_strawberry_type"):
        raise UnregisteredTypeException(type_)
    return type_._strawberry_type
def get_private_fields(cls: Type) -> List[dataclasses.Field]:
    """Collect the dataclass fields of ``cls`` typed as ``strawberry.Private``."""
    return [f for f in dataclasses.fields(cls) if is_private(f.type)]
class DataclassCreationFields(NamedTuple):
    """Fields required for the fields parameter of make_dataclass"""

    name: str
    field_type: Type
    field: dataclasses.Field

    def to_tuple(self) -> Tuple[str, Type, dataclasses.Field]:
        """Return ``(name, type, field)`` as ``make_dataclass`` expects it."""
        return (self.name, self.field_type, self.field)
def get_default_factory_for_field(
    field: ModelField,
) -> Union[NoArgAnyCallable, dataclasses._MISSING_TYPE]:
    """
    Compute the ``default_factory`` for a dataclass field built from *field*.

    Mutable pydantic defaults are wrapped with pydantic's ``smart_deepcopy``
    so each instance receives its own copy.  Returns ``dataclasses.MISSING``
    when the field has no usable default at all.
    """
    # Normalize dataclasses.MISSING to UNSET so the checks below are uniform.
    factory = field.default_factory
    if factory is dataclasses.MISSING:
        factory = UNSET
    default = field.default
    if default is dataclasses.MISSING:
        default = UNSET

    factory_defined = factory is not None and factory is not UNSET
    default_defined = default is not None and default is not UNSET

    # defining both default and default_factory is not supported
    if factory_defined and default_defined:
        raise BothDefaultAndDefaultFactoryDefinedError(
            default=default, default_factory=cast("NoArgAnyCallable", factory)
        )

    # an explicit factory wins
    if factory_defined:
        return cast("NoArgAnyCallable", factory)

    # a plain default is wrapped so mutable values are copied per instance
    if default_defined:
        return lambda: smart_deepcopy(default)

    # optional fields without an explicit default fall back to None
    if not field.required:
        return lambda: None

    return dataclasses.MISSING
def ensure_all_auto_fields_in_pydantic(
    model: Type[BaseModel], auto_fields: Set[str], cls_name: str
) -> Union[NoReturn, None]:
    """Raise if any ``strawberry.auto`` field name is absent from *model*."""
    missing = list(auto_fields - model.__fields__.keys())
    if missing:
        raise AutoFieldsNotInBaseModelError(
            fields=missing, cls_name=cls_name, model=model
        )
    return None
from __future__ import annotations
import copy
import dataclasses
from typing import TYPE_CHECKING, Any, Union, cast
from strawberry.enum import EnumDefinition
from strawberry.type import StrawberryList, StrawberryOptional
from strawberry.union import StrawberryUnion
if TYPE_CHECKING:
from strawberry.field import StrawberryField
from strawberry.type import StrawberryType
def _convert_from_pydantic_to_strawberry_type(
    type_: Union[StrawberryType, type], data_from_model=None, extra=None
):
    """Convert a value from a pydantic model into the shape expected by the
    strawberry type ``type_``.

    ``data_from_model`` takes precedence over ``extra``; ``extra`` mirrors
    the structure of the data and supplies values absent from the model.
    """
    data = data_from_model if data_from_model is not None else extra
    if isinstance(type_, StrawberryOptional):
        # None stays None; otherwise unwrap and convert the inner type.
        if data is None:
            return data
        return _convert_from_pydantic_to_strawberry_type(
            type_.of_type, data_from_model=data, extra=extra
        )
    if isinstance(type_, StrawberryUnion):
        # Pick the first union member whose pydantic (or plain) type matches
        # the runtime type of the data.
        for option_type in type_.types:
            if hasattr(option_type, "_pydantic_type"):
                source_type = option_type._pydantic_type
            else:
                source_type = cast(type, option_type)
            if isinstance(data, source_type):
                return _convert_from_pydantic_to_strawberry_type(
                    option_type, data_from_model=data, extra=extra
                )
    if isinstance(type_, EnumDefinition):
        return data
    if isinstance(type_, StrawberryList):
        # Convert element-wise, pairing each item with its matching extra.
        items = []
        for index, item in enumerate(data):
            items.append(
                _convert_from_pydantic_to_strawberry_type(
                    type_.of_type,
                    data_from_model=item,
                    extra=extra[index] if extra else None,
                )
            )
        return items
    if hasattr(type_, "_type_definition"):
        # in the case of an interface, the concrete type may be more specific
        # than the type in the field definition
        # don't check _strawberry_input_type because inputs can't be interfaces
        if hasattr(type(data), "_strawberry_type"):
            type_ = type(data)._strawberry_type
        if hasattr(type_, "from_pydantic"):
            return type_.from_pydantic(data_from_model, extra)
        return convert_pydantic_model_to_strawberry_class(
            type_, model_instance=data_from_model, extra=extra
        )
    # Scalars and anything unrecognized pass through unchanged.
    return data
def convert_pydantic_model_to_strawberry_class(
    cls, *, model_instance=None, extra=None
) -> Any:
    """Instantiate the strawberry type ``cls`` from a pydantic instance,
    merging in values from ``extra`` for fields the model does not carry."""
    extra = extra or {}
    kwargs = {}
    for field_ in cls._type_definition.fields:
        field = cast("StrawberryField", field_)
        python_name = field.python_name
        # only convert and add fields to kwargs if they are present in the
        # `__init__` method of the class
        if not field.init:
            continue
        model_value = (
            getattr(model_instance, python_name, None) if model_instance else None
        )
        kwargs[python_name] = _convert_from_pydantic_to_strawberry_type(
            field.type, model_value, extra=extra.get(python_name, None)
        )
    return cls(**kwargs)
def convert_strawberry_class_to_pydantic_model(obj) -> Any:
    """Recursively turn a strawberry instance back into plain pydantic data.

    Objects exposing ``to_pydantic`` delegate to it; dataclasses become
    dicts; containers are rebuilt with converted members; everything else is
    deep-copied.
    """
    convert = convert_strawberry_class_to_pydantic_model
    if hasattr(obj, "to_pydantic"):
        return obj.to_pydantic()
    if dataclasses.is_dataclass(obj):
        return {
            f.name: convert(getattr(obj, f.name)) for f in dataclasses.fields(obj)
        }
    if isinstance(obj, (list, tuple)):
        # Assume we can create an object of this type by passing in a
        # generator (which is not true for namedtuples, not supported).
        return type(obj)(convert(item) for item in obj)
    if isinstance(obj, dict):
        # Preserve the concrete dict subclass of the original mapping.
        return type(obj)((convert(k), convert(v)) for k, v in obj.items())
    return copy.deepcopy(obj)
import builtins
from decimal import Decimal
from typing import Any, List, Optional, Type
from uuid import UUID
import pydantic
from pydantic import BaseModel
from pydantic.typing import get_args, get_origin, is_new_type, new_type_supertype
from pydantic.utils import lenient_issubclass
from strawberry.experimental.pydantic.exceptions import (
UnregisteredTypeException,
UnsupportedTypeError,
)
from strawberry.types.types import TypeDefinition
try:
from typing import GenericAlias as TypingGenericAlias # type: ignore
except ImportError:
# python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)
TypingGenericAlias = ()
# Map of pydantic type/constructor names to the plain python type exposed in
# the GraphQL schema.  ``None`` marks pydantic types with no GraphQL
# equivalent; ``get_basic_type`` raises UnsupportedTypeError for those.
ATTR_TO_TYPE_MAP = {
    "NoneStr": Optional[str],
    "NoneBytes": Optional[bytes],
    "StrBytes": None,
    "NoneStrBytes": None,
    "StrictStr": str,
    "ConstrainedBytes": bytes,
    "conbytes": bytes,
    "ConstrainedStr": str,
    "constr": str,
    "EmailStr": str,
    "PyObject": None,
    "ConstrainedInt": int,
    "conint": int,
    "PositiveInt": int,
    "NegativeInt": int,
    "ConstrainedFloat": float,
    "confloat": float,
    "PositiveFloat": float,
    "NegativeFloat": float,
    "ConstrainedDecimal": Decimal,
    "condecimal": Decimal,
    "UUID1": UUID,
    "UUID3": UUID,
    "UUID4": UUID,
    "UUID5": UUID,
    "FilePath": None,
    "DirectoryPath": None,
    "Json": None,
    "JsonWrapper": None,
    "SecretStr": str,
    "SecretBytes": bytes,
    "StrictBool": bool,
    "StrictInt": int,
    "StrictFloat": float,
    "PaymentCardNumber": None,
    "ByteSize": None,
    "AnyUrl": str,
    "AnyHttpUrl": str,
    "HttpUrl": str,
    "PostgresDsn": str,
    "RedisDsn": str,
}
# Same mapping keyed by the actual pydantic attribute rather than its name,
# restricted to attributes present in the installed pydantic version.
FIELDS_MAP = {
    getattr(pydantic, field_name): type
    for field_name, type in ATTR_TO_TYPE_MAP.items()
    if hasattr(pydantic, field_name)
}
def get_basic_type(type_) -> Type[Any]:
    """Reduce a pydantic-specific type to the plain python type it maps to.

    Raises ``UnsupportedTypeError`` for pydantic types with no GraphQL
    equivalent (``Json``, ``FilePath``, ...).
    """
    # Constrained scalar types collapse to their builtin equivalents.
    for constrained, plain in (
        (pydantic.ConstrainedInt, int),
        (pydantic.ConstrainedFloat, float),
        (pydantic.ConstrainedStr, str),
    ):
        if lenient_issubclass(type_, constrained):
            return plain
    if lenient_issubclass(type_, pydantic.ConstrainedList):
        return List[get_basic_type(type_.item_type)]  # type: ignore
    if type_ in FIELDS_MAP:
        mapped = FIELDS_MAP.get(type_)
        if mapped is None:
            raise UnsupportedTypeError()
        type_ = mapped
    if is_new_type(type_):
        return new_type_supertype(type_)
    return type_
def replace_pydantic_types(type_: Any, is_input: bool) -> Any:
    """Swap a pydantic ``BaseModel`` for its registered strawberry type.

    Non-model types are returned unchanged; an unregistered model raises
    ``UnregisteredTypeException``.
    """
    if not lenient_issubclass(type_, BaseModel):
        return type_
    attr = "_strawberry_input_type" if is_input else "_strawberry_type"
    if not hasattr(type_, attr):
        raise UnregisteredTypeException(type_)
    return getattr(type_, attr)
def replace_types_recursively(type_: Any, is_input: bool) -> Any:
    """Runs the conversions recursively into the arguments of generic types if any"""
    basic_type = get_basic_type(type_)
    replaced_type = replace_pydantic_types(basic_type, is_input)
    origin = get_origin(type_)
    if not origin or not hasattr(type_, "__args__"):
        # Not a parameterized generic: nothing further to recurse into.
        return replaced_type
    converted = tuple(
        replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)
    )
    if isinstance(replaced_type, TypingGenericAlias):
        # Builtin generics (list[int], tuple[str, ...]) are rebuilt directly.
        return TypingGenericAlias(origin, converted)
    # typing generics expose copy_with to rebuild with new arguments.
    replaced_type = replaced_type.copy_with(converted)
    if isinstance(replaced_type, TypeDefinition):
        # TODO: Not sure if this is necessary. No coverage in tests
        # TODO: Unnecessary with StrawberryObject
        replaced_type = builtins.type(
            replaced_type.name,
            (),
            {"_type_definition": replaced_type},
        )
    return replaced_type
from __future__ import annotations
import contextlib
import inspect
import warnings
from asyncio import iscoroutinefunction
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Callable,
Iterator,
List,
NamedTuple,
Optional,
Type,
Union,
)
from strawberry.extensions import SchemaExtension
from strawberry.utils.await_maybe import AwaitableOrValue, await_maybe
if TYPE_CHECKING:
from types import TracebackType
from strawberry.extensions.base_extension import Hook
class WrappedHook(NamedTuple):
    """An extension paired with its initialized lifecycle hook generator."""

    extension: SchemaExtension
    # Generator produced by calling the hook; advanced on enter/exit.
    initialized_hook: Union[AsyncIterator[None], Iterator[None]]
    # Whether initialized_hook must be driven with __anext__.
    is_async: bool
class ExtensionContextManagerBase:
    """Drives one lifecycle stage (operation/validation/parsing/execution)
    of every registered ``SchemaExtension``.

    Subclasses configure ``HOOK_NAME`` (the generator-style hook method) and
    ``LEGACY_ENTER``/``LEGACY_EXIT`` (the deprecated event-style hook names).
    Instances are used as sync or async context managers around the stage.
    """

    __slots__ = ("hooks", "deprecation_message", "default_hook")

    def __init_subclass__(cls):
        # Build the deprecation warning text once per subclass.
        cls.DEPRECATION_MESSAGE = (
            f"Event driven styled extensions for "
            f"{cls.LEGACY_ENTER} or {cls.LEGACY_EXIT}"
            f" are deprecated, use {cls.HOOK_NAME} instead"
        )

    HOOK_NAME: str
    DEPRECATION_MESSAGE: str
    LEGACY_ENTER: str
    LEGACY_EXIT: str

    def __init__(self, extensions: List[SchemaExtension]):
        self.hooks: List[WrappedHook] = []
        self.default_hook: Hook = getattr(SchemaExtension, self.HOOK_NAME)
        for extension in extensions:
            hook = self.get_hook(extension)
            if hook:
                self.hooks.append(hook)

    def get_hook(self, extension: SchemaExtension) -> Optional[WrappedHook]:
        """Return the wrapped hook for *extension*, or None if it has none.

        Raises:
            ValueError: if the extension defines both the legacy and the new
                style hook, or if the hook attribute is not callable.
        """
        on_start = getattr(extension, self.LEGACY_ENTER, None)
        on_end = getattr(extension, self.LEGACY_EXIT, None)

        is_legacy = on_start is not None or on_end is not None

        # Only treat the hook as defined when the subclass overrides it.
        hook_fn: Optional[Hook] = getattr(type(extension), self.HOOK_NAME)
        hook_fn = hook_fn if hook_fn is not self.default_hook else None

        if is_legacy and hook_fn is not None:
            # Bug fix: the second fragment previously lacked the f-prefix, so
            # the literal "{self.HOOK_NAME}" appeared in the error message.
            raise ValueError(
                f"{extension} defines both legacy and new style extension hooks for "
                f"{self.HOOK_NAME}"
            )
        elif is_legacy:
            warnings.warn(self.DEPRECATION_MESSAGE, DeprecationWarning, stacklevel=3)
            return self.from_legacy(extension, on_start, on_end)

        if hook_fn:
            if inspect.isgeneratorfunction(hook_fn):
                return WrappedHook(extension, hook_fn(extension), False)

            if inspect.isasyncgenfunction(hook_fn):
                return WrappedHook(extension, hook_fn(extension), True)

            if callable(hook_fn):
                return self.from_callable(extension, hook_fn)

            raise ValueError(
                f"Hook {self.HOOK_NAME} on {extension} "
                f"must be callable, received {hook_fn!r}"
            )

        return None  # Current extension does not define a hook for this lifecycle stage

    @staticmethod
    def from_legacy(
        extension: SchemaExtension,
        on_start: Optional[Callable[[], None]] = None,
        on_end: Optional[Callable[[], None]] = None,
    ) -> WrappedHook:
        """Wrap legacy enter/exit callbacks in a generator-style hook."""
        if iscoroutinefunction(on_start) or iscoroutinefunction(on_end):

            async def iterator():
                if on_start:
                    await await_maybe(on_start())
                yield
                if on_end:
                    await await_maybe(on_end())

            hook = iterator()
            return WrappedHook(extension, hook, True)

        else:

            def iterator():
                if on_start:
                    on_start()
                yield
                if on_end:
                    on_end()

            hook = iterator()
            return WrappedHook(extension, hook, False)

    @staticmethod
    def from_callable(
        extension: SchemaExtension,
        func: Callable[[SchemaExtension], AwaitableOrValue[Any]],
    ) -> WrappedHook:
        """Wrap a plain callable hook in a generator-style hook."""
        if iscoroutinefunction(func):

            async def async_iterator():
                await func(extension)
                yield

            hook = async_iterator()
            return WrappedHook(extension, hook, True)
        else:

            def iterator():
                func(extension)
                yield

            hook = iterator()
            return WrappedHook(extension, hook, False)

    def run_hooks_sync(self, is_exit: bool = False) -> None:
        """Run extensions synchronously."""
        # On exit, exhausted generators raise Stop(Async)Iteration; suppress.
        ctx = (
            contextlib.suppress(StopIteration, StopAsyncIteration)
            if is_exit
            else contextlib.nullcontext()
        )
        for hook in self.hooks:
            with ctx:
                if hook.is_async:
                    raise RuntimeError(
                        f"SchemaExtension hook {hook.extension}.{self.HOOK_NAME} "
                        "failed to complete synchronously."
                    )
                else:
                    hook.initialized_hook.__next__()  # type: ignore[union-attr]

    async def run_hooks_async(self, is_exit: bool = False) -> None:
        """Run extensions asynchronously with support for sync lifecycle hooks.

        The ``is_exit`` flag is required as a `StopIteration` cannot be raised from
        within a coroutine.
        """
        ctx = (
            contextlib.suppress(StopIteration, StopAsyncIteration)
            if is_exit
            else contextlib.nullcontext()
        )
        for hook in self.hooks:
            with ctx:
                if hook.is_async:
                    await hook.initialized_hook.__anext__()  # type: ignore[union-attr]
                else:
                    hook.initialized_hook.__next__()  # type: ignore[union-attr]

    def __enter__(self):
        self.run_hooks_sync()

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ):
        self.run_hooks_sync(is_exit=True)

    async def __aenter__(self):
        await self.run_hooks_async()

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ):
        await self.run_hooks_async(is_exit=True)
class OperationContextManager(ExtensionContextManagerBase):
    # Drives SchemaExtension.on_operation (legacy: on_request_start/end).
    HOOK_NAME = SchemaExtension.on_operation.__name__
    LEGACY_ENTER = "on_request_start"
    LEGACY_EXIT = "on_request_end"
class ValidationContextManager(ExtensionContextManagerBase):
    # Drives SchemaExtension.on_validate (legacy: on_validation_start/end).
    HOOK_NAME = SchemaExtension.on_validate.__name__
    LEGACY_ENTER = "on_validation_start"
    LEGACY_EXIT = "on_validation_end"
class ParsingContextManager(ExtensionContextManagerBase):
    # Drives SchemaExtension.on_parse (legacy: on_parsing_start/end).
    HOOK_NAME = SchemaExtension.on_parse.__name__
    LEGACY_ENTER = "on_parsing_start"
    LEGACY_EXIT = "on_parsing_end"
class ExecutingContextManager(ExtensionContextManagerBase):
    # Drives SchemaExtension.on_execute (legacy: on_executing_start/end).
    HOOK_NAME = SchemaExtension.on_execute.__name__
    LEGACY_ENTER = "on_executing_start"
    LEGACY_EXIT = "on_executing_end"
from __future__ import annotations
import inspect
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
from graphql import MiddlewareManager
from strawberry.extensions.context import (
ExecutingContextManager,
OperationContextManager,
ParsingContextManager,
ValidationContextManager,
)
from strawberry.utils.await_maybe import await_maybe
from . import SchemaExtension
if TYPE_CHECKING:
from strawberry.types import ExecutionContext
class SchemaExtensionsRunner:
    """Instantiates schema extensions and exposes their lifecycle stage
    context managers plus result collection helpers."""

    extensions: List[SchemaExtension]

    def __init__(
        self,
        execution_context: ExecutionContext,
        extensions: Optional[
            List[Union[Type[SchemaExtension], SchemaExtension]]
        ] = None,
    ):
        self.execution_context = execution_context
        initialized: List[SchemaExtension] = []
        for extension in extensions or []:
            if isinstance(extension, SchemaExtension):
                # Already instantiated: just attach the execution context.
                extension.execution_context = execution_context
                initialized.append(extension)
            else:
                initialized.append(extension(execution_context=execution_context))
        self.extensions = initialized

    def operation(self) -> OperationContextManager:
        return OperationContextManager(self.extensions)

    def validation(self) -> ValidationContextManager:
        return ValidationContextManager(self.extensions)

    def parsing(self) -> ParsingContextManager:
        return ParsingContextManager(self.extensions)

    def executing(self) -> ExecutingContextManager:
        return ExecutingContextManager(self.extensions)

    def get_extensions_results_sync(self) -> Dict[str, Any]:
        """Merge results from all extensions; async hooks are an error."""
        data: Dict[str, Any] = {}
        for extension in self.extensions:
            if inspect.iscoroutinefunction(extension.get_results):
                msg = "Cannot use async extension hook during sync execution"
                raise RuntimeError(msg)
            data.update(extension.get_results())  # type: ignore
        return data

    async def get_extensions_results(self) -> Dict[str, Any]:
        """Merge results from all extensions, awaiting async hooks."""
        data: Dict[str, Any] = {}
        for extension in self.extensions:
            data.update(await await_maybe(extension.get_results()))
        return data

    def as_middleware_manager(self, *additional_middlewares) -> MiddlewareManager:
        middlewares = (*self.extensions, *additional_middlewares)
        return MiddlewareManager(*middlewares)
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Tuple
from strawberry.extensions import SchemaExtension
from strawberry.types import Info
from strawberry.types.nodes import convert_arguments
from strawberry.utils.await_maybe import await_maybe
if TYPE_CHECKING:
from graphql import DirectiveNode, GraphQLResolveInfo
from strawberry.directive import StrawberryDirective
from strawberry.field import StrawberryField
from strawberry.schema.schema import Schema
from strawberry.utils.await_maybe import AwaitableOrValue
SPECIFIED_DIRECTIVES = {"include", "skip"}
class DirectivesExtension(SchemaExtension):
    async def resolve(
        self, _next, root, info: GraphQLResolveInfo, *args, **kwargs
    ) -> AwaitableOrValue[Any]:
        """Resolve the field, then pipe the value through every custom
        directive attached to the field node, in document order."""
        result = await await_maybe(_next(root, info, *args, **kwargs))
        for directive in info.field_nodes[0].directives:
            if directive.name.value in SPECIFIED_DIRECTIVES:
                # `include`/`skip` are handled by graphql-core itself.
                continue
            strawberry_directive, arguments = process_directive(
                directive, result, info
            )
            result = await await_maybe(strawberry_directive.resolver(**arguments))
        return result
class DirectivesExtensionSync(SchemaExtension):
    def resolve(
        self, _next, root, info: GraphQLResolveInfo, *args, **kwargs
    ) -> AwaitableOrValue[Any]:
        """Resolve the field, then pipe the value through every custom
        directive attached to the field node, in document order."""
        result = _next(root, info, *args, **kwargs)
        for directive in info.field_nodes[0].directives:
            if directive.name.value in SPECIFIED_DIRECTIVES:
                # `include`/`skip` are handled by graphql-core itself.
                continue
            strawberry_directive, arguments = process_directive(
                directive, result, info
            )
            result = strawberry_directive.resolver(**arguments)
        return result
def process_directive(
    directive: DirectiveNode,
    value: Any,
    info: GraphQLResolveInfo,
) -> Tuple[StrawberryDirective, Dict[str, Any]]:
    """Look up the ``StrawberryDirective`` for ``directive`` and build the
    keyword arguments its resolver should be called with."""
    directive_name = directive.name.value
    schema: Schema = info.schema._strawberry_schema  # type: ignore

    strawberry_directive = schema.get_directive_by_name(directive_name)
    assert strawberry_directive is not None, f"Directive {directive_name} not found"

    arguments = convert_arguments(info=info, nodes=directive.arguments)
    resolver = strawberry_directive.resolver

    info_parameter = resolver.info_parameter
    value_parameter = resolver.value_parameter
    if info_parameter:
        # The resolver asked for the strawberry Info object; build it from
        # the raw GraphQL info plus the field being resolved.
        field: StrawberryField = schema.get_field_for_type(  # type: ignore
            field_name=info.field_name,
            type_name=info.parent_type.name,
        )
        arguments[info_parameter.name] = Info(_raw_info=info, _field=field)
    if value_parameter:
        arguments[value_parameter.name] = value
    return strawberry_directive, arguments
from __future__ import annotations
import itertools
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Union
from strawberry.utils.cached_property import cached_property
if TYPE_CHECKING:
from strawberry.field import StrawberryField
from strawberry.types import Info
SyncExtensionResolver = Callable[..., Any]
AsyncExtensionResolver = Callable[..., Awaitable[Any]]
class FieldExtension:
    """Base class for field-level extensions that wrap a field's resolver."""

    def apply(self, field: StrawberryField) -> None:  # pragma: no cover
        # Hook for one-time setup when the extension is attached to a field.
        pass

    def resolve(
        self, next_: SyncExtensionResolver, source: Any, info: Info, **kwargs
    ) -> Any:  # pragma: no cover
        # Override to take part in sync resolution; call next_ to continue
        # down the resolver chain.
        raise NotImplementedError(
            "Sync Resolve is not supported for this Field Extension"
        )

    async def resolve_async(
        self, next_: AsyncExtensionResolver, source: Any, info: Info, **kwargs
    ) -> Any:  # pragma: no cover
        # Override to take part in async resolution; await next_ to continue
        # down the resolver chain.
        raise NotImplementedError(
            "Async Resolve is not supported for this Field Extension"
        )

    @cached_property
    def supports_sync(self) -> bool:
        # True when the subclass overrides resolve().
        return type(self).resolve is not FieldExtension.resolve

    @cached_property
    def supports_async(self) -> bool:
        # True when the subclass overrides resolve_async().
        return type(self).resolve_async is not FieldExtension.resolve_async
class SyncToAsyncExtension(FieldExtension):
    """Helper class for mixing async extensions with sync resolvers.
    Applied automatically"""

    async def resolve_async(
        self, next_: AsyncExtensionResolver, source: Any, info: Info, **kwargs
    ) -> Any:
        # next_ is a sync resolver here, so calling it returns the final
        # value directly; returning it from this coroutine makes the sync
        # chain usable from async extensions above us.
        return next_(source, info, **kwargs)
def _get_sync_resolvers(
extensions: list[FieldExtension],
) -> list[SyncExtensionResolver]:
return [extension.resolve for extension in extensions]
def _get_async_resolvers(
extensions: list[FieldExtension],
) -> list[AsyncExtensionResolver]:
return [extension.resolve_async for extension in extensions]
def build_field_extension_resolvers(
    field: StrawberryField,
) -> list[Union[SyncExtensionResolver, AsyncExtensionResolver]]:
    """Return the chain of extension resolver hooks to use for ``field``.

    Verifies that every extension on the field is compatible with the
    field's resolver (sync vs async).  When the resolver is sync but some
    extensions are async-only, a ``SyncToAsyncExtension`` bridge is
    inserted so the async extensions can wrap the sync resolver — this is
    only possible when all sync-only extensions run before the first
    async-only one.

    Returns a list of bound ``resolve``/``resolve_async`` methods, one per
    extension (plus the inserted bridge when needed), in execution order.

    Raises:
        TypeError: if sync-only and async-only extensions are mixed in an
            order that cannot be bridged.
    """
    if not field.extensions:
        return []  # pragma: no cover
    non_async_extensions = [
        extension for extension in field.extensions if not extension.supports_async
    ]
    non_async_extension_names = ",".join(
        [extension.__class__.__name__ for extension in non_async_extensions]
    )
    if field.is_async:
        # An async resolver cannot feed sync-only extensions (their result
        # would need awaiting first), so every extension must support async.
        if len(non_async_extensions) > 0:
            raise TypeError(
                f"Cannot add sync-only extension(s) {non_async_extension_names} "
                f"to the async resolver of Field {field.name}. "
                f"Please add a resolve_async method to the extension(s)."
            )
        return _get_async_resolvers(field.extensions)
    else:
        # Try to wrap all sync resolvers in async so that we can use async extensions
        # on sync fields. This is not possible the other way around since
        # the result of an async resolver would have to be awaited before calling
        # the sync extension, making it impossible for the extension to modify
        # any arguments.
        non_sync_extensions = [
            extension for extension in field.extensions if not extension.supports_sync
        ]
        if len(non_sync_extensions) == 0:
            # Resolve everything sync
            return _get_sync_resolvers(field.extensions)
        # We have async-only extensions and need to wrap the resolver
        # That means we can't have sync-only extensions after the first async one
        # Check if we have a chain of sync-compatible
        # extensions before the async extensions
        # -> S-S-S-S-A-A-A-A
        found_sync_extensions = 0
        # All sync only extensions must be found before the first async-only one
        found_sync_only_extensions = 0
        for extension in field.extensions:
            # ...A, abort
            if extension in non_sync_extensions:
                break
            # ...S
            if extension in non_async_extensions:
                found_sync_only_extensions += 1
            found_sync_extensions += 1
        # Length of the chain equals length of non async extensions
        # All sync extensions run first
        if len(non_async_extensions) == found_sync_only_extensions:
            # Prepend sync to async extension to field extensions
            return list(
                itertools.chain(
                    _get_sync_resolvers(field.extensions[:found_sync_extensions]),
                    [SyncToAsyncExtension().resolve_async],
                    _get_async_resolvers(field.extensions[found_sync_extensions:]),
                )
            )
        # Some sync extensions follow the first async-only extension. Error case
        async_extension_names = ",".join(
            [extension.__class__.__name__ for extension in non_sync_extensions]
        )
        raise TypeError(
            f"Cannot mix async-only extension(s) {async_extension_names} "
            f"with sync-only extension(s) {non_async_extension_names} "
            f"on Field {field.name}. "
            f"If possible try to change the execution order so that all sync-only "
            f"extensions are executed first."
        )
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Type, Union
from graphql import GraphQLError
from graphql.language import (
FieldNode,
FragmentDefinitionNode,
FragmentSpreadNode,
InlineFragmentNode,
OperationDefinitionNode,
)
from graphql.validation import ValidationRule
from strawberry.extensions import AddValidationRules
from strawberry.extensions.utils import is_introspection_key
if TYPE_CHECKING:
from graphql.language import DefinitionNode, Node
from graphql.validation import ValidationContext
IgnoreType = Union[Callable[[str], bool], re.Pattern, str]
class QueryDepthLimiter(AddValidationRules):
    """
    Add a validator to limit the query depth of GraphQL operations

    Example:

    >>> import strawberry
    >>> from strawberry.extensions import QueryDepthLimiter
    >>>
    >>> schema = strawberry.Schema(
    ...     Query,
    ...     extensions=[
    ...         QueryDepthLimiter(max_depth=4)
    ...     ]
    ... )

    Arguments:

    `max_depth: int`
        The maximum allowed depth for any operation in a GraphQL document.
    `ignore: Optional[List[IgnoreType]]`
        Stops recursive depth checking based on a field name.
        Either a string or regexp to match the name, or a function that returns
        a boolean.
    `callback: Optional[Callable[[Dict[str, int]], None]]`
        Called each time validation runs. Receives an Object which is a
        map of the depths for each operation.
    """

    def __init__(
        self,
        max_depth: int,
        ignore: Optional[List[IgnoreType]] = None,
        callback: Optional[Callable[[Dict[str, int]], None]] = None,
    ):
        # Build the depth-checking rule once and hand it to the generic
        # AddValidationRules machinery.
        super().__init__([create_validator(max_depth, ignore, callback)])
def create_validator(
    max_depth: int,
    ignore: Optional[List[IgnoreType]] = None,
    callback: Optional[Callable[[Dict[str, int]], None]] = None,
) -> Type[ValidationRule]:
    """Build a ``ValidationRule`` subclass enforcing ``max_depth``.

    The returned class closes over ``max_depth``, ``ignore`` and
    ``callback``; graphql-core instantiates it once per validation run.
    """

    class DepthLimitValidator(ValidationRule):
        def __init__(self, validation_context: ValidationContext):
            definitions = validation_context.document.definitions
            fragments = get_fragments(definitions)
            operations = get_queries_and_mutations(definitions)
            # Depth is computed per operation; errors are reported onto the
            # validation context inside determine_depth.
            query_depths = {
                operation_name: determine_depth(
                    node=operation,
                    fragments=fragments,
                    depth_so_far=0,
                    max_depth=max_depth,
                    context=validation_context,
                    operation_name=operation_name,
                    ignore=ignore,
                )
                for operation_name, operation in operations.items()
            }
            if callable(callback):
                callback(query_depths)
            super().__init__(validation_context)

    return DepthLimitValidator
def get_fragments(
    definitions: Iterable[DefinitionNode],
) -> Dict[str, FragmentDefinitionNode]:
    """Map fragment name -> fragment definition for every fragment in the
    document; later definitions with the same name win."""
    return {
        definition.name.value: definition
        for definition in definitions
        if isinstance(definition, FragmentDefinitionNode)
    }
# This will actually get both queries and mutations.
# We can basically treat those the same
def get_queries_and_mutations(
    definitions: Iterable[DefinitionNode],
) -> Dict[str, OperationDefinitionNode]:
    """Map operation name -> operation node; unnamed operations are keyed
    as "anonymous" (so at most one anonymous operation survives)."""
    return {
        (definition.name.value if definition.name else "anonymous"): definition
        for definition in definitions
        if isinstance(definition, OperationDefinitionNode)
    }
def determine_depth(
    node: Node,
    fragments: Dict[str, FragmentDefinitionNode],
    depth_so_far: int,
    max_depth: int,
    context: ValidationContext,
    operation_name: str,
    ignore: Optional[List[IgnoreType]] = None,
) -> int:
    """Recursively compute the selection depth rooted at ``node``.

    Instead of raising, a ``GraphQLError`` is reported on ``context`` as
    soon as ``depth_so_far`` exceeds ``max_depth`` (recursion stops there).
    Fragment spreads are resolved through ``fragments`` and do not add a
    level themselves; only fields with sub-selections increase the depth.
    Ignored and introspection fields count as depth 0.
    """
    if depth_so_far > max_depth:
        context.report_error(
            GraphQLError(
                f"'{operation_name}' exceeds maximum operation depth of {max_depth}",
                [node],
            )
        )
        return depth_so_far
    if isinstance(node, FieldNode):
        # by default, ignore the introspection fields which begin
        # with double underscores
        should_ignore = is_introspection_key(node.name.value) or is_ignored(
            node, ignore
        )
        if should_ignore or not node.selection_set:
            return 0
        # A field with sub-selections contributes one level plus the
        # deepest of its children.
        return 1 + max(
            map(
                lambda selection: determine_depth(
                    node=selection,
                    fragments=fragments,
                    depth_so_far=depth_so_far + 1,
                    max_depth=max_depth,
                    context=context,
                    operation_name=operation_name,
                    ignore=ignore,
                ),
                node.selection_set.selections,
            )
        )
    elif isinstance(node, FragmentSpreadNode):
        # A spread delegates to the named fragment at the *same* depth.
        return determine_depth(
            node=fragments[node.name.value],
            fragments=fragments,
            depth_so_far=depth_so_far,
            max_depth=max_depth,
            context=context,
            operation_name=operation_name,
            ignore=ignore,
        )
    elif isinstance(
        node, (InlineFragmentNode, FragmentDefinitionNode, OperationDefinitionNode)
    ):
        # Containers of selections: their depth is the deepest selection,
        # without adding a level for the container itself.
        return max(
            map(
                lambda selection: determine_depth(
                    node=selection,
                    fragments=fragments,
                    depth_so_far=depth_so_far,
                    max_depth=max_depth,
                    context=context,
                    operation_name=operation_name,
                    ignore=ignore,
                ),
                node.selection_set.selections,
            )
        )
    else:
        raise TypeError(f"Depth crawler cannot handle: {node.kind}")  # pragma: no cover
def is_ignored(node: FieldNode, ignore: Optional[List[IgnoreType]] = None) -> bool:
    """Return True when ``node``'s field name matches any ignore rule.

    A rule is either an exact string, a compiled regex (matched from the
    start of the name), or a predicate taking the field name.
    """
    if ignore is None:
        return False
    field_name = node.name.value
    for rule in ignore:
        if isinstance(rule, str):
            matched = field_name == rule
        elif isinstance(rule, re.Pattern):
            matched = bool(rule.match(field_name))
        elif callable(rule):
            matched = bool(rule(field_name))
        else:
            raise TypeError(f"Invalid ignore option: {rule}")
        if matched:
            return True
    return False
from __future__ import annotations
import hashlib
from inspect import isawaitable
from typing import TYPE_CHECKING, Any, Generator, Iterator, Optional
from ddtrace import tracer
from strawberry.extensions import SchemaExtension
from strawberry.extensions.tracing.utils import should_skip_tracing
from strawberry.utils.cached_property import cached_property
if TYPE_CHECKING:
from strawberry.types.execution import ExecutionContext
class DatadogTracingExtension(SchemaExtension):
    """Schema extension reporting operation, parsing, validation and
    per-resolver timings to Datadog via ``ddtrace``."""

    def __init__(
        self,
        *,
        execution_context: Optional[ExecutionContext] = None,
    ):
        if execution_context:
            self.execution_context = execution_context

    @cached_property
    def _resource_name(self):
        """Datadog resource name: ``<operation_name>:<query_hash>``, or the
        bare hash for anonymous operations."""
        assert self.execution_context.query
        query_hash = self.hash_query(self.execution_context.query)
        if self.execution_context.operation_name:
            return f"{self.execution_context.operation_name}:{query_hash}"
        return query_hash

    def hash_query(self, query: str) -> str:
        # md5 is used only to derive a short, stable identifier from the
        # query text — not for anything security-sensitive.
        return hashlib.md5(query.encode("utf-8")).hexdigest()

    def on_operation(self) -> Iterator[None]:
        """Open a span covering the whole GraphQL operation."""
        self._operation_name = self.execution_context.operation_name
        span_name = (
            f"{self._operation_name}" if self._operation_name else "Anonymous Query"
        )
        self.request_span = tracer.trace(
            span_name,
            resource=self._resource_name,
            span_type="graphql",
            service="strawberry",
        )
        self.request_span.set_tag("graphql.operation_name", self._operation_name)
        # Infer the operation type from the query's leading keyword; plain
        # queries need no keyword, so "query" is the default.
        operation_type = "query"
        assert self.execution_context.query
        if self.execution_context.query.strip().startswith("mutation"):
            operation_type = "mutation"
        if self.execution_context.query.strip().startswith("subscription"):
            operation_type = "subscription"
        self.request_span.set_tag("graphql.operation_type", operation_type)
        yield
        self.request_span.finish()

    def on_validate(self) -> Generator[None, None, None]:
        """Span around the validation phase."""
        self.validation_span = tracer.trace("Validation", span_type="graphql")
        yield
        self.validation_span.finish()

    def on_parse(self) -> Generator[None, None, None]:
        """Span around the parsing phase."""
        self.parsing_span = tracer.trace("Parsing", span_type="graphql")
        yield
        self.parsing_span.finish()

    async def resolve(self, _next, root, info, *args, **kwargs) -> Any:
        """Wrap each (non-skipped) field resolution in its own span."""
        if should_skip_tracing(_next, info):
            result = _next(root, info, *args, **kwargs)
            if isawaitable(result):  # pragma: no cover
                result = await result
            return result
        field_path = f"{info.parent_type}.{info.field_name}"
        with tracer.trace(f"Resolving: {field_path}", span_type="graphql") as span:
            span.set_tag("graphql.field_name", info.field_name)
            span.set_tag("graphql.parent_type", info.parent_type.name)
            span.set_tag("graphql.field_path", field_path)
            span.set_tag("graphql.path", ".".join(map(str, info.path.as_list())))
            result = _next(root, info, *args, **kwargs)
            if isawaitable(result):
                result = await result
            return result
class DatadogTracingExtensionSync(DatadogTracingExtension):
    """Synchronous variant of the Datadog tracing extension: resolver
    results are returned directly, never awaited."""

    def resolve(self, _next, root, info, *args, **kwargs) -> Any:
        if should_skip_tracing(_next, info):
            return _next(root, info, *args, **kwargs)
        field_path = f"{info.parent_type}.{info.field_name}"
        span_name = f"Resolving: {field_path}"
        with tracer.trace(span_name, span_type="graphql") as span:
            span.set_tag("graphql.field_name", info.field_name)
            span.set_tag("graphql.parent_type", info.parent_type.name)
            span.set_tag("graphql.field_path", field_path)
            path_repr = ".".join(map(str, info.path.as_list()))
            span.set_tag("graphql.path", path_repr)
            return _next(root, info, *args, **kwargs)
from __future__ import annotations
import enum
from copy import deepcopy
from inspect import isawaitable
from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, Optional
from opentelemetry import trace
from opentelemetry.trace import SpanKind
from strawberry.extensions import SchemaExtension
from strawberry.extensions.utils import get_path_from_info
from .utils import should_skip_tracing
if TYPE_CHECKING:
from graphql import GraphQLResolveInfo
from opentelemetry.trace import Span, Tracer
from strawberry.types.execution import ExecutionContext
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
ArgFilter = Callable[[Dict[str, Any], "GraphQLResolveInfo"], Dict[str, Any]]
class RequestStage(enum.Enum):
    """Stages of a GraphQL request that each get their own tracing span."""

    REQUEST = enum.auto()
    PARSING = enum.auto()
    VALIDATION = enum.auto()
class OpenTelemetryExtension(SchemaExtension):
    """Schema extension emitting OpenTelemetry spans for the overall
    request plus the parsing, validation and per-field resolve stages.

    ``arg_filter`` may be supplied to redact/trim resolver arguments before
    they are attached as span attributes.
    """

    _arg_filter: Optional[ArgFilter]
    # Maps each request stage to its currently-open span (per instance).
    _span_holder: Dict[RequestStage, Span]
    _tracer: Tracer

    def __init__(
        self,
        *,
        execution_context: Optional[ExecutionContext] = None,
        arg_filter: Optional[ArgFilter] = None,
    ):
        self._arg_filter = arg_filter
        self._tracer = trace.get_tracer("strawberry")
        # Create a fresh dict per instance. The previous class-level
        # ``= dict()`` default was shared by *every* instance, so concurrent
        # operations could overwrite each other's spans.
        self._span_holder = {}
        if execution_context:
            self.execution_context = execution_context

    def on_operation(self) -> Generator[None, None, None]:
        """Open the request-level span; rename it after execution if the
        operation name only became known during parsing."""
        self._operation_name = self.execution_context.operation_name
        span_name = (
            f"GraphQL Query: {self._operation_name}"
            if self._operation_name
            else "GraphQL Query"
        )
        self._span_holder[RequestStage.REQUEST] = self._tracer.start_span(
            span_name, kind=SpanKind.SERVER
        )
        self._span_holder[RequestStage.REQUEST].set_attribute("component", "graphql")
        if self.execution_context.query:
            self._span_holder[RequestStage.REQUEST].set_attribute(
                "query", self.execution_context.query
            )
        yield
        # If the client doesn't provide an operation name then GraphQL will
        # execute the first operation in the query string. This might be a named
        # operation but we don't know until the parsing stage has finished. If
        # that's the case we want to update the span name so that we have a more
        # useful name in our trace.
        if not self._operation_name and self.execution_context.operation_name:
            span_name = f"GraphQL Query: {self.execution_context.operation_name}"
            self._span_holder[RequestStage.REQUEST].update_name(span_name)
        self._span_holder[RequestStage.REQUEST].end()

    def on_validate(self) -> Generator[None, None, None]:
        """Span for the validation stage, parented to the request span."""
        ctx = trace.set_span_in_context(self._span_holder[RequestStage.REQUEST])
        self._span_holder[RequestStage.VALIDATION] = self._tracer.start_span(
            "GraphQL Validation",
            context=ctx,
        )
        yield
        self._span_holder[RequestStage.VALIDATION].end()

    def on_parse(self) -> Generator[None, None, None]:
        """Span for the parsing stage, parented to the request span."""
        ctx = trace.set_span_in_context(self._span_holder[RequestStage.REQUEST])
        self._span_holder[RequestStage.PARSING] = self._tracer.start_span(
            "GraphQL Parsing", context=ctx
        )
        yield
        self._span_holder[RequestStage.PARSING].end()

    def filter_resolver_args(
        self, args: Dict[str, Any], info: GraphQLResolveInfo
    ) -> Dict[str, Any]:
        """Apply the user-supplied arg filter before tagging, if configured."""
        if not self._arg_filter:
            return args
        # deepcopy so the filter cannot mutate the real resolver arguments.
        return self._arg_filter(deepcopy(args), info)

    def add_tags(
        self, span: Span, info: GraphQLResolveInfo, kwargs: Dict[str, Any]
    ) -> None:
        """Attach standard GraphQL attributes and (filtered) resolver
        arguments to ``span``."""
        graphql_path = ".".join(map(str, get_path_from_info(info)))
        span.set_attribute("component", "graphql")
        span.set_attribute("graphql.parentType", info.parent_type.name)
        span.set_attribute("graphql.path", graphql_path)
        if kwargs:
            filtered_kwargs = self.filter_resolver_args(kwargs, info)
            for kwarg, value in filtered_kwargs.items():
                span.set_attribute(f"graphql.param.{kwarg}", value)

    async def resolve(self, _next, root, info, *args, **kwargs) -> Any:
        """Wrap each (non-skipped) field resolution in its own span."""
        if should_skip_tracing(_next, info):
            result = _next(root, info, *args, **kwargs)
            if isawaitable(result):  # pragma: no cover
                result = await result
            return result
        with self._tracer.start_as_current_span(
            f"GraphQL Resolving: {info.field_name}",
            context=trace.set_span_in_context(self._span_holder[RequestStage.REQUEST]),
        ) as span:
            self.add_tags(span, info, kwargs)
            result = _next(root, info, *args, **kwargs)
            if isawaitable(result):
                result = await result
            return result
class OpenTelemetryExtensionSync(OpenTelemetryExtension):
    """Synchronous variant: resolver results are returned as-is, never
    awaited."""

    def resolve(self, _next, root, info, *args, **kwargs) -> Any:
        if should_skip_tracing(_next, info):
            return _next(root, info, *args, **kwargs)
        span_context = trace.set_span_in_context(
            self._span_holder[RequestStage.REQUEST]
        )
        with self._tracer.start_as_current_span(
            f"GraphQL Resolving: {info.field_name}", context=span_context
        ) as span:
            self.add_tags(span, info, kwargs)
            return _next(root, info, *args, **kwargs)
from __future__ import annotations
import dataclasses
import time
from datetime import datetime
from inspect import isawaitable
from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional
from strawberry.extensions import SchemaExtension
from strawberry.extensions.utils import get_path_from_info
from .utils import should_skip_tracing
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
if TYPE_CHECKING:
from strawberry.types.execution import ExecutionContext
@dataclasses.dataclass
class ApolloStepStats:
    """Start offset and duration (nanoseconds) of one processing phase."""

    start_offset: int
    duration: int

    def to_json(self) -> Dict[str, Any]:
        # Keys follow the Apollo tracing spec's camelCase naming.
        return dict(startOffset=self.start_offset, duration=self.duration)
@dataclasses.dataclass
class ApolloResolverStats:
    """Timing record for a single resolver invocation (nanoseconds)."""

    path: List[str]
    parent_type: Any
    field_name: str
    return_type: Any
    start_offset: int
    duration: Optional[int] = None

    def to_json(self) -> Dict[str, Any]:
        # NOTE(review): "field_name" is snake_case while the other keys are
        # camelCase (the Apollo tracing spec uses "fieldName"); kept as-is
        # to preserve the existing wire format.
        return dict(
            path=self.path,
            field_name=self.field_name,
            parentType=str(self.parent_type),
            returnType=str(self.return_type),
            startOffset=self.start_offset,
            duration=self.duration,
        )
@dataclasses.dataclass
class ApolloExecutionStats:
    """Collection of per-resolver timing records for one execution."""

    resolvers: List[ApolloResolverStats]

    def to_json(self) -> Dict[str, Any]:
        serialized = [entry.to_json() for entry in self.resolvers]
        return {"resolvers": serialized}
@dataclasses.dataclass
class ApolloTracingStats:
    """Aggregate Apollo-tracing payload for one GraphQL operation."""

    start_time: datetime
    end_time: datetime
    duration: int
    execution: ApolloExecutionStats
    validation: ApolloStepStats
    parsing: ApolloStepStats
    version: int = 1

    def to_json(self) -> Dict[str, Any]:
        # Timestamps are rendered with the module-level DATETIME_FORMAT
        # (ISO-8601 with a trailing "Z").
        return dict(
            version=self.version,
            startTime=self.start_time.strftime(DATETIME_FORMAT),
            endTime=self.end_time.strftime(DATETIME_FORMAT),
            duration=self.duration,
            execution=self.execution.to_json(),
            validation=self.validation.to_json(),
            parsing=self.parsing.to_json(),
        )
class ApolloTracingExtension(SchemaExtension):
    """Collects Apollo-tracing timing data (operation, parse, validate and
    per-resolver) and exposes it under the ``tracing`` result key."""

    def __init__(self, execution_context: ExecutionContext):
        self._resolver_stats: List[ApolloResolverStats] = []
        self.execution_context = execution_context

    def on_operation(self) -> Generator[None, None, None]:
        # Wall-clock datetimes appear in the payload; the monotonic perf
        # counter is used for all duration arithmetic.
        self.start_timestamp = self.now()
        # NOTE(review): utcnow() is naive; DATETIME_FORMAT appends a literal
        # "Z", which is correct only because utcnow() is UTC by construction.
        self.start_time = datetime.utcnow()
        yield
        self.end_timestamp = self.now()
        self.end_time = datetime.utcnow()

    def on_parse(self) -> Generator[None, None, None]:
        self._start_parsing = self.now()
        yield
        self._end_parsing = self.now()

    def on_validate(self) -> Generator[None, None, None]:
        self._start_validation = self.now()
        yield
        self._end_validation = self.now()

    def now(self) -> int:
        """Current monotonic time in nanoseconds."""
        return time.perf_counter_ns()

    @property
    def stats(self) -> ApolloTracingStats:
        """Aggregate all recorded timings into an ApolloTracingStats."""
        return ApolloTracingStats(
            start_time=self.start_time,
            end_time=self.end_time,
            duration=self.end_timestamp - self.start_timestamp,
            execution=ApolloExecutionStats(self._resolver_stats),
            validation=ApolloStepStats(
                start_offset=self._start_validation - self.start_timestamp,
                duration=self._end_validation - self._start_validation,
            ),
            parsing=ApolloStepStats(
                start_offset=self._start_parsing - self.start_timestamp,
                duration=self._end_parsing - self._start_parsing,
            ),
        )

    def get_results(self) -> Dict[str, Dict[str, Any]]:
        """Extension results merged into the GraphQL response."""
        return {"tracing": self.stats.to_json()}

    async def resolve(self, _next, root, info, *args, **kwargs) -> Any:
        """Time one resolver call; stats are recorded even when it raises."""
        if should_skip_tracing(_next, info):
            result = _next(root, info, *args, **kwargs)
            if isawaitable(result):
                result = await result  # pragma: no cover
            return result
        start_timestamp = self.now()
        resolver_stats = ApolloResolverStats(
            path=get_path_from_info(info),
            field_name=info.field_name,
            parent_type=info.parent_type,
            return_type=info.return_type,
            start_offset=start_timestamp - self.start_timestamp,
        )
        try:
            result = _next(root, info, *args, **kwargs)
            if isawaitable(result):
                result = await result
            return result
        finally:
            end_timestamp = self.now()
            resolver_stats.duration = end_timestamp - start_timestamp
            self._resolver_stats.append(resolver_stats)
class ApolloTracingExtensionSync(ApolloTracingExtension):
    """Synchronous variant of ApolloTracingExtension (no awaiting)."""

    def resolve(self, _next, root, info, *args, **kwargs) -> Any:
        if should_skip_tracing(_next, info):
            return _next(root, info, *args, **kwargs)
        started_at = self.now()
        stats = ApolloResolverStats(
            path=get_path_from_info(info),
            field_name=info.field_name,
            parent_type=info.parent_type,
            return_type=info.return_type,
            start_offset=started_at - self.start_timestamp,
        )
        try:
            return _next(root, info, *args, **kwargs)
        finally:
            # Record the duration even when the resolver raises.
            stats.duration = self.now() - started_at
            self._resolver_stats.append(stats)
from __future__ import annotations
import json
import warnings
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, Union
from sanic.exceptions import NotFound, SanicException, ServerError
from sanic.response import HTTPResponse, html
from sanic.views import HTTPMethodView
from strawberry.exceptions import MissingQueryError
from strawberry.file_uploads.utils import replace_placeholders_with_files
from strawberry.http import (
parse_query_params,
parse_request_data,
process_result,
)
from strawberry.http.temporal_response import TemporalResponse
from strawberry.sanic.graphiql import should_render_graphiql
from strawberry.sanic.utils import convert_request_to_files_dict
from strawberry.schema.exceptions import InvalidOperationTypeError
from strawberry.types.graphql import OperationType
from strawberry.utils.graphiql import get_graphiql_html
if TYPE_CHECKING:
from typing_extensions import Literal
from sanic.request import Request
from strawberry.http import GraphQLHTTPResponse, GraphQLRequestData
from strawberry.schema import BaseSchema
from strawberry.types import ExecutionResult
from .context import StrawberrySanicContext
class GraphQLView(HTTPMethodView):
    """
    Class based view to handle GraphQL HTTP Requests

    Args:
        schema: strawberry.Schema
        graphiql: bool, default is True
        allow_queries_via_get: bool, default is True

    Returns:
        None

    Example:
        app.add_route(
            GraphQLView.as_view(schema=schema, graphiql=True),
            "/graphql"
        )
    """

    def __init__(
        self,
        schema: BaseSchema,
        graphiql: bool = True,
        allow_queries_via_get: bool = True,
        json_encoder: Optional[Type[json.JSONEncoder]] = None,
        json_dumps_params: Optional[Dict[str, Any]] = None,
    ):
        self.schema = schema
        self.graphiql = graphiql
        self.allow_queries_via_get = allow_queries_via_get
        self.json_encoder = json_encoder
        self.json_dumps_params = json_dumps_params
        # Both JSON customisation knobs are deprecated; subclasses should
        # override ``encode_json`` instead.
        if self.json_encoder is not None:
            warnings.warn(
                "json_encoder is deprecated, override encode_json instead",
                DeprecationWarning,
                stacklevel=2,
            )
        if self.json_dumps_params is not None:
            warnings.warn(
                "json_dumps_params is deprecated, override encode_json instead",
                DeprecationWarning,
                stacklevel=2,
            )
            # json.dumps needs an encoder class when extra params are passed.
            self.json_encoder = json.JSONEncoder

    def get_root_value(self) -> Any:
        """Root value handed to the schema on execution; override to customise."""
        return None

    async def get_context(
        self, request: Request, response: TemporalResponse
    ) -> StrawberrySanicContext:
        """Build the per-request context handed to resolvers."""
        return {"request": request, "response": response}

    def render_template(self, template: str) -> HTTPResponse:
        """Wrap the GraphiQL HTML in a Sanic HTML response."""
        return html(template)

    async def process_result(
        self, request: Request, result: ExecutionResult
    ) -> GraphQLHTTPResponse:
        """Convert the execution result to a JSON-serialisable dict."""
        return process_result(result)

    async def get(self, request: Request) -> HTTPResponse:
        """Handle GET: run a query from query-string params, or serve the
        GraphiQL page; 404 otherwise."""
        if request.args:
            # Sanic request.args uses urllib.parse.parse_qs
            # returns a dictionary where the keys are the unique variable names
            # and the values are a list of values for each variable name
            # Enforcing using the first value
            query_data = {
                variable_name: value[0] for variable_name, value in request.args.items()
            }
            try:
                data = parse_query_params(query_data)
            except json.JSONDecodeError:
                raise ServerError(
                    "Unable to parse request body as JSON", status_code=400
                )
            request_data = parse_request_data(data)
            return await self.execute_request(
                request=request, request_data=request_data, method="GET"
            )
        elif should_render_graphiql(self.graphiql, request):
            template = get_graphiql_html(False)
            return self.render_template(template=template)
        raise NotFound()

    async def get_response(
        self, response_data: GraphQLHTTPResponse, context: StrawberrySanicContext
    ) -> HTTPResponse:
        """Serialise ``response_data``, honouring any status code a resolver
        set on the temporal response stored in the context."""
        status_code = 200
        if "response" in context and context["response"]:
            status_code = context["response"].status_code
        data = self.encode_json(response_data)
        return HTTPResponse(
            data,
            status=status_code,
            content_type="application/json",
        )

    def encode_json(self, response_data: GraphQLHTTPResponse) -> str:
        """Serialise the response dict to JSON, honouring the deprecated
        encoder/params knobs when configured."""
        if self.json_dumps_params:
            assert self.json_encoder
            return json.dumps(
                response_data, cls=self.json_encoder, **self.json_dumps_params
            )
        if self.json_encoder:
            return json.dumps(response_data, cls=self.json_encoder)
        return json.dumps(response_data)

    async def post(self, request: Request) -> HTTPResponse:
        """Handle POST: execute the GraphQL request from the body."""
        request_data = self.get_request_data(request)
        return await self.execute_request(
            request=request, request_data=request_data, method="POST"
        )

    async def execute_request(
        self,
        request: Request,
        request_data: GraphQLRequestData,
        method: Union[Literal["GET"], Literal["POST"]],
    ) -> HTTPResponse:
        """Execute the query against the schema and build the HTTP response.

        Raises:
            ServerError: 400 on an operation type not allowed for ``method``
                or a missing query.
        """
        context = await self.get_context(request, TemporalResponse())
        root_value = self.get_root_value()
        allowed_operation_types = OperationType.from_http(method)
        # Queries may additionally be disallowed over GET via configuration.
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}
        try:
            result = await self.schema.execute(
                query=request_data.query,
                variable_values=request_data.variables,
                context_value=context,
                root_value=root_value,
                operation_name=request_data.operation_name,
                allowed_operation_types=allowed_operation_types,
            )
        except InvalidOperationTypeError as e:
            raise ServerError(
                e.as_http_error_reason(method=method), status_code=400
            ) from e
        except MissingQueryError:
            raise ServerError("No GraphQL query found in the request", status_code=400)
        response_data = await self.process_result(request, result)
        return await self.get_response(response_data, context)

    def get_request_data(self, request: Request) -> GraphQLRequestData:
        """Parse the request body into GraphQLRequestData (400 on bad JSON)."""
        try:
            data = self.parse_request(request)
        except json.JSONDecodeError:
            raise ServerError("Unable to parse request body as JSON", status_code=400)
        return parse_request_data(data)

    def parse_request(self, request: Request) -> Dict[str, Any]:
        """Decode the body as JSON or as a GraphQL multipart (file-upload)
        request; 415 for any other content type."""
        content_type = request.content_type or ""
        if "application/json" in content_type:
            return json.loads(request.body)
        elif content_type.startswith("multipart/form-data"):
            # GraphQL multipart request spec: "operations" holds the request
            # with file placeholders, "map" links placeholders to files.
            files = convert_request_to_files_dict(request)
            operations = json.loads(request.form.get("operations", "{}"))
            files_map = json.loads(request.form.get("map", "{}"))
            try:
                return replace_placeholders_with_files(operations, files_map, files)
            except KeyError:
                raise SanicException(
                    status_code=400, message="File(s) missing in form data"
                )
        raise ServerError("Unsupported Media Type", status_code=415)
from __future__ import annotations
import json
import warnings
from typing import TYPE_CHECKING, Dict, Mapping, Optional
from chalice.app import BadRequestError, Response
from strawberry.exceptions import MissingQueryError
from strawberry.http import (
parse_query_params,
parse_request_data,
process_result,
)
from strawberry.http.temporal_response import TemporalResponse
from strawberry.schema.exceptions import InvalidOperationTypeError
from strawberry.types.graphql import OperationType
from strawberry.utils.graphiql import get_graphiql_html
if TYPE_CHECKING:
from chalice.app import Request
from strawberry.http import GraphQLHTTPResponse
from strawberry.schema import BaseSchema
from strawberry.types import ExecutionResult
class GraphQLView:
    """Chalice view that serves a Strawberry GraphQL schema over HTTP."""

    def __init__(
        self,
        schema: BaseSchema,
        graphiql: bool = True,
        allow_queries_via_get: bool = True,
        **kwargs,
    ):
        # Accept the deprecated ``render_graphiql`` keyword for backwards
        # compatibility with older call sites.
        if "render_graphiql" in kwargs:
            self.graphiql = kwargs.pop("render_graphiql")
            warnings.warn(
                "The `render_graphiql` argument is deprecated. "
                "Use `graphiql` instead.",
                DeprecationWarning,
                stacklevel=2,
            )
        else:
            self.graphiql = graphiql
        self.allow_queries_via_get = allow_queries_via_get
        self._schema = schema

    def get_root_value(self, request: Request) -> Optional[object]:
        """Root value handed to the schema on execution; override to customise."""
        return None

    @staticmethod
    def render_graphiql() -> str:
        """
        Returns a string containing the html for the graphiql webpage.
        Returns:
            The GraphiQL html page as a string
        """
        return get_graphiql_html(subscription_enabled=False)

    @staticmethod
    def should_render_graphiql(graphiql: bool, request: Request) -> bool:
        """
        Do the headers indicate that the invoker has requested html?
        Args:
            graphiql: Whether GraphiQL rendering is enabled for this view
            request: The request whose ``accept`` header is inspected
        Returns:
            Whether html has been requested True for yes, False for no
        """
        if not graphiql:
            return False
        return any(
            supported_header in request.headers.get("accept", "")
            for supported_header in {"text/html", "*/*"}
        )

    @staticmethod
    def error_response(
        message: str,
        error_code: str,
        http_status_code: int,
        headers: Optional[Dict[str, str]] = None,
    ) -> Response:
        """
        A wrapper for error responses
        Returns:
            An error response with a ``{"Code", "Message"}`` body
        """
        body = {"Code": error_code, "Message": message}
        return Response(body=body, status_code=http_status_code, headers=headers)

    def get_context(
        self, request: Request, response: TemporalResponse
    ) -> Mapping[str, object]:
        """Build the per-request context handed to resolvers."""
        return {"request": request, "response": response}

    def process_result(
        self, request: Request, result: ExecutionResult
    ) -> GraphQLHTTPResponse:
        """Convert the execution result to a JSON-serialisable dict."""
        return process_result(result)

    def execute_request(self, request: Request) -> Response:
        """
        Parse the request process it with strawberry and return a response
        Args:
            request: The chalice request this contains the headers and body
        Returns:
            A chalice response
        """
        method = request.method
        if method not in {"POST", "GET"}:
            return self.error_response(
                error_code="MethodNotAllowedError",
                message="Unsupported method, must be of request type POST or GET",
                http_status_code=405,
            )
        content_type = request.headers.get("content-type", "")
        # Request payload resolution: a JSON body takes precedence; GET may
        # alternatively carry the query in query-string params or ask for
        # the GraphiQL page; anything else is a 404.
        if "application/json" in content_type:
            try:
                data = request.json_body
                if not (isinstance(data, dict)):
                    return self.error_response(
                        error_code="BadRequestError",
                        message=(
                            "Provide a valid graphql query "
                            "in the body of your request"
                        ),
                        http_status_code=400,
                    )
            except BadRequestError:
                return self.error_response(
                    error_code="BadRequestError",
                    message="Unable to parse request body as JSON",
                    http_status_code=400,
                )
        elif method == "GET" and request.query_params:
            try:
                data = parse_query_params(request.query_params)  # type: ignore
            except json.JSONDecodeError:
                return self.error_response(
                    error_code="BadRequestError",
                    message="Unable to parse request body as JSON",
                    http_status_code=400,
                )
        elif method == "GET" and self.should_render_graphiql(self.graphiql, request):
            return Response(
                body=self.render_graphiql(),
                headers={"content-type": "text/html"},
                status_code=200,
            )
        else:
            return self.error_response(
                error_code="NotFoundError",
                message="Not found",
                http_status_code=404,
            )
        request_data = parse_request_data(data)
        allowed_operation_types = OperationType.from_http(method)
        # Queries may additionally be disallowed over GET via configuration.
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}
        context = self.get_context(request, response=TemporalResponse())
        try:
            result: ExecutionResult = self._schema.execute_sync(
                request_data.query,
                variable_values=request_data.variables,
                context_value=context,
                operation_name=request_data.operation_name,
                root_value=self.get_root_value(request),
                allowed_operation_types=allowed_operation_types,
            )
        except InvalidOperationTypeError as e:
            return self.error_response(
                error_code="BadRequestError",
                message=e.as_http_error_reason(method),
                http_status_code=400,
            )
        except MissingQueryError:
            return self.error_response(
                error_code="BadRequestError",
                message="No GraphQL query found in the request",
                http_status_code=400,
            )
        http_result: GraphQLHTTPResponse = self.process_result(request, result)
        # Resolvers may have set a status code on the temporal response.
        status_code = 200
        if "response" in context:
            # TODO: we might want to use typed dict for context
            status_code = context["response"].status_code  # type: ignore[attr-defined]
        return Response(body=self.encode_json(http_result), status_code=status_code)

    def encode_json(self, response_data: GraphQLHTTPResponse) -> str:
        """Serialise the response dict to JSON; override to customise."""
        return json.dumps(response_data)
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Dict
from flask import Response, render_template_string, request
if TYPE_CHECKING:
from flask.typing import ResponseReturnValue
from strawberry.http import GraphQLHTTPResponse
from strawberry.schema.base import BaseSchema
from strawberry.types import ExecutionResult
from flask.views import View
from strawberry.exceptions import MissingQueryError
from strawberry.file_uploads.utils import replace_placeholders_with_files
from strawberry.flask.graphiql import should_render_graphiql
from strawberry.http import (
parse_query_params,
parse_request_data,
process_result,
)
from strawberry.schema.exceptions import InvalidOperationTypeError
from strawberry.types.graphql import OperationType
from strawberry.utils.graphiql import get_graphiql_html
class BaseGraphQLView(View):
    """Shared plumbing for the sync and async Flask GraphQL views."""

    # Flask dispatches only these HTTP verbs to this view.
    methods = ["GET", "POST"]

    def __init__(
        self,
        schema: BaseSchema,
        graphiql: bool = True,
        allow_queries_via_get: bool = True,
    ):
        self.allow_queries_via_get = allow_queries_via_get
        self.graphiql = graphiql
        self.schema = schema

    def render_template(self, template: str) -> str:
        """Render *template* through Flask's Jinja environment."""
        rendered = render_template_string(template)
        return rendered

    def encode_json(self, response_data: GraphQLHTTPResponse) -> str:
        """Serialize the GraphQL HTTP result mapping into a JSON string."""
        return json.dumps(response_data)
class GraphQLView(BaseGraphQLView):
    """Synchronous Flask view serving a GraphQL schema over HTTP GET/POST."""

    def get_root_value(self) -> object:
        """Root value handed to the executor; ``None`` unless overridden."""
        return None

    def get_context(self, response: Response) -> Dict[str, object]:
        """Execution context exposing the Flask request and mutable response."""
        return {"request": request, "response": response}

    def process_result(self, result: ExecutionResult) -> GraphQLHTTPResponse:
        """Convert the raw execution result into the HTTP payload mapping."""
        return process_result(result)

    def dispatch_request(self) -> ResponseReturnValue:
        """Handle one GraphQL HTTP request.

        Request bodies are accepted as JSON (POST), multipart form data
        (file uploads), or GET query parameters; GET may also serve the
        GraphiQL page when enabled. Errors in parsing yield 400, unknown
        GET requests 404, and other content types 415.
        """
        method = request.method
        content_type = request.content_type or ""
        # Reject anything but the two supported verbs up front.
        if request.method not in {"POST", "GET"}:
            return Response(
                "Unsupported method, must be of request type POST or GET", 405
            )
        # Plain JSON body (the common POST case).
        if "application/json" in content_type:
            try:
                data = json.loads(request.data)
            except json.JSONDecodeError:
                return Response(
                    status=400, response="Unable to parse request body as JSON"
                )
        # Multipart upload per the GraphQL multipart request spec:
        # "operations" holds the request JSON, "map" links files to variables.
        elif content_type.startswith("multipart/form-data"):
            try:
                operations = json.loads(request.form.get("operations", "{}"))
                files_map = json.loads(request.form.get("map", "{}"))
            except json.JSONDecodeError:
                return Response(
                    status=400, response="Unable to parse request body as JSON"
                )
            try:
                data = replace_placeholders_with_files(
                    operations, files_map, request.files
                )
            except KeyError:
                return Response(status=400, response="File(s) missing in form data")
        # Query via GET query string (?query=...&variables=...).
        elif method == "GET" and request.args:
            try:
                data = parse_query_params(request.args.to_dict())
            except json.JSONDecodeError:
                return Response(
                    status=400, response="Unable to parse request body as JSON"
                )
        # Bare GET from a browser: serve the GraphiQL page when enabled.
        elif method == "GET" and should_render_graphiql(self.graphiql, request):
            template = get_graphiql_html(False)
            return self.render_template(template=template)
        elif method == "GET":
            return Response(status=404)
        else:
            return Response("Unsupported Media Type", 415)
        request_data = parse_request_data(data)
        response = Response(status=200, content_type="application/json")
        context = self.get_context(response)
        # Restrict which operation types this HTTP method may execute
        # (e.g. optionally forbid queries over GET).
        allowed_operation_types = OperationType.from_http(method)
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}
        try:
            result = self.schema.execute_sync(
                request_data.query,
                variable_values=request_data.variables,
                context_value=context,
                operation_name=request_data.operation_name,
                root_value=self.get_root_value(),
                allowed_operation_types=allowed_operation_types,
            )
        except InvalidOperationTypeError as e:
            return Response(e.as_http_error_reason(method), 400)
        except MissingQueryError:
            return Response("No GraphQL query found in the request", 400)
        response_data = self.process_result(result)
        response.set_data(self.encode_json(response_data))
        return response
class AsyncGraphQLView(BaseGraphQLView):
    """Asynchronous Flask view serving a GraphQL schema over HTTP GET/POST."""

    methods = ["GET", "POST"]

    async def get_root_value(self) -> object:
        """Root value handed to the executor; ``None`` unless overridden."""
        return None

    async def get_context(self, response: Response) -> Dict[str, object]:
        """Execution context exposing the Flask request and mutable response."""
        return {"request": request, "response": response}

    async def process_result(self, result: ExecutionResult) -> GraphQLHTTPResponse:
        """Convert the raw execution result into the HTTP payload mapping."""
        return process_result(result)

    async def dispatch_request(self) -> ResponseReturnValue:  # type: ignore[override]
        """Async twin of ``GraphQLView.dispatch_request``.

        Identical request parsing; execution goes through the async
        ``schema.execute`` and the async context/root-value hooks.
        """
        method = request.method
        content_type = request.content_type or ""
        # Reject anything but the two supported verbs up front.
        if request.method not in {"POST", "GET"}:
            return Response(
                "Unsupported method, must be of request type POST or GET", 405
            )
        # Plain JSON body (the common POST case).
        if "application/json" in content_type:
            try:
                data = json.loads(request.data)
            except json.JSONDecodeError:
                return Response(
                    status=400, response="Unable to parse request body as JSON"
                )
        # Multipart upload per the GraphQL multipart request spec.
        elif content_type.startswith("multipart/form-data"):
            try:
                operations = json.loads(request.form.get("operations", "{}"))
                files_map = json.loads(request.form.get("map", "{}"))
            except json.JSONDecodeError:
                return Response(
                    status=400, response="Unable to parse request body as JSON"
                )
            try:
                data = replace_placeholders_with_files(
                    operations, files_map, request.files
                )
            except KeyError:
                return Response(status=400, response="File(s) missing in form data")
        # Query via GET query string (?query=...&variables=...).
        elif method == "GET" and request.args:
            try:
                data = parse_query_params(request.args.to_dict())
            except json.JSONDecodeError:
                return Response(
                    status=400, response="Unable to parse request body as JSON"
                )
        # Bare GET from a browser: serve the GraphiQL page when enabled.
        elif method == "GET" and should_render_graphiql(self.graphiql, request):
            template = get_graphiql_html(False)
            return self.render_template(template=template)
        elif method == "GET":
            return Response(status=404)
        else:
            return Response("Unsupported Media Type", 415)
        request_data = parse_request_data(data)
        response = Response(status=200, content_type="application/json")
        context = await self.get_context(response)
        # Restrict which operation types this HTTP method may execute.
        allowed_operation_types = OperationType.from_http(method)
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}
        root_value = await self.get_root_value()
        try:
            result = await self.schema.execute(
                request_data.query,
                variable_values=request_data.variables,
                context_value=context,
                operation_name=request_data.operation_name,
                root_value=root_value,
                allowed_operation_types=allowed_operation_types,
            )
        except InvalidOperationTypeError as e:
            return Response(e.as_http_error_reason(method), 400)
        except MissingQueryError:
            return Response("No GraphQL query found in the request", 400)
        response_data = await self.process_result(result)
        response.set_data(self.encode_json(response_data))
        return response
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Optional, Sequence, Union
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL, GRAPHQL_WS_PROTOCOL
from .base import ChannelsWSConsumer
from .graphql_transport_ws_handler import GraphQLTransportWSHandler
from .graphql_ws_handler import GraphQLWSHandler
if TYPE_CHECKING:
from strawberry.schema import BaseSchema
class GraphQLWSConsumer(ChannelsWSConsumer):
    """A channels websocket consumer for GraphQL

    This handles the connections, then hands off to the appropriate
    handler based on the subprotocol.

    To use this, place it in your ProtocolTypeRouter for your channels project, e.g:

    ```
    from strawberry.channels import GraphQLHttpRouter
    from channels.routing import ProtocolTypeRouter
    from django.core.asgi import get_asgi_application

    application = ProtocolTypeRouter({
        "http": URLRouter([
            re_path("^graphql", GraphQLHTTPRouter(schema=schema)),
            re_path("^", get_asgi_application()),
        ]),
        "websocket": URLRouter([
            re_path("^ws/graphql", GraphQLWebSocketRouter(schema=schema)),
        ]),
    })
    ```
    """

    graphql_transport_ws_handler_class = GraphQLTransportWSHandler
    graphql_ws_handler_class = GraphQLWSHandler

    # Assigned in `connect` once a supported subprotocol has been negotiated;
    # it stays unset when the handshake is rejected.
    _handler: Union[GraphQLWSHandler, GraphQLTransportWSHandler]

    def __init__(
        self,
        schema: BaseSchema,
        keep_alive: bool = False,
        keep_alive_interval: float = 1,
        debug: bool = False,
        subscription_protocols=(GRAPHQL_TRANSPORT_WS_PROTOCOL, GRAPHQL_WS_PROTOCOL),
        connection_init_wait_timeout: Optional[datetime.timedelta] = None,
    ):
        # Default to waiting one minute for the client's connection_init.
        if connection_init_wait_timeout is None:
            connection_init_wait_timeout = datetime.timedelta(minutes=1)
        self.connection_init_wait_timeout = connection_init_wait_timeout
        self.schema = schema
        self.keep_alive = keep_alive
        self.keep_alive_interval = keep_alive_interval
        self.debug = debug
        self.protocols = subscription_protocols
        super().__init__()

    def pick_preferred_protocol(
        self, accepted_subprotocols: Sequence[str]
    ) -> Optional[str]:
        """Return the first client-offered subprotocol we support, if any."""
        intersection = set(accepted_subprotocols) & set(self.protocols)
        # Respect the client's preference order, not ours.
        sorted_intersection = sorted(intersection, key=accepted_subprotocols.index)
        return next(iter(sorted_intersection), None)

    async def connect(self) -> None:
        """Negotiate a subprotocol and hand the socket to its handler."""
        preferred_protocol = self.pick_preferred_protocol(self.scope["subprotocols"])
        if preferred_protocol == GRAPHQL_TRANSPORT_WS_PROTOCOL:
            self._handler = self.graphql_transport_ws_handler_class(
                schema=self.schema,
                debug=self.debug,
                connection_init_wait_timeout=self.connection_init_wait_timeout,
                get_context=self.get_context,
                get_root_value=self.get_root_value,
                ws=self,
            )
        elif preferred_protocol == GRAPHQL_WS_PROTOCOL:
            self._handler = self.graphql_ws_handler_class(
                schema=self.schema,
                debug=self.debug,
                keep_alive=self.keep_alive,
                keep_alive_interval=self.keep_alive_interval,
                get_context=self.get_context,
                get_root_value=self.get_root_value,
                ws=self,
            )
        else:
            # Subprotocol not acceptable
            return await self.close(code=4406)
        await self._handler.handle()
        return None

    async def receive(self, *args, **kwargs) -> None:
        # Overriding this so that we can pass the errors to handle_invalid_message
        try:
            await super().receive(*args, **kwargs)
        except ValueError as e:
            # No handler exists if the handshake was rejected in `connect`;
            # guard so an early invalid frame can't raise AttributeError.
            handler = getattr(self, "_handler", None)
            if handler is not None:
                await handler.handle_invalid_message(str(e))

    async def receive_json(self, content, **kwargs) -> None:
        await self._handler.handle_message(content)

    async def disconnect(self, code) -> None:
        # `_handler` is never assigned when `connect` rejected the
        # subprotocol (closed with 4406); in that case there is nothing
        # to tear down and dereferencing it would raise AttributeError.
        handler = getattr(self, "_handler", None)
        if handler is not None:
            await handler.handle_disconnect(code)
from __future__ import annotations
from contextlib import suppress
from typing import TYPE_CHECKING, Any, Optional
from strawberry.subscriptions import GRAPHQL_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_ws.handlers import BaseGraphQLWSHandler
if TYPE_CHECKING:
from strawberry.channels.handlers.base import ChannelsWSConsumer
from strawberry.schema import BaseSchema
from strawberry.subscriptions.protocols.graphql_ws.types import OperationMessage
class GraphQLWSHandler(BaseGraphQLWSHandler):
    """Legacy graphql-ws protocol handler bridging to a channels consumer."""

    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        keep_alive: bool,
        keep_alive_interval: float,
        get_context,
        get_root_value,
        ws: ChannelsWSConsumer,
    ):
        super().__init__(schema, debug, keep_alive, keep_alive_interval)
        self._ws = ws
        self._get_root_value = get_root_value
        self._get_context = get_context

    async def get_context(self) -> Any:
        """Delegate context creation to the consumer-provided factory."""
        factory = self._get_context
        return await factory(
            request=self._ws, connection_params=self.connection_params
        )

    async def get_root_value(self) -> Any:
        """Delegate root-value creation to the consumer-provided factory."""
        return await self._get_root_value(request=self._ws)

    async def send_json(self, data: OperationMessage) -> None:
        """Forward an outgoing protocol message to the websocket."""
        await self._ws.send_json(data)

    async def close(self, code: int = 1000, reason: Optional[str] = None) -> None:
        # Close messages are not part of the ASGI ref yet, so the reason
        # cannot be forwarded; only the code is sent.
        await self._ws.close(code=code)

    async def handle_request(self) -> Any:
        """Accept the websocket with the legacy graphql-ws subprotocol."""
        await self._ws.accept(subprotocol=GRAPHQL_WS_PROTOCOL)

    async def handle_disconnect(self, code) -> None:
        """Cancel the keep-alive task and tear down every live operation."""
        task = self.keep_alive_task
        if task:
            task.cancel()
            with suppress(BaseException):
                await task
        for operation_id in list(self.subscriptions):
            await self.cleanup_operation(operation_id)

    async def handle_invalid_message(self, error_message: str) -> None:
        # This is not part of the BaseGraphQLWSHandler's interface, but the
        # channels integration is a high level wrapper that forwards this to
        # both us and the BaseGraphQLTransportWSHandler.
        pass
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_transport_ws.handlers import (
BaseGraphQLTransportWSHandler,
)
if TYPE_CHECKING:
from datetime import timedelta
from strawberry.channels.handlers.base import ChannelsWSConsumer
from strawberry.schema import BaseSchema
class GraphQLTransportWSHandler(BaseGraphQLTransportWSHandler):
    """graphql-transport-ws protocol handler bridging to a channels consumer."""

    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        connection_init_wait_timeout: timedelta,
        get_context,
        get_root_value,
        ws: ChannelsWSConsumer,
    ):
        super().__init__(schema, debug, connection_init_wait_timeout)
        self._ws = ws
        self._get_root_value = get_root_value
        self._get_context = get_context

    async def get_context(self) -> Any:
        """Delegate context creation to the consumer-provided factory."""
        factory = self._get_context
        return await factory(
            request=self._ws, connection_params=self.connection_params
        )

    async def get_root_value(self) -> Any:
        """Delegate root-value creation to the consumer-provided factory."""
        return await self._get_root_value(request=self._ws)

    async def send_json(self, data: dict) -> None:
        """Forward an outgoing protocol message to the websocket."""
        await self._ws.send_json(data)

    async def close(self, code: int = 1000, reason: Optional[str] = None) -> None:
        # FIXME: We are using `self._ws.base_send` directly instead of `self._ws.close`
        # because the later doesn't accept the `reason` argument.
        close_event = {
            "type": "websocket.close",
            "code": code,
            "reason": reason or "",
        }
        await self._ws.base_send(close_event)

    async def handle_request(self) -> Any:
        """Accept the websocket with the graphql-transport-ws subprotocol."""
        await self._ws.accept(subprotocol=GRAPHQL_TRANSPORT_WS_PROTOCOL)

    async def handle_disconnect(self, code) -> None:
        """Tear down every live operation, then reap finished tasks."""
        for operation_id in list(self.subscriptions):
            await self.cleanup_operation(operation_id)
        await self.reap_completed_tasks()
import asyncio
import contextlib
from collections import defaultdict
from typing import (
Any,
AsyncGenerator,
Awaitable,
Callable,
DefaultDict,
Dict,
List,
Optional,
Sequence,
)
from typing_extensions import Literal, Protocol, TypedDict
from weakref import WeakSet
from channels.consumer import AsyncConsumer
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from strawberry.channels.context import StrawberryChannelsContext
# Message dict flowing through the channels layer; only a ``type`` key is
# recognized, and even that one is optional (``total=False``).
ChannelsMessage = TypedDict("ChannelsMessage", {"type": str}, total=False)
class ChannelsLayer(Protocol):  # pragma: no cover
    """Channels layer spec.

    Structural type for the channel layer this consumer talks to; the
    ``groups``/``flush`` members are only present when the corresponding
    extension is advertised in ``extensions``.

    Based on: https://channels.readthedocs.io/en/stable/channel_layer_spec.html
    """

    # Default channels API (always present).
    extensions: List[Literal["groups", "flush"]]

    async def send(self, channel: str, message: dict) -> None:
        ...

    async def receive(self, channel: str) -> dict:
        ...

    async def new_channel(self, prefix: str = ...) -> str:
        ...

    # If groups extension is supported
    group_expiry: int

    async def group_add(self, group: str, channel: str) -> None:
        ...

    async def group_discard(self, group: str, channel: str) -> None:
        ...

    async def group_send(self, group: str, message: dict) -> None:
        ...

    # If flush extension is supported
    async def flush(self) -> None:
        ...
class ChannelsConsumer(AsyncConsumer):
    """Base channels async consumer.

    Adds per-message-type listen queues on top of ``AsyncConsumer`` so that
    resolvers can subscribe to channel-layer messages via ``channel_listen``.
    """

    channel_name: str
    channel_layer: Optional[ChannelsLayer]
    channel_receive: Callable[[], Awaitable[dict]]

    def __init__(self, *args, **kwargs):
        # Queues are held weakly: when a `channel_listen` generator is
        # garbage collected, its queue silently drops out of the set.
        self.listen_queues: DefaultDict[str, WeakSet[asyncio.Queue]] = defaultdict(
            WeakSet
        )
        super().__init__(*args, **kwargs)

    @property
    def headers(self) -> Dict[str, str]:
        """ASGI request headers decoded to a dict with lower-cased names.

        Duplicate header names keep the last occurrence (dict semantics).
        """
        return {
            header_name.decode().lower(): header_value.decode()
            for header_name, header_value in self.scope["headers"]
        }

    async def get_root_value(self, request: Optional["ChannelsConsumer"] = None) -> Any:
        """Root value for GraphQL execution; ``None`` unless overridden."""
        return None

    async def get_context(
        self,
        request: Optional["ChannelsConsumer"] = None,
        connection_params: Optional[Dict[str, Any]] = None,
    ) -> StrawberryChannelsContext:
        """Build the execution context wrapping this consumer (or *request*)."""
        return StrawberryChannelsContext(
            request=request or self, connection_params=connection_params
        )

    async def dispatch(self, message: ChannelsMessage) -> None:
        # AsyncConsumer will try to get a function for message["type"] to handle
        # for both http/websocket types and also for layers communication.
        # In case the type isn't one of those, pass it to the listen queue so
        # that it can be consumed by self.channel_listen
        type_ = message.get("type", "")
        if type_ and not type_.startswith(("http.", "websocket.")):
            for queue in self.listen_queues[type_]:
                queue.put_nowait(message)
            return
        await super().dispatch(message)

    async def channel_listen(
        self,
        type: str,
        *,
        timeout: Optional[float] = None,
        groups: Sequence[str] = (),
    ) -> AsyncGenerator[Any, None]:
        """Listen for messages sent to this consumer.

        Utility to listen for channels messages for this consumer inside
        a resolver (usually inside a subscription).

        Parameters:
            type:
                The type of the message to wait for.
            timeout:
                An optional timeout to wait for each subsequent message
            groups:
                An optional sequence of groups to receive messages from.
                When passing this parameter, the groups will be registered
                using `self.channel_layer.group_add` at the beginning of the
                execution and then discarded using `self.channel_layer.group_discard`
                at the end of the execution.
        """
        if self.channel_layer is None:
            raise RuntimeError(
                "Layers integration is required listening for channels.\n"
                "Check https://channels.readthedocs.io/en/stable/topics/channel_layers.html "  # noqa:E501
                "for more information"
            )
        added_groups = []
        try:
            # This queue will receive incoming messages for this generator instance
            queue: asyncio.Queue = asyncio.Queue()
            # Create a weak reference to the queue. Once we leave the current scope, it
            # will be garbage collected
            self.listen_queues[type].add(queue)
            for group in groups:
                await self.channel_layer.group_add(group, self.channel_name)
                added_groups.append(group)
            while True:
                awaitable = queue.get()
                if timeout is not None:
                    awaitable = asyncio.wait_for(awaitable, timeout)
                try:
                    yield await awaitable
                except asyncio.TimeoutError:
                    # TODO: shall we add log here and maybe in the suppress below?
                    return
        finally:
            # Only groups that were successfully added are discarded.
            for group in added_groups:
                with contextlib.suppress(Exception):
                    await self.channel_layer.group_discard(group, self.channel_name)
class ChannelsWSConsumer(ChannelsConsumer, AsyncJsonWebsocketConsumer):
    """Base channels websocket async consumer.

    Combines the strawberry consumer helpers with channels' JSON websocket
    consumer; concrete GraphQL websocket consumers derive from this class.
    """
from __future__ import annotations
import dataclasses
import json
from typing import TYPE_CHECKING, Any, Optional
from urllib.parse import parse_qs
from channels.db import database_sync_to_async
from channels.generic.http import AsyncHttpConsumer
from strawberry.channels.context import StrawberryChannelsContext
from strawberry.exceptions import MissingQueryError
from strawberry.http import (
parse_query_params,
parse_request_data,
process_result,
)
from strawberry.schema.exceptions import InvalidOperationTypeError
from strawberry.types.graphql import OperationType
from strawberry.utils.graphiql import get_graphiql_html
from .base import ChannelsConsumer
if TYPE_CHECKING:
from strawberry.http import GraphQLHTTPResponse, GraphQLRequestData
from strawberry.schema import BaseSchema
from strawberry.types import ExecutionResult
class MethodNotAllowed(Exception):
    """Raised when a request uses an HTTP method other than GET or POST."""
class ExecutionError(Exception):
    """Raised when a GraphQL request cannot be parsed or executed."""
@dataclasses.dataclass
class Result:
    """Plain HTTP response produced by the GraphQL HTTP consumer."""

    # Raw body bytes to send back to the client.
    response: bytes
    # HTTP status code; defaults to success.
    status: int = 200
    # MIME type of ``response``; GraphQL results are JSON by default.
    content_type: str = "application/json"
class GraphQLHTTPConsumer(ChannelsConsumer, AsyncHttpConsumer):
    """A consumer to provide a view for GraphQL over HTTP.

    To use this, place it in your ProtocolTypeRouter for your channels project:

    ```
    from strawberry.channels import GraphQLHttpRouter
    from channels.routing import ProtocolTypeRouter
    from django.core.asgi import get_asgi_application

    application = ProtocolTypeRouter({
        "http": URLRouter([
            re_path("^graphql", GraphQLHTTPRouter(schema=schema)),
            re_path("^", get_asgi_application()),
        ]),
        "websocket": URLRouter([
            re_path("^ws/graphql", GraphQLWebSocketRouter(schema=schema)),
        ]),
    })
    ```
    """

    def __init__(
        self,
        schema: BaseSchema,
        graphiql: bool = True,
        allow_queries_via_get: bool = True,
        subscriptions_enabled: bool = True,
        **kwargs,
    ):
        self.schema = schema
        self.graphiql = graphiql
        self.allow_queries_via_get = allow_queries_via_get
        self.subscriptions_enabled = subscriptions_enabled
        super().__init__(**kwargs)

    async def handle(self, body: bytes) -> None:
        """Entry point invoked by ``AsyncHttpConsumer`` with the request body.

        Maps internal exceptions to HTTP error responses: MethodNotAllowed
        -> 405, InvalidOperationTypeError -> 406, ExecutionError -> 500.
        """
        try:
            if self.scope["method"] == "GET":
                result = await self.get(body)
            elif self.scope["method"] == "POST":
                result = await self.post(body)
            else:
                raise MethodNotAllowed()
        except MethodNotAllowed:
            await self.send_response(
                405,
                b"Method not allowed",
                headers=[(b"Allow", b"GET, POST")],
            )
        except InvalidOperationTypeError as e:
            error_str = e.as_http_error_reason(self.scope["method"])
            await self.send_response(
                406,
                error_str.encode(),
            )
        except ExecutionError as e:
            await self.send_response(
                500,
                str(e).encode(),
            )
        else:
            await self.send_response(
                result.status,
                result.response,
                headers=[(b"Content-Type", result.content_type.encode())],
            )

    async def get(self, body: bytes) -> Result:
        """Serve GET: the GraphiQL page, or a query via the query string.

        NOTE(review): ``parse_query_params`` can raise ``json.JSONDecodeError``
        for malformed ``variables``; that error is not converted into an HTTP
        error response here and would propagate — confirm intended.
        """
        if self.should_render_graphiql():
            return await self.render_graphiql(body)
        elif self.scope.get("query_string"):
            # parse_qs yields lists; keep only the first value per key.
            params = parse_query_params(
                {
                    k: v[0]
                    for k, v in parse_qs(self.scope["query_string"].decode()).items()
                }
            )
            try:
                result = await self.execute(parse_request_data(params))
            except MissingQueryError as e:
                raise ExecutionError("No GraphQL query found in the request") from e
            return Result(response=json.dumps(result).encode())
        else:
            raise MethodNotAllowed()

    async def post(self, body: bytes) -> Result:
        """Serve POST: parse the JSON (or multipart) body and execute it."""
        request_data = await self.parse_body(body)
        try:
            result = await self.execute(request_data)
        except MissingQueryError as e:
            raise ExecutionError("No GraphQL query found in the request") from e
        return Result(response=json.dumps(result).encode())

    async def parse_body(self, body: bytes) -> GraphQLRequestData:
        """Decode the request body; raises ``ExecutionError`` on bad JSON."""
        if self.headers.get("content-type", "").startswith("multipart/form-data"):
            return await self.parse_multipart_body(body)
        try:
            data = json.loads(body)
        except json.JSONDecodeError as e:
            raise ExecutionError("Unable to parse request body as JSON") from e
        return parse_request_data(data)

    async def parse_multipart_body(self, body: bytes) -> GraphQLRequestData:
        # Multipart uploads are not implemented for this consumer.
        raise ExecutionError("Unable to parse the multipart body")

    async def execute(self, request_data: GraphQLRequestData) -> GraphQLHTTPResponse:
        """Run the query against the schema and shape the HTTP payload."""
        context = await self.get_context()
        root_value = await self.get_root_value()
        method = self.scope["method"]
        # Restrict which operation types this HTTP method may execute.
        allowed_operation_types = OperationType.from_http(method)
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}
        result = await self.schema.execute(
            query=request_data.query,
            root_value=root_value,
            variable_values=request_data.variables,
            context_value=context,
            operation_name=request_data.operation_name,
            allowed_operation_types=allowed_operation_types,
        )
        return await self.process_result(result)

    async def process_result(self, result: ExecutionResult) -> GraphQLHTTPResponse:
        """Convert the raw execution result into the HTTP payload mapping."""
        return process_result(result)

    async def render_graphiql(self, body) -> Result:
        """Return the GraphiQL HTML page."""
        html = get_graphiql_html(self.subscriptions_enabled)
        return Result(response=html.encode(), content_type="text/html")

    def should_render_graphiql(self) -> bool:
        """True when GraphiQL is enabled and the client accepts HTML."""
        accept_list = self.headers.get("accept", "").split(",")
        return self.graphiql and any(
            accepted in accept_list for accepted in ["text/html", "*/*"]
        )
class SyncGraphQLHTTPConsumer(GraphQLHTTPConsumer):
    """Synchronous version of the HTTPConsumer.

    This is the same as `GraphQLHTTPConsumer`, but it can be used with
    synchronous schemas (i.e. the schema's resolvers are expected to be
    synchronous and not asynchronous).
    """

    def get_root_value(self, request: Optional[ChannelsConsumer] = None) -> Any:
        """Root value for GraphQL execution; ``None`` unless overridden."""
        return None

    def get_context(  # type: ignore[override]
        self,
        request: Optional[ChannelsConsumer] = None,
    ) -> StrawberryChannelsContext:
        """Build the execution context wrapping this consumer (or *request*)."""
        return StrawberryChannelsContext(request=request or self)

    def process_result(  # type:ignore [override]
        self, result: ExecutionResult
    ) -> GraphQLHTTPResponse:
        """Convert the raw execution result into the HTTP payload mapping."""
        return process_result(result)

    # Sync channels is actually async, but it uses database_sync_to_async to call
    # handlers in a threadpool. Check SyncConsumer's documentation for more info:
    # https://github.com/django/channels/blob/main/channels/consumer.py#L104
    @database_sync_to_async
    def execute(self, request_data: GraphQLRequestData) -> GraphQLHTTPResponse:
        """Run the query synchronously (in a threadpool, see note above)."""
        context = self.get_context(self)
        root_value = self.get_root_value(self)
        method = self.scope["method"]
        # Restrict which operation types this HTTP method may execute.
        allowed_operation_types = OperationType.from_http(method)
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}
        result = self.schema.execute_sync(
            query=request_data.query,
            root_value=root_value,
            variable_values=request_data.variables,
            context_value=context,
            operation_name=request_data.operation_name,
            allowed_operation_types=allowed_operation_types,
        )
        return self.process_result(result)
from __future__ import annotations
import dataclasses
from typing import (
TYPE_CHECKING,
Any,
Callable,
Iterable,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
overload,
)
from strawberry.field import field as base_field
from strawberry.unset import UNSET
if TYPE_CHECKING:
from typing_extensions import Literal
from strawberry.field import _RESOLVER_TYPE, StrawberryField
from strawberry.permission import BasePermission
T = TypeVar("T")
# Overload: keyword-only call with a `resolver` — the returned value types as
# the resolver's return type, so annotations line up for type checkers.
@overload
def field(
    *,
    resolver: _RESOLVER_TYPE[T],
    name: Optional[str] = None,
    is_subscription: bool = False,
    description: Optional[str] = None,
    provides: Optional[List[str]] = None,
    requires: Optional[List[str]] = None,
    external: bool = False,
    shareable: bool = False,
    tags: Optional[Iterable[str]] = (),
    override: Optional[str] = None,
    inaccessible: bool = False,
    init: Literal[False] = False,
    permission_classes: Optional[List[Type[BasePermission]]] = None,
    deprecation_reason: Optional[str] = None,
    default: Any = UNSET,
    default_factory: Union[Callable[..., object], object] = UNSET,
    directives: Sequence[object] = (),
    graphql_type: Optional[Any] = None,
) -> T:
    ...
# Overload: no resolver — annotation-only field that participates in the
# generated constructor (``init=True``); types as ``Any`` for assignment.
@overload
def field(
    *,
    name: Optional[str] = None,
    is_subscription: bool = False,
    description: Optional[str] = None,
    provides: Optional[List[str]] = None,
    requires: Optional[List[str]] = None,
    external: bool = False,
    shareable: bool = False,
    tags: Optional[Iterable[str]] = (),
    override: Optional[str] = None,
    inaccessible: bool = False,
    init: Literal[True] = True,
    permission_classes: Optional[List[Type[BasePermission]]] = None,
    deprecation_reason: Optional[str] = None,
    default: Any = UNSET,
    default_factory: Union[Callable[..., object], object] = UNSET,
    directives: Sequence[object] = (),
    graphql_type: Optional[Any] = None,
) -> Any:
    ...
# Overload: resolver passed positionally (decorator usage) — the call
# evaluates to a ``StrawberryField`` descriptor.
@overload
def field(
    resolver: _RESOLVER_TYPE[T],
    *,
    name: Optional[str] = None,
    is_subscription: bool = False,
    description: Optional[str] = None,
    provides: Optional[List[str]] = None,
    requires: Optional[List[str]] = None,
    external: bool = False,
    shareable: bool = False,
    tags: Optional[Iterable[str]] = (),
    override: Optional[str] = None,
    inaccessible: bool = False,
    permission_classes: Optional[List[Type[BasePermission]]] = None,
    deprecation_reason: Optional[str] = None,
    default: Any = UNSET,
    default_factory: Union[Callable[..., object], object] = UNSET,
    directives: Sequence[object] = (),
    graphql_type: Optional[Any] = None,
) -> StrawberryField:
    ...
def field(
    resolver: Optional[_RESOLVER_TYPE[Any]] = None,
    *,
    name: Optional[str] = None,
    is_subscription: bool = False,
    description: Optional[str] = None,
    provides: Optional[List[str]] = None,
    requires: Optional[List[str]] = None,
    external: bool = False,
    shareable: bool = False,
    tags: Optional[Iterable[str]] = (),
    override: Optional[str] = None,
    inaccessible: bool = False,
    permission_classes: Optional[List[Type[BasePermission]]] = None,
    deprecation_reason: Optional[str] = None,
    default: Any = dataclasses.MISSING,
    default_factory: Union[Callable[..., object], object] = dataclasses.MISSING,
    directives: Sequence[object] = (),
    graphql_type: Optional[Any] = None,
    # This init parameter is used by PyRight to determine whether this field
    # is added in the constructor or not. It is not used to change
    # any behavior at the moment.
    init: Literal[True, False, None] = None,
) -> Any:
    """Declare a GraphQL field with Apollo Federation support.

    The federation-specific keyword arguments are translated into schema
    directives appended after any user-supplied ``directives``, in this
    order: ``provides``, ``requires``, ``external``, ``shareable``, each
    entry in ``tags``, ``override``, ``inaccessible``. All remaining
    arguments are forwarded unchanged to the core ``strawberry.field``.
    """
    from .schema_directives import (
        External,
        Inaccessible,
        Override,
        Provides,
        Requires,
        Shareable,
        Tag,
    )

    directives = list(directives)

    # Map each federation flag onto its corresponding schema directive.
    if provides:
        directives.append(Provides(fields=" ".join(provides)))
    if requires:
        directives.append(Requires(fields=" ".join(requires)))
    if external:
        directives.append(External())
    if shareable:
        directives.append(Shareable())
    if tags:
        directives.extend(Tag(name=tag) for tag in tags)
    if override:
        directives.append(Override(override_from=override))
    if inaccessible:
        directives.append(Inaccessible())

    return base_field(  # type: ignore
        resolver=resolver,  # type: ignore
        name=name,
        is_subscription=is_subscription,
        description=description,
        permission_classes=permission_classes,
        deprecation_reason=deprecation_reason,
        default=default,
        default_factory=default_factory,
        init=init,  # type: ignore
        directives=directives,
        graphql_type=graphql_type,
    )
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Iterable, Optional, Union, overload
from strawberry.enum import _process_enum
from strawberry.enum import enum_value as base_enum_value
if TYPE_CHECKING:
from strawberry.enum import EnumType, EnumValueDefinition
def enum_value(
    value: Any,
    deprecation_reason: Optional[str] = None,
    directives: Iterable[object] = (),
    inaccessible: bool = False,
    tags: Iterable[str] = (),
) -> EnumValueDefinition:
    """Create an enum value, translating federation flags into directives.

    ``inaccessible`` appends an ``@inaccessible`` directive and every entry
    in ``tags`` appends an ``@tag`` directive, after any user-supplied
    ``directives``; the result is delegated to the core ``enum_value``.
    """
    from strawberry.federation.schema_directives import Inaccessible, Tag

    all_directives = [*directives]
    if inaccessible:
        all_directives.append(Inaccessible())
    if tags:
        all_directives.extend(Tag(name=tag) for tag in tags)
    return base_enum_value(value, deprecation_reason, all_directives)
# Overload: bare decorator usage (``@enum``) — returns the decorated class.
@overload
def enum(
    _cls: EnumType,
    *,
    name=None,
    description=None,
    directives: Iterable[object] = (),
    inaccessible: bool = False,
    tags: Optional[Iterable[str]] = (),
) -> EnumType:
    ...
# Overload: decorator-factory usage (``@enum(...)``) — returns a decorator.
@overload
def enum(
    _cls: None = None,
    *,
    name=None,
    description=None,
    directives: Iterable[object] = (),
    inaccessible: bool = False,
    tags: Optional[Iterable[str]] = (),
) -> Callable[[EnumType], EnumType]:
    ...
def enum(
    _cls: Optional[EnumType] = None,
    *,
    name=None,
    description=None,
    directives=(),
    inaccessible=False,
    tags=(),
) -> Union[EnumType, Callable[[EnumType], EnumType]]:
    """Registers the enum in the GraphQL type system.

    If ``name`` is passed, the GraphQL type uses that value instead of the
    Enum class name. The federation-only flags ``inaccessible`` and ``tags``
    are translated into ``@inaccessible`` / ``@tag`` schema directives.

    Usable both as a bare decorator and as a decorator factory.
    """
    from strawberry.federation.schema_directives import Inaccessible, Tag

    combined_directives = list(directives)
    if inaccessible:
        combined_directives.append(Inaccessible())
    if tags:
        combined_directives.extend(Tag(name=tag) for tag in tags)

    def wrap(cls: EnumType) -> EnumType:
        # Deferred so name/description/directives are captured by closure.
        return _process_enum(cls, name, description, directives=combined_directives)

    if not _cls:
        # Factory usage: hand the decorator back.
        return wrap

    return wrap(_cls)  # pragma: no cover
import sys
from typing import Any, Callable, Iterable, NewType, Optional, TypeVar, Union, overload
from strawberry.custom_scalar import _process_scalar
# ``NewType`` became a proper class in Python 3.10, so only from that
# version can it be used as part of the TypeVar bound.
if sys.version_info < (3, 10):
    _T = TypeVar("_T", bound=type)
else:
    _T = TypeVar("_T", bound=Union[type, NewType])


def identity(x: _T) -> _T:  # pragma: no cover
    """Default ``serialize`` implementation: return the value unchanged."""
    return x
# Overload: decorator factory form, e.g.
# ``@strawberry.federation.scalar(serialize=..., parse_value=...)``.
@overload
def scalar(
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Callable = identity,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
    inaccessible: bool = False,
    tags: Optional[Iterable[str]] = (),
) -> Callable[[_T], _T]:
    ...
# Overload: applied directly to a type or NewType, e.g.
# ``strawberry.federation.scalar(datetime.date, serialize=...)``.
@overload
def scalar(
    cls: _T,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Callable = identity,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
    inaccessible: bool = False,
    tags: Optional[Iterable[str]] = (),
) -> _T:
    ...
def scalar(
    cls=None,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    specified_by_url: Optional[str] = None,
    serialize: Callable = identity,
    parse_value: Optional[Callable] = None,
    parse_literal: Optional[Callable] = None,
    directives: Iterable[object] = (),
    inaccessible: bool = False,
    tags: Optional[Iterable[str]] = (),
) -> Any:
    """Annotates a class or type as a GraphQL custom scalar.

    Example usages:

    >>> strawberry.federation.scalar(
    >>>     datetime.date,
    >>>     serialize=lambda value: value.isoformat(),
    >>>     parse_value=datetime.parse_date
    >>> )

    >>> Base64Encoded = strawberry.federation.scalar(
    >>>     NewType("Base64Encoded", bytes),
    >>>     serialize=base64.b64encode,
    >>>     parse_value=base64.b64decode
    >>> )

    >>> @strawberry.federation.scalar(
    >>>     serialize=lambda value: ",".join(value.items),
    >>>     parse_value=lambda value: CustomList(value.split(","))
    >>> )
    >>> class CustomList:
    >>>     def __init__(self, items):
    >>>         self.items = items
    """
    from strawberry.federation.schema_directives import Inaccessible, Tag

    if parse_value is None:
        # When applied directly (``scalar(SomeType, ...)``) the wrapped
        # type itself is the default parser; in decorator-factory form
        # ``cls`` is None, so parse_value simply stays None here.
        parse_value = cls

    collected = list(directives)
    if inaccessible:
        collected.append(Inaccessible())
    if tags:
        for tag in tags:
            collected.append(Tag(name=tag))

    def decorate(wrapped):
        return _process_scalar(
            wrapped,
            name=name,
            description=description,
            specified_by_url=specified_by_url,
            serialize=serialize,
            parse_value=parse_value,
            parse_literal=parse_literal,
            directives=collected,
        )

    if cls is None:
        return decorate
    return decorate(cls)
from collections import defaultdict
from copy import copy
from functools import partial
from itertools import chain
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
Iterable,
List,
Mapping,
Optional,
Set,
Type,
Union,
cast,
)
from graphql import (
GraphQLError,
GraphQLField,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLScalarType,
GraphQLUnionType,
)
from graphql.type.definition import GraphQLArgument
from strawberry.printer import print_schema
from strawberry.schema import Schema as BaseSchema
from strawberry.types.types import TypeDefinition
from strawberry.utils.cached_property import cached_property
from strawberry.utils.inspect import get_func_args
from .schema_directive import StrawberryFederationSchemaDirective
if TYPE_CHECKING:
from graphql import ExecutionContext as GraphQLExecutionContext
from graphql import GraphQLObjectType
from strawberry.custom_scalar import ScalarDefinition, ScalarWrapper
from strawberry.enum import EnumDefinition
from strawberry.extensions import SchemaExtension
from strawberry.federation.schema_directives import ComposeDirective
from strawberry.schema.config import StrawberryConfig
from strawberry.schema.types.concrete_type import TypeMap
from strawberry.schema_directive import StrawberrySchemaDirective
from strawberry.union import StrawberryUnion
class Schema(BaseSchema):
    """Apollo-Federation-enabled GraphQL schema.

    Builds on the regular strawberry Schema and adds what the federation
    spec requires: the ``_service`` and (when entities exist) ``_entities``
    query fields, the ``_Any`` scalar and — when ``enable_federation_2`` is
    set — the ``@link`` / ``@composeDirective`` schema directives.
    """

    def __init__(
        self,
        query: Optional[Type] = None,
        mutation: Optional[Type] = None,
        subscription: Optional[Type] = None,
        # TODO: we should update directives' type in the main schema
        directives: Iterable[Type] = (),
        types: Iterable[Type] = (),
        extensions: Iterable[Union[Type["SchemaExtension"], "SchemaExtension"]] = (),
        execution_context_class: Optional[Type["GraphQLExecutionContext"]] = None,
        config: Optional["StrawberryConfig"] = None,
        scalar_overrides: Optional[
            Dict[object, Union[Type, "ScalarWrapper", "ScalarDefinition"]]
        ] = None,
        schema_directives: Iterable[object] = (),
        enable_federation_2: bool = False,
    ):
        # Inject the federation ``_service`` field into (or create) the
        # query type before the base schema is converted.
        query = self._get_federation_query_type(query)

        super().__init__(
            query=query,
            mutation=mutation,
            subscription=subscription,
            directives=directives,  # type: ignore
            types=types,
            extensions=extensions,
            execution_context_class=execution_context_class,
            config=config,
            scalar_overrides=scalar_overrides,
            schema_directives=schema_directives,
        )

        self.schema_directives = list(schema_directives)

        self._add_scalars()
        self._add_entities_to_query()

        if enable_federation_2:
            composed_directives = self._add_compose_directives()
            self._add_link_directives(composed_directives)  # type: ignore
        else:
            # Federation 1 does not understand ``resolvable`` on @key.
            self._remove_resolvable_field()

    def _get_federation_query_type(self, query: Optional[Type]) -> Type:
        """Returns a new query type that includes the _service field.

        If the query type is provided, it will be used as the base for the new
        query type. Otherwise, a new query type will be created.

        Federation needs the following two fields to be present in the query type:

        - _service: This field is used by the gateway to query for the capabilities
          of the federated service.
        - _entities: This field is used by the gateway to query for the entities
          that are part of the federated service.

        The _service field is added by default, but the _entities field is only
        added if the schema contains an entity type.
        """

        # note we don't add the _entities field here, as we need to know if the
        # schema contains an entity type first and we do that by leveraging
        # the schema converter type map, so we don't have to do that twice

        # TODO: ideally we should be able to do this without using the schema
        # converter, but for now this is the easiest way to do it
        # see `_add_entities_to_query`

        import strawberry
        from strawberry.tools.create_type import create_type
        from strawberry.tools.merge_types import merge_types

        @strawberry.type(name="_Service")
        class Service:
            # Lazily prints this schema's SDL when the gateway asks for it.
            sdl: str = strawberry.field(
                resolver=lambda: print_schema(self),
            )

        @strawberry.field(name="_service")
        def service() -> Service:
            return Service()

        fields = [service]

        FederationQuery = create_type(name="Query", fields=fields)

        if query is None:
            return FederationQuery

        query_type = merge_types(
            "Query",
            (
                FederationQuery,
                query,
            ),
        )

        # TODO: this should be probably done in merge_types
        if query._type_definition.extend:
            query_type._type_definition.extend = True  # type: ignore

        return query_type

    def _add_entities_to_query(self):
        """Add ``_entities(representations: [_Any!]!)`` to the query type.

        Skipped entirely when no type in the schema is annotated with @key.
        """
        entity_type = _get_entity_type(self.schema_converter.type_map)

        if not entity_type:
            return

        self._schema.type_map[entity_type.name] = entity_type
        fields = {"_entities": self._get_entities_field(entity_type)}

        # Copy the query type, update it to use the modified fields
        query_type = cast("GraphQLObjectType", self._schema.query_type)
        fields.update(query_type.fields)
        query_type = copy(query_type)
        query_type._fields = fields

        self._schema.query_type = query_type
        self._schema.type_map[query_type.name] = query_type

    def entities_resolver(self, root, info, representations) -> List[object]:
        """Resolver for ``Query._entities``.

        Each representation is a dict containing ``__typename`` plus the
        @key fields. It is resolved either via the type's
        ``resolve_reference`` (when defined on the origin class) or by
        converting the raw dict into an instance of the type. Failures are
        returned as GraphQLErrors in the result list instead of aborting
        the whole request.
        """
        results = []

        for representation in representations:
            # NOTE: pop mutates the incoming representation dict.
            type_name = representation.pop("__typename")
            type_ = self.schema_converter.type_map[type_name]

            definition = cast(TypeDefinition, type_.definition)

            if hasattr(definition.origin, "resolve_reference"):
                resolve_reference = definition.origin.resolve_reference

                func_args = get_func_args(resolve_reference)
                kwargs = representation

                # TODO: use the same logic we use for other resolvers
                if "info" in func_args:
                    kwargs["info"] = info

                get_result = partial(resolve_reference, **kwargs)
            else:
                from strawberry.arguments import convert_argument

                strawberry_schema = info.schema.extensions["strawberry-definition"]
                config = strawberry_schema.config
                scalar_registry = strawberry_schema.schema_converter.scalar_registry

                get_result = partial(
                    convert_argument,
                    representation,
                    type_=definition.origin,
                    scalar_registry=scalar_registry,
                    config=config,
                )

            try:
                result = get_result()
            except Exception as e:
                result = GraphQLError(
                    f"Unable to resolve reference for {definition.origin}",
                    original_error=e,
                )

            results.append(result)

        return results

    def _add_scalars(self):
        # ``_Any`` is the scalar the gateway uses to send entity
        # representations to ``_entities``.
        self.Any = GraphQLScalarType("_Any")

        self._schema.type_map["_Any"] = self.Any

    def _remove_resolvable_field(self) -> None:
        # this might be removed when we remove support for federation 1
        # or when we improve how we print the directives
        from ..unset import UNSET

        from .schema_directives import Key

        for directive in self.schema_directives_in_use:
            if isinstance(directive, Key):
                # An UNSET attribute is omitted when the directive prints.
                directive.resolvable = UNSET

    @cached_property
    def schema_directives_in_use(self) -> List[object]:
        """Every directive instance attached to any type, field or enum
        value in the schema (computed once, then cached)."""
        all_graphql_types = self._schema.type_map.values()

        directives: List[object] = []

        for type_ in all_graphql_types:
            strawberry_definition = type_.extensions.get("strawberry-definition")

            if not strawberry_definition:
                continue

            directives.extend(strawberry_definition.directives)

            # Object/input types carry "fields"; enums carry "values".
            fields = getattr(strawberry_definition, "fields", [])
            values = getattr(strawberry_definition, "values", [])

            for field in chain(fields, values):
                directives.extend(field.directives)

        return directives

    def _add_link_for_composed_directive(
        self,
        directive: "StrawberrySchemaDirective",
        directive_by_url: Mapping[str, Set[str]],
    ) -> None:
        """Register a composed custom directive under its import URL so the
        matching ``@link`` can be emitted later."""
        if not isinstance(directive, StrawberryFederationSchemaDirective):
            return

        if not directive.compose_options:
            return

        import_url = directive.compose_options.import_url
        name = self.config.name_converter.from_directive(directive)

        # import url is required by Apollo Federation, this might change in
        # future to be optional, so for now, when it is not passed we
        # define a mock one. The URL isn't used for validation anyway.
        if import_url is None:
            import_url = f"https://directives.strawberry.rocks/{name}/v0.1"

        directive_by_url[import_url].add(f"@{name}")

    def _add_link_directives(
        self, additional_directives: Optional[List[object]] = None
    ):
        """Append one ``@link`` schema directive per spec URL in use,
        importing every directive name collected for that URL."""
        from .schema_directives import FederationDirective, Link

        directive_by_url: DefaultDict[str, Set[str]] = defaultdict(set)

        additional_directives = additional_directives or []

        for directive in self.schema_directives_in_use + additional_directives:
            definition = directive.__strawberry_directive__  # type: ignore

            self._add_link_for_composed_directive(definition, directive_by_url)

            if isinstance(directive, FederationDirective):
                directive_by_url[directive.imported_from.url].add(
                    f"@{directive.imported_from.name}"
                )

        link_directives: List[object] = [
            Link(
                url=url,
                # sorted for a deterministic printed schema
                import_=list(sorted(directives)),
            )
            for url, directives in directive_by_url.items()
        ]

        self.schema_directives = self.schema_directives + link_directives

    def _add_compose_directives(self) -> List["ComposeDirective"]:
        """Append a ``@composeDirective`` for every custom directive that
        opted into composition; returns the directives that were added."""
        from .schema_directives import ComposeDirective

        compose_directives: List[ComposeDirective] = []

        for directive in self.schema_directives_in_use:
            definition = directive.__strawberry_directive__  # type: ignore

            is_federation_schema_directive = isinstance(
                definition, StrawberryFederationSchemaDirective
            )

            if is_federation_schema_directive and definition.compose_options:
                name = self.config.name_converter.from_directive(definition)

                compose_directives.append(
                    ComposeDirective(
                        name=f"@{name}",
                    )
                )

        self.schema_directives = self.schema_directives + compose_directives

        return compose_directives

    def _get_entities_field(self, entity_type: GraphQLUnionType) -> GraphQLField:
        # ``_entities(representations: [_Any!]!): [_Entity]!``
        return GraphQLField(
            GraphQLNonNull(GraphQLList(entity_type)),
            args={
                "representations": GraphQLArgument(
                    GraphQLNonNull(GraphQLList(GraphQLNonNull(self.Any)))
                )
            },
            resolve=self.entities_resolver,
        )

    def _warn_for_federation_directives(self) -> None:
        # this is used in the main schema to raise if there's a directive
        # that's for federation, but in this class we don't want to warn,
        # since it is expected to have federation directives
        pass
def _get_entity_type(type_map: "TypeMap"):
    """Build the ``_Entity`` union from every @key-annotated object type.

    Returns ``None`` when no type carries a @key directive, in which case
    the union (and ``Query._entities``) must be left out of the schema.
    https://www.apollographql.com/docs/apollo-server/federation/federation-spec/#resolve-requests-for-entities
    """
    member_types = []
    for concrete_type in type_map.values():
        if not _has_federation_keys(concrete_type.definition):
            continue
        # TODO: check this — interfaces cannot be members of a union.
        if isinstance(concrete_type.implementation, GraphQLInterfaceType):
            continue
        member_types.append(concrete_type.implementation)

    # If no types are annotated with the key directive, then the _Entity
    # union and Query._entities field should be removed from the schema.
    if not member_types:
        return None

    entity_type = GraphQLUnionType("_Entity", member_types)  # type: ignore

    def _resolve_type(self, value, _type):
        # Each strawberry instance knows its own GraphQL type name.
        return self._type_definition.name

    entity_type.resolve_type = _resolve_type

    return entity_type
def _is_key(directive: Any) -> bool:
    """Return True when *directive* is a federation ``@key`` directive."""
    from .schema_directives import Key

    return isinstance(directive, Key)
def _has_federation_keys(
    definition: Union[
        TypeDefinition, "ScalarDefinition", "EnumDefinition", "StrawberryUnion"
    ]
) -> bool:
    """Return True when *definition* is an object type carrying a ``@key``."""
    if not isinstance(definition, TypeDefinition):
        # Scalars, enums and unions can never be federation entities.
        return False
    directives = definition.directives or []
    return any(_is_key(directive) for directive in directives)
from dataclasses import dataclass
from typing import ClassVar, List, Optional
from strawberry import directive_field
from strawberry.schema_directive import Location, schema_directive
from strawberry.unset import UNSET
from .types import FieldSet, LinkImport, LinkPurpose
@dataclass
class ImportedFrom:
    # Records which spec (name + versioned URL) a directive is imported
    # from; the schema uses this to emit the matching ``@link``.
    name: str
    url: str = "https://specs.apollo.dev/federation/v2.3"
class FederationDirective:
    # Marker base class for directives that come from the federation spec;
    # ``imported_from`` drives ``@link`` generation in the federated schema.
    imported_from: ClassVar[ImportedFrom]
# ``@external``: the field is owned/resolved by another subgraph.
@schema_directive(
    locations=[Location.FIELD_DEFINITION], name="external", print_definition=False
)
class External(FederationDirective):
    imported_from: ClassVar[ImportedFrom] = ImportedFrom(
        name="external", url="https://specs.apollo.dev/federation/v2.3"
    )
# ``@requires(fields:)``: the field's resolver needs these external fields.
@schema_directive(
    locations=[Location.FIELD_DEFINITION], name="requires", print_definition=False
)
class Requires(FederationDirective):
    fields: FieldSet
    imported_from: ClassVar[ImportedFrom] = ImportedFrom(
        name="requires", url="https://specs.apollo.dev/federation/v2.3"
    )
# ``@provides(fields:)``: this field can also resolve the listed fields
# of the entity it returns.
@schema_directive(
    locations=[Location.FIELD_DEFINITION], name="provides", print_definition=False
)
class Provides(FederationDirective):
    fields: FieldSet
    imported_from: ClassVar[ImportedFrom] = ImportedFrom(
        name="provides", url="https://specs.apollo.dev/federation/v2.3"
    )
# ``@key(fields:, resolvable:)``: designates an entity and its key fields.
@schema_directive(
    locations=[Location.OBJECT, Location.INTERFACE],
    name="key",
    repeatable=True,
    print_definition=False,
)
class Key(FederationDirective):
    fields: FieldSet
    resolvable: Optional[bool] = True
    imported_from: ClassVar[ImportedFrom] = ImportedFrom(
        name="key", url="https://specs.apollo.dev/federation/v2.3"
    )
# ``@shareable``: multiple subgraphs may resolve this field/type.
@schema_directive(
    locations=[Location.FIELD_DEFINITION, Location.OBJECT],
    name="shareable",
    repeatable=True,
    print_definition=False,
)
class Shareable(FederationDirective):
    imported_from: ClassVar[ImportedFrom] = ImportedFrom(
        name="shareable", url="https://specs.apollo.dev/federation/v2.3"
    )
@schema_directive(
    locations=[Location.SCHEMA], name="link", repeatable=True, print_definition=False
)
class Link:
    """The ``@link`` schema directive: imports directives from a spec URL.

    The trailing underscores avoid Python keywords; the real GraphQL
    argument names (``as``, ``for``, ``import``) are restored through
    ``directive_field(name=...)``.
    """

    url: Optional[str]
    as_: Optional[str] = directive_field(name="as")
    for_: Optional[LinkPurpose] = directive_field(name="for")
    import_: Optional[List[Optional[LinkImport]]] = directive_field(name="import")

    def __init__(
        self,
        url: Optional[str] = UNSET,
        as_: Optional[str] = UNSET,
        for_: Optional[LinkPurpose] = UNSET,
        import_: Optional[List[Optional[LinkImport]]] = UNSET,
    ):
        # Hand-written __init__ so omitted arguments stay UNSET (and are
        # therefore not printed) rather than defaulting to None.
        self.url = url
        self.as_ = as_
        self.for_ = for_
        self.import_ = import_
# ``@tag(name:)``: attaches an arbitrary string tag; allowed on nearly
# every schema location (used e.g. for contract filtering).
@schema_directive(
    locations=[
        Location.FIELD_DEFINITION,
        Location.INTERFACE,
        Location.OBJECT,
        Location.UNION,
        Location.ARGUMENT_DEFINITION,
        Location.SCALAR,
        Location.ENUM,
        Location.ENUM_VALUE,
        Location.INPUT_OBJECT,
        Location.INPUT_FIELD_DEFINITION,
    ],
    name="tag",
    repeatable=True,
    print_definition=False,
)
class Tag(FederationDirective):
    name: str
    imported_from: ClassVar[ImportedFrom] = ImportedFrom(
        name="tag", url="https://specs.apollo.dev/federation/v2.3"
    )
# ``@override(from:)``: this subgraph takes over the field from the named
# subgraph. ``from`` is a Python keyword, hence the ``override_from`` name.
@schema_directive(
    locations=[Location.FIELD_DEFINITION], name="override", print_definition=False
)
class Override(FederationDirective):
    override_from: str = directive_field(name="from")
    imported_from: ClassVar[ImportedFrom] = ImportedFrom(
        name="override", url="https://specs.apollo.dev/federation/v2.3"
    )
# ``@inaccessible``: hides the element from the supergraph API while still
# allowing subgraphs to reference it.
@schema_directive(
    locations=[
        Location.FIELD_DEFINITION,
        Location.OBJECT,
        Location.INTERFACE,
        Location.UNION,
        Location.ARGUMENT_DEFINITION,
        Location.SCALAR,
        Location.ENUM,
        Location.ENUM_VALUE,
        Location.INPUT_OBJECT,
        Location.INPUT_FIELD_DEFINITION,
    ],
    name="inaccessible",
    print_definition=False,
)
class Inaccessible(FederationDirective):
    imported_from: ClassVar[ImportedFrom] = ImportedFrom(
        name="inaccessible", url="https://specs.apollo.dev/federation/v2.3"
    )
# ``@composeDirective(name:)``: asks the gateway to preserve the named
# custom directive in the composed supergraph schema.
@schema_directive(
    locations=[Location.SCHEMA], name="composeDirective", print_definition=False
)
class ComposeDirective(FederationDirective):
    name: str
    imported_from: ClassVar[ImportedFrom] = ImportedFrom(
        name="composeDirective", url="https://specs.apollo.dev/federation/v2.3"
    )
# ``@interfaceObject``: this object type stands in for an interface entity
# that is defined in another subgraph.
@schema_directive(
    locations=[Location.OBJECT], name="interfaceObject", print_definition=False
)
class InterfaceObject(FederationDirective):
    imported_from: ClassVar[ImportedFrom] = ImportedFrom(
        name="interfaceObject", url="https://specs.apollo.dev/federation/v2.3"
    )
from typing import (
TYPE_CHECKING,
Callable,
Iterable,
Optional,
Sequence,
Type,
TypeVar,
Union,
overload,
)
from strawberry.field import StrawberryField
from strawberry.field import field as base_field
from strawberry.object_type import type as base_type
from strawberry.unset import UNSET
from strawberry.utils.typing import __dataclass_transform__
from .field import field
if TYPE_CHECKING:
from .schema_directives import Key
T = TypeVar("T", bound=Type)
def _impl_type(
    cls: Optional[T],
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Iterable[object] = (),
    keys: Iterable[Union["Key", str]] = (),
    extend: bool = False,
    shareable: bool = False,
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
    is_input: bool = False,
    is_interface: bool = False,
    is_interface_object: bool = False,
) -> T:
    """Shared implementation behind the federation ``type`` / ``input`` /
    ``interface`` / ``interface_object`` decorators.

    Translates the federation keyword arguments into schema directives and
    delegates everything else to the base ``strawberry.type``.
    """
    from strawberry.federation.schema_directives import (
        Inaccessible,
        InterfaceObject,
        Key,
        Shareable,
        Tag,
    )

    all_directives = list(directives)

    for key in keys:
        # A plain string is shorthand for ``Key(fields=...)``.
        if isinstance(key, str):
            all_directives.append(Key(fields=key, resolvable=UNSET))
        else:
            all_directives.append(key)

    if shareable:
        all_directives.append(Shareable())
    # NOTE(review): any explicit value — even ``inaccessible=False`` — adds
    # the @inaccessible directive; only the UNSET default skips it. Confirm
    # this is intentional.
    if inaccessible is not UNSET:
        all_directives.append(Inaccessible())
    if tags:
        all_directives.extend(Tag(name=tag) for tag in tags)
    if is_interface_object:
        all_directives.append(InterfaceObject())

    return base_type(  # type: ignore
        cls,
        name=name,
        description=description,
        directives=all_directives,
        extend=extend,
        is_input=is_input,
        is_interface=is_interface,
    )
# Overload: decorator applied directly to the class.
# Fix: this overload was missing ``shareable`` and ``directives``, which the
# implementation (and the factory overload) accept — type checkers rejected
# valid direct calls such as ``strawberry.federation.type(MyType,
# shareable=True)``. Both are added with their implementation defaults, so
# the overload stays backward-compatible.
@overload
@__dataclass_transform__(
    order_default=True,
    kw_only_default=True,
    field_descriptors=(base_field, field, StrawberryField),
)
def type(
    cls: T,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    keys: Iterable[Union["Key", str]] = (),
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
    extend: bool = False,
    shareable: bool = False,
    directives: Iterable[object] = (),
) -> T:
    ...
# Overload: decorator factory form — ``@strawberry.federation.type(...)``.
@overload
@__dataclass_transform__(
    order_default=True,
    kw_only_default=True,
    field_descriptors=(base_field, field, StrawberryField),
)
def type(
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    keys: Iterable[Union["Key", str]] = (),
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
    extend: bool = False,
    shareable: bool = False,
    directives: Iterable[object] = (),
) -> Callable[[T], T]:
    ...
def type(
    cls: Optional[T] = None,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    keys: Iterable[Union["Key", str]] = (),
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
    extend: bool = False,
    shareable: bool = False,
    directives: Iterable[object] = (),
):
    """Federation-aware counterpart of ``strawberry.type``."""
    options = {
        "name": name,
        "description": description,
        "directives": directives,
        "keys": keys,
        "extend": extend,
        "shareable": shareable,
        "inaccessible": inaccessible,
        "tags": tags,
    }
    return _impl_type(cls, **options)
# Overload: decorator applied directly to the class.
@overload
@__dataclass_transform__(
    order_default=True,
    kw_only_default=True,
    field_descriptors=(base_field, field, StrawberryField),
)
def input(
    cls: T,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Sequence[object] = (),
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
) -> T:
    ...
# Overload: decorator factory form — ``@strawberry.federation.input(...)``.
@overload
@__dataclass_transform__(
    order_default=True,
    kw_only_default=True,
    field_descriptors=(base_field, field, StrawberryField),
)
def input(
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Sequence[object] = (),
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
) -> Callable[[T], T]:
    ...
def input(
    cls: Optional[T] = None,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    directives: Sequence[object] = (),
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
):
    """Federation-aware counterpart of ``strawberry.input``."""
    options = {
        "name": name,
        "description": description,
        "directives": directives,
        "inaccessible": inaccessible,
        "is_input": True,
        "tags": tags,
    }
    return _impl_type(cls, **options)
# Overload: decorator applied directly to the class.
@overload
@__dataclass_transform__(
    order_default=True,
    kw_only_default=True,
    field_descriptors=(base_field, field, StrawberryField),
)
def interface(
    cls: T,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    keys: Iterable[Union["Key", str]] = (),
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
    directives: Iterable[object] = (),
) -> T:
    ...
# Overload: decorator factory form — ``@strawberry.federation.interface(...)``.
@overload
@__dataclass_transform__(
    order_default=True,
    kw_only_default=True,
    field_descriptors=(base_field, field, StrawberryField),
)
def interface(
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    keys: Iterable[Union["Key", str]] = (),
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
    directives: Iterable[object] = (),
) -> Callable[[T], T]:
    ...
def interface(
    cls: Optional[T] = None,
    *,
    name: Optional[str] = None,
    description: Optional[str] = None,
    keys: Iterable[Union["Key", str]] = (),
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
    directives: Iterable[object] = (),
):
    """Federation-aware counterpart of ``strawberry.interface``."""
    options = {
        "name": name,
        "description": description,
        "directives": directives,
        "keys": keys,
        "inaccessible": inaccessible,
        "is_interface": True,
        "tags": tags,
    }
    return _impl_type(cls, **options)
# Overload: decorator applied directly to the class.
# Note that ``keys`` is required for an interface object.
@overload
@__dataclass_transform__(
    order_default=True,
    kw_only_default=True,
    field_descriptors=(base_field, field, StrawberryField),
)
def interface_object(
    cls: T,
    *,
    keys: Iterable[Union["Key", str]],
    name: Optional[str] = None,
    description: Optional[str] = None,
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
    directives: Iterable[object] = (),
) -> T:
    ...
# Overload: decorator factory form; ``keys`` is required.
@overload
@__dataclass_transform__(
    order_default=True,
    kw_only_default=True,
    field_descriptors=(base_field, field, StrawberryField),
)
def interface_object(
    *,
    keys: Iterable[Union["Key", str]],
    name: Optional[str] = None,
    description: Optional[str] = None,
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
    directives: Iterable[object] = (),
) -> Callable[[T], T]:
    ...
def interface_object(
    cls: Optional[T] = None,
    *,
    keys: Iterable[Union["Key", str]],
    name: Optional[str] = None,
    description: Optional[str] = None,
    inaccessible: bool = UNSET,
    tags: Iterable[str] = (),
    directives: Iterable[object] = (),
):
    """Mark the class as an ``@interfaceObject`` stand-in for an interface
    entity defined in another subgraph (federation 2.3+)."""
    options = {
        "name": name,
        "description": description,
        "directives": directives,
        "keys": keys,
        "inaccessible": inaccessible,
        # An interface object is a plain object type with @interfaceObject.
        "is_interface": False,
        "is_interface_object": True,
        "tags": tags,
    }
    return _impl_type(cls, **options)
from __future__ import annotations
import asyncio
from abc import ABC, abstractmethod
from contextlib import suppress
from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, Optional, cast
from graphql import ExecutionResult as GraphQLExecutionResult
from graphql import GraphQLError
from graphql.error.graphql_error import format_error as format_graphql_error
from strawberry.subscriptions.protocols.graphql_ws import (
GQL_COMPLETE,
GQL_CONNECTION_ACK,
GQL_CONNECTION_ERROR,
GQL_CONNECTION_INIT,
GQL_CONNECTION_KEEP_ALIVE,
GQL_CONNECTION_TERMINATE,
GQL_DATA,
GQL_ERROR,
GQL_START,
GQL_STOP,
)
from strawberry.utils.debug import pretty_print_graphql_operation
if TYPE_CHECKING:
from strawberry.schema import BaseSchema
from strawberry.subscriptions.protocols.graphql_ws.types import (
ConnectionInitPayload,
OperationMessage,
OperationMessagePayload,
StartPayload,
)
class BaseGraphQLWSHandler(ABC):
    """Transport-agnostic handler for the legacy ``graphql-ws``
    subscription protocol; subclasses supply the actual WebSocket I/O."""

    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        keep_alive: bool,
        keep_alive_interval: float,
    ):
        self.schema = schema
        self.debug = debug
        self.keep_alive = keep_alive
        self.keep_alive_interval = keep_alive_interval
        self.keep_alive_task: Optional[asyncio.Task] = None
        # Active subscription generators and their consumer tasks, keyed by
        # the client-chosen operation id.
        self.subscriptions: Dict[str, AsyncGenerator] = {}
        self.tasks: Dict[str, asyncio.Task] = {}
        self.connection_params: Optional[ConnectionInitPayload] = None

    @abstractmethod
    async def get_context(self) -> Any:
        """Return the operations context"""

    @abstractmethod
    async def get_root_value(self) -> Any:
        """Return the schemas root value"""

    @abstractmethod
    async def send_json(self, data: OperationMessage) -> None:
        """Send the data JSON encoded to the WebSocket client"""

    @abstractmethod
    async def close(self, code: int = 1000, reason: Optional[str] = None) -> None:
        """Close the WebSocket with the passed code and reason"""

    @abstractmethod
    async def handle_request(self) -> Any:
        """Handle the request this instance was created for"""

    async def handle(self) -> Any:
        return await self.handle_request()

    async def handle_message(
        self,
        message: OperationMessage,
    ) -> None:
        """Dispatch an incoming protocol message on its ``type`` field.

        Unknown message types are silently ignored.
        """
        message_type = message["type"]

        if message_type == GQL_CONNECTION_INIT:
            await self.handle_connection_init(message)
        elif message_type == GQL_CONNECTION_TERMINATE:
            await self.handle_connection_terminate(message)
        elif message_type == GQL_START:
            await self.handle_start(message)
        elif message_type == GQL_STOP:
            await self.handle_stop(message)

    async def handle_connection_init(self, message: OperationMessage) -> None:
        """Ack the connection, remember its params, start keep-alive.

        A non-dict payload is a protocol error: reply with
        ``connection_error`` and close the socket.
        """
        payload = message.get("payload")
        if payload is not None and not isinstance(payload, dict):
            error_message: OperationMessage = {"type": GQL_CONNECTION_ERROR}
            await self.send_json(error_message)
            await self.close()
            return

        payload = cast(Optional["ConnectionInitPayload"], payload)
        self.connection_params = payload

        acknowledge_message: OperationMessage = {"type": GQL_CONNECTION_ACK}
        await self.send_json(acknowledge_message)

        if self.keep_alive:
            keep_alive_handler = self.handle_keep_alive()
            self.keep_alive_task = asyncio.create_task(keep_alive_handler)

    async def handle_connection_terminate(self, message: OperationMessage) -> None:
        await self.close()

    async def handle_start(self, message: OperationMessage) -> None:
        """Start a subscription for the operation id in *message*.

        Errors raised while subscribing are reported with an ``error``
        message; otherwise the result stream is consumed by a background
        task registered in ``self.tasks``.
        """
        operation_id = message["id"]
        payload = cast("StartPayload", message["payload"])
        query = payload["query"]
        operation_name = payload.get("operationName")
        variables = payload.get("variables")

        context = await self.get_context()
        if isinstance(context, dict):
            # Expose the connection_init payload to resolvers.
            context["connection_params"] = self.connection_params
        root_value = await self.get_root_value()

        if self.debug:
            pretty_print_graphql_operation(operation_name, query, variables)

        try:
            result_source = await self.schema.subscribe(
                query=query,
                variable_values=variables,
                operation_name=operation_name,
                context_value=context,
                root_value=root_value,
            )
        except GraphQLError as error:
            error_payload = format_graphql_error(error)
            await self.send_message(GQL_ERROR, operation_id, error_payload)
            self.schema.process_errors([error])
            return

        if isinstance(result_source, GraphQLExecutionResult):
            # subscribe() returned an immediate (failed) execution result
            # instead of an async stream.
            assert result_source.errors
            error_payload = format_graphql_error(result_source.errors[0])
            await self.send_message(GQL_ERROR, operation_id, error_payload)
            self.schema.process_errors(result_source.errors)
            return

        self.subscriptions[operation_id] = result_source
        result_handler = self.handle_async_results(result_source, operation_id)
        self.tasks[operation_id] = asyncio.create_task(result_handler)

    async def handle_stop(self, message: OperationMessage) -> None:
        # NOTE(review): raises KeyError when the operation id is unknown or
        # already cleaned up — confirm callers guard against this.
        operation_id = message["id"]
        await self.cleanup_operation(operation_id)

    async def handle_keep_alive(self) -> None:
        """Send ``ka`` messages forever, until the task is cancelled."""
        while True:
            data: OperationMessage = {"type": GQL_CONNECTION_KEEP_ALIVE}
            await self.send_json(data)
            await asyncio.sleep(self.keep_alive_interval)

    async def handle_async_results(
        self,
        result_source: AsyncGenerator,
        operation_id: str,
    ) -> None:
        """Forward each subscription result as a ``data`` message, then a
        final ``complete`` when the stream ends (or errors out)."""
        try:
            async for result in result_source:
                payload = {"data": result.data}
                if result.errors:
                    payload["errors"] = [
                        format_graphql_error(err) for err in result.errors
                    ]
                await self.send_message(GQL_DATA, operation_id, payload)
                # log errors after send_message to prevent potential
                # slowdown of sending result
                if result.errors:
                    self.schema.process_errors(result.errors)
        except asyncio.CancelledError:
            # CancelledErrors are expected during task cleanup.
            pass
        except Exception as error:
            # GraphQLErrors are handled by graphql-core and included in the
            # ExecutionResult
            error = GraphQLError(str(error), original_error=error)
            await self.send_message(
                GQL_DATA,
                operation_id,
                {"data": None, "errors": [format_graphql_error(error)]},
            )
            self.schema.process_errors([error])

        await self.send_message(GQL_COMPLETE, operation_id, None)

    async def cleanup_operation(self, operation_id: str) -> None:
        """Close the subscription generator and cancel its consumer task."""
        await self.subscriptions[operation_id].aclose()
        del self.subscriptions[operation_id]

        self.tasks[operation_id].cancel()
        # Swallow the CancelledError (and anything else) from the task.
        with suppress(BaseException):
            await self.tasks[operation_id]
        del self.tasks[operation_id]

    async def send_message(
        self,
        type_: str,
        operation_id: str,
        payload: Optional[OperationMessagePayload] = None,
    ) -> None:
        """Send one protocol message; ``payload`` is included only when
        not None."""
        data: OperationMessage = {"type": type_, "id": operation_id}
        if payload is not None:
            data["payload"] = payload
        await self.send_json(data)
from __future__ import annotations
from dataclasses import asdict, dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from strawberry.unset import UNSET
if TYPE_CHECKING:
from graphql import GraphQLFormattedError
@dataclass
class GraphQLTransportMessage:
    """Base class for graphql-transport-ws protocol messages."""

    def as_dict(self) -> dict:
        """Serialize to a plain dict, omitting an UNSET payload.

        Unset fields must have a JSON value of "undefined" not "null",
        so the key is removed entirely rather than set to None.
        """
        data = asdict(self)
        payload = getattr(self, "payload", None)
        if payload is UNSET:
            data.pop("payload")
        return data
@dataclass
class ConnectionInitMessage(GraphQLTransportMessage):
    """
    Direction: Client -> Server
    """

    # Optional connection parameters; UNSET (not None) when omitted so the
    # key is dropped from the serialized message.
    payload: Optional[Dict[str, Any]] = UNSET
    type: str = "connection_init"
@dataclass
class ConnectionAckMessage(GraphQLTransportMessage):
    """
    Direction: Server -> Client
    """

    # Optional ack payload; UNSET when omitted.
    payload: Optional[Dict[str, Any]] = UNSET
    type: str = "connection_ack"
@dataclass
class PingMessage(GraphQLTransportMessage):
    """
    Direction: bidirectional
    """

    # Optional ping payload; UNSET when omitted.
    payload: Optional[Dict[str, Any]] = UNSET
    type: str = "ping"
@dataclass
class PongMessage(GraphQLTransportMessage):
    """
    Direction: bidirectional
    """

    # Optional pong payload; UNSET when omitted.
    payload: Optional[Dict[str, Any]] = UNSET
    type: str = "pong"
@dataclass
class SubscribeMessagePayload:
    # Payload of a ``subscribe`` message: a standard GraphQL request.
    # The camelCase field name ``operationName`` is mandated by the
    # protocol's wire format.
    query: str
    operationName: Optional[str] = None
    variables: Optional[Dict[str, Any]] = None
    extensions: Optional[Dict[str, Any]] = None
@dataclass
class SubscribeMessage(GraphQLTransportMessage):
    """
    Direction: Client -> Server
    """

    # Client-chosen operation id; later next/error/complete messages for
    # this operation carry the same id.
    id: str
    payload: SubscribeMessagePayload
    type: str = "subscribe"
@dataclass
class NextMessage(GraphQLTransportMessage):
    """
    Direction: Server -> Client
    """

    id: str
    payload: Dict[str, Any]  # TODO: shape like ExecutionResult
    type: str = "next"

    def as_dict(self) -> dict:
        # payload is always present here, so skip the base-class UNSET
        # check (and the recursive asdict() copy).
        return {"id": self.id, "payload": self.payload, "type": self.type}
@dataclass
class ErrorMessage(GraphQLTransportMessage):
    """
    Direction: Server -> Client

    Terminates the operation ``id`` with one or more formatted GraphQL errors.
    """

    id: str
    payload: List[GraphQLFormattedError]
    type: str = "error"
@dataclass
class CompleteMessage(GraphQLTransportMessage):
    """
    Direction: bidirectional

    Signals that the operation ``id`` is finished; its id becomes reusable.
    """

    id: str
    type: str = "complete"
from __future__ import annotations
import asyncio
from abc import ABC, abstractmethod
from contextlib import suppress
from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Dict, List, Optional
from graphql import ExecutionResult as GraphQLExecutionResult
from graphql import GraphQLError, GraphQLSyntaxError, parse
from graphql.error.graphql_error import format_error as format_graphql_error
from strawberry.subscriptions.protocols.graphql_transport_ws.types import (
CompleteMessage,
ConnectionAckMessage,
ConnectionInitMessage,
ErrorMessage,
NextMessage,
PingMessage,
PongMessage,
SubscribeMessage,
SubscribeMessagePayload,
)
from strawberry.types.graphql import OperationType
from strawberry.unset import UNSET
from strawberry.utils.debug import pretty_print_graphql_operation
from strawberry.utils.operation import get_operation_type
if TYPE_CHECKING:
from datetime import timedelta
from strawberry.schema import BaseSchema
from strawberry.subscriptions.protocols.graphql_transport_ws.types import (
GraphQLTransportMessage,
)
class BaseGraphQLTransportWSHandler(ABC):
    """State machine for the graphql-transport-ws WebSocket sub-protocol.

    Transport-agnostic: concrete integrations subclass this and implement
    the abstract I/O primitives (``get_context``, ``get_root_value``,
    ``send_json``, ``close``, ``handle_request``). This base class drives
    the connection-init handshake, the subscribe/next/error/complete
    message flow, and the lifecycle of one asyncio task per operation.
    """

    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        connection_init_wait_timeout: timedelta,
    ):
        self.schema = schema
        self.debug = debug
        self.connection_init_wait_timeout = connection_init_wait_timeout
        # Watchdog that closes the socket if no connection_init arrives
        # within `connection_init_wait_timeout`.
        self.connection_init_timeout_task: Optional[asyncio.Task] = None
        self.connection_init_received = False
        self.connection_acknowledged = False
        # Per-operation state, keyed by the client-chosen operation id.
        self.subscriptions: Dict[str, AsyncGenerator] = {}
        self.tasks: Dict[str, asyncio.Task] = {}
        # Finished operation tasks parked here until reap_completed_tasks()
        # awaits them (surfacing any stray exceptions).
        self.completed_tasks: List[asyncio.Task] = []
        self.connection_params: Optional[Dict[str, Any]] = None

    @abstractmethod
    async def get_context(self) -> Any:
        """Return the operations context"""

    @abstractmethod
    async def get_root_value(self) -> Any:
        """Return the schemas root value"""

    @abstractmethod
    async def send_json(self, data: dict) -> None:
        """Send the data JSON encoded to the WebSocket client"""

    @abstractmethod
    async def close(self, code: int, reason: str) -> None:
        """Close the WebSocket with the passed code and reason"""

    @abstractmethod
    async def handle_request(self) -> Any:
        """Handle the request this instance was created for"""

    async def handle(self) -> Any:
        """Entry point: arm the init-timeout watchdog, then serve the request."""
        timeout_handler = self.handle_connection_init_timeout()
        self.connection_init_timeout_task = asyncio.create_task(timeout_handler)
        return await self.handle_request()

    async def handle_connection_init_timeout(self) -> None:
        """Close the connection (4408) if connection_init never arrived in time."""
        delay = self.connection_init_wait_timeout.total_seconds()
        await asyncio.sleep(delay=delay)
        if self.connection_init_received:
            return
        reason = "Connection initialisation timeout"
        await self.close(code=4408, reason=reason)

    async def handle_message(self, message: dict) -> None:
        """Parse a raw client message and dispatch it to the typed handler.

        Malformed or unknown messages are routed to handle_invalid_message,
        which closes the connection with code 4400.
        """
        handler: Callable
        handler_arg: Any
        try:
            message_type = message.pop("type")
            if message_type == ConnectionInitMessage.type:
                handler = self.handle_connection_init
                handler_arg = ConnectionInitMessage(**message)
            elif message_type == PingMessage.type:
                handler = self.handle_ping
                handler_arg = PingMessage(**message)
            elif message_type == PongMessage.type:
                handler = self.handle_pong
                handler_arg = PongMessage(**message)
            elif message_type == SubscribeMessage.type:
                handler = self.handle_subscribe
                payload = SubscribeMessagePayload(**message.pop("payload"))
                handler_arg = SubscribeMessage(payload=payload, **message)
            elif message_type == CompleteMessage.type:
                handler = self.handle_complete
                handler_arg = CompleteMessage(**message)
            else:
                handler = self.handle_invalid_message
                handler_arg = f"Unknown message type: {message_type}"
        except (KeyError, TypeError):
            # Missing "type"/"payload" keys, or unexpected fields rejected
            # by the dataclass constructors.
            handler = self.handle_invalid_message
            handler_arg = "Failed to parse message"
        await handler(handler_arg)
        await self.reap_completed_tasks()

    async def handle_connection_init(self, message: ConnectionInitMessage) -> None:
        """Validate the handshake payload, store it, and acknowledge (or close)."""
        if message.payload is not UNSET and not isinstance(message.payload, dict):
            await self.close(code=4400, reason="Invalid connection init payload")
            return
        self.connection_params = message.payload
        if self.connection_init_received:
            reason = "Too many initialisation requests"
            await self.close(code=4429, reason=reason)
            return
        self.connection_init_received = True
        await self.send_message(ConnectionAckMessage())
        self.connection_acknowledged = True

    async def handle_ping(self, message: PingMessage) -> None:
        """Answer a ping with a pong, as the protocol requires."""
        await self.send_message(PongMessage())

    async def handle_pong(self, message: PongMessage) -> None:
        # Unsolicited pongs are allowed and ignored.
        pass

    async def handle_subscribe(self, message: SubscribeMessage) -> None:
        """Validate a subscribe request and spawn a task running the operation."""
        if not self.connection_acknowledged:
            await self.close(code=4401, reason="Unauthorized")
            return
        try:
            graphql_document = parse(message.payload.query)
        except GraphQLSyntaxError as exc:
            await self.close(code=4400, reason=exc.message)
            return
        try:
            operation_type = get_operation_type(
                graphql_document, message.payload.operationName
            )
        except RuntimeError:
            await self.close(code=4400, reason="Can't get GraphQL operation type")
            return
        if message.id in self.subscriptions:
            # Protocol violation: operation ids must be unique per connection.
            reason = f"Subscriber for {message.id} already exists"
            await self.close(code=4409, reason=reason)
            return
        if self.debug:  # pragma: no cover
            pretty_print_graphql_operation(
                message.payload.operationName,
                message.payload.query,
                message.payload.variables,
            )
        context = await self.get_context()
        if isinstance(context, dict):
            context["connection_params"] = self.connection_params
        root_value = await self.get_root_value()
        # Get an AsyncGenerator yielding the results
        if operation_type == OperationType.SUBSCRIPTION:
            result_source = await self.schema.subscribe(
                query=message.payload.query,
                variable_values=message.payload.variables,
                operation_name=message.payload.operationName,
                context_value=context,
                root_value=root_value,
            )
        else:
            # create AsyncGenerator returning a single result
            async def get_result_source():
                yield await self.schema.execute(
                    query=message.payload.query,
                    variable_values=message.payload.variables,
                    context_value=context,
                    root_value=root_value,
                    operation_name=message.payload.operationName,
                )

            result_source = get_result_source()
        operation = Operation(self, message.id)
        # Handle initial validation errors
        if isinstance(result_source, GraphQLExecutionResult):
            assert result_source.errors
            payload = [format_graphql_error(result_source.errors[0])]
            await self.send_message(ErrorMessage(id=message.id, payload=payload))
            self.schema.process_errors(result_source.errors)
            return
        # Create task to handle this subscription, reserve the operation ID
        self.subscriptions[message.id] = result_source
        self.tasks[message.id] = asyncio.create_task(
            self.operation_task(result_source, operation)
        )

    async def operation_task(
        self, result_source: AsyncGenerator, operation: Operation
    ) -> None:
        """
        Operation task top level method. Cleans up and de-registers the operation
        once it is done.
        """
        try:
            await self.handle_async_results(result_source, operation)
        except BaseException:  # pragma: no cover
            # cleanup in case of something really unexpected
            # wait for generator to be closed to ensure that any existing
            # 'finally' statement is called
            result_source = self.subscriptions[operation.id]
            with suppress(RuntimeError):
                await result_source.aclose()
            del self.subscriptions[operation.id]
            del self.tasks[operation.id]
            raise
        else:
            # Normal completion: tell the client the operation is finished.
            await operation.send_message(CompleteMessage(id=operation.id))
        finally:
            # add this task to a list to be reaped later
            task = asyncio.current_task()
            assert task is not None
            self.completed_tasks.append(task)

    async def handle_async_results(
        self,
        result_source: AsyncGenerator,
        operation: Operation,
    ) -> None:
        """Stream results from the generator to the client as next/error messages."""
        try:
            async for result in result_source:
                if result.errors:
                    # First errored result terminates the whole operation.
                    error_payload = [format_graphql_error(err) for err in result.errors]
                    error_message = ErrorMessage(id=operation.id, payload=error_payload)
                    await operation.send_message(error_message)
                    self.schema.process_errors(result.errors)
                    return
                else:
                    next_payload = {"data": result.data}
                    next_message = NextMessage(id=operation.id, payload=next_payload)
                    await operation.send_message(next_message)
        except asyncio.CancelledError:
            # CancelledErrors are expected during task cleanup.
            return
        except Exception as error:
            # GraphQLErrors are handled by graphql-core and included in the
            # ExecutionResult
            error = GraphQLError(str(error), original_error=error)
            error_payload = [format_graphql_error(error)]
            error_message = ErrorMessage(id=operation.id, payload=error_payload)
            await operation.send_message(error_message)
            self.schema.process_errors([error])
            return

    def forget_id(self, id: str) -> None:
        # de-register the operation id making it immediately available
        # for re-use
        del self.subscriptions[id]
        del self.tasks[id]

    async def handle_complete(self, message: CompleteMessage) -> None:
        """Client asked to stop the operation: cancel and dispose of it."""
        await self.cleanup_operation(operation_id=message.id)

    async def handle_invalid_message(self, error_message: str) -> None:
        """Protocol violation: close the connection with code 4400."""
        await self.close(code=4400, reason=error_message)

    async def send_message(self, message: GraphQLTransportMessage) -> None:
        """Serialize a protocol message and push it to the client."""
        data = message.as_dict()
        await self.send_json(data)

    async def cleanup_operation(self, operation_id: str) -> None:
        """Cancel a running operation (if any) and close its result generator."""
        if operation_id not in self.subscriptions:
            return
        result_source = self.subscriptions.pop(operation_id)
        task = self.tasks.pop(operation_id)
        task.cancel()
        # Await the cancelled task so its cleanup runs; any exception it
        # raises (including CancelledError) is intentionally swallowed.
        with suppress(BaseException):
            await task
        # since python 3.8, generators cannot be reliably closed
        with suppress(RuntimeError):
            await result_source.aclose()

    async def reap_completed_tasks(self) -> None:
        """
        Await tasks that have completed
        """
        tasks, self.completed_tasks = self.completed_tasks, []
        for task in tasks:
            with suppress(BaseException):
                await task
class Operation:
    """Tracks one in-flight operation and enforces its state transitions.

    Guarantees that nothing is sent for an operation after its terminal
    (error/complete) message has gone out, and that the operation id is
    released before that terminal message is delivered.
    """

    __slots__ = ["handler", "id", "completed"]

    def __init__(self, handler: BaseGraphQLTransportWSHandler, id: str):
        self.handler = handler
        self.id = id
        self.completed = False

    async def send_message(self, message: GraphQLTransportMessage) -> None:
        """Forward *message* to the handler unless this operation is finished."""
        if self.completed:
            return
        is_terminal = isinstance(message, (CompleteMessage, ErrorMessage))
        if is_terminal:
            self.completed = True
            # Release the id *before* sending the final message so the
            # client may immediately resubscribe with the same id.
            self.handler.forget_id(self.id)
        await self.handler.send_message(message)
from __future__ import annotations
import asyncio
import json
import warnings
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type, Union
from django.core.exceptions import BadRequest, SuspiciousOperation
from django.core.serializers.json import DjangoJSONEncoder
from django.http import Http404, HttpResponseNotAllowed, JsonResponse
from django.http.response import HttpResponse
from django.template import RequestContext, Template
from django.template.exceptions import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.utils.decorators import classonlymethod, method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from strawberry.exceptions import MissingQueryError
from strawberry.file_uploads.utils import replace_placeholders_with_files
from strawberry.http import (
parse_query_params,
parse_request_data,
process_result,
)
from strawberry.schema.exceptions import InvalidOperationTypeError
from strawberry.types.graphql import OperationType
from strawberry.utils.graphiql import get_graphiql_html
from .context import StrawberryDjangoContext
if TYPE_CHECKING:
from django.http import HttpRequest
from strawberry.http import GraphQLHTTPResponse, GraphQLRequestData
from strawberry.types import ExecutionResult
from ..schema import BaseSchema
class TemporalHttpResponse(JsonResponse):
    """Placeholder response exposed to resolvers through the context.

    Headers, cookies and status set on it during execution are copied onto
    the real response by ``BaseView._create_response``. ``status_code``
    starts as None so the view can tell whether it was explicitly set.
    """

    status_code = None

    def __init__(self) -> None:
        # Serialize an empty JSON object as the initial body.
        super().__init__({})

    def __repr__(self) -> str:
        """Adopted from Django to handle `status_code=None`."""
        if self.status_code is not None:
            return super().__repr__()
        return "<{cls} status_code={status_code}{content_type}>".format(
            cls=self.__class__.__name__,
            status_code=self.status_code,
            content_type=self._content_type_for_repr,
        )
class BaseView(View):
    """Shared plumbing for the sync and async Strawberry Django views.

    Handles request-body parsing (JSON, multipart uploads, GET query
    strings), GraphiQL rendering and JSON response encoding.
    """

    subscriptions_enabled = False
    graphiql = True
    allow_queries_via_get = True
    schema: Optional[BaseSchema] = None
    # Deprecated customisation hooks; prefer overriding encode_json().
    json_encoder: Optional[Type[json.JSONEncoder]] = None
    json_dumps_params: Optional[Dict[str, Any]] = None

    def __init__(
        self,
        schema: BaseSchema,
        graphiql: bool = True,
        allow_queries_via_get: bool = True,
        subscriptions_enabled: bool = False,
        **kwargs: Any,
    ):
        self.schema = schema
        self.graphiql = graphiql
        self.allow_queries_via_get = allow_queries_via_get
        self.subscriptions_enabled = subscriptions_enabled
        super().__init__(**kwargs)
        # NOTE(review): kwargs still contains the deprecated keys when handed
        # to super().__init__() above; presumably Django's View sets them as
        # attributes first and the pops below read back the same values —
        # confirm before reordering.
        self.json_dumps_params = kwargs.pop("json_dumps_params", self.json_dumps_params)
        if self.json_dumps_params:
            warnings.warn(
                "json_dumps_params is deprecated, override encode_json instead",
                DeprecationWarning,
                stacklevel=2,
            )
            # Using json_dumps_params implies Django's JSON encoder (may be
            # overridden by an explicit json_encoder kwarg just below).
            self.json_encoder = DjangoJSONEncoder
        self.json_encoder = kwargs.pop("json_encoder", self.json_encoder)
        if self.json_encoder is not None:
            warnings.warn(
                "json_encoder is deprecated, override encode_json instead",
                DeprecationWarning,
                stacklevel=2,
            )

    def parse_body(self, request: HttpRequest) -> Dict[str, Any]:
        """Extract the GraphQL request dict from JSON, multipart or GET data.

        May raise json.JSONDecodeError on malformed JSON, or KeyError when a
        multipart file placeholder cannot be resolved.
        """
        content_type = request.content_type or ""
        if "application/json" in content_type:
            return json.loads(request.body)
        elif content_type.startswith("multipart/form-data"):
            # GraphQL multipart request spec: "operations" carries the
            # request, "map" maps form file fields into its variables.
            data = json.loads(request.POST.get("operations", "{}"))
            files_map = json.loads(request.POST.get("map", "{}"))
            data = replace_placeholders_with_files(data, files_map, request.FILES)
            return data
        elif request.method.lower() == "get" and request.META.get("QUERY_STRING"):
            return parse_query_params(request.GET.copy())
        return json.loads(request.body)

    def is_request_allowed(self, request: HttpRequest) -> bool:
        """Only GET and POST are supported."""
        return request.method.lower() in ("get", "post")

    def should_render_graphiql(self, request: HttpRequest) -> bool:
        """Serve GraphiQL for plain browser GETs: no query string, HTML accepted."""
        if request.method.lower() != "get":
            return False
        if self.allow_queries_via_get and request.META.get("QUERY_STRING"):
            return False
        return any(
            supported_header in request.META.get("HTTP_ACCEPT", "")
            for supported_header in ("text/html", "*/*")
        )

    def get_request_data(self, request: HttpRequest) -> GraphQLRequestData:
        """Parse the body, translating low-level failures into HTTP 400s."""
        try:
            data = self.parse_body(request)
        except json.decoder.JSONDecodeError:
            raise SuspiciousOperation("Unable to parse request body as JSON")
        except KeyError:
            raise BadRequest("File(s) missing in form data")
        return parse_request_data(data)

    def _render_graphiql(self, request: HttpRequest, context=None) -> TemplateResponse:
        """Serve the GraphiQL page: project template if present, else built-in."""
        if not self.graphiql:
            raise Http404()
        try:
            template = Template(render_to_string("graphql/graphiql.html"))
        except TemplateDoesNotExist:
            template = Template(get_graphiql_html(replace_variables=False))
        context = context or {}
        context.update({"SUBSCRIPTION_ENABLED": json.dumps(self.subscriptions_enabled)})
        response = TemplateResponse(request=request, template=None, context=context)
        # Render eagerly with the Template object chosen above.
        response.content = template.render(RequestContext(request, context))
        return response

    def _create_response(
        self, response_data: GraphQLHTTPResponse, sub_response: HttpResponse
    ) -> HttpResponse:
        """Build the final JSON response, merging headers/status/cookies
        collected on the temporal sub-response during execution."""
        data = self.encode_json(response_data)
        response = HttpResponse(
            data,
            content_type="application/json",
        )
        for name, value in sub_response.items():
            response[name] = value
        if sub_response.status_code is not None:
            response.status_code = sub_response.status_code
        for name, value in sub_response.cookies.items():
            response.cookies[name] = value
        return response

    def encode_json(self, response_data: GraphQLHTTPResponse) -> str:
        """Serialize the GraphQL result, honouring the deprecated encoder hooks."""
        if self.json_dumps_params:
            assert self.json_encoder
            return json.dumps(
                response_data, cls=self.json_encoder, **self.json_dumps_params
            )
        if self.json_encoder:
            return json.dumps(response_data, cls=self.json_encoder)
        return json.dumps(response_data)
class GraphQLView(BaseView):
    """Synchronous Django view executing GraphQL over HTTP."""

    def get_root_value(self, request: HttpRequest) -> Any:
        """Hook: root value passed to resolvers (default None)."""
        return None

    def get_context(self, request: HttpRequest, response: HttpResponse) -> Any:
        """Hook: execution context bundling the request and mutable response."""
        return StrawberryDjangoContext(request=request, response=response)

    def process_result(
        self, request: HttpRequest, result: ExecutionResult
    ) -> GraphQLHTTPResponse:
        """Hook: convert the execution result into the HTTP payload dict."""
        return process_result(result)

    @method_decorator(csrf_exempt)
    def dispatch(
        self, request, *args, **kwargs
    ) -> Union[HttpResponseNotAllowed, TemplateResponse, HttpResponse]:
        """Serve GraphiQL, or execute the operation and return JSON."""
        if not self.is_request_allowed(request):
            return HttpResponseNotAllowed(
                ["GET", "POST"], "GraphQL only supports GET and POST requests."
            )
        if self.should_render_graphiql(request):
            return self._render_graphiql(request)
        request_data = self.get_request_data(request)
        # Resolvers may set headers/cookies/status on this placeholder;
        # they are merged into the final response below.
        sub_response = TemporalHttpResponse()
        context = self.get_context(request, response=sub_response)
        root_value = self.get_root_value(request)
        method = request.method
        # GET is restricted to queries (and optionally disabled entirely).
        allowed_operation_types = OperationType.from_http(method)
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}
        assert self.schema
        try:
            result = self.schema.execute_sync(
                request_data.query,
                root_value=root_value,
                variable_values=request_data.variables,
                context_value=context,
                operation_name=request_data.operation_name,
                allowed_operation_types=allowed_operation_types,
            )
        except InvalidOperationTypeError as e:
            raise BadRequest(e.as_http_error_reason(method)) from e
        except MissingQueryError:
            raise SuspiciousOperation("No GraphQL query found in the request")
        response_data = self.process_result(request=request, result=result)
        return self._create_response(
            response_data=response_data, sub_response=sub_response
        )
class AsyncGraphQLView(BaseView):
    """Asynchronous Django view executing GraphQL over HTTP."""

    @classonlymethod
    def as_view(cls, **initkwargs) -> Callable[..., HttpResponse]:
        # This code tells django that this view is async, see docs here:
        # https://docs.djangoproject.com/en/3.1/topics/async/#async-views
        view = super().as_view(**initkwargs)
        view._is_coroutine = asyncio.coroutines._is_coroutine  # type: ignore[attr-defined] # noqa: E501
        return view

    @method_decorator(csrf_exempt)
    async def dispatch(
        self, request, *args, **kwargs
    ) -> Union[HttpResponseNotAllowed, TemplateResponse, HttpResponse]:
        """Async twin of GraphQLView.dispatch: GraphiQL or JSON execution."""
        if not self.is_request_allowed(request):
            return HttpResponseNotAllowed(
                ["GET", "POST"], "GraphQL only supports GET and POST requests."
            )
        if self.should_render_graphiql(request):
            return self._render_graphiql(request)
        request_data = self.get_request_data(request)
        # Resolvers may set headers/cookies/status on this placeholder.
        sub_response = TemporalHttpResponse()
        context = await self.get_context(request, response=sub_response)
        root_value = await self.get_root_value(request)
        method = request.method
        # GET is restricted to queries (and optionally disabled entirely).
        allowed_operation_types = OperationType.from_http(method)
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}
        assert self.schema
        try:
            result = await self.schema.execute(
                request_data.query,
                root_value=root_value,
                variable_values=request_data.variables,
                context_value=context,
                operation_name=request_data.operation_name,
                allowed_operation_types=allowed_operation_types,
            )
        except InvalidOperationTypeError as e:
            raise BadRequest(e.as_http_error_reason(method)) from e
        except MissingQueryError:
            raise SuspiciousOperation("No GraphQL query found in the request")
        response_data = await self.process_result(request=request, result=result)
        return self._create_response(
            response_data=response_data, sub_response=sub_response
        )

    async def get_root_value(self, request: HttpRequest) -> Any:
        """Hook: root value passed to resolvers (default None)."""
        return None

    async def get_context(self, request: HttpRequest, response: HttpResponse) -> Any:
        """Hook: execution context bundling the request and mutable response."""
        return StrawberryDjangoContext(request=request, response=response)

    async def process_result(
        self, request: HttpRequest, result: ExecutionResult
    ) -> GraphQLHTTPResponse:
        """Hook: convert the execution result into the HTTP payload dict."""
        return process_result(result)
from __future__ import annotations
import importlib
import inspect
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Type
import click
from strawberry.cli.utils import load_schema
from strawberry.codegen import QueryCodegen, QueryCodegenPlugin
if TYPE_CHECKING:
from strawberry.codegen import CodegenResult
def _is_codegen_plugin(obj: object) -> bool:
return (
inspect.isclass(obj)
and issubclass(obj, QueryCodegenPlugin)
and obj is not QueryCodegenPlugin
)
def _import_plugin(plugin: str) -> Optional[Type[QueryCodegenPlugin]]:
module_name = plugin
symbol_name: Optional[str] = None
if ":" in plugin:
module_name, symbol_name = plugin.split(":", 1)
try:
module = importlib.import_module(module_name)
except ModuleNotFoundError:
return None
if symbol_name:
obj = getattr(module, symbol_name)
assert _is_codegen_plugin(obj)
return obj
else:
symbols = {
key: value
for key, value in module.__dict__.items()
if not key.startswith("__")
}
if "__all__" in module.__dict__:
symbols = {
name: symbol
for name, symbol in symbols.items()
if name in module.__dict__["__all__"]
}
for obj in symbols.values():
if _is_codegen_plugin(obj):
return obj
return None
def _load_plugin(plugin_path: str) -> Type[QueryCodegenPlugin]:
    """Locate a plugin class, falling back to the built-in plugin package.

    Bare names (no dot) are also looked up under
    ``strawberry.codegen.plugins``; failure raises a ClickException.
    """
    plugin = _import_plugin(plugin_path)
    if plugin is not None:
        return plugin
    if "." not in plugin_path:
        # Bare name: retry inside the bundled plugins package.
        plugin = _import_plugin(f"strawberry.codegen.plugins.{plugin_path}")
        if plugin is not None:
            return plugin
    raise click.ClickException(f"Plugin {plugin_path} not found")
def _load_plugins(plugins: List[str]) -> List[QueryCodegenPlugin]:
    """Resolve and instantiate one plugin per path in *plugins*."""
    return [_load_plugin(path)() for path in plugins]
class ConsolePlugin(QueryCodegenPlugin):
    """Codegen plugin that reports progress on the terminal and writes output."""

    def __init__(
        self, query: Path, output_dir: Path, plugins: List[QueryCodegenPlugin]
    ):
        self.query = query
        self.output_dir = output_dir
        self.plugins = plugins

    def on_start(self) -> None:
        """Print the experimental-feature warning and a run summary."""
        warning = (
            "The codegen is experimental. Please submit any bug at "
            "https://github.com/strawberry-graphql/strawberry\n"
        )
        click.echo(click.style(warning, fg="yellow", bold=True))

        names = ", ".join(plugin.__class__.__name__ for plugin in self.plugins)
        summary = f"Generating code for {self.query} using {names} plugin(s)"
        click.echo(click.style(summary, fg="green"))

    def on_end(self, result: CodegenResult) -> None:
        """Write the generated files to disk and report how many were created."""
        self.output_dir.mkdir(parents=True, exist_ok=True)
        result.write(self.output_dir)

        message = f"Generated {len(result.files)} files in {self.output_dir}"
        click.echo(click.style(message, fg="green"))
@click.command(short_help="Generate code from a query")
@click.option("--plugins", "-p", "selected_plugins", multiple=True, required=True)
@click.option("--cli-plugin", "cli_plugin", required=False)
@click.option(
    "--output-dir",
    "-o",
    default=".",
    help="Output directory",
    type=click.Path(path_type=Path, exists=False, dir_okay=True, file_okay=False),
)
@click.option("--schema", type=str, required=True)
@click.argument("query", type=click.Path(path_type=Path, exists=True))
@click.option(
    "--app-dir",
    default=".",
    type=str,
    show_default=True,
    help=(
        "Look for the module in the specified directory, by adding this to the "
        "PYTHONPATH. Defaults to the current working directory. "
        "Works the same as `--app-dir` in uvicorn."
    ),
)
def codegen(
    schema: str,
    query: Path,
    app_dir: str,
    output_dir: Path,
    selected_plugins: List[str],
    cli_plugin: Optional[str] = None,
) -> None:
    # Deliberately comment-only docs here: click would surface a docstring
    # as the command's help text.
    # Import the strawberry.Schema instance referenced by `schema`.
    schema_symbol = load_schema(schema, app_dir)
    # Progress reporting defaults to ConsolePlugin; --cli-plugin swaps it.
    console_plugin = _load_plugin(cli_plugin) if cli_plugin else ConsolePlugin
    plugins = _load_plugins(selected_plugins)
    # NOTE(review): a custom cli plugin class is instantiated with the same
    # (query, output_dir, plugins) arguments as ConsolePlugin — confirm that
    # custom plugins accept this signature.
    plugins.append(console_plugin(query, output_dir, plugins))
    code_generator = QueryCodegen(schema_symbol, plugins=plugins)
    code_generator.run(query.read_text())
import os
import sys
import click
from strawberry.cli.constants import (
DEBUG_SERVER_LOG_OPERATIONS,
DEBUG_SERVER_SCHEMA_ENV_VAR_KEY,
)
from strawberry.cli.utils import load_schema
@click.command("server", short_help="Starts debug server")
@click.argument("schema", type=str)
@click.option("-h", "--host", default="0.0.0.0", type=str)
@click.option("-p", "--port", default=8000, type=int)
@click.option(
    "--log-level",
    default="error",
    type=click.Choice(["debug", "info", "warning", "error"], case_sensitive=False),
    help="passed to uvicorn to determine the log level",
)
@click.option(
    "--app-dir",
    default=".",
    type=str,
    show_default=True,
    help=(
        "Look for the module in the specified directory, by adding this to the "
        "PYTHONPATH. Defaults to the current working directory. "
        "Works the same as `--app-dir` in uvicorn."
    ),
)
@click.option(
    "--log-operations",
    default=True,
    type=bool,
    show_default=True,
    help="Log GraphQL operations",
)
def server(schema, host, port, log_level, app_dir, log_operations) -> None:
    # Comment-only docs: click would surface a docstring as help text.
    # Make the user's project importable before loading anything from it.
    sys.path.insert(0, app_dir)
    try:
        import starlette  # noqa: F401
        import uvicorn
    except ImportError:
        message = (
            "The debug server requires additional packages, install them by running:\n"
            "pip install 'strawberry-graphql[debug-server]'"
        )
        raise click.ClickException(message)
    # Fail fast on a bad schema path; the debug-server app presumably
    # re-reads these env vars to locate the schema, since uvicorn's
    # --reload runs the app in fresh worker processes.
    load_schema(schema, app_dir=app_dir)
    os.environ[DEBUG_SERVER_SCHEMA_ENV_VAR_KEY] = schema
    os.environ[DEBUG_SERVER_LOG_OPERATIONS] = str(log_operations)
    app = "strawberry.cli.debug_server:app"
    # Windows doesn't support UTF-8 by default
    endl = " 🍓\n" if sys.platform != "win32" else "\n"
    print(f"Running strawberry on http://{host}:{port}/graphql", end=endl)  # noqa: T201
    uvicorn.run(
        app,
        host=host,
        port=port,
        log_level=log_level,
        reload=True,
        reload_dirs=[app_dir],
    )
from __future__ import annotations
import json
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Coroutine, Dict, List, Mapping, Optional, Union
from typing_extensions import Literal, TypedDict
if TYPE_CHECKING:
from graphql import GraphQLFormattedError
@dataclass
class Response:
    """Decoded GraphQL HTTP response returned by the test client."""

    # Formatted GraphQL errors, or None when execution succeeded.
    errors: Optional[List[GraphQLFormattedError]]
    # The "data" portion of the execution result.
    data: Optional[Dict[str, object]]
    extensions: Optional[Dict[str, object]]
class Body(TypedDict, total=False):
    """Shape of a standard (non-multipart) GraphQL request body."""

    query: str
    variables: Optional[Dict[str, object]]
class BaseGraphQLTestClient(ABC):
    """Transport-agnostic GraphQL client for use in test suites.

    Subclasses supply :meth:`request` for a concrete HTTP client; this base
    class assembles plain-JSON and multipart (file upload) request bodies
    and decodes the server's reply.
    """

    def __init__(self, client, url: str = "/graphql/"):
        self._client = client
        self.url = url

    def query(
        self,
        query: str,
        variables: Optional[Dict[str, Mapping]] = None,
        headers: Optional[Dict[str, object]] = None,
        asserts_errors: Optional[bool] = True,
        files: Optional[Dict[str, object]] = None,
    ) -> Union[Coroutine[Any, Any, Response], Response]:
        """Execute *query* and return the decoded :class:`Response`.

        When ``asserts_errors`` is true (the default), the call fails the
        test immediately if the server reported any GraphQL errors.
        """
        body = self._build_body(query, variables, files)
        raw = self.request(body, headers, files)
        decoded = self._decode(raw, type="multipart" if files else "json")

        response = Response(
            errors=decoded.get("errors"),
            data=decoded.get("data"),
            extensions=decoded.get("extensions"),
        )
        if asserts_errors:
            assert response.errors is None
        return response

    @abstractmethod
    def request(
        self,
        body: Dict[str, object],
        headers: Optional[Dict[str, object]] = None,
        files: Optional[Dict[str, object]] = None,
    ) -> Any:
        """Perform the actual HTTP request; implemented per transport."""
        raise NotImplementedError

    def _build_body(
        self,
        query: str,
        variables: Optional[Dict[str, Mapping]] = None,
        files: Optional[Dict[str, object]] = None,
    ) -> Dict[str, object]:
        """Build the request body.

        With files, the GraphQL multipart request layout is used: the JSON
        body goes into an ``operations`` field and ``map`` links form file
        fields to variable paths.
        """
        body: Dict[str, object] = {"query": query}
        if variables:
            body["variables"] = variables
        if files:
            assert variables is not None
            assert files is not None
            file_map = BaseGraphQLTestClient._build_multipart_file_map(
                variables, files
            )
            body = {
                "operations": json.dumps(body),
                "map": json.dumps(file_map),
                **files,
            }
        return body

    @staticmethod
    def _build_multipart_file_map(
        variables: Dict[str, Mapping], files: Dict[str, object]
    ) -> Dict[str, List[str]]:
        """Map uploaded form fields onto the variable paths they populate.

        Example usages:
        >>> _build_multipart_file_map(
        >>>     variables={"textFile": None}, files={"textFile": f}
        >>> )
        ... {"textFile": ["variables.textFile"]}

        A list variable enumerates its files by position:
        >>> _build_multipart_file_map(
        >>>     variables={"files": [None, None]},
        >>>     files={"file1": file1, "file2": file2},
        >>> )
        ... {"file1": ["variables.files.0"], "file2": ["variables.files.1"]}

        A nested (folder) variable contributes its key to the path:
        >>> _build_multipart_file_map(
        >>>     variables={"folder": {"files": [None, None]}},
        >>>     files={"file1": file1, "file2": file2},
        >>> )
        ... {
        ...     "file1": ["variables.folder.files.0"],
        ...     "file2": ["variables.folder.files.1"],
        ... }

        Lists of files and plain values may be mixed:
        >>> _build_multipart_file_map(
        >>>     variables={"files": [None, None], "textFile": None},
        >>>     files={"file1": file1, "file2": file2, "textFile": file3},
        >>> )
        ... {
        ...     "file1": ["variables.files.0"],
        ...     "file2": ["variables.files.1"],
        ...     "textFile": ["variables.textFile"],
        ... }
        """
        mapping: Dict[str, List[str]] = {}
        for var_name, value in variables.items():
            path = var_name
            # A dict value is a "folder": descend one level and extend the
            # variable path with the folder's (single) key.
            if isinstance(value, dict):
                folder_key = list(value.keys())[0]
                path += f".{folder_key}"
                value = value[folder_key]
            if isinstance(value, list):
                # Lists consume form fields in order; work on a copy so the
                # caller's `files` dict is left untouched.
                remaining = files.copy()
                for position, _ in enumerate(value):
                    field_name = list(remaining.keys())[0]
                    remaining.pop(field_name)
                    mapping.setdefault(field_name, [])
                    mapping[field_name].append(f"variables.{path}.{position}")
            else:
                mapping[var_name] = [f"variables.{path}"]
        # Variables may mix files with plain data; keep only entries that
        # correspond to an actual uploaded field. This cannot be done
        # earlier because plain values can sit inside file lists or folders.
        return {name: paths for name, paths in mapping.items() if name in files}

    def _decode(self, response: Any, type: Literal["multipart", "json"]):
        """Decode the transport response into a plain dict."""
        if type == "multipart":
            return json.loads(response.content.decode())
        return response.json()
from __future__ import annotations
import json
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Optional, Union
from starlette.websockets import WebSocket
from strawberry.asgi.handlers import (
GraphQLTransportWSHandler,
GraphQLWSHandler,
HTTPHandler,
)
from strawberry.http import process_result
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL, GRAPHQL_WS_PROTOCOL
if TYPE_CHECKING:
from starlette.requests import Request
from starlette.responses import Response
from starlette.types import Receive, Scope, Send
from strawberry.http import GraphQLHTTPResponse
from strawberry.schema import BaseSchema
from strawberry.types import ExecutionResult
class GraphQL:
graphql_transport_ws_handler_class = GraphQLTransportWSHandler
graphql_ws_handler_class = GraphQLWSHandler
http_handler_class = HTTPHandler
    def __init__(
        self,
        schema: BaseSchema,
        graphiql: bool = True,
        allow_queries_via_get: bool = True,
        keep_alive: bool = False,
        keep_alive_interval: float = 1,
        debug: bool = False,
        subscription_protocols=(GRAPHQL_TRANSPORT_WS_PROTOCOL, GRAPHQL_WS_PROTOCOL),
        connection_init_wait_timeout: timedelta = timedelta(minutes=1),
    ) -> None:
        """Configure the ASGI GraphQL application.

        ``keep_alive``/``keep_alive_interval`` are used by the legacy
        graphql-ws handler only; ``connection_init_wait_timeout`` applies to
        the graphql-transport-ws handler (see ``__call__``).
        """
        self.schema = schema
        self.graphiql = graphiql
        self.allow_queries_via_get = allow_queries_via_get
        self.keep_alive = keep_alive
        self.keep_alive_interval = keep_alive_interval
        self.debug = debug
        self.protocols = subscription_protocols
        self.connection_init_wait_timeout = connection_init_wait_timeout
async def __call__(self, scope: Scope, receive: Receive, send: Send):
if scope["type"] == "http":
await self.http_handler_class(
schema=self.schema,
graphiql=self.graphiql,
allow_queries_via_get=self.allow_queries_via_get,
debug=self.debug,
get_context=self.get_context,
get_root_value=self.get_root_value,
process_result=self.process_result,
encode_json=self.encode_json,
).handle(scope=scope, receive=receive, send=send)
elif scope["type"] == "websocket":
ws = WebSocket(scope=scope, receive=receive, send=send)
preferred_protocol = self.pick_preferred_protocol(ws)
if preferred_protocol == GRAPHQL_TRANSPORT_WS_PROTOCOL:
await self.graphql_transport_ws_handler_class(
schema=self.schema,
debug=self.debug,
connection_init_wait_timeout=self.connection_init_wait_timeout,
get_context=self.get_context,
get_root_value=self.get_root_value,
ws=ws,
).handle()
elif preferred_protocol == GRAPHQL_WS_PROTOCOL:
await self.graphql_ws_handler_class(
schema=self.schema,
debug=self.debug,
keep_alive=self.keep_alive,
keep_alive_interval=self.keep_alive_interval,
get_context=self.get_context,
get_root_value=self.get_root_value,
ws=ws,
).handle()
else:
# Subprotocol not acceptable
await ws.close(code=4406)
else: # pragma: no cover
raise ValueError("Unknown scope type: {!r}".format(scope["type"]))
def pick_preferred_protocol(self, ws: WebSocket) -> Optional[str]:
protocols = ws["subprotocols"]
intersection = set(protocols) & set(self.protocols)
sorted_intersection = sorted(intersection, key=protocols.index)
return next(iter(sorted_intersection), None)
async def get_root_value(self, request: Union[Request, WebSocket]) -> Optional[Any]:
return None
async def get_context(
self,
request: Union[Request, WebSocket],
response: Optional[Response] = None,
) -> Optional[Any]:
return {"request": request, "response": response}
async def process_result(
self, request: Request, result: ExecutionResult
) -> GraphQLHTTPResponse:
return process_result(result)
def encode_json(self, response_data: GraphQLHTTPResponse) -> str:
return json.dumps(response_data) | 564bff00ff-strawberry-graphql | /564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/asgi/__init__.py | __init__.py |
from __future__ import annotations
from contextlib import suppress
from typing import TYPE_CHECKING, Any, Optional
from starlette.websockets import WebSocketDisconnect, WebSocketState
from strawberry.subscriptions import GRAPHQL_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_ws.handlers import BaseGraphQLWSHandler
if TYPE_CHECKING:
from starlette.websockets import WebSocket
from strawberry.schema import BaseSchema
from strawberry.subscriptions.protocols.graphql_ws.types import OperationMessage
class GraphQLWSHandler(BaseGraphQLWSHandler):
    """ASGI binding for the legacy ``graphql-ws`` subprotocol."""

    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        keep_alive: bool,
        keep_alive_interval: float,
        get_context,
        get_root_value,
        ws: WebSocket,
    ):
        super().__init__(schema, debug, keep_alive, keep_alive_interval)
        self._ws = ws
        self._get_root_value = get_root_value
        self._get_context = get_context

    async def get_context(self) -> Any:
        """Build the GraphQL context, exposing the websocket as the request."""
        return await self._get_context(request=self._ws)

    async def get_root_value(self) -> Any:
        return await self._get_root_value(request=self._ws)

    async def send_json(self, data: OperationMessage) -> None:
        await self._ws.send_json(data)

    async def close(self, code: int = 1000, reason: Optional[str] = None) -> None:
        # The ASGI reference has no close *reason* yet, so only the code is sent.
        await self._ws.close(code=code)

    async def handle_request(self) -> Any:
        """Accept the socket and pump messages until disconnect."""
        await self._ws.accept(subprotocol=GRAPHQL_WS_PROTOCOL)

        try:
            while True:
                if self._ws.application_state == WebSocketState.DISCONNECTED:
                    break
                try:
                    payload = await self._ws.receive_json()
                except KeyError:
                    # Non-text frames surface as KeyError; skip them silently.
                    continue
                await self.handle_message(payload)
        except WebSocketDisconnect:  # pragma: no cover
            pass
        finally:
            keep_alive_task = self.keep_alive_task
            if keep_alive_task:
                keep_alive_task.cancel()
                with suppress(BaseException):
                    await keep_alive_task

            for operation_id in list(self.subscriptions.keys()):
                await self.cleanup_operation(operation_id)
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from starlette.websockets import WebSocketDisconnect, WebSocketState
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_transport_ws.handlers import (
BaseGraphQLTransportWSHandler,
)
if TYPE_CHECKING:
from datetime import timedelta
from starlette.websockets import WebSocket
from strawberry.schema import BaseSchema
class GraphQLTransportWSHandler(BaseGraphQLTransportWSHandler):
    """ASGI binding for the ``graphql-transport-ws`` subprotocol."""

    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        connection_init_wait_timeout: timedelta,
        get_context,
        get_root_value,
        ws: WebSocket,
    ):
        super().__init__(schema, debug, connection_init_wait_timeout)
        self._ws = ws
        self._get_root_value = get_root_value
        self._get_context = get_context

    async def get_context(self) -> Any:
        """Build the GraphQL context, exposing the websocket as the request."""
        return await self._get_context(request=self._ws)

    async def get_root_value(self) -> Any:
        return await self._get_root_value(request=self._ws)

    async def send_json(self, data: dict) -> None:
        await self._ws.send_json(data)

    async def close(self, code: int, reason: str) -> None:
        await self._ws.close(code=code, reason=reason)

    async def handle_request(self) -> None:
        """Accept the socket and pump messages until disconnect."""
        await self._ws.accept(subprotocol=GRAPHQL_TRANSPORT_WS_PROTOCOL)

        try:
            while True:
                if self._ws.application_state == WebSocketState.DISCONNECTED:
                    break
                try:
                    payload = await self._ws.receive_json()
                except KeyError:
                    # Non-text frames surface as KeyError from receive_json.
                    await self.handle_invalid_message(
                        "WebSocket message type must be text"
                    )
                else:
                    await self.handle_message(payload)
        except WebSocketDisconnect:  # pragma: no cover
            pass
        finally:
            for operation_id in list(self.subscriptions.keys()):
                await self.cleanup_operation(operation_id)
            await self.reap_completed_tasks()
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Optional
from starlette import status
from starlette.requests import Request
from starlette.responses import HTMLResponse, PlainTextResponse, Response
from strawberry.exceptions import MissingQueryError
from strawberry.file_uploads.utils import replace_placeholders_with_files
from strawberry.http import parse_query_params, parse_request_data
from strawberry.schema.exceptions import InvalidOperationTypeError
from strawberry.types.graphql import OperationType
from strawberry.utils.debug import pretty_print_graphql_operation
from strawberry.utils.graphiql import get_graphiql_html
if TYPE_CHECKING:
from starlette.types import Receive, Scope, Send
from strawberry.schema import BaseSchema
from strawberry.types.execution import ExecutionResult
class HTTPHandler:
    """Serves GraphQL over HTTP for the ASGI ``GraphQL`` application.

    Accepts queries via GET query-strings, JSON POST bodies and
    ``multipart/form-data`` POSTs (file uploads), and optionally renders
    GraphiQL for browser GET requests.
    """

    def __init__(
        self,
        schema: BaseSchema,
        graphiql: bool,
        allow_queries_via_get: bool,
        debug: bool,
        get_context,
        get_root_value,
        process_result,
        encode_json,
    ):
        self.schema = schema
        self.graphiql = graphiql
        self.allow_queries_via_get = allow_queries_via_get
        self.debug = debug
        self.get_context = get_context
        self.get_root_value = get_root_value
        self.process_result = process_result
        self.encode_json = encode_json

    async def handle(self, scope: Scope, receive: Receive, send: Send) -> None:
        """ASGI entry point: execute the request and send the final response."""
        request = Request(scope=scope, receive=receive)
        root_value = await self.get_root_value(request)

        # A mutable "sub response" is exposed to resolvers via the context so
        # they can set headers / status / background tasks on the final reply.
        sub_response = Response()
        sub_response.status_code = None  # type: ignore
        del sub_response.headers["content-length"]

        context = await self.get_context(request=request, response=sub_response)

        response = await self.get_http_response(
            request=request,
            execute=self.execute,
            process_result=self.process_result,
            root_value=root_value,
            context=context,
        )

        # Merge anything resolvers put on the sub response into the real one.
        response.headers.raw.extend(sub_response.headers.raw)

        if sub_response.background:
            response.background = sub_response.background

        if sub_response.status_code:
            response.status_code = sub_response.status_code

        await response(scope, receive, send)

    async def get_http_response(
        self,
        request: Request,
        execute: Callable,
        process_result: Callable,
        root_value: Optional[Any],
        context: Optional[Any],
    ) -> Response:
        """Parse the request, run the query and build the HTTP response.

        Returns a 4xx ``PlainTextResponse`` for malformed input, the GraphiQL
        page for plain browser GETs, or the JSON-encoded execution result.
        """
        method = request.method

        if method == "GET":
            if request.query_params:
                try:
                    # Query, variables and operationName come from the URL.
                    data = parse_query_params(request.query_params._dict)
                except json.JSONDecodeError:
                    return PlainTextResponse(
                        "Unable to parse request body as JSON",
                        status_code=status.HTTP_400_BAD_REQUEST,
                    )

            elif self.should_render_graphiql(request):
                return self.get_graphiql_response()
            else:
                return HTMLResponse(status_code=status.HTTP_404_NOT_FOUND)

        elif method == "POST":
            content_type = request.headers.get("Content-Type", "")
            if "application/json" in content_type:
                try:
                    data = await request.json()
                except json.JSONDecodeError:
                    return PlainTextResponse(
                        "Unable to parse request body as JSON",
                        status_code=status.HTTP_400_BAD_REQUEST,
                    )
            elif content_type.startswith("multipart/form-data"):
                # GraphQL multipart request spec: "operations" holds the
                # request JSON, "map" links form files into its placeholders.
                multipart_data = await request.form()
                try:
                    operations_text = multipart_data.get("operations", "{}")
                    operations = json.loads(operations_text)  # type: ignore
                    files_map = json.loads(multipart_data.get("map", "{}"))  # type: ignore # noqa: E501
                except json.JSONDecodeError:
                    return PlainTextResponse(
                        "Unable to parse request body as JSON",
                        status_code=status.HTTP_400_BAD_REQUEST,
                    )

                try:
                    data = replace_placeholders_with_files(
                        operations, files_map, multipart_data
                    )
                except KeyError:
                    return PlainTextResponse(
                        "File(s) missing in form data",
                        status_code=status.HTTP_400_BAD_REQUEST,
                    )
            else:
                return PlainTextResponse(
                    "Unsupported Media Type",
                    status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
                )
        else:
            return PlainTextResponse(
                "Method Not Allowed",
                status_code=status.HTTP_405_METHOD_NOT_ALLOWED,
            )

        try:
            request_data = parse_request_data(data)
        except json.JSONDecodeError:
            return PlainTextResponse(
                "Unable to parse request body as JSON",
                status_code=status.HTTP_400_BAD_REQUEST,
            )

        allowed_operation_types = OperationType.from_http(method)

        # Optionally forbid queries over GET (e.g. to prevent CSRF-style use).
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}

        try:
            result = await execute(
                request_data.query,
                variables=request_data.variables,
                context=context,
                operation_name=request_data.operation_name,
                root_value=root_value,
                allowed_operation_types=allowed_operation_types,
            )
        except InvalidOperationTypeError as e:
            return PlainTextResponse(
                e.as_http_error_reason(method),
                status_code=status.HTTP_400_BAD_REQUEST,
            )
        except MissingQueryError:
            return PlainTextResponse(
                "No GraphQL query found in the request",
                status_code=status.HTTP_400_BAD_REQUEST,
            )

        response_data = await process_result(request=request, result=result)

        return Response(
            self.encode_json(response_data),
            status_code=status.HTTP_200_OK,
            media_type="application/json",
        )

    def should_render_graphiql(self, request: Request) -> bool:
        """Render GraphiQL only when enabled and the client accepts HTML."""
        if not self.graphiql:
            return False

        return any(
            supported_header in request.headers.get("accept", "")
            for supported_header in ("text/html", "*/*")
        )

    def get_graphiql_response(self) -> HTMLResponse:
        """Return the bundled GraphiQL page."""
        html = get_graphiql_html()

        return HTMLResponse(html)

    async def execute(
        self,
        query: str,
        variables: Optional[Dict[str, Any]] = None,
        context: Any = None,
        operation_name: Optional[str] = None,
        root_value: Any = None,
        allowed_operation_types: Optional[Iterable[OperationType]] = None,
    ) -> ExecutionResult:
        """Run the query against the schema (logging it first in debug mode)."""
        if self.debug:
            pretty_print_graphql_operation(operation_name, query, variables)

        return await self.schema.execute(
            query,
            root_value=root_value,
            variable_values=variables,
            operation_name=operation_name,
            context_value=context,
            allowed_operation_types=allowed_operation_types,
        )
from __future__ import annotations
import dataclasses
from typing import (
TYPE_CHECKING,
Any,
Callable,
List,
Mapping,
Optional,
Sequence,
Type,
TypeVar,
Union,
)
from typing_extensions import Self
from strawberry.type import StrawberryType, StrawberryTypeVar
from strawberry.utils.inspect import get_specialized_type_var_map
from strawberry.utils.typing import is_generic as is_type_generic
if TYPE_CHECKING:
from graphql import GraphQLResolveInfo
from strawberry.field import StrawberryField
@dataclasses.dataclass(eq=False)
class TypeDefinition(StrawberryType):
    """Definition metadata for a Strawberry object type.

    Holds the GraphQL-facing name, flags, interfaces and fields of a
    decorated class, and implements the machinery for specializing
    generic type definitions into concrete ones.
    """

    name: str
    is_input: bool
    is_interface: bool
    origin: Type[Any]
    description: Optional[str]
    interfaces: List[TypeDefinition]
    extend: bool
    directives: Optional[Sequence[object]]
    is_type_of: Optional[Callable[[Any, GraphQLResolveInfo], bool]]
    _fields: List[StrawberryField]

    concrete_of: Optional[TypeDefinition] = None
    """Concrete implementations of Generic TypeDefinitions fill this in"""
    type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] = dataclasses.field(
        default_factory=dict
    )

    def __post_init__(self):
        # resolve `Self` annotation with the origin type
        for index, field in enumerate(self.fields):
            if isinstance(field.type, StrawberryType) and field.type.has_generic(Self):  # type: ignore # noqa: E501
                self.fields[index] = field.copy_with({Self: self.origin})  # type: ignore # noqa: E501

    # TODO: remove wrapped cls when we "merge" this with `StrawberryObject`
    def resolve_generic(self, wrapped_cls: type) -> type:
        """Specialize this generic definition with ``wrapped_cls``'s type args.

        ``wrapped_cls`` is a parameterized alias (it exposes ``__args__`` and
        ``__origin__.__parameters__``); each passed type is resolved into a
        Strawberry type and mapped onto the matching TypeVar before copying.
        """
        from strawberry.annotation import StrawberryAnnotation

        passed_types = wrapped_cls.__args__  # type: ignore
        params = wrapped_cls.__origin__.__parameters__  # type: ignore

        # Make sure all passed_types are turned into StrawberryTypes
        resolved_types = []
        for passed_type in passed_types:
            resolved_type = StrawberryAnnotation(passed_type).resolve()
            resolved_types.append(resolved_type)

        type_var_map = dict(zip(params, resolved_types))

        return self.copy_with(type_var_map)

    # TODO: Return a StrawberryObject
    def copy_with(
        self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
    ) -> type:
        """Return a new concrete class whose fields have TypeVars substituted.

        The generated class subclasses ``origin`` and carries a new
        ``TypeDefinition`` whose ``concrete_of`` points back to this one.
        """
        # TODO: Logic unnecessary with StrawberryObject
        fields = [field.copy_with(type_var_map) for field in self.fields]

        new_type_definition = TypeDefinition(
            name=self.name,
            is_input=self.is_input,
            origin=self.origin,
            is_interface=self.is_interface,
            directives=self.directives,
            interfaces=self.interfaces,
            description=self.description,
            extend=self.extend,
            is_type_of=self.is_type_of,
            _fields=fields,
            concrete_of=self,
            type_var_map=type_var_map,
        )

        new_type = type(
            new_type_definition.name,
            (self.origin,),
            {"_type_definition": new_type_definition},
        )

        # Point the definition at the freshly created class.
        new_type_definition.origin = new_type

        return new_type

    def get_field(self, python_name: str) -> Optional[StrawberryField]:
        """Return the field with the given Python name, or ``None``."""
        return next(
            (field for field in self.fields if field.python_name == python_name), None
        )

    @property
    def fields(self) -> List[StrawberryField]:
        # TODO: rename _fields to fields and remove this property
        return self._fields

    @property
    def is_generic(self) -> bool:
        """True when the origin class is a generic (has free TypeVars)."""
        return is_type_generic(self.origin)

    @property
    def is_specialized_generic(self) -> bool:
        """True when this generic already has all its TypeVars bound."""
        if not self.is_generic:
            return False

        type_var_map = get_specialized_type_var_map(self.origin, include_type_vars=True)
        return type_var_map is None or not any(
            isinstance(arg, TypeVar) for arg in type_var_map.values()
        )

    @property
    def type_params(self) -> List[TypeVar]:
        """All TypeVars referenced by this type's fields."""
        type_params: List[TypeVar] = []
        for field in self.fields:
            type_params.extend(field.type_params)

        return type_params

    def is_implemented_by(self, root: Union[type, dict]) -> bool:
        """Check whether ``root``'s type matches this (possibly generic) type.

        Raises:
            NotImplementedError: when ``root`` is a dict (not supported yet).
        """
        # TODO: Accept StrawberryObject instead
        # TODO: Support dicts
        if isinstance(root, dict):
            raise NotImplementedError

        type_definition = root._type_definition  # type: ignore

        if type_definition is self:
            # No generics involved. Exact type match
            return True

        if type_definition is not self.concrete_of:
            # Either completely different type, or concrete type of a different generic
            return False

        # Check the mapping of all fields' TypeVars
        for generic_field in type_definition.fields:
            generic_field_type = generic_field.type
            if not isinstance(generic_field_type, StrawberryTypeVar):
                continue

            # For each TypeVar found, get the expected type from the copy's type map
            expected_concrete_type = self.type_var_map.get(generic_field_type.type_var)
            if expected_concrete_type is None:
                # TODO: Should this return False?
                continue

            # Check if the expected type matches the type found on the type_map
            real_concrete_type = type(getattr(root, generic_field.name))

            # TODO: uniform type var map, at the moment we map object types
            # to their class (not to TypeDefinition) while we map enum to
            # the EnumDefinition class. This is why we do this check here:
            if hasattr(real_concrete_type, "_enum_definition"):
                real_concrete_type = real_concrete_type._enum_definition

            if real_concrete_type is not expected_concrete_type:
                return False

        # All field mappings succeeded. This is a match
        return True
from __future__ import annotations
import dataclasses
import warnings
from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union
from strawberry.utils.cached_property import cached_property
from .nodes import convert_selections
if TYPE_CHECKING:
from graphql import GraphQLResolveInfo, OperationDefinitionNode
from graphql.language import FieldNode
from graphql.pyutils.path import Path
from strawberry.field import StrawberryField
from strawberry.schema import Schema
from strawberry.type import StrawberryType
from .nodes import Selection
# Type of the per-request context object exposed via ``Info.context``.
ContextType = TypeVar("ContextType")
# Type of the root value exposed via ``Info.root_value``.
RootValueType = TypeVar("RootValueType")
@dataclasses.dataclass
class Info(Generic[ContextType, RootValueType]):
    """Per-resolver execution info passed to field resolvers.

    Thin, typed wrapper around graphql-core's ``GraphQLResolveInfo`` plus
    the ``StrawberryField`` currently being resolved.
    """

    _raw_info: GraphQLResolveInfo
    _field: StrawberryField

    @property
    def field_name(self) -> str:
        """GraphQL name of the field being resolved."""
        return self._raw_info.field_name

    @property
    def schema(self) -> Schema:
        """The Strawberry schema this execution runs against."""
        return self._raw_info.schema._strawberry_schema  # type: ignore

    @property
    def field_nodes(self) -> List[FieldNode]:  # deprecated
        """Deprecated: raw AST field nodes; use ``selected_fields`` instead."""
        warnings.warn(
            "`info.field_nodes` is deprecated, use `selected_fields` instead",
            DeprecationWarning,
            stacklevel=2,
        )

        return self._raw_info.field_nodes

    @cached_property
    def selected_fields(self) -> List[Selection]:
        """Typed selection tree for this field (computed once per resolve)."""
        info = self._raw_info
        return convert_selections(info, info.field_nodes)

    @property
    def context(self) -> ContextType:
        return self._raw_info.context

    @property
    def root_value(self) -> RootValueType:
        return self._raw_info.root_value

    @property
    def variable_values(self) -> Dict[str, Any]:
        return self._raw_info.variable_values

    # TODO: merge type with StrawberryType when StrawberryObject is implemented
    @property
    def return_type(self) -> Optional[Union[type, StrawberryType]]:
        """Strawberry type of the field's return value."""
        return self._field.type

    @property
    def python_name(self) -> str:
        """Python attribute name backing the field."""
        return self._field.python_name

    # TODO: create an abstraction on these fields
    @property
    def operation(self) -> OperationDefinitionNode:
        return self._raw_info.operation

    @property
    def path(self) -> Path:
        return self._raw_info.path

    # TODO: parent_type as strawberry types
from __future__ import annotations
import dataclasses
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type
from graphql import specified_rules
from strawberry.utils.operation import get_first_operation, get_operation_type
if TYPE_CHECKING:
from graphql import ASTValidationRule
from graphql import ExecutionResult as GraphQLExecutionResult
from graphql.error.graphql_error import GraphQLError
from graphql.language import DocumentNode, OperationDefinitionNode
from strawberry.schema import Schema
from .graphql import OperationType
@dataclasses.dataclass
class ExecutionContext:
    """Mutable state shared across one GraphQL execution.

    Created per operation; ``graphql_document``, ``errors`` and ``result``
    are filled in as execution progresses so extensions can inspect them.
    """

    query: Optional[str]
    schema: Schema
    context: Any = None
    variables: Optional[Dict[str, Any]] = None
    root_value: Optional[Any] = None
    validation_rules: Tuple[Type[ASTValidationRule], ...] = dataclasses.field(
        default_factory=lambda: tuple(specified_rules)
    )

    # The operation name that is provided by the request
    provided_operation_name: dataclasses.InitVar[Optional[str]] = None

    # Values that get populated during the GraphQL execution so that they can be
    # accessed by extensions
    graphql_document: Optional[DocumentNode] = None
    errors: Optional[List[GraphQLError]] = None
    result: Optional[GraphQLExecutionResult] = None

    def __post_init__(self, provided_operation_name: Optional[str]):
        # Stash the InitVar so ``operation_name`` can prefer it later.
        self._provided_operation_name = provided_operation_name

    @property
    def operation_name(self) -> Optional[str]:
        """Explicitly provided operation name, else the first operation's name."""
        if self._provided_operation_name:
            return self._provided_operation_name

        definition = self._get_first_operation()
        if not definition:
            return None

        if not definition.name:
            return None

        return definition.name.value

    @property
    def operation_type(self) -> OperationType:
        """Operation type (query/mutation/subscription) of the selected operation.

        Raises:
            RuntimeError: if no parsed document is available yet.
        """
        graphql_document = self.graphql_document
        if not graphql_document:
            raise RuntimeError("No GraphQL document available")

        return get_operation_type(graphql_document, self.operation_name)

    def _get_first_operation(self) -> Optional[OperationDefinitionNode]:
        # Helper for ``operation_name``: first operation in the parsed document.
        graphql_document = self.graphql_document
        if not graphql_document:
            return None

        return get_first_operation(graphql_document)
@dataclasses.dataclass
class ExecutionResult:
    """Final outcome of executing a GraphQL operation."""

    data: Optional[Dict[str, Any]]
    errors: Optional[List[GraphQLError]]
    extensions: Optional[Dict[str, Any]] = None
from __future__ import annotations
import dataclasses
import sys
from typing import TYPE_CHECKING, Dict, List, Type, TypeVar
from strawberry.annotation import StrawberryAnnotation
from strawberry.exceptions import (
FieldWithResolverAndDefaultFactoryError,
FieldWithResolverAndDefaultValueError,
PrivateStrawberryFieldError,
)
from strawberry.private import is_private
from strawberry.unset import UNSET
from strawberry.utils.inspect import get_specialized_type_var_map
if TYPE_CHECKING:
from strawberry.field import StrawberryField
def _get_fields(cls: Type) -> List[StrawberryField]:
    """Get all the strawberry fields off a strawberry.type cls

    This function returns a list of StrawberryFields (one for each field item), while
    also paying attention the name and typing of the field.

    StrawberryFields can be defined on a strawberry.type class as either a dataclass-
    style field or using strawberry.field as a decorator.

    >>> import strawberry
    >>> @strawberry.type
    ... class Query:
    ...     type_1a: int = 5
    ...     type_1b: int = strawberry.field(...)
    ...     type_1c: int = strawberry.field(resolver=...)
    ...
    ...     @strawberry.field
    ...     def type_2(self) -> int:
    ...         ...

    Type #1:
        A pure dataclass-style field. Will not have a StrawberryField; one will need to
        be created in this function. Type annotation is required.

    Type #2:
        A field defined using @strawberry.field as a decorator around the resolver. The
        resolver must be type-annotated.

    The StrawberryField.python_name value will be assigned to the field's name on the
    class if one is not set by either using an explicit strawberry.field(name=...) or by
    passing a named function (i.e. not an anonymous lambda) to strawberry.field
    (typically as a decorator).
    """
    # Deferred import to avoid import cycles
    from strawberry.field import StrawberryField

    fields: Dict[str, StrawberryField] = {}

    # before trying to find any fields, let's first add the fields defined in
    # parent classes, we do this by checking if parents have a type definition
    for base in cls.__bases__:
        if hasattr(base, "_type_definition"):
            base_fields = {
                field.python_name: field
                # TODO: we need to rename _fields to something else
                for field in base._type_definition._fields
            }

            # Add base's fields to cls' fields
            # (later bases/own fields override earlier ones by python_name)
            fields = {**fields, **base_fields}

    # Find the class the each field was originally defined on so we can use
    # that scope later when resolving the type, as it may have different names
    # available to it.
    origins: Dict[str, type] = {field_name: cls for field_name in cls.__annotations__}

    for base in cls.__mro__:
        if hasattr(base, "_type_definition"):
            for field in base._type_definition._fields:
                # NOTE(review): the membership test uses ``field.python_name``
                # but the key stored is ``field.name`` — confirm these always
                # match for fields with an explicit graphql name/alias.
                if field.python_name in base.__annotations__:
                    origins.setdefault(field.name, base)

    # then we can proceed with finding the fields for the current class
    for field in dataclasses.fields(cls):
        if isinstance(field, StrawberryField):
            # Check that the field type is not Private
            if is_private(field.type):
                raise PrivateStrawberryFieldError(field.python_name, cls)

            # Check that default is not set if a resolver is defined
            if (
                field.default is not dataclasses.MISSING
                and field.default is not UNSET
                and field.base_resolver is not None
            ):
                raise FieldWithResolverAndDefaultValueError(
                    field.python_name, cls.__name__
                )

            # Check that default_factory is not set if a resolver is defined
            # Note: using getattr because of this issue:
            # https://github.com/python/mypy/issues/6910
            default_factory = getattr(field, "default_factory", None)
            if (
                default_factory is not dataclasses.MISSING
                and default_factory is not UNSET
                and field.base_resolver is not None
            ):
                raise FieldWithResolverAndDefaultFactoryError(
                    field.python_name, cls.__name__
                )

            # we make sure that the origin is either the field's resolver when
            # called as:
            #
            # >>> @strawberry.field
            # ... def x(self): ...
            #
            # or the class where this field was defined, so we always have
            # the correct origin for determining field types when resolving
            # the types.
            field.origin = field.origin or cls

            # Set the correct namespace for annotations if a namespace isn't
            # already set
            # Note: We do this here rather in the `Strawberry.type` setter
            # function because at that point we don't have a link to the object
            # type that the field as attached to.
            if isinstance(field.type_annotation, StrawberryAnnotation):
                type_annotation = field.type_annotation
                if type_annotation.namespace is None:
                    type_annotation.set_namespace_from_field(field)

        # Create a StrawberryField for fields that didn't use strawberry.field
        else:
            # Only ignore Private fields that weren't defined using StrawberryFields
            if is_private(field.type):
                continue

            field_type = field.type

            origin = origins.get(field.name, cls)
            module = sys.modules[origin.__module__]

            if isinstance(field_type, TypeVar):
                specialized_type_var_map = get_specialized_type_var_map(cls)

                # If field_type is specialized and a TypeVar, replace it with its
                # mapped type
                if specialized_type_var_map and field_type in specialized_type_var_map:
                    field_type = specialized_type_var_map[field_type]
            else:
                specialized_type_var_map = get_specialized_type_var_map(field_type)

                # If field_type is specialized, copy its type_var_map to the definition
                if specialized_type_var_map:
                    field_type = field_type._type_definition.copy_with(
                        specialized_type_var_map
                    )

            # Create a StrawberryField, for fields of Types #1 and #2a
            field = StrawberryField(  # noqa: PLW2901
                python_name=field.name,
                graphql_name=None,
                type_annotation=StrawberryAnnotation(
                    annotation=field_type,
                    namespace=module.__dict__,
                ),
                origin=origin,
                default=getattr(cls, field.name, dataclasses.MISSING),
            )

        field_name = field.python_name

        assert_message = "Field must have a name by the time the schema is generated"
        assert field_name is not None, assert_message

        # TODO: Raise exception if field_name already in fields
        fields[field_name] = field

    return list(fields.values())
from __future__ import annotations
import dataclasses
from typing import TYPE_CHECKING, Any, Collection, Dict, Iterable, List, Optional, Union
from graphql.language import FieldNode as GQLFieldNode
from graphql.language import FragmentSpreadNode as GQLFragmentSpreadNode
from graphql.language import InlineFragmentNode as GQLInlineFragmentNode
from graphql.language import ListValueNode as GQLListValueNode
from graphql.language import ObjectValueNode as GQLObjectValueNode
from graphql.language import VariableNode as GQLVariableNode
if TYPE_CHECKING:
from graphql import GraphQLResolveInfo
from graphql.language import ArgumentNode as GQLArgumentNode
from graphql.language import DirectiveNode as GQLDirectiveNode
from graphql.language import ValueNode as GQLValueNode
# Mapping of argument name -> converted Python value.
Arguments = Dict[str, Any]
# Mapping of directive name -> its converted arguments.
Directives = Dict[str, Arguments]
# Any node kind that can appear in a converted selection set.
Selection = Union["SelectedField", "FragmentSpread", "InlineFragment"]
def convert_value(info: GraphQLResolveInfo, node: GQLValueNode) -> Any:
    """Convert a GraphQL AST value node into a plain Python value."""
    if isinstance(node, GQLVariableNode):
        # Resolve the variable against the values supplied with the request.
        return info.variable_values.get(node.name.value)

    if isinstance(node, GQLListValueNode):
        return [convert_value(info, item) for item in node.values]

    if isinstance(node, GQLObjectValueNode):
        converted = {}
        for entry in node.fields:
            converted[entry.name.value] = convert_value(info, entry.value)
        return converted

    # Scalar nodes carry a plain ``value`` attribute; anything else -> None.
    return getattr(node, "value", None)
def convert_arguments(
    info: GraphQLResolveInfo, nodes: Iterable[GQLArgumentNode]
) -> Arguments:
    """Return mapping of argument name to converted value."""
    converted: Arguments = {}
    for argument in nodes:
        converted[argument.name.value] = convert_value(info, argument.value)
    return converted
def convert_directives(
    info: GraphQLResolveInfo, nodes: Iterable[GQLDirectiveNode]
) -> Directives:
    """Return mapping of directive name to converted arguments."""
    converted: Directives = {}
    for directive in nodes:
        converted[directive.name.value] = convert_arguments(info, directive.arguments)
    return converted
def convert_selections(
    info: GraphQLResolveInfo, field_nodes: Collection[GQLFieldNode]
) -> List[Selection]:
    """Return typed ``Selection`` wrappers for a list of raw AST nodes."""
    converted: List[Selection] = []
    for node in field_nodes:
        if isinstance(node, GQLFieldNode):
            selection: Selection = SelectedField.from_node(info, node)
        elif isinstance(node, GQLInlineFragmentNode):
            selection = InlineFragment.from_node(info, node)
        elif isinstance(node, GQLFragmentSpreadNode):
            selection = FragmentSpread.from_node(info, node)
        else:
            raise TypeError(f"Unknown node type: {node}")
        converted.append(selection)

    return converted
@dataclasses.dataclass
class FragmentSpread:
    """Typed wrapper around a GraphQL ``FragmentSpreadNode``."""

    name: str
    type_condition: str
    directives: Directives
    selections: List[Selection]

    @classmethod
    def from_node(cls, info: GraphQLResolveInfo, node: GQLFragmentSpreadNode):
        """Build a ``FragmentSpread`` by resolving the named fragment."""
        fragment_name = node.name.value
        fragment = info.fragments[fragment_name]
        child_nodes = getattr(fragment.selection_set, "selections", [])

        return cls(
            name=fragment_name,
            type_condition=fragment.type_condition.name.value,
            directives=convert_directives(info, node.directives),
            selections=convert_selections(info, child_nodes),
        )
@dataclasses.dataclass
class InlineFragment:
    """Typed wrapper around a GraphQL ``InlineFragmentNode``."""

    type_condition: str
    selections: List[Selection]
    directives: Directives

    @classmethod
    def from_node(cls, info: GraphQLResolveInfo, node: GQLInlineFragmentNode):
        """Build an ``InlineFragment`` from a raw AST inline-fragment node."""
        child_nodes = getattr(node.selection_set, "selections", [])

        return cls(
            type_condition=node.type_condition.name.value,
            directives=convert_directives(info, node.directives),
            selections=convert_selections(info, child_nodes),
        )
@dataclasses.dataclass
class SelectedField:
    """Typed wrapper around a GraphQL ``FieldNode``."""

    name: str
    directives: Directives
    arguments: Arguments
    selections: List[Selection]
    alias: Optional[str] = None

    @classmethod
    def from_node(cls, info: GraphQLResolveInfo, node: GQLFieldNode):
        """Build a ``SelectedField`` from a raw AST field node."""
        child_nodes = getattr(node.selection_set, "selections", [])

        return cls(
            name=node.name.value,
            directives=convert_directives(info, node.directives),
            arguments=convert_arguments(info, node.arguments),
            selections=convert_selections(info, child_nodes),
            alias=getattr(node.alias, "value", None),
        )
from __future__ import annotations as _
import inspect
import sys
import warnings
from inspect import isasyncgenfunction, iscoroutinefunction
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
ForwardRef,
Generic,
List,
Mapping,
NamedTuple,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from typing_extensions import Annotated, Protocol, get_args, get_origin
from strawberry.annotation import StrawberryAnnotation
from strawberry.arguments import StrawberryArgument
from strawberry.exceptions import MissingArgumentsAnnotationsError
from strawberry.type import StrawberryType
from strawberry.types.info import Info
from strawberry.utils.cached_property import cached_property
from strawberry.utils.typing import eval_type
if TYPE_CHECKING:
import builtins
class Parameter(inspect.Parameter):
    def __hash__(self):
        """Hash on ``(name, kind, annotation)``, deliberately ignoring the default.

        ``inspect.Parameter`` includes the default value in its hash, which
        breaks resolvers whose arguments default to unhashable values such as
        ``list`` or ``dict``. Within a single resolver the triple is unique
        because a callable cannot repeat a parameter name; across resolvers a
        rare hash collision is harmless since Python falls back to
        :py:meth:`__eq__` on collision (verified by the
        ``test_parameter_hash_collision`` test).
        """
        identity = (self.name, self.kind, self.annotation)
        return hash(identity)
class Signature(inspect.Signature):
    # Make ``Signature.from_callable`` build parameters with the hash-safe
    # ``Parameter`` subclass above, so resolvers may use unhashable defaults.
    _parameter_cls = Parameter
class ReservedParameterSpecification(Protocol):
    """Structural interface for locating a reserved resolver parameter."""

    def find(
        self, parameters: Tuple[inspect.Parameter, ...], resolver: StrawberryResolver
    ) -> Optional[inspect.Parameter]:
        """Finds the reserved parameter from ``parameters``."""
class ReservedName(NamedTuple):
    """Reserved parameter matched purely by its name."""

    name: str

    def find(
        self, parameters: Tuple[inspect.Parameter, ...], _: StrawberryResolver
    ) -> Optional[inspect.Parameter]:
        # First parameter with a matching name wins; None when absent.
        for parameter in parameters:
            if parameter.name == self.name:
                return parameter
        return None
class ReservedNameBoundParameter(NamedTuple):
    """Reserved parameter that must be the *first* parameter (``self``/``cls``)."""

    name: str

    def find(
        self, parameters: Tuple[inspect.Parameter, ...], _: StrawberryResolver
    ) -> Optional[inspect.Parameter]:
        # Resolvers may legitimately take no arguments at all.
        if not parameters:
            return None
        candidate = parameters[0]
        return candidate if candidate.name == self.name else None
class ReservedType(NamedTuple):
    """Reserved parameter matched by annotation type, with a name fallback.

    To preserve backwards compatibility, when no parameter annotation matches
    :attr:`type`, a parameter whose *name* equals :attr:`name` is used instead
    (emitting a deprecation warning).
    """

    name: str
    type: Type

    def find(
        self, parameters: Tuple[inspect.Parameter, ...], resolver: StrawberryResolver
    ) -> Optional[inspect.Parameter]:
        for parameter in parameters:
            annotation = parameter.annotation
            try:
                # String annotations (PEP 563) must be evaluated in the
                # resolver's module namespace before they can be compared.
                target = (
                    ForwardRef(annotation)
                    if isinstance(annotation, str)
                    else annotation
                )
                resolved = eval_type(target, resolver._namespace, None)
                # Cache the evaluated annotation so it is not re-evaluated
                # when building the resolver's GraphQL arguments.
                resolver._resolved_annotations[parameter] = resolved
            except NameError:
                # Unresolvable forward reference: match on the raw annotation.
                resolved = annotation
            if self.is_reserved_type(resolved):
                return parameter
        # No annotation matched: fall back to legacy name-based matching.
        fallback = ReservedName(name=self.name).find(parameters, resolver)
        if fallback is None:
            return None
        warnings.warn(
            DeprecationWarning(
                f"Argument name-based matching of '{self.name}' is deprecated and will "
                "be removed in v1.0. Ensure that reserved arguments are annotated "
                "their respective types (i.e. use value: 'DirectiveValue[str]' instead "
                "of 'value: str' and 'info: Info' instead of a plain 'info')."
            ),
            stacklevel=3,
        )
        return fallback

    def is_reserved_type(self, other: Type) -> bool:
        origin = cast(type, get_origin(other)) or other
        if origin is Annotated:
            # Annotated arguments such as Private[str] / DirectiveValue[str]:
            # match on the metadata carried by the annotation.
            return any(isinstance(argument, self.type) for argument in get_args(other))
        # Handle both concrete and generic types (i.e. Info and Info[Any, Any]).
        if isinstance(origin, type):
            return issubclass(origin, self.type)
        return origin is self.type
# Specifications for the parameters strawberry reserves; these are detected on
# every resolver and excluded from the exposed GraphQL arguments.
SELF_PARAMSPEC = ReservedNameBoundParameter("self")
CLS_PARAMSPEC = ReservedNameBoundParameter("cls")
ROOT_PARAMSPEC = ReservedName("root")
INFO_PARAMSPEC = ReservedType("info", Info)

# Return type of the wrapped resolver callable.
T = TypeVar("T")
class StrawberryResolver(Generic[T]):
    """Wraps a user resolver callable and exposes its GraphQL-relevant metadata.

    Signature inspection, reserved-parameter detection (self/cls/root/info),
    GraphQL argument construction and return-type resolution are all computed
    lazily via ``cached_property``.
    """

    # Specifications checked when classifying parameters that strawberry
    # reserves and therefore hides from the GraphQL schema.
    RESERVED_PARAMSPEC: Tuple[ReservedParameterSpecification, ...] = (
        SELF_PARAMSPEC,
        CLS_PARAMSPEC,
        ROOT_PARAMSPEC,
        INFO_PARAMSPEC,
    )

    def __init__(
        self,
        func: Union[Callable[..., T], staticmethod, classmethod],
        *,
        description: Optional[str] = None,
        type_override: Optional[Union[StrawberryType, type]] = None,
    ):
        self.wrapped_func = func
        self._description = description
        self._type_override = type_override
        """Specify the type manually instead of calculating from wrapped func

        This is used when creating copies of types w/ generics
        """
        self._resolved_annotations: Dict[inspect.Parameter, Any] = {}
        """Populated during reserved parameter determination.

        Caching resolved annotations this way prevents evaling them repeatedly.
        """

    # TODO: Use this when doing the actual resolving? How to deal with async resolvers?
    def __call__(self, *args, **kwargs) -> T:
        if not callable(self.wrapped_func):
            raise UncallableResolverError(self)
        return self.wrapped_func(*args, **kwargs)

    @cached_property
    def signature(self) -> inspect.Signature:
        # ``follow_wrapped`` unwraps decorated functions so the original
        # parameters are inspected; the hash-safe ``Signature`` subclass
        # tolerates unhashable defaults.
        return Signature.from_callable(self._unbound_wrapped_func, follow_wrapped=True)

    @cached_property
    def reserved_parameters(
        self,
    ) -> Dict[ReservedParameterSpecification, Optional[inspect.Parameter]]:
        """Mapping of reserved parameter specification to parameter."""
        parameters = tuple(self.signature.parameters.values())
        return {spec: spec.find(parameters, self) for spec in self.RESERVED_PARAMSPEC}

    @cached_property
    def arguments(self) -> List[StrawberryArgument]:
        """Resolver arguments exposed in the GraphQL Schema."""
        parameters = self.signature.parameters.values()
        reserved_parameters = set(self.reserved_parameters.values())
        missing_annotations = []
        arguments = []
        user_parameters = (p for p in parameters if p not in reserved_parameters)
        for param in user_parameters:
            # Prefer the annotation already resolved during reserved-parameter
            # detection; fall back to the raw (possibly string) annotation.
            annotation = self._resolved_annotations.get(param, param.annotation)
            if annotation is inspect.Signature.empty:
                missing_annotations.append(param.name)
            else:
                argument = StrawberryArgument(
                    python_name=param.name,
                    graphql_name=None,
                    type_annotation=StrawberryAnnotation(
                        annotation=annotation, namespace=self._namespace
                    ),
                    default=param.default,
                )
                arguments.append(argument)
        if missing_annotations:
            # Report all un-annotated arguments in a single error.
            raise MissingArgumentsAnnotationsError(self, missing_annotations)
        return arguments

    @cached_property
    def info_parameter(self) -> Optional[inspect.Parameter]:
        return self.reserved_parameters.get(INFO_PARAMSPEC)

    @cached_property
    def root_parameter(self) -> Optional[inspect.Parameter]:
        return self.reserved_parameters.get(ROOT_PARAMSPEC)

    @cached_property
    def self_parameter(self) -> Optional[inspect.Parameter]:
        return self.reserved_parameters.get(SELF_PARAMSPEC)

    @cached_property
    def name(self) -> str:
        # TODO: What to do if resolver is a lambda?
        return self._unbound_wrapped_func.__name__

    @cached_property
    def annotations(self) -> Dict[str, object]:
        """Annotations for the resolver.

        Does not include special args defined in `RESERVED_PARAMSPEC` (e.g. self, root,
        info)
        """
        reserved_parameters = self.reserved_parameters
        reserved_names = {p.name for p in reserved_parameters.values() if p is not None}
        annotations = self._unbound_wrapped_func.__annotations__
        annotations = {
            name: annotation
            for name, annotation in annotations.items()
            if name not in reserved_names
        }
        return annotations

    @cached_property
    def type_annotation(self) -> Optional[StrawberryAnnotation]:
        # None when the wrapped function has no return annotation.
        return_annotation = self.signature.return_annotation
        if return_annotation is inspect.Signature.empty:
            return None
        else:
            type_annotation = StrawberryAnnotation(
                annotation=return_annotation, namespace=self._namespace
            )
        return type_annotation

    @property
    def type(self) -> Optional[Union[StrawberryType, type]]:
        # An explicit override (set when copying generic types) wins over
        # the wrapped function's return annotation.
        if self._type_override:
            return self._type_override
        if self.type_annotation is None:
            return None
        return self.type_annotation.resolve()

    @cached_property
    def is_async(self) -> bool:
        # True for both coroutine functions and async generators (subscriptions).
        return iscoroutinefunction(self._unbound_wrapped_func) or isasyncgenfunction(
            self._unbound_wrapped_func
        )

    def copy_with(
        self, type_var_map: Mapping[TypeVar, Union[StrawberryType, builtins.type]]
    ) -> StrawberryResolver:
        """Return a copy of this resolver with generic type vars substituted."""
        type_override = None
        if self.type:
            if isinstance(self.type, StrawberryType):
                type_override = self.type.copy_with(type_var_map)
            elif hasattr(self.type, "_type_definition"):
                type_override = self.type._type_definition.copy_with(
                    type_var_map,
                )
        other = type(self)(
            func=self.wrapped_func,
            description=self._description,
            type_override=type_override,
        )
        # Resolve generic arguments
        for argument in other.arguments:
            if isinstance(argument.type, StrawberryType) and argument.type.is_generic:
                argument.type_annotation = StrawberryAnnotation(
                    annotation=argument.type.copy_with(type_var_map),
                    namespace=argument.type_annotation.namespace,
                )
        return other

    @cached_property
    def _namespace(self) -> Dict[str, Any]:
        # Module globals of the wrapped function; used to evaluate string /
        # forward-reference annotations.
        return sys.modules[self._unbound_wrapped_func.__module__].__dict__

    @cached_property
    def _unbound_wrapped_func(self) -> Callable[..., T]:
        # static/class methods keep the underlying function on ``__func__``.
        if isinstance(self.wrapped_func, (staticmethod, classmethod)):
            return self.wrapped_func.__func__
        return self.wrapped_func
class UncallableResolverError(Exception):
    """Raised when a :class:`StrawberryResolver` wraps a non-callable object."""

    def __init__(self, resolver: StrawberryResolver):
        super().__init__(
            f"Attempted to call resolver {resolver} with uncallable function "
            f"{resolver.wrapped_func}"
        )
__all__ = ["StrawberryResolver"] | 564bff00ff-strawberry-graphql | /564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/types/fields/resolver.py | resolver.py |
from __future__ import annotations
import json
from datetime import timedelta
from inspect import signature
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Iterable,
Optional,
Sequence,
Union,
cast,
)
from starlette import status
from starlette.background import BackgroundTasks # noqa: TCH002
from starlette.requests import HTTPConnection, Request
from starlette.responses import (
HTMLResponse,
PlainTextResponse,
Response,
)
from starlette.websockets import WebSocket
from fastapi import APIRouter, Depends
from strawberry.exceptions import InvalidCustomContext, MissingQueryError
from strawberry.fastapi.context import BaseContext, CustomContext
from strawberry.fastapi.handlers import GraphQLTransportWSHandler, GraphQLWSHandler
from strawberry.file_uploads.utils import replace_placeholders_with_files
from strawberry.http import (
parse_query_params,
parse_request_data,
process_result,
)
from strawberry.schema.exceptions import InvalidOperationTypeError
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL, GRAPHQL_WS_PROTOCOL
from strawberry.types.graphql import OperationType
from strawberry.utils.debug import pretty_print_graphql_operation
from strawberry.utils.graphiql import get_graphiql_html
if TYPE_CHECKING:
from starlette.types import ASGIApp
from strawberry.fastapi.context import MergedContext
from strawberry.http import GraphQLHTTPResponse
from strawberry.schema import BaseSchema
from strawberry.types import ExecutionResult
class GraphQLRouter(APIRouter):
    """FastAPI router serving a strawberry schema over HTTP GET/POST and WebSocket.

    Routes are registered in ``__init__`` as closures so they can capture the
    injected context/root-value dependency getters.
    """

    # Subclass hooks for the two supported WebSocket subprotocol handlers.
    graphql_ws_handler_class = GraphQLWSHandler
    graphql_transport_ws_handler_class = GraphQLTransportWSHandler

    @staticmethod
    async def __get_root_value():
        # Default root-value getter when none is supplied by the user.
        return None

    @staticmethod
    def __get_context_getter(
        custom_getter: Callable[
            ..., Union[Optional[CustomContext], Awaitable[Optional[CustomContext]]]
        ]
    ) -> Callable[..., Awaitable[CustomContext]]:
        """Wrap ``custom_getter`` into a FastAPI dependency that merges the
        user context with the default request/background-tasks/response context."""

        async def dependency(
            custom_context: Optional[CustomContext],
            background_tasks: BackgroundTasks,
            connection: HTTPConnection,
            # ``Response = None`` with the ignore is the FastAPI idiom for an
            # optional response object (absent on WebSocket connections).
            response: Response = None,  # type: ignore
        ) -> MergedContext:
            request = cast(Union[Request, WebSocket], connection)
            if isinstance(custom_context, BaseContext):
                # Class-based contexts get the connection details attached.
                custom_context.request = request
                custom_context.background_tasks = background_tasks
                custom_context.response = response
                return custom_context
            default_context = {
                "request": request,
                "background_tasks": background_tasks,
                "response": response,
            }
            if isinstance(custom_context, dict):
                # Dict contexts are merged over the defaults; user keys win.
                return {
                    **default_context,
                    **custom_context,
                }
            elif custom_context is None:
                return default_context
            else:
                raise InvalidCustomContext()

        # replace the signature parameters of dependency...
        # ...with the old parameters minus the first argument as it will be replaced...
        # ...with the value obtained by injecting custom_getter context as a dependency.
        sig = signature(dependency)
        sig = sig.replace(
            parameters=[
                *list(sig.parameters.values())[1:],
                sig.parameters["custom_context"].replace(
                    default=Depends(custom_getter)
                ),
            ],
        )
        # there is an ongoing issue with types and .__signature__ applied to Callables:
        # https://github.com/python/mypy/issues/5958, as of 14/12/21
        # as such, the below line has its typing ignored by MyPy
        dependency.__signature__ = sig  # type: ignore
        return dependency

    def __init__(
        self,
        schema: BaseSchema,
        path: str = "",
        graphiql: bool = True,
        allow_queries_via_get: bool = True,
        keep_alive: bool = False,
        keep_alive_interval: float = 1,
        debug: bool = False,
        root_value_getter=None,
        context_getter=None,
        subscription_protocols=(GRAPHQL_TRANSPORT_WS_PROTOCOL, GRAPHQL_WS_PROTOCOL),
        connection_init_wait_timeout: timedelta = timedelta(minutes=1),
        default: Optional[ASGIApp] = None,
        on_startup: Optional[Sequence[Callable[[], Any]]] = None,
        on_shutdown: Optional[Sequence[Callable[[], Any]]] = None,
    ):
        super().__init__(
            default=default,
            on_startup=on_startup,
            on_shutdown=on_shutdown,
        )
        self.schema = schema
        self.graphiql = graphiql
        self.allow_queries_via_get = allow_queries_via_get
        self.keep_alive = keep_alive
        self.keep_alive_interval = keep_alive_interval
        self.debug = debug
        self.root_value_getter = root_value_getter or self.__get_root_value
        self.context_getter = self.__get_context_getter(
            context_getter or (lambda: None)
        )
        self.protocols = subscription_protocols
        self.connection_init_wait_timeout = connection_init_wait_timeout

        # GET serves either a query (when query params are present) or the
        # GraphiQL IDE page.
        @self.get(
            path,
            responses={
                200: {
                    "description": "The GraphiQL integrated development environment.",
                },
                404: {
                    "description": "Not found if GraphiQL is not enabled.",
                },
            },
        )
        async def handle_http_get(
            request: Request,
            response: Response,
            context=Depends(self.context_getter),
            root_value=Depends(self.root_value_getter),
        ) -> Response:
            if request.query_params:
                try:
                    query_data = parse_query_params(request.query_params._dict)
                except json.JSONDecodeError:
                    return PlainTextResponse(
                        "Unable to parse request body as JSON",
                        status_code=status.HTTP_400_BAD_REQUEST,
                    )
                return await self.execute_request(
                    request=request,
                    response=response,
                    data=query_data,
                    context=context,
                    root_value=root_value,
                )
            elif self.should_render_graphiql(request):
                return self.get_graphiql_response()
            return Response(status_code=status.HTTP_404_NOT_FOUND)

        # POST accepts JSON bodies and multipart uploads (GraphQL multipart
        # request spec); anything else is a 415.
        @self.post(path)
        async def handle_http_post(
            request: Request,
            response: Response,
            context=Depends(self.context_getter),
            root_value=Depends(self.root_value_getter),
        ) -> Response:
            actual_response: Response
            content_type = request.headers.get("content-type", "")
            if "application/json" in content_type:
                try:
                    data = await request.json()
                except json.JSONDecodeError:
                    actual_response = PlainTextResponse(
                        "Unable to parse request body as JSON",
                        status_code=status.HTTP_400_BAD_REQUEST,
                    )
                    return self._merge_responses(response, actual_response)
            elif content_type.startswith("multipart/form-data"):
                multipart_data = await request.form()
                try:
                    operations_text = multipart_data.get("operations", "{}")
                    operations = json.loads(operations_text)  # type: ignore
                    files_map = json.loads(multipart_data.get("map", "{}"))  # type: ignore # noqa: E501
                except json.JSONDecodeError:
                    actual_response = PlainTextResponse(
                        "Unable to parse request body as JSON",
                        status_code=status.HTTP_400_BAD_REQUEST,
                    )
                    return self._merge_responses(response, actual_response)
                try:
                    data = replace_placeholders_with_files(
                        operations, files_map, multipart_data
                    )
                except KeyError:
                    actual_response = PlainTextResponse(
                        "File(s) missing in form data",
                        status_code=status.HTTP_400_BAD_REQUEST,
                    )
                    return self._merge_responses(response, actual_response)
            else:
                actual_response = PlainTextResponse(
                    "Unsupported Media Type",
                    status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
                )
                return self._merge_responses(response, actual_response)
            return await self.execute_request(
                request=request,
                response=response,
                data=data,
                context=context,
                root_value=root_value,
            )

        @self.websocket(path)
        async def websocket_endpoint(
            websocket: WebSocket,
            context=Depends(self.context_getter),
            root_value=Depends(self.root_value_getter),
        ):
            # Wrap the already-resolved dependencies so the protocol handlers
            # (which expect async getters) can reuse them.
            async def _get_context():
                return context

            async def _get_root_value():
                return root_value

            preferred_protocol = self.pick_preferred_protocol(websocket)
            if preferred_protocol == GRAPHQL_TRANSPORT_WS_PROTOCOL:
                await self.graphql_transport_ws_handler_class(
                    schema=self.schema,
                    debug=self.debug,
                    connection_init_wait_timeout=self.connection_init_wait_timeout,
                    get_context=_get_context,
                    get_root_value=_get_root_value,
                    ws=websocket,
                ).handle()
            elif preferred_protocol == GRAPHQL_WS_PROTOCOL:
                await self.graphql_ws_handler_class(
                    schema=self.schema,
                    debug=self.debug,
                    keep_alive=self.keep_alive,
                    keep_alive_interval=self.keep_alive_interval,
                    get_context=_get_context,
                    get_root_value=_get_root_value,
                    ws=websocket,
                ).handle()
            else:
                # Code 4406 is "Subprotocol not acceptable"
                await websocket.close(code=4406)

    def pick_preferred_protocol(self, ws: WebSocket) -> Optional[str]:
        # Among the protocols both sides support, pick the one the client
        # listed first (client preference order).
        protocols = ws["subprotocols"]
        intersection = set(protocols) & set(self.protocols)
        return min(
            intersection,
            key=lambda i: protocols.index(i),
            default=None,
        )

    def should_render_graphiql(self, request: Request) -> bool:
        if not self.graphiql:
            return False
        # Serve GraphiQL only to clients that accept HTML (or anything).
        return any(
            supported_header in request.headers.get("accept", "")
            for supported_header in ("text/html", "*/*")
        )

    def get_graphiql_response(self) -> HTMLResponse:
        html = get_graphiql_html()
        return HTMLResponse(html)

    @staticmethod
    def _merge_responses(response: Response, actual_response: Response) -> Response:
        # Carry headers / status customized via the injected ``response``
        # object over onto the response that is actually returned.
        actual_response.headers.raw.extend(response.headers.raw)
        if response.status_code:
            actual_response.status_code = response.status_code
        return actual_response

    async def execute(
        self,
        query: Optional[str],
        variables: Optional[Dict[str, Any]] = None,
        context: Any = None,
        operation_name: Optional[str] = None,
        root_value: Any = None,
        allowed_operation_types: Optional[Iterable[OperationType]] = None,
    ) -> ExecutionResult:
        """Execute ``query`` against the schema (debug-logging it first if enabled)."""
        if self.debug and query:
            pretty_print_graphql_operation(operation_name, query, variables)
        return await self.schema.execute(
            query,
            root_value=root_value,
            variable_values=variables,
            operation_name=operation_name,
            context_value=context,
            allowed_operation_types=allowed_operation_types,
        )

    async def process_result(
        self, request: Request, result: ExecutionResult
    ) -> GraphQLHTTPResponse:
        # Hook point: subclasses may post-process the execution result.
        return process_result(result)

    async def execute_request(
        self, request: Request, response: Response, data: dict, context, root_value
    ) -> Response:
        """Run one parsed GraphQL request and translate errors into HTTP responses."""
        request_data = parse_request_data(data)
        method = request.method
        allowed_operation_types = OperationType.from_http(method)
        # Mutations/subscriptions are never allowed over GET; queries can
        # additionally be disabled via ``allow_queries_via_get``.
        if not self.allow_queries_via_get and method == "GET":
            allowed_operation_types = allowed_operation_types - {OperationType.QUERY}
        try:
            result = await self.execute(
                request_data.query,
                variables=request_data.variables,
                context=context,
                operation_name=request_data.operation_name,
                root_value=root_value,
                allowed_operation_types=allowed_operation_types,
            )
        except InvalidOperationTypeError as e:
            return PlainTextResponse(
                e.as_http_error_reason(method),
                status_code=status.HTTP_400_BAD_REQUEST,
            )
        except MissingQueryError:
            missing_query_response = PlainTextResponse(
                "No GraphQL query found in the request",
                status_code=status.HTTP_400_BAD_REQUEST,
            )
            return self._merge_responses(response, missing_query_response)
        response_data = await self.process_result(request, result)
        actual_response = Response(
            self.encode_json(response_data),
            media_type="application/json",
            status_code=status.HTTP_200_OK,
        )
        return self._merge_responses(response, actual_response)

    def encode_json(self, response_data: GraphQLHTTPResponse) -> str:
        # Hook point: subclasses may customize JSON encoding.
        return json.dumps(response_data)
from __future__ import annotations
import json
from dataclasses import dataclass
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Dict, Optional, Union, cast
from starlite import (
BackgroundTasks,
Controller,
HttpMethod,
MediaType,
Provide,
Request,
Response,
WebSocket,
get,
post,
websocket,
)
from starlite.exceptions import (
NotFoundException,
SerializationException,
ValidationException,
)
from starlite.status_codes import (
HTTP_200_OK,
HTTP_400_BAD_REQUEST,
HTTP_415_UNSUPPORTED_MEDIA_TYPE,
)
from strawberry.exceptions import InvalidCustomContext, MissingQueryError
from strawberry.file_uploads.utils import replace_placeholders_with_files
from strawberry.http import (
GraphQLHTTPResponse,
parse_query_params,
parse_request_data,
process_result,
)
from strawberry.schema.exceptions import InvalidOperationTypeError
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL, GRAPHQL_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_transport_ws import (
WS_4406_PROTOCOL_NOT_ACCEPTABLE,
)
from strawberry.types.graphql import OperationType
from strawberry.utils.debug import pretty_print_graphql_operation
from strawberry.utils.graphiql import get_graphiql_html
from .handlers.graphql_transport_ws_handler import (
GraphQLTransportWSHandler as BaseGraphQLTransportWSHandler,
)
from .handlers.graphql_ws_handler import GraphQLWSHandler as BaseGraphQLWSHandler
if TYPE_CHECKING:
from typing import FrozenSet, Iterable, List, Set, Tuple, Type
from starlite.types import AnyCallable, Dependencies
from strawberry.schema import BaseSchema
from strawberry.types import ExecutionResult
# Context value handed to resolvers: a user ``BaseContext`` subclass instance,
# or a dict merged from the defaults and the user-provided entries.
# Fix: the last dict variant previously referenced ``websocket`` — the Starlite
# route *decorator function* — instead of the ``WebSocket`` connection class.
MergedContext = Union[
    "BaseContext",
    Union[
        Dict[str, Any],
        Dict[str, BackgroundTasks],
        Dict[str, Request],
        Dict[str, Response],
        Dict[str, WebSocket],
    ],
]

# What a user-supplied context getter is allowed to return.
CustomContext = Union["BaseContext", Dict[str, Any]]
async def _context_getter(
    custom_context: Optional[CustomContext],
    request: Request,
) -> MergedContext:
    """Merge the user-supplied context with the default request context."""
    if isinstance(custom_context, BaseContext):
        # Class-based contexts get the request attached in place.
        custom_context.request = request
        return custom_context
    defaults = {
        "request": request,
    }
    if custom_context is None:
        return defaults
    if isinstance(custom_context, dict):
        # Dict contexts are merged over the defaults; user keys win.
        return {**defaults, **custom_context}
    raise InvalidCustomContext()
@dataclass
class GraphQLResource:
    """Shape of a GraphQL-over-HTTP response body (data / errors / extensions)."""

    data: Optional[Dict[str, object]]
    errors: Optional[List[object]]
    extensions: Optional[Dict[str, object]]
@dataclass
class EmptyResponseModel:
    """Marker response model for routes that return no body."""

    pass
class GraphQLWSHandler(BaseGraphQLWSHandler):
    # Bridges the base handler's context/root-value hooks to the async
    # getters injected by the controller.
    async def get_context(self) -> Any:
        return await self._get_context()

    async def get_root_value(self) -> Any:
        return await self._get_root_value()
class GraphQLTransportWSHandler(BaseGraphQLTransportWSHandler):
    # Bridges the base handler's context/root-value hooks to the async
    # getters injected by the controller.
    async def get_context(self) -> Any:
        return await self._get_context()

    async def get_root_value(self) -> Any:
        return await self._get_root_value()
class BaseContext:
    """Base class for user-defined contexts.

    ``request`` and ``response`` start out as ``None`` and are populated by
    the controller before resolvers run.
    """

    def __init__(self):
        self.request: Optional[Union[Request, WebSocket]] = None
        self.response: Optional[Response] = None
def make_graphql_controller(
    schema: BaseSchema,
    path: str = "",
    graphiql: bool = True,
    allow_queries_via_get: bool = True,
    keep_alive: bool = False,
    keep_alive_interval: float = 1,
    debug: bool = False,
    # TODO: root typevar
    root_value_getter: Optional[AnyCallable] = None,
    # TODO: context typevar
    context_getter: Optional[AnyCallable] = None,
    subscription_protocols: Tuple[str, ...] = (
        GRAPHQL_TRANSPORT_WS_PROTOCOL,
        GRAPHQL_WS_PROTOCOL,
    ),
    connection_init_wait_timeout: timedelta = timedelta(minutes=1),
) -> Type[Controller]:
    """Build a Starlite ``Controller`` subclass serving ``schema``.

    The configuration arguments are captured into class attributes of the
    returned controller; ``context_getter``/``root_value_getter`` are exposed
    through Starlite's ``Provide`` dependency-injection mechanism.
    """
    routes_path = path
    # Fall back to no-op providers when the user supplies no getters.
    if context_getter is None:

        def custom_context_getter_():
            return None

    else:
        custom_context_getter_ = context_getter
    if root_value_getter is None:

        def root_value_getter_():
            return None

    else:
        root_value_getter_ = root_value_getter

    class GraphQLController(Controller):
        path: str = routes_path
        dependencies: Optional[Dependencies] = {
            "custom_context": Provide(custom_context_getter_),
            "context": Provide(_context_getter),
            "root_value": Provide(root_value_getter_),
        }
        # Subclass hooks for the two supported WebSocket subprotocols.
        graphql_ws_handler_class: Type[GraphQLWSHandler] = GraphQLWSHandler
        graphql_transport_ws_handler_class: Type[
            GraphQLTransportWSHandler
        ] = GraphQLTransportWSHandler
        _schema: BaseSchema = schema
        _graphiql: bool = graphiql
        _allow_queries_via_get: bool = allow_queries_via_get
        _keep_alive: bool = keep_alive
        _keep_alive_interval: float = keep_alive_interval
        _debug: bool = debug
        _protocols: Tuple[str, ...] = subscription_protocols
        _connection_init_wait_timeout: timedelta = connection_init_wait_timeout
        # Accept values for which the GraphiQL page may be served.
        _graphiql_allowed_accept: FrozenSet[str] = frozenset({"text/html", "*/*"})

        async def execute(
            self,
            query: Optional[str],
            variables: Optional[Dict[str, Any]] = None,
            context: Optional[CustomContext] = None,
            operation_name: Optional[str] = None,
            root_value: Optional[Any] = None,
            allowed_operation_types: Optional[Iterable[OperationType]] = None,
        ):
            """Execute ``query`` against the schema (debug-logging it if enabled)."""
            if self._debug:
                pretty_print_graphql_operation(operation_name, query or "", variables)
            return await self._schema.execute(
                query,
                root_value=root_value,
                variable_values=variables,
                operation_name=operation_name,
                context_value=context,
                allowed_operation_types=allowed_operation_types,
            )

        async def process_result(self, result: ExecutionResult) -> GraphQLHTTPResponse:
            # Hook point: subclasses may post-process the execution result.
            return process_result(result)

        async def execute_request(
            self,
            request: Request,
            data: dict,
            context: CustomContext,
            root_value: Any,
        ) -> Response[Union[GraphQLResource, str]]:
            """Run one parsed GraphQL request and translate errors into responses."""
            request_data = parse_request_data(data or {})
            allowed_operation_types = OperationType.from_http(request.method)
            # Queries over GET can be disabled; mutations/subscriptions over
            # GET are already excluded by ``from_http``.
            if not self._allow_queries_via_get and request.method == HttpMethod.GET:
                allowed_operation_types = allowed_operation_types - {
                    OperationType.QUERY
                }
            # Placeholder response that resolvers may mutate (headers,
            # cookies, status); merged into the real response at the end.
            response: Union[Response[dict], Response[BaseContext]] = Response(
                {}, background=BackgroundTasks([])
            )
            if isinstance(context, BaseContext):
                context.response = response
            elif isinstance(context, dict):
                context["response"] = response
            try:
                result = await self.execute(
                    request_data.query,
                    variables=request_data.variables,
                    context=context,
                    operation_name=request_data.operation_name,
                    root_value=root_value,
                    allowed_operation_types=allowed_operation_types,
                )
            except InvalidOperationTypeError as e:
                return Response(
                    e.as_http_error_reason(request.method),
                    status_code=HTTP_400_BAD_REQUEST,
                    media_type=MediaType.TEXT,
                )
            except MissingQueryError:
                return Response(
                    "No GraphQL query found in the request",
                    status_code=HTTP_400_BAD_REQUEST,
                    media_type=MediaType.TEXT,
                )
            response_data = await self.process_result(result)
            actual_response: Response[GraphQLHTTPResponse] = Response(
                response_data, status_code=HTTP_200_OK, media_type=MediaType.JSON
            )
            return self._merge_responses(response, actual_response)

        def should_render_graphiql(self, request: Request) -> bool:
            if not self._graphiql:
                return False
            accept: Set[str] = set()
            # NOTE(review): ``symmetric_difference_update`` cancels out values
            # appearing in more than one Accept header; a plain ``update``
            # (union) looks intended here — confirm before changing.
            for value in request.headers.getall("accept", ""):
                accept.symmetric_difference_update(set(value.split(",")))
            return bool(self._graphiql_allowed_accept & accept)

        def get_graphiql_response(self) -> Response[str]:
            html = get_graphiql_html()
            return Response(html, media_type=MediaType.HTML)

        @staticmethod
        def _merge_responses(
            response: Response, actual_response: Response
        ) -> Response[Union[GraphQLResource, str]]:
            # Carry over anything resolvers set on the placeholder response.
            actual_response.headers.update(response.headers)
            actual_response.cookies.extend(response.cookies)
            actual_response.background = response.background
            if response.status_code:
                actual_response.status_code = response.status_code
            return actual_response

        @get(raises=[ValidationException, NotFoundException])
        async def handle_http_get(
            self,
            request: Request,
            context: CustomContext,
            root_value: Any,
        ) -> Response[Union[GraphQLResource, str]]:
            # GET serves a query when query params are present, otherwise the
            # GraphiQL IDE page (or 404 if disabled).
            if request.query_params:
                try:
                    query_data = parse_query_params(
                        cast("Dict[str, Any]", request.query_params)
                    )
                except json.JSONDecodeError as error:
                    raise ValidationException(
                        detail="Unable to parse request body as JSON"
                    ) from error
                return await self.execute_request(
                    request=request,
                    data=query_data,
                    context=context,
                    root_value=root_value,
                )
            if self.should_render_graphiql(request):
                return cast(
                    "Response[Union[GraphQLResource, str]]",
                    self.get_graphiql_response(),
                )
            raise NotFoundException()

        @post(status_code=HTTP_200_OK)
        async def handle_http_post(
            self,
            request: Request,
            context: CustomContext,
            root_value: Any,
        ) -> Response[Union[GraphQLResource, str]]:
            # POST accepts JSON bodies and multipart uploads (GraphQL
            # multipart request spec); anything else is a 415.
            actual_response: Response[Union[GraphQLResource, str]]
            content_type, _ = request.content_type
            if "application/json" in content_type:
                try:
                    data = await request.json()
                except SerializationException:
                    actual_response = Response(
                        "Unable to parse request body as JSON",
                        status_code=HTTP_400_BAD_REQUEST,
                        media_type=MediaType.TEXT,
                    )
                    return actual_response
            elif content_type.startswith("multipart/form-data"):
                multipart_data = await request.form()
                operations: Dict[str, Any] = multipart_data.get("operations", "{}")
                files_map: Dict[str, List[str]] = multipart_data.get("map", "{}")
                try:
                    data = replace_placeholders_with_files(
                        operations, files_map, multipart_data
                    )
                except KeyError:
                    return Response(
                        "File(s) missing in form data",
                        status_code=HTTP_400_BAD_REQUEST,
                        media_type=MediaType.TEXT,
                    )
                except (TypeError, AttributeError):
                    return Response(
                        "Unable to parse the multipart body",
                        status_code=HTTP_400_BAD_REQUEST,
                        media_type=MediaType.TEXT,
                    )
            else:
                return Response(
                    "Unsupported Media Type",
                    status_code=HTTP_415_UNSUPPORTED_MEDIA_TYPE,
                    media_type=MediaType.TEXT,
                )
            return await self.execute_request(
                request=request,
                data=data,
                context=context,
                root_value=root_value,
            )

        @websocket()
        async def websocket_endpoint(
            self,
            socket: WebSocket,
            context: CustomContext,
            root_value: Any,
        ) -> None:
            # Wrap the already-resolved dependencies so the protocol handlers
            # (which expect async getters) can reuse them.
            async def _get_context():
                return context

            async def _get_root_value():
                return root_value

            preferred_protocol = self.pick_preferred_protocol(socket)
            if preferred_protocol == GRAPHQL_TRANSPORT_WS_PROTOCOL:
                await self.graphql_transport_ws_handler_class(
                    schema=self._schema,
                    debug=self._debug,
                    connection_init_wait_timeout=self._connection_init_wait_timeout,
                    get_context=_get_context,
                    get_root_value=_get_root_value,
                    ws=socket,
                ).handle()
            elif preferred_protocol == GRAPHQL_WS_PROTOCOL:
                await self.graphql_ws_handler_class(
                    schema=self._schema,
                    debug=self._debug,
                    keep_alive=self._keep_alive,
                    keep_alive_interval=self._keep_alive_interval,
                    get_context=_get_context,
                    get_root_value=_get_root_value,
                    ws=socket,
                ).handle()
            else:
                await socket.close(code=WS_4406_PROTOCOL_NOT_ACCEPTABLE)

        def pick_preferred_protocol(self, socket: WebSocket) -> Optional[str]:
            # Among the protocols both sides support, pick the one the client
            # listed first.
            # NOTE(review): the key returns "" (a str) for falsy entries and an
            # int otherwise; mixed key types would raise in ``min`` if an empty
            # subprotocol ever reached the intersection — confirm intent.
            protocols: List[str] = socket.scope["subprotocols"]
            intersection = set(protocols) & set(self._protocols)
            return (
                min(
                    intersection,
                    key=lambda i: protocols.index(i) if i else "",
                    default=None,
                )
                or None
            )

    return GraphQLController
from contextlib import suppress
from typing import Any, Optional
from starlite import WebSocket
from starlite.exceptions import SerializationException, WebSocketDisconnect
from strawberry.schema import BaseSchema
from strawberry.subscriptions import GRAPHQL_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_ws.handlers import BaseGraphQLWSHandler
from strawberry.subscriptions.protocols.graphql_ws.types import OperationMessage
class GraphQLWSHandler(BaseGraphQLWSHandler):
    """Legacy ``graphql-ws`` protocol handler bound to a Starlite ``WebSocket``."""

    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        keep_alive: bool,
        keep_alive_interval: float,
        get_context,
        get_root_value,
        ws: WebSocket,
    ):
        super().__init__(schema, debug, keep_alive, keep_alive_interval)
        # Zero-argument async callables supplying per-connection context/root.
        self._get_context = get_context
        self._get_root_value = get_root_value
        self._ws = ws

    async def get_context(self) -> Any:
        return await self._get_context()

    async def get_root_value(self) -> Any:
        return await self._get_root_value()

    async def send_json(self, data: OperationMessage) -> None:
        await self._ws.send_json(data)

    async def close(self, code: int = 1000, reason: Optional[str] = None) -> None:
        # Close messages are not part of the ASGI ref yet
        await self._ws.close(code=code)

    async def handle_request(self) -> Any:
        # NOTE(review): a single protocol string is passed for the plural
        # ``subprotocols`` parameter — confirm Starlite accepts a plain str.
        await self._ws.accept(subprotocols=GRAPHQL_WS_PROTOCOL)
        try:
            # Pump messages until the client disconnects.
            while self._ws.connection_state != "disconnect":
                try:
                    message = await self._ws.receive_json()
                except (SerializationException, ValueError):
                    # Ignore non-text messages
                    continue
                else:
                    await self.handle_message(message)
        except WebSocketDisconnect:  # pragma: no cover
            pass
        finally:
            # Stop the keep-alive ping task before tearing down subscriptions.
            if self.keep_alive_task:
                self.keep_alive_task.cancel()
                with suppress(BaseException):
                    await self.keep_alive_task
            for operation_id in list(self.subscriptions.keys()):
                await self.cleanup_operation(operation_id)
from datetime import timedelta
from typing import Any
from starlite import WebSocket
from starlite.exceptions import SerializationException, WebSocketDisconnect
from strawberry.schema import BaseSchema
from strawberry.subscriptions import GRAPHQL_TRANSPORT_WS_PROTOCOL
from strawberry.subscriptions.protocols.graphql_transport_ws.handlers import (
BaseGraphQLTransportWSHandler,
)
class GraphQLTransportWSHandler(BaseGraphQLTransportWSHandler):
    """Starlite-specific handler for the ``graphql-transport-ws`` protocol.

    Bridges the protocol-agnostic base handler to a Starlite ``WebSocket``
    and to the request-scoped context/root-value factories.
    """

    def __init__(
        self,
        schema: BaseSchema,
        debug: bool,
        connection_init_wait_timeout: timedelta,
        get_context,
        get_root_value,
        ws: WebSocket,
    ):
        super().__init__(schema, debug, connection_init_wait_timeout)
        # Zero-argument async factories supplied by the HTTP layer.
        self._get_context = get_context
        self._get_root_value = get_root_value
        self._ws = ws

    async def get_context(self) -> Any:
        """Return the per-connection execution context."""
        return await self._get_context()

    async def get_root_value(self) -> Any:
        """Return the root value used for operation execution."""
        return await self._get_root_value()

    async def send_json(self, data: dict) -> None:
        """Serialize and send a protocol message to the client."""
        await self._ws.send_json(data)

    async def close(self, code: int, reason: str) -> None:
        # Close messages are not part of the ASGI ref yet
        await self._ws.close(code=code)

    async def handle_request(self) -> None:
        """Run the connection lifecycle: accept, message loop, cleanup."""
        await self._ws.accept(subprotocols=GRAPHQL_TRANSPORT_WS_PROTOCOL)

        try:
            while self._ws.connection_state != "disconnect":
                try:
                    message = await self._ws.receive_json()
                except (SerializationException, ValueError):
                    # Unlike the legacy protocol, malformed frames are a
                    # protocol violation here and are reported to the client.
                    error_message = "WebSocket message type must be text"
                    await self.handle_invalid_message(error_message)
                else:
                    await self.handle_message(message)
        except WebSocketDisconnect:  # pragma: no cover
            pass
        finally:
            # Tear down every subscription still active for this connection.
            for operation_id in list(self.subscriptions.keys()):
                await self.cleanup_operation(operation_id)
            await self.reap_completed_tasks()
import os
import sys
import threading
from types import TracebackType
from typing import Any, Callable, Optional, Tuple, Type, cast
from .exception import StrawberryException, UnableToFindExceptionSource
# ``threading.excepthook`` was added in Python 3.8; keep a reference to the
# interpreter's original hook so it can be delegated to or restored later.
# On older versions there is nothing to preserve.
if sys.version_info >= (3, 8):
    original_threading_exception_hook = threading.excepthook
else:
    original_threading_exception_hook = None
# Signature shared by ``sys.excepthook``-style exception handlers.
ExceptionHandler = Callable[
    [Type[BaseException], BaseException, Optional[TracebackType]], None
]
def should_use_rich_exceptions() -> bool:
    """Rich error output is on unless STRAWBERRY_DISABLE_RICH_ERRORS opts out."""
    flag = os.environ.get("STRAWBERRY_DISABLE_RICH_ERRORS", "").lower()
    return flag not in ("true", "1", "yes")
def _get_handler(exception_type: Type[BaseException]) -> ExceptionHandler:
    """Pick the handler used to print *exception_type*.

    Strawberry's own exceptions are rendered with ``rich`` when it is
    installed; everything else falls back to the interpreter's default hook.
    """
    if not issubclass(exception_type, StrawberryException):
        return sys.__excepthook__

    try:
        import rich
    except ImportError:
        return sys.__excepthook__

    def _handler(
        exception_type: Type[BaseException],
        exception: BaseException,
        traceback: Optional[TracebackType],
    ):
        try:
            rich.print(exception)
        # rich rendering may fail to locate the offending source; in that
        # case fall back to the original exception handler
        except UnableToFindExceptionSource:
            sys.__excepthook__(exception_type, exception, traceback)

    return _handler
def strawberry_exception_handler(
    exception_type: Type[BaseException],
    exception: BaseException,
    traceback: Optional[TracebackType],
) -> None:
    """``sys.excepthook`` replacement delegating to the picked handler."""
    handler = _get_handler(exception_type)
    handler(exception_type, exception, traceback)
def strawberry_threading_exception_handler(
    args: Tuple[
        Type[BaseException],
        Optional[BaseException],
        Optional[TracebackType],
        Optional[threading.Thread],
    ]
) -> None:
    """``threading.excepthook`` replacement mirroring the ``sys`` hook.

    When no exception object is available the call is forwarded to the
    interpreter's original threading hook (which exists on Python 3.8+).
    """
    exception_type, exception, traceback = args[0], args[1], args[2]

    if exception is None:
        if sys.version_info >= (3, 8):
            # the cast silences mypy: the original hook is only None on
            # Python < 3.7, where this handler is never installed as
            # ``threading.excepthook`` does not exist there
            cast(Any, original_threading_exception_hook)(args)
        return

    handler = _get_handler(exception_type)
    handler(exception_type, exception, traceback)
def reset_exception_handler() -> None:
    """Restore the interpreter's default exception hooks."""
    sys.excepthook = sys.__excepthook__
    if sys.version_info < (3, 8):
        return
    threading.excepthook = original_threading_exception_hook
def setup_exception_handler() -> None:
    """Install the rich-aware hooks unless rich errors are disabled."""
    if not should_use_rich_exceptions():
        return
    sys.excepthook = strawberry_exception_handler
    if sys.version_info >= (3, 8):
        threading.excepthook = strawberry_threading_exception_handler
from __future__ import annotations
from inspect import getframeinfo, stack
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Type
from strawberry.exceptions.utils.source_finder import SourceFinder
from strawberry.utils.cached_property import cached_property
from .exception import StrawberryException
if TYPE_CHECKING:
from strawberry.union import StrawberryUnion
from .exception_source import ExceptionSource
class InvalidUnionTypeError(StrawberryException):
    """The union is constructed with an invalid type"""

    invalid_type: object

    def __init__(self, union_name: str, invalid_type: object) -> None:
        from strawberry.custom_scalar import ScalarWrapper

        self.union_name = union_name
        self.invalid_type = invalid_type

        # assuming that the exception happens two stack frames above the current one.
        # one is our code checking for invalid types, the other is the caller
        # NOTE(review): this frame index goes stale if __init__ is ever called
        # through an extra wrapper — keep the call depth in mind when editing.
        self.frame = getframeinfo(stack()[2][0])

        # ScalarWrapper hides the wrapped type, so unwrap it to report the
        # name the user actually wrote.
        if isinstance(invalid_type, ScalarWrapper):
            type_name = invalid_type.wrap.__name__
        else:
            try:
                type_name = invalid_type.__name__  # type: ignore
            except AttributeError:
                # might be StrawberryList instance
                type_name = invalid_type.__class__.__name__

        self.message = f"Type `{type_name}` cannot be used in a GraphQL Union"
        self.rich_message = (
            f"Type `[underline]{type_name}[/]` cannot be used in a GraphQL Union"
        )
        # NOTE(review): suggestion text reads oddly ("replace the type a
        # strawberry.type") — likely missing the word "with"; confirm before fixing
        # since it is a user-facing string.
        self.suggestion = (
            "To fix this error you should replace the type a strawberry.type"
        )
        self.annotation_message = "invalid type here"

    @cached_property
    def exception_source(self) -> Optional[ExceptionSource]:
        # Locate the strawberry.union(...) call in the captured frame's file.
        path = Path(self.frame.filename)

        source_finder = SourceFinder()

        return source_finder.find_union_call(path, self.union_name, self.invalid_type)
class InvalidTypeForUnionMergeError(StrawberryException):
    """A specialized version of InvalidUnionTypeError for when trying
    to merge unions using the pipe operator."""

    invalid_type: Type

    def __init__(self, union: StrawberryUnion, other: object) -> None:
        self.union = union
        self.other = other

        # assuming that the exception happens two stack frames above the current one.
        # one is our code checking for invalid types, the other is the caller
        # NOTE(review): this frame index goes stale if __init__ is ever called
        # through an extra wrapper — keep the call depth in mind when editing.
        self.frame = getframeinfo(stack()[2][0])

        other_name = getattr(other, "__name__", str(other))

        self.message = f"`{other_name}` cannot be used when merging GraphQL Unions"
        self.rich_message = (
            f"`[underline]{other_name}[/]` cannot be used when merging GraphQL Unions"
        )
        self.suggestion = ""
        self.annotation_message = "invalid type here"

    @cached_property
    def exception_source(self) -> Optional[ExceptionSource]:
        # Locate the offending ``... | other`` expression in the frame's file.
        source_finder = SourceFinder()

        return source_finder.find_union_merge(self.union, self.other, frame=self.frame)
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from strawberry.utils.cached_property import cached_property
from .exception import StrawberryException
from .utils.source_finder import SourceFinder
if TYPE_CHECKING:
from strawberry.arguments import StrawberryArgument
from strawberry.types.fields.resolver import StrawberryResolver
from strawberry.types.types import TypeDefinition
from .exception_source import ExceptionSource
class InvalidArgumentTypeError(StrawberryException):
    """Raised when a resolver argument is annotated with a GraphQL output
    type (union or interface) that cannot be used as an input."""

    def __init__(
        self,
        resolver: StrawberryResolver,
        argument: StrawberryArgument,
    ):
        from strawberry.union import StrawberryUnion

        self.function = resolver.wrapped_func
        self.argument_name = argument.python_name

        # Work out whether the offending annotation is a union, an interface,
        # or something else entirely.
        if isinstance(argument.type, StrawberryUnion):
            argument_type = "union"
        else:
            definition: Optional[TypeDefinition] = getattr(
                argument.type, "_type_definition", None
            )
            argument_type = (
                "interface" if definition and definition.is_interface else "unknown"
            )

        self.message = (
            f'Argument "{self.argument_name}" on field '
            f'"{resolver.name}" cannot be of type '
            f'"{argument_type}"'
        )
        self.rich_message = self.message

        suggestions = {
            "union": "Unions are not supported as arguments in GraphQL.",
            "interface": "Interfaces are not supported as arguments in GraphQL.",
        }
        self.suggestion = suggestions.get(
            argument_type,
            f"{self.argument_name} is not supported as an argument.",
        )

        self.annotation_message = (
            f'Argument "{self.argument_name}" cannot be of type "{argument_type}"'
        )

    @cached_property
    def exception_source(self) -> Optional[ExceptionSource]:
        if self.function is None:
            return None  # pragma: no cover

        finder = SourceFinder()
        return finder.find_argument_from_object(
            self.function, self.argument_name  # type: ignore
        )
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Optional
from strawberry.utils.cached_property import cached_property
from .exception import StrawberryException
from .utils.source_finder import SourceFinder
if TYPE_CHECKING:
from .exception_source import ExceptionSource
class ObjectIsNotClassError(StrawberryException):
    """Raised when ``strawberry.type``/``input``/``interface`` is applied to
    an object (typically a function) that is not a class."""

    class MethodType(Enum):
        INPUT = "input"
        INTERFACE = "interface"
        TYPE = "type"

    def __init__(self, obj: object, method_type: MethodType):
        self.obj = obj
        self.function = obj

        # TODO: assert obj is a function for now and skip the error if it is
        # something else
        obj_name = obj.__name__  # type: ignore

        self.message = (
            f"strawberry.{method_type.value} can only be used with class types. "
            f"Provided object {obj_name} is not a type."
        )

        self.rich_message = (
            f"strawberry.{method_type.value} can only be used with class types. "
            f"Provided object `[underline]{obj_name}[/]` is not a type."
        )

        self.annotation_message = "function defined here"
        # fix: the suggestion previously read "make sure your use"
        self.suggestion = (
            "To fix this error, make sure you use "
            f"strawberry.{method_type.value} on a class."
        )

        super().__init__(self.message)

    @classmethod
    def input(cls, obj: object) -> ObjectIsNotClassError:
        """Build the error for ``strawberry.input``."""
        return cls(obj, cls.MethodType.INPUT)

    @classmethod
    def interface(cls, obj: object) -> ObjectIsNotClassError:
        """Build the error for ``strawberry.interface``."""
        return cls(obj, cls.MethodType.INTERFACE)

    @classmethod
    def type(cls, obj: object) -> ObjectIsNotClassError:
        """Build the error for ``strawberry.type``."""
        return cls(obj, cls.MethodType.TYPE)

    @cached_property
    def exception_source(self) -> Optional[ExceptionSource]:
        if self.function is None:
            return None  # pragma: no cover

        source_finder = SourceFinder()

        return source_finder.find_function_from_object(self.function)  # type: ignore
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from strawberry.utils.cached_property import cached_property
from strawberry.utils.str_converters import to_kebab_case
if TYPE_CHECKING:
from rich.console import RenderableType
from .exception_source import ExceptionSource
class UnableToFindExceptionSource(Exception):
    """Internal exception raised when we can't find the exception source."""
class StrawberryException(Exception, ABC):
    """Base class for Strawberry errors that can render themselves with
    ``rich``, pointing at the offending source code.

    Subclasses populate ``message``/``rich_message``/``suggestion``/
    ``annotation_message`` and implement ``exception_source``.
    """

    message: str
    rich_message: str
    suggestion: str
    annotation_message: str

    def __init__(self, message: str) -> None:
        self.message = message

    def __str__(self) -> str:
        return self.message

    @property
    def documentation_path(self) -> str:
        # e.g. ``MissingFieldAnnotationError`` -> ``missing-field-annotation``
        return to_kebab_case(self.__class__.__name__.replace("Error", ""))

    @property
    def documentation_url(self) -> str:
        prefix = "https://errors.strawberry.rocks/"

        return prefix + self.documentation_path

    @cached_property
    @abstractmethod
    def exception_source(self) -> Optional[ExceptionSource]:
        """Location of the code that triggered the error; ``None`` when it
        cannot be determined."""
        return None

    @property
    def __rich_header__(self) -> RenderableType:
        return f"[bold red]error: {self.rich_message}"

    @property
    def __rich_body__(self) -> RenderableType:
        assert self.exception_source

        return self._get_error_inline(self.exception_source, self.annotation_message)

    @property
    def __rich_footer__(self) -> RenderableType:
        return (
            f"{self.suggestion}\n\n"
            "Read more about this error on [bold underline]"
            f"[link={self.documentation_url}]{self.documentation_url}"
        ).strip()

    def __rich__(self) -> Optional[RenderableType]:
        """Render the full error panel for rich consoles.

        Raises:
            UnableToFindExceptionSource: when the offending code cannot be
                located (the caller falls back to the plain traceback).
        """
        from rich.box import SIMPLE
        from rich.console import Group
        from rich.panel import Panel

        if self.exception_source is None:
            raise UnableToFindExceptionSource() from self

        content = (
            self.__rich_header__,
            "",
            self.__rich_body__,
            "",
            "",
            self.__rich_footer__,
        )

        return Panel.fit(
            Group(*content),
            box=SIMPLE,
        )

    def _get_error_inline(
        self, exception_source: ExceptionSource, message: str
    ) -> RenderableType:
        """Return a renderable showing the annotated source snippet."""
        source_file = exception_source.path
        relative_path = exception_source.path_relative_to_cwd
        error_line = exception_source.error_line

        from rich.console import Group

        from .syntax import Syntax

        path = f"[white] @ [link=file://{source_file}]{relative_path}:{error_line}"

        # Build the caret annotation placed under the offending columns.
        prefix = " " * exception_source.error_column
        caret = "^" * (
            exception_source.error_column_end - exception_source.error_column
        )

        message = f"{prefix}[bold]{caret}[/] {message}"

        error_line = exception_source.error_line
        line_annotations = {error_line: message}

        return Group(
            path,
            "",
            Syntax(
                code=exception_source.code,
                highlight_lines={error_line},
                line_offset=exception_source.start_line - 1,
                line_annotations=line_annotations,
                line_range=(
                    exception_source.start_line - 1,
                    exception_source.end_line,
                ),
            ),
        )
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Type
from strawberry.utils.cached_property import cached_property
from .exception import StrawberryException
from .utils.source_finder import SourceFinder
if TYPE_CHECKING:
from rich.console import RenderableType
from .exception_source import ExceptionSource
class DuplicatedTypeName(StrawberryException):
    """Raised when the same type with different definition is reused inside a schema"""

    def __init__(
        self,
        first_cls: Optional[Type],
        second_cls: Optional[Type],
        duplicated_type_name: str,
    ):
        self.first_cls = first_cls
        self.second_cls = second_cls

        self.message = (
            f"Type {duplicated_type_name} is defined multiple times in the schema"
        )
        self.rich_message = (
            f"Type `[underline]{duplicated_type_name}[/]` "
            "is defined multiple times in the schema"
        )
        self.suggestion = (
            "To fix this error you should either rename the type or "
            "remove the duplicated definition."
        )

        super().__init__(self.message)

    @property
    def __rich_body__(self) -> RenderableType:
        # Without both classes there is nothing to point at.
        if self.first_cls is None or self.second_cls is None:
            return ""

        from rich.console import Group

        first_source = self.exception_source
        assert first_source

        second_source = SourceFinder().find_class_from_object(self.second_cls)

        first_inline = self._get_error_inline(first_source, "first class defined here")

        if second_source is None:
            return first_inline

        return Group(
            first_inline,
            "",
            self._get_error_inline(second_source, "second class defined here"),
        )

    @cached_property
    def exception_source(self) -> Optional[ExceptionSource]:
        if self.first_cls is None:
            return None  # pragma: no cover

        return SourceFinder().find_class_from_object(self.first_cls)
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Set, Union
from graphql import GraphQLError
from strawberry.utils.cached_property import cached_property
from .duplicated_type_name import DuplicatedTypeName
from .exception import StrawberryException, UnableToFindExceptionSource
from .handler import setup_exception_handler
from .invalid_argument_type import InvalidArgumentTypeError
from .invalid_union_type import InvalidTypeForUnionMergeError, InvalidUnionTypeError
from .missing_arguments_annotations import MissingArgumentsAnnotationsError
from .missing_field_annotation import MissingFieldAnnotationError
from .missing_return_annotation import MissingReturnAnnotationError
from .not_a_strawberry_enum import NotAStrawberryEnumError
from .object_is_not_a_class import ObjectIsNotClassError
from .object_is_not_an_enum import ObjectIsNotAnEnumError
from .private_strawberry_field import PrivateStrawberryFieldError
from .scalar_already_registered import ScalarAlreadyRegisteredError
from .unresolved_field_type import UnresolvedFieldTypeError
if TYPE_CHECKING:
from graphql import GraphQLInputObjectType, GraphQLObjectType
from strawberry.type import StrawberryType
from .exception_source import ExceptionSource
# Install the rich-aware exception hooks as soon as this package is imported.
setup_exception_handler()
# TODO: this doesn't seem to be tested
class WrongReturnTypeForUnion(Exception):
    """The Union type cannot be resolved because it's not a field"""

    def __init__(self, field_name: str, result_type: str):
        # fix: the previous message had a stray space before the comma
        # (``..."{field_name}" , are you...``)
        message = (
            f'The type "{result_type}" cannot be resolved for the field "{field_name}"'
            ", are you using a strawberry.field?"
        )

        super().__init__(message)
# TODO: this doesn't seem to be tested
class UnallowedReturnTypeForUnion(Exception):
"""The return type is not in the list of Union types"""
def __init__(
self, field_name: str, result_type: str, allowed_types: Set[GraphQLObjectType]
):
formatted_allowed_types = list(sorted(type_.name for type_ in allowed_types))
message = (
f'The type "{result_type}" of the field "{field_name}" '
f'is not in the list of the types of the union: "{formatted_allowed_types}"'
)
super().__init__(message)
# TODO: this doesn't seem to be tested
class InvalidTypeInputForUnion(Exception):
    """Raised when a union annotation refers to an input type."""

    def __init__(self, annotation: GraphQLInputObjectType):
        super().__init__(
            f"Union for {annotation} is not supported because it is an Input type"
        )
# TODO: this doesn't seem to be tested
class MissingTypesForGenericError(Exception):
    """Raised when a generic types was used without passing any type."""

    def __init__(self, annotation: Union[StrawberryType, type]):
        super().__init__(
            f'The type "{repr(annotation)}" is generic, but no type has been passed'
        )
class UnsupportedTypeError(StrawberryException):
    """Raised when an annotation cannot be converted to a GraphQL type."""

    def __init__(self, annotation: Union[StrawberryType, type]):
        super().__init__(f"{annotation} conversion is not supported")

    @cached_property
    def exception_source(self) -> Optional[ExceptionSource]:
        # There is no useful source location for this error.
        return None
class MultipleStrawberryArgumentsError(Exception):
    """Raised when an argument annotation carries more than one
    ``strawberry.argument``."""

    def __init__(self, argument_name: str):
        super().__init__(
            f"Annotation for argument `{argument_name}` cannot have multiple "
            f"`strawberry.argument`s"
        )
class WrongNumberOfResultsReturned(Exception):
    """Raised when a dataloader returns a different number of results than
    was requested."""

    def __init__(self, expected: int, received: int):
        super().__init__(
            "Received wrong number of results in dataloader, "
            f"expected: {expected}, received: {received}"
        )
class FieldWithResolverAndDefaultValueError(Exception):
    """Raised when a field defines both a resolver and a default value."""

    def __init__(self, field_name: str, type_name: str):
        super().__init__(
            f'Field "{field_name}" on type "{type_name}" cannot define a default '
            "value and a resolver."
        )
class FieldWithResolverAndDefaultFactoryError(Exception):
    """Raised when a field defines both a resolver and a default_factory."""

    def __init__(self, field_name: str, type_name: str):
        super().__init__(
            f'Field "{field_name}" on type "{type_name}" cannot define a '
            "default_factory and a resolver."
        )
class MissingQueryError(Exception):
    """Raised when an incoming request carries no "query" value."""

    def __init__(self):
        super().__init__('Request data is missing a "query" value')
class InvalidDefaultFactoryError(Exception):
    """Raised when a ``default_factory`` takes required arguments."""

    def __init__(self):
        super().__init__(
            "`default_factory` must be a callable that requires no arguments"
        )
class InvalidCustomContext(Exception):
    """Raised when a custom context object is of the wrong python type"""

    def __init__(self):
        super().__init__(
            "The custom context must be either a class "
            "that inherits from BaseContext or a dictionary"
        )
class StrawberryGraphQLError(GraphQLError):
    """Use it when you want to override the graphql.GraphQLError in custom extensions"""
# Explicit public API of ``strawberry.exceptions``.
__all__ = [
    "StrawberryException",
    "UnableToFindExceptionSource",
    "MissingArgumentsAnnotationsError",
    "MissingReturnAnnotationError",
    "WrongReturnTypeForUnion",
    "UnallowedReturnTypeForUnion",
    "ObjectIsNotAnEnumError",
    "ObjectIsNotClassError",
    "InvalidUnionTypeError",
    "InvalidTypeForUnionMergeError",
    "MissingTypesForGenericError",
    "UnsupportedTypeError",
    "UnresolvedFieldTypeError",
    "PrivateStrawberryFieldError",
    "MultipleStrawberryArgumentsError",
    "NotAStrawberryEnumError",
    "ScalarAlreadyRegisteredError",
    "WrongNumberOfResultsReturned",
    "FieldWithResolverAndDefaultValueError",
    "FieldWithResolverAndDefaultFactoryError",
    "MissingQueryError",
    "InvalidArgumentTypeError",
    "InvalidDefaultFactoryError",
    "InvalidCustomContext",
    "MissingFieldAnnotationError",
    "DuplicatedTypeName",
    "StrawberryGraphQLError",
]
from __future__ import annotations
import importlib
import importlib.util
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Optional, Sequence, Type, cast
from strawberry.utils.cached_property import cached_property
from ..exception_source import ExceptionSource
if TYPE_CHECKING:
from inspect import Traceback
from libcst import BinaryOperation, Call, CSTNode, FunctionDef
from strawberry.custom_scalar import ScalarDefinition
from strawberry.union import StrawberryUnion
@dataclass
class SourcePath:
    """A module's resolved file path together with its source text."""

    # Absolute path of the ``.py`` file the module was loaded from.
    path: Path
    # Full source text of that file.
    code: str
class LibCSTSourceFinder:
    """Locates source positions of classes, functions, arguments and
    strawberry-specific calls using libcst's matchers and metadata.

    Raises ImportError on construction when libcst is not installed; the
    ``SourceFinder`` facade catches that and degrades gracefully.
    """

    def __init__(self) -> None:
        # Imported lazily so this class can be constructed only when the
        # optional libcst dependency is available.
        self.cst = importlib.import_module("libcst")

    def find_source(self, module: str) -> Optional[SourcePath]:
        """Resolve *module* (dotted name) to its ``.py`` file and source."""
        # todo: support for pyodide

        source_module = sys.modules.get(module)

        path = None

        if source_module is None:
            # Module not imported yet; fall back to the import machinery.
            spec = importlib.util.find_spec(module)

            if spec is not None and spec.origin is not None:
                path = Path(spec.origin)
        elif source_module.__file__ is not None:
            path = Path(source_module.__file__)

        if path is None:
            return None

        # Only real on-disk Python files can be parsed and linked to.
        if not path.exists() or path.suffix != ".py":
            return None  # pragma: no cover

        source = path.read_text()

        return SourcePath(path=path, code=source)

    def _find(self, source: str, matcher: Any) -> Sequence[CSTNode]:
        """Parse *source* and return all nodes matching *matcher*.

        Side effect: caches the metadata wrapper plus position/parent
        metadata on ``self`` for use by the subsequent lookups.
        """
        from libcst.metadata import (
            MetadataWrapper,
            ParentNodeProvider,
            PositionProvider,
        )

        module = self.cst.parse_module(source)

        self._metadata_wrapper = MetadataWrapper(module)
        self._position_metadata = self._metadata_wrapper.resolve(PositionProvider)
        self._parent_metadata = self._metadata_wrapper.resolve(ParentNodeProvider)

        import libcst.matchers as m

        return m.findall(self._metadata_wrapper, matcher)

    def _find_definition_by_qualname(
        self, qualname: str, nodes: Sequence[CSTNode]
    ) -> Optional[CSTNode]:
        """Pick from *nodes* the definition whose reconstructed qualified
        name equals *qualname* (e.g. ``Outer.Inner.method``)."""
        from libcst import ClassDef, FunctionDef

        for definition in nodes:
            # walk up all the parents of the definition, collecting the
            # enclosing class/function names to rebuild the qualname
            parent: Optional[CSTNode] = definition

            stack = []

            while parent:
                if isinstance(parent, ClassDef):
                    stack.append(parent.name.value)

                if isinstance(parent, FunctionDef):
                    stack.extend(("<locals>", parent.name.value))

                parent = self._parent_metadata.get(parent)

            # the innermost definition contributes a trailing "<locals>"
            # only when it is itself a function; drop it to match
            # ``__qualname__``'s format
            if stack[0] == "<locals>":
                stack.pop(0)

            found_class_name = ".".join(reversed(stack))

            if found_class_name == qualname:
                return definition

        return None

    def _find_function_definition(
        self, source: SourcePath, function: Callable
    ) -> Optional[FunctionDef]:
        """Locate the ``FunctionDef`` node for *function* in *source*."""
        import libcst.matchers as m

        matcher = m.FunctionDef(name=m.Name(value=function.__name__))

        function_defs = self._find(source.code, matcher)

        return cast(
            "FunctionDef",
            self._find_definition_by_qualname(function.__qualname__, function_defs),
        )

    def _find_class_definition(
        self, source: SourcePath, cls: Type
    ) -> Optional[CSTNode]:
        """Locate the ``ClassDef`` node for *cls* in *source*."""
        import libcst.matchers as m

        matcher = m.ClassDef(name=m.Name(value=cls.__name__))

        class_defs = self._find(source.code, matcher)

        return self._find_definition_by_qualname(cls.__qualname__, class_defs)

    def find_class(self, cls: Type) -> Optional[ExceptionSource]:
        """Return the source span of *cls*, underlining its name."""
        source = self.find_source(cls.__module__)

        if source is None:
            return None  # pragma: no cover

        class_def = self._find_class_definition(source, cls)

        if class_def is None:
            return None  # pragma: no cover

        position = self._position_metadata[class_def]

        # point the error columns at the class name, past the keyword
        column_start = position.start.column + len("class ")

        return ExceptionSource(
            path=source.path,
            code=source.code,
            start_line=position.start.line,
            error_line=position.start.line,
            end_line=position.end.line,
            error_column=column_start,
            error_column_end=column_start + len(cls.__name__),
        )

    def find_class_attribute(
        self, cls: Type, attribute_name: str
    ) -> Optional[ExceptionSource]:
        """Return the span of *cls*, underlining the named attribute."""
        source = self.find_source(cls.__module__)

        if source is None:
            return None  # pragma: no cover

        class_def = self._find_class_definition(source, cls)

        if class_def is None:
            return None  # pragma: no cover

        import libcst.matchers as m
        from libcst import AnnAssign

        # plain assignment (``x = ...``) or annotated (``x: int = ...``)
        attribute_definitions = m.findall(
            class_def,
            m.AssignTarget(target=m.Name(value=attribute_name))
            | m.AnnAssign(target=m.Name(value=attribute_name)),
        )

        if not attribute_definitions:
            return None

        attribute_definition = attribute_definitions[0]

        # for annotated assignments, underline only the target name
        if isinstance(attribute_definition, AnnAssign):
            attribute_definition = attribute_definition.target

        class_position = self._position_metadata[class_def]
        attribute_position = self._position_metadata[attribute_definition]

        return ExceptionSource(
            path=source.path,
            code=source.code,
            start_line=class_position.start.line,
            error_line=attribute_position.start.line,
            end_line=class_position.end.line,
            error_column=attribute_position.start.column,
            error_column_end=attribute_position.end.column,
        )

    def find_function(self, function: Callable) -> Optional[ExceptionSource]:
        """Return the span of *function*, underlining its name."""
        source = self.find_source(function.__module__)

        if source is None:
            return None  # pragma: no cover

        function_def = self._find_function_definition(source, function)

        if function_def is None:
            return None  # pragma: no cover

        position = self._position_metadata[function_def]

        # skip past ``def `` (and ``async `` if present), preserving the
        # exact whitespace the author used
        prefix = f"def{function_def.whitespace_after_def.value}"

        if function_def.asynchronous:
            prefix = f"async{function_def.asynchronous.whitespace_after.value}{prefix}"

        function_prefix = len(prefix)

        error_column = position.start.column + function_prefix
        error_column_end = error_column + len(function.__name__)

        return ExceptionSource(
            path=source.path,
            code=source.code,
            start_line=position.start.line,
            error_line=position.start.line,
            end_line=position.end.line,
            error_column=error_column,
            error_column_end=error_column_end,
        )

    def find_argument(
        self, function: Callable, argument_name: str
    ) -> Optional[ExceptionSource]:
        """Return the span of *function*, underlining the named parameter."""
        source = self.find_source(function.__module__)

        if source is None:
            return None  # pragma: no cover

        function_def = self._find_function_definition(source, function)

        if function_def is None:
            return None  # pragma: no cover

        import libcst.matchers as m

        argument_defs = m.findall(
            function_def,
            m.Param(name=m.Name(value=argument_name)),
        )

        if not argument_defs:
            return None  # pragma: no cover

        argument_def = argument_defs[0]

        function_position = self._position_metadata[function_def]
        position = self._position_metadata[argument_def]

        return ExceptionSource(
            path=source.path,
            code=source.code,
            start_line=function_position.start.line,
            end_line=function_position.end.line,
            error_line=position.start.line,
            error_column=position.start.column,
            error_column_end=position.end.column,
        )

    def find_union_call(
        self, path: Path, union_name: str, invalid_type: object
    ) -> Optional[ExceptionSource]:
        """Find the ``strawberry.union(<union_name>, ...)`` call in *path*,
        underlining *invalid_type* inside its types argument when possible."""
        import libcst.matchers as m

        source = path.read_text()

        invalid_type_name = getattr(invalid_type, "__name__", None)

        # only match the types tuple/list when the offending type has a
        # name we can look for
        types_arg_matcher = (
            [
                m.Tuple(
                    elements=[
                        m.ZeroOrMore(),
                        m.Element(value=m.Name(value=invalid_type_name)),
                        m.ZeroOrMore(),
                    ],
                )
                | m.List(
                    elements=[
                        m.ZeroOrMore(),
                        m.Element(value=m.Name(value=invalid_type_name)),
                        m.ZeroOrMore(),
                    ],
                )
            ]
            if invalid_type_name is not None
            else []
        )

        # match both ``strawberry.union(...)`` and a bare ``union(...)``,
        # with the union name quoted either way
        matcher = m.Call(
            func=m.Attribute(
                value=m.Name(value="strawberry"),
                attr=m.Name(value="union"),
            )
            | m.Name(value="union"),
            args=[
                m.Arg(value=m.SimpleString(value=f"'{union_name}'"))
                | m.Arg(value=m.SimpleString(value=f'"{union_name}"')),
                m.Arg(*types_arg_matcher),  # type: ignore
            ],
        )

        union_calls = self._find(source, matcher)

        if not union_calls:
            return None  # pragma: no cover

        union_call = cast("Call", union_calls[0])

        if invalid_type_name:
            # narrow the annotation to the offending element of the
            # types argument
            invalid_type_nodes = m.findall(
                union_call.args[1],
                m.Element(value=m.Name(value=invalid_type_name)),
            )

            if not invalid_type_nodes:
                return None  # pragma: no cover

            invalid_type_node = invalid_type_nodes[0]
        else:
            # nameless type: underline the whole call instead
            invalid_type_node = union_call

        position = self._position_metadata[union_call]
        invalid_type_node_position = self._position_metadata[invalid_type_node]

        return ExceptionSource(
            path=path,
            code=source,
            start_line=position.start.line,
            error_line=invalid_type_node_position.start.line,
            end_line=position.end.line,
            error_column=invalid_type_node_position.start.column,
            error_column_end=invalid_type_node_position.end.column,
        )

    def find_union_merge(
        self, union: StrawberryUnion, other: object, frame: Traceback
    ) -> Optional[ExceptionSource]:
        """Find the ``<union> | <other>`` expression in the frame's file,
        underlining the right-hand side."""
        import libcst.matchers as m

        path = Path(frame.filename)
        source = path.read_text()

        other_name = getattr(other, "__name__", None)

        if other_name is None:
            return None  # pragma: no cover

        matcher = m.BinaryOperation(operator=m.BitOr(), right=m.Name(value=other_name))

        merge_calls = self._find(source, matcher)

        if not merge_calls:
            return None  # pragma: no cover

        merge_call_node = cast("BinaryOperation", merge_calls[0])
        invalid_type_node = merge_call_node.right

        position = self._position_metadata[merge_call_node]
        invalid_type_node_position = self._position_metadata[invalid_type_node]

        return ExceptionSource(
            path=path,
            code=source,
            start_line=position.start.line,
            error_line=invalid_type_node_position.start.line,
            end_line=position.end.line,
            error_column=invalid_type_node_position.start.column,
            error_column_end=invalid_type_node_position.end.column,
        )

    def find_scalar_call(
        self, scalar_definition: ScalarDefinition
    ) -> Optional[ExceptionSource]:
        """Find the ``strawberry.scalar(..., name=...)`` call that created
        *scalar_definition*, underlining its ``name`` argument."""
        if scalar_definition._source_file is None:
            return None  # pragma: no cover

        import libcst.matchers as m

        path = Path(scalar_definition._source_file)
        source = path.read_text()

        # match both ``strawberry.scalar(...)`` and a bare ``scalar(...)``
        # with a ``name=`` keyword quoted either way
        matcher = m.Call(
            func=m.Attribute(value=m.Name(value="strawberry"), attr=m.Name("scalar"))
            | m.Name("scalar"),
            args=[
                m.ZeroOrMore(),
                m.Arg(
                    keyword=m.Name(value="name"),
                    value=m.SimpleString(value=f"'{scalar_definition.name}'")
                    | m.SimpleString(value=f'"{scalar_definition.name}"'),
                ),
                m.ZeroOrMore(),
            ],
        )

        scalar_calls = self._find(source, matcher)

        if not scalar_calls:
            return None  # pragma: no cover

        scalar_call_node = scalar_calls[0]

        argument_node = m.findall(
            scalar_call_node,
            m.Arg(
                keyword=m.Name(value="name"),
            ),
        )

        position = self._position_metadata[scalar_call_node]
        argument_node_position = self._position_metadata[argument_node[0]]

        return ExceptionSource(
            path=path,
            code=source,
            start_line=position.start.line,
            end_line=position.end.line,
            error_line=argument_node_position.start.line,
            error_column=argument_node_position.start.column,
            error_column_end=argument_node_position.end.column,
        )
class SourceFinder:
    """Facade over :class:`LibCSTSourceFinder` that degrades gracefully.

    Every ``find_*`` lookup returns ``None`` instead of raising when
    ``libcst`` is not installed, so source enrichment stays optional.
    """

    # TODO: this might need to become a getter
    @cached_property
    def cst(self) -> Optional[LibCSTSourceFinder]:
        """The libcst-backed finder, or ``None`` when libcst is unavailable."""
        try:
            return LibCSTSourceFinder()
        except ImportError:
            return None  # pragma: no cover

    def find_class_from_object(self, cls: Type) -> Optional[ExceptionSource]:
        if not self.cst:
            return None
        return self.cst.find_class(cls)

    def find_class_attribute_from_object(
        self, cls: Type, attribute_name: str
    ) -> Optional[ExceptionSource]:
        if not self.cst:
            return None
        return self.cst.find_class_attribute(cls, attribute_name)

    def find_function_from_object(
        self, function: Callable
    ) -> Optional[ExceptionSource]:
        if not self.cst:
            return None
        return self.cst.find_function(function)

    def find_argument_from_object(
        self, function: Callable, argument_name: str
    ) -> Optional[ExceptionSource]:
        if not self.cst:
            return None
        return self.cst.find_argument(function, argument_name)

    def find_union_call(
        self, path: Path, union_name: str, invalid_type: object
    ) -> Optional[ExceptionSource]:
        if not self.cst:
            return None
        return self.cst.find_union_call(path, union_name, invalid_type)

    def find_union_merge(
        self, union: StrawberryUnion, other: object, frame: Traceback
    ) -> Optional[ExceptionSource]:
        if not self.cst:
            return None
        return self.cst.find_union_merge(union, other, frame)

    def find_scalar_call(
        self, scalar_definition: ScalarDefinition
    ) -> Optional[ExceptionSource]:
        if not self.cst:
            return None
        return self.cst.find_scalar_call(scalar_definition)
import ast
import sys
import typing
from collections.abc import AsyncGenerator
from functools import lru_cache
from typing import ( # type: ignore
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Dict,
ForwardRef,
Generic,
Optional,
Tuple,
Type,
TypeVar,
Union,
_eval_type,
_GenericAlias,
_SpecialForm,
cast,
overload,
)
from typing_extensions import Annotated, get_args, get_origin
ast_unparse = getattr(ast, "unparse", None)
# ast.unparse is only available on python 3.9+. For older versions we will
# use `astunparse.unparse`.
# We are also using "not TYPE_CHECKING" here because mypy gives an error
# on tests because "astunparse" is missing stubs, but the mypy action says
# that the comment is unused.
if not TYPE_CHECKING and ast_unparse is None:
    import astunparse
    ast_unparse = astunparse.unparse
@lru_cache()
def get_generic_alias(type_: Type) -> Type:
    """Return the ``typing`` generic alias corresponding to *type_*.

    For example ``list`` maps to ``typing.List`` and ``dict`` to
    ``typing.Dict``.  Special forms are returned unchanged.  This is mostly
    useful on Python < 3.9, where the builtin containers are not
    subscriptable and their ``typing`` aliases must be used instead.

    Raises:
        AssertionError: if no generic alias exists for *type_*.
    """
    if isinstance(type_, _SpecialForm):
        return type_
    public_names = (name for name in dir(typing) if not name.startswith("_"))
    for name in public_names:
        candidate = getattr(typing, name)
        # _GenericAlias overrides the usual introspection hooks, but every
        # _GenericAlias instance carries an "_inst" attribute, so that is
        # the most reliable marker to detect one.
        if hasattr(candidate, "_inst") and candidate.__origin__ is type_:
            return candidate
    raise AssertionError(f"No GenericAlias available for {type_}")  # pragma: no cover
def is_list(annotation: object) -> bool:
    """Return ``True`` when *annotation* is a parameterized list type."""
    return getattr(annotation, "__origin__", None) == list
def is_union(annotation: object) -> bool:
    """Return ``True`` when *annotation* is a union type.

    Covers both ``Union[A, B]`` and — on Python 3.10+ — the ``A | B``
    syntax, which produces a ``types.UnionType`` with no ``__origin__``.
    """
    if sys.version_info >= (3, 10):
        from types import UnionType
        if isinstance(annotation, UnionType):
            return True
    # Unions declared as Union[A, B] carry Union as their __origin__,
    # on every supported Python version.
    return getattr(annotation, "__origin__", None) == Union
def is_optional(annotation: Type) -> bool:
    """Return ``True`` when *annotation* is ``Optional[X]``.

    Optionals are represented as unions; one that contains ``NoneType``
    among its members is considered optional.
    """
    if not is_union(annotation):
        return False
    return any(member == None.__class__ for member in annotation.__args__)
def get_optional_annotation(annotation: Type) -> Type:
    """Strip ``None`` from an optional annotation.

    ``Optional[X]`` yields ``X``; a union with several non-``None`` members
    yields a copy of the union containing only those members.
    """
    non_none = tuple(
        member for member in annotation.__args__ if member != None.__class__
    )
    if len(non_none) > 1:
        # Several concrete members remain: rebuild the (union) annotation
        # without NoneType.
        return annotation.copy_with(non_none)
    return non_none[0]
def get_list_annotation(annotation: Type) -> Type:
    """Return the element type of a list annotation (``int`` for ``List[int]``)."""
    args = annotation.__args__
    return args[0]
def is_concrete_generic(annotation: type) -> bool:
    """Return ``True`` for a parameterized generic other than the ignored forms.

    ``list``/``tuple``/``Union``/``ClassVar``/``AsyncGenerator`` aliases do
    not count as "concrete" generics here.
    """
    if not isinstance(annotation, _GenericAlias):
        return False
    ignored = (list, tuple, Union, ClassVar, AsyncGenerator)
    return annotation.__origin__ not in ignored
def is_generic_subclass(annotation: type) -> bool:
    """Return ``True`` when *annotation* is a class deriving from ``Generic``."""
    if not isinstance(annotation, type):
        return False
    return issubclass(annotation, Generic)  # type: ignore
def is_generic(annotation: type) -> bool:
    """Return ``True`` when *annotation* is (or extends) an unspecialized generic."""
    # TODO: These two lines appear to have the same effect. When will an
    # annotation have parameters but not satisfy the first condition?
    if not (is_generic_subclass(annotation) or is_concrete_generic(annotation)):
        return False
    # Only generics that still have unbound parameters qualify.
    return bool(get_parameters(annotation))
def is_type_var(annotation: Type) -> bool:
    """Return ``True`` when *annotation* is a ``TypeVar`` instance."""
    return isinstance(annotation, TypeVar)
def get_parameters(annotation: Type) -> Union[Tuple[object], Tuple[()]]:
if (
isinstance(annotation, _GenericAlias)
or isinstance(annotation, type)
and issubclass(annotation, Generic) # type:ignore
and annotation is not Generic
):
return annotation.__parameters__
else:
return () # pragma: no cover
@overload
def _ast_replace_union_operation(expr: ast.expr) -> ast.expr:
    ...
@overload
def _ast_replace_union_operation(expr: ast.Expr) -> ast.Expr:
    ...
def _ast_replace_union_operation(
    expr: Union[ast.Expr, ast.expr]
) -> Union[ast.Expr, ast.expr]:
    """Recursively rewrite PEP 604 unions (``a | b``) as ``Union[a, b]``.

    Used on Python < 3.10, where ``X | Y`` between types is not valid at
    runtime: a forward reference's AST is transformed so every ``BinOp``
    becomes an equivalent ``Union[...]`` subscript before being un-parsed
    back into a string.  NOTE(review): every ``BinOp`` is assumed to be a
    ``|`` union — annotations should never contain other binary operators;
    confirm.
    """
    if isinstance(expr, ast.Expr) and isinstance(
        expr.value, (ast.BinOp, ast.Subscript)
    ):
        expr = ast.Expr(_ast_replace_union_operation(expr.value))
    elif isinstance(expr, ast.BinOp):
        # ``left | right``  ->  ``Union[left, right]``
        left = _ast_replace_union_operation(expr.left)
        right = _ast_replace_union_operation(expr.right)
        expr = ast.Subscript(
            ast.Name(id="Union"),
            ast.Tuple([left, right], ast.Load()),
            ast.Load(),
        )
    elif isinstance(expr, ast.Tuple):
        # Rewrite each element, e.g. the arguments of Dict[a | b, c | d].
        expr = ast.Tuple(
            [_ast_replace_union_operation(elt) for elt in expr.elts],
            ast.Load(),
        )
    elif isinstance(expr, ast.Subscript):
        # ast.Index only exists on Python < 3.9; newer versions store the
        # slice expression directly.
        if hasattr(ast, "Index") and isinstance(expr.slice, ast.Index):
            expr = ast.Subscript(
                expr.value,
                # The cast is required for mypy on python 3.7 and 3.8
                ast.Index(_ast_replace_union_operation(cast(Any, expr.slice).value)),
                ast.Load(),
            )
        elif isinstance(expr.slice, (ast.BinOp, ast.Tuple)):
            expr = ast.Subscript(
                expr.value,
                _ast_replace_union_operation(expr.slice),
                ast.Load(),
            )
    return expr
def eval_type(
    type_: Any,
    globalns: Optional[Dict] = None,
    localns: Optional[Dict] = None,
) -> Type:
    """Evaluates a type, resolving forward references.

    Args:
        type_: the annotation to resolve; may be a ``ForwardRef``, a
            parameterized generic, an ``Annotated`` type or a plain type.
        globalns: globals used when evaluating forward references.
        localns: locals used when evaluating forward references.

    Returns:
        The resolved type, with strawberry-specific ``Annotated`` arguments
        (lazy references, ``auto``, private markers) applied.
    """
    from strawberry.auto import StrawberryAuto
    from strawberry.lazy_type import StrawberryLazyReference
    from strawberry.private import StrawberryPrivate

    globalns = globalns or {}
    # If this is not a string, maybe its args are (e.g. List["Foo"])
    if isinstance(type_, ForwardRef):
        # For Python 3.10+, we can use the built-in _eval_type function directly.
        # It will handle "|" notations properly
        if sys.version_info < (3, 10):
            parsed = _ast_replace_union_operation(
                cast(ast.Expr, ast.parse(type_.__forward_arg__).body[0])
            )
            # We replaced "a | b" with "Union[a, b], so make sure Union can be resolved
            # at globalns because it may not be there
            if "Union" not in globalns:
                globalns["Union"] = Union
            assert ast_unparse
            type_ = ForwardRef(ast_unparse(parsed))
        return _eval_type(type_, globalns, localns)
    origin = get_origin(type_)
    if origin is not None:
        args = get_args(type_)
        if origin is Annotated:
            for arg in args[1:]:
                if isinstance(arg, StrawberryPrivate):
                    return type_
                if isinstance(arg, StrawberryLazyReference):
                    remaining_args = [
                        a
                        for a in args[1:]
                        # BUGFIX: filter on the comprehension variable `a`,
                        # not the outer loop variable `arg` (which *is* a
                        # StrawberryLazyReference, so the old test was always
                        # False and dropped every sibling annotation).
                        if not isinstance(a, StrawberryLazyReference)
                    ]
                    args = (arg.resolve_forward_ref(args[0]), *remaining_args)
                    break
                if isinstance(arg, StrawberryAuto):
                    # Same fix as above: keep sibling annotations that are
                    # not StrawberryAuto markers.
                    remaining_args = [
                        a for a in args[1:] if not isinstance(a, StrawberryAuto)
                    ]
                    args = (arg, *remaining_args)
                    break
            # If we have only a StrawberryLazyReference and no more annotations,
            # we need to return the argument directly because Annotated
            # will raise an error if trying to instantiate it with only
            # one argument.
            if len(args) == 1:
                return args[0]
        # python 3.10 will return UnionType for origin, and it cannot be
        # subscripted like Union[Foo, Bar]
        if sys.version_info >= (3, 10):
            from types import UnionType
            if origin is UnionType:
                origin = Union
        # Future annotations in older versions will eval generic aliases to their
        # real types (i.e. List[foo] will have its origin set to list instead
        # of List). If that type is not subscriptable, retrieve its generic
        # alias version instead.
        if sys.version_info < (3, 9) and not hasattr(origin, "__class_getitem__"):
            origin = get_generic_alias(origin)
        type_ = (
            origin[tuple(eval_type(a, globalns, localns) for a in args)]
            if args
            else origin
        )
    return type_
_T = TypeVar("_T")

def __dataclass_transform__(
    *,
    eq_default: bool = True,
    order_default: bool = False,
    kw_only_default: bool = False,
    field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()),
) -> Callable[[_T], _T]:
    """No-op stand-in for the PEP 681 ``dataclass_transform`` decorator marker.

    Type checkers recognise the name and parameters; at runtime the decorated
    object is returned unchanged.
    """
    def decorator(target: _T) -> _T:
        return target
    return decorator
import inspect
from functools import lru_cache
from typing import Any, Callable, Dict, List, Optional, TypeVar, Union, overload
from typing_extensions import Literal, get_args
@lru_cache(maxsize=250)
def get_func_args(func: Callable[[Any], Any]) -> List[str]:
    """Return the names of *func*'s positional-or-keyword parameters, in order."""
    signature = inspect.signature(func)
    positional_or_keyword = inspect.Parameter.POSITIONAL_OR_KEYWORD
    return [
        name
        for name, parameter in signature.parameters.items()
        if parameter.kind == positional_or_keyword
    ]
@overload
def get_specialized_type_var_map(
    cls: type,
    *,
    include_type_vars: Literal[True],
) -> Optional[Dict[TypeVar, Union[TypeVar, type]]]:
    ...
@overload
def get_specialized_type_var_map(
    cls: type,
    *,
    include_type_vars: Literal[False] = ...,
) -> Optional[Dict[TypeVar, type]]:
    ...
@overload
def get_specialized_type_var_map(
    cls: type,
    *,
    include_type_vars: bool,
) -> Optional[
    Union[Optional[Dict[TypeVar, type]], Dict[TypeVar, Union[TypeVar, type]]]
]:
    ...
def get_specialized_type_var_map(cls: type, *, include_type_vars: bool = False):
    """Map each ``TypeVar`` of *cls*'s generic bases to its specialized type.

    Walks ``__orig_bases__`` recursively, so indirect specializations (a
    plain subclass of ``Bar[int]``) are resolved as well.

    Returns ``None`` when *cls* has no ``__orig_bases__`` at all, an empty
    dict when nothing is specialized, and — with ``include_type_vars=True``
    — also includes parameters still bound to plain ``TypeVar``s
    (``{~T: ~T}``-style entries).
    """
    bases = getattr(cls, "__orig_bases__", None)
    if bases is None:
        # Not a specialized type
        return None

    result = {}
    for base in bases:
        # Merge whatever the base itself (recursively) specializes first so
        # more-derived assignments override it below.
        inherited = get_specialized_type_var_map(base)
        if inherited is not None:
            result.update(inherited)

        args = get_args(base)
        origin = getattr(base, "__origin__", None)
        params = origin and getattr(origin, "__parameters__", None)
        if params is None:
            params = getattr(base, "__parameters__", None)
        if not params:
            continue

        for param, arg in zip(params, args):
            if include_type_vars or not isinstance(arg, TypeVar):
                result[param] = arg

    return result
from __future__ import annotations
import re
import typing
import warnings
from decimal import Decimal
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
from mypy.nodes import (
ARG_OPT,
ARG_POS,
ARG_STAR2,
GDEF,
MDEF,
Argument,
AssignmentStmt,
Block,
CallExpr,
CastExpr,
FuncDef,
IndexExpr,
MemberExpr,
NameExpr,
PassStmt,
PlaceholderNode,
RefExpr,
SymbolTableNode,
TempNode,
TupleExpr,
TypeAlias,
TypeVarExpr,
Var,
)
from mypy.plugin import (
Plugin,
SemanticAnalyzerPluginInterface,
)
from mypy.plugins.common import _get_argument, _get_decorator_bool_argument, add_method
from mypy.plugins.dataclasses import DataclassAttribute
from mypy.semanal_shared import set_callable_name
from mypy.server.trigger import make_wildcard_trigger
from mypy.types import (
AnyType,
CallableType,
Instance,
NoneType,
TypeOfAny,
TypeVarType,
UnionType,
get_proper_type,
)
from mypy.typevars import fill_typevars
from mypy.util import get_unique_redefinition_name
# Backwards compatible with the removal of `TypeVarDef` in mypy 0.920.
try:
    from mypy.types import TypeVarDef  # type: ignore
except ImportError:
    TypeVarDef = TypeVarType
# To be compatible with users who don't use pydantic
try:
    from pydantic.mypy import METADATA_KEY as PYDANTIC_METADATA_KEY
    from pydantic.mypy import PydanticModelField
except ImportError:
    # Sentinel: with pydantic missing, this key never matches any metadata.
    PYDANTIC_METADATA_KEY = ""
if TYPE_CHECKING:
from typing_extensions import Final
from mypy.nodes import ClassDef, Expression, TypeInfo
from mypy.plugins import ( # type: ignore
AnalyzeTypeContext,
CheckerPluginInterface,
ClassDefContext,
DynamicClassDefContext,
FunctionContext,
)
from mypy.types import Type
# Captures the leading "major.minor" of a mypy version string.
VERSION_RE = re.compile(r"(^0|^(?:[1-9][0-9]*))\.(0|(?:[1-9][0-9]*))")
# Version assumed when the running mypy's version cannot be parsed.
FALLBACK_VERSION = Decimal("0.800")
class MypyVersion:
    """Stores the mypy version to be used by the plugin"""

    # Parsed "major.minor" version as a Decimal, compared against thresholds
    # like Decimal("0.920") for feature gating. NOTE(review): populated
    # outside this chunk (presumably when the plugin is loaded) — confirm.
    VERSION: Decimal
class InvalidNodeTypeException(Exception):
    """Raised when an expression resolves to a mypy node that cannot be a type."""

    def __init__(self, node: Any) -> None:
        super().__init__()
        self.message = f"Invalid node type: {str(node)}"

    def __str__(self) -> str:
        return self.message
def lazy_type_analyze_callback(ctx: AnalyzeTypeContext) -> Type:
    """Resolve ``strawberry.LazyType["Name", ...]`` to its first type argument."""
    if len(ctx.type.args) == 0:
        # TODO: maybe this should throw an error
        return AnyType(TypeOfAny.special_form)
    return ctx.api.analyze_type(ctx.type.args[0])
def strawberry_field_hook(ctx: FunctionContext) -> Type:
    # TODO: check when used as decorator, check type of the caller
    # TODO: check type of resolver if any
    # For now, treat the result of strawberry.field(...) as Any.
    return AnyType(TypeOfAny.special_form)
def _get_named_type(name: str, api: SemanticAnalyzerPluginInterface):
    """Look up *name* as a mypy named type; dotted names may be unresolvable."""
    lookup = api.named_type_or_none if "." in name else api.named_type
    return lookup(name)
def _get_type_for_expr(expr: Expression, api: SemanticAnalyzerPluginInterface) -> Type:
    """Translate a strawberry call-argument expression into a mypy ``Type``.

    Raises:
        InvalidNodeTypeException: when the expression points at a node that
            cannot represent a type (e.g. a plain variable).
        ValueError: for expression kinds this plugin does not understand.
    """
    if isinstance(expr, NameExpr):
        # guarding against invalid nodes, still have to figure out why this happens
        # but sometimes mypy crashes because the internal node of the named type
        # is actually a Var node, which is unexpected, so we do a naive guard here
        # and raise an exception for it.
        if expr.fullname:
            sym = api.lookup_fully_qualified_or_none(expr.fullname)
            if sym and isinstance(sym.node, Var):
                raise InvalidNodeTypeException(sym.node)
        return _get_named_type(expr.fullname or expr.name, api)
    if isinstance(expr, IndexExpr):
        # e.g. List[Foo]: resolve the base and splice in the index type.
        base_type = _get_type_for_expr(expr.base, api)
        base_type.args = (_get_type_for_expr(expr.index, api),)  # type: ignore
        return base_type
    if isinstance(expr, MemberExpr):
        if not expr.fullname:
            raise InvalidNodeTypeException(expr)
        return _get_named_type(expr.fullname, api)
    if isinstance(expr, CallExpr):
        if not expr.analyzed:
            raise InvalidNodeTypeException(expr)
        return _get_type_for_expr(expr.analyzed, api)
    if isinstance(expr, CastExpr):
        return expr.type
    raise ValueError(f"Unsupported expression {type(expr)}")
def create_type_hook(ctx: DynamicClassDefContext) -> None:
    """Handle ``strawberry.tools.create_type`` by aliasing the name to ``Any``."""
    # returning classes/type aliases is not supported yet by mypy
    # see https://github.com/python/mypy/issues/5865
    alias = TypeAlias(
        AnyType(TypeOfAny.from_error),
        fullname=ctx.api.qualified_name(ctx.name),
        line=ctx.call.line,
        column=ctx.call.column,
    )
    node = SymbolTableNode(GDEF, alias, plugin_generated=True)
    ctx.api.add_symbol_table_node(ctx.name, node)
def union_hook(ctx: DynamicClassDefContext) -> None:
    """Alias ``X = strawberry.union(name, types)`` to ``Union[...]``.

    Falls back to an ``Any`` alias when a member expression cannot be
    resolved to a type.
    """
    try:
        # Check if types is passed as a keyword argument
        types = ctx.call.args[ctx.call.arg_names.index("types")]
    except ValueError:
        # Fall back to assuming position arguments
        types = ctx.call.args[1]
    if isinstance(types, TupleExpr):
        try:
            type_ = UnionType(
                tuple(_get_type_for_expr(x, ctx.api) for x in types.items)
            )
        except InvalidNodeTypeException:
            # One of the members is not usable as a type: degrade to Any so
            # the rest of the analysis can continue.
            type_alias = TypeAlias(
                AnyType(TypeOfAny.from_error),
                fullname=ctx.api.qualified_name(ctx.name),
                line=ctx.call.line,
                column=ctx.call.column,
            )
            ctx.api.add_symbol_table_node(
                ctx.name,
                SymbolTableNode(GDEF, type_alias, plugin_generated=False),
            )
            return
        type_alias = TypeAlias(
            type_,
            fullname=ctx.api.qualified_name(ctx.name),
            line=ctx.call.line,
            column=ctx.call.column,
        )
        ctx.api.add_symbol_table_node(
            ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)
        )
def _first_argument_type_alias_hook(ctx: DynamicClassDefContext) -> None:
    """Shared implementation for ``enum_hook`` and ``scalar_hook``.

    Both hooks alias the assigned name to the type of the call's first
    argument: a not-yet-analyzed name defers the pass, a ``Var`` reuses its
    declared type, and anything unresolvable degrades to ``Any``.  (The two
    hooks were previously line-for-line duplicates; consolidated here.)
    """
    first_argument = ctx.call.args[0]
    if isinstance(first_argument, NameExpr):
        if not first_argument.node:
            # The referenced name has not been analyzed yet; retry later.
            ctx.api.defer()
            return
        if isinstance(first_argument.node, Var):
            var_type = first_argument.node.type or AnyType(
                TypeOfAny.implementation_artifact
            )
            type_alias = TypeAlias(
                var_type,
                fullname=ctx.api.qualified_name(ctx.name),
                line=ctx.call.line,
                column=ctx.call.column,
            )
            ctx.api.add_symbol_table_node(
                ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)
            )
            return
    resolved_type: Optional[Type]
    try:
        resolved_type = _get_type_for_expr(first_argument, ctx.api)
    except InvalidNodeTypeException:
        resolved_type = None
    if not resolved_type:
        resolved_type = AnyType(TypeOfAny.from_error)
    type_alias = TypeAlias(
        resolved_type,
        fullname=ctx.api.qualified_name(ctx.name),
        line=ctx.call.line,
        column=ctx.call.column,
    )
    ctx.api.add_symbol_table_node(
        ctx.name, SymbolTableNode(GDEF, type_alias, plugin_generated=False)
    )


def enum_hook(ctx: DynamicClassDefContext) -> None:
    """Alias ``X = strawberry.enum(SomeEnum)`` to the type of ``SomeEnum``."""
    _first_argument_type_alias_hook(ctx)


def scalar_hook(ctx: DynamicClassDefContext) -> None:
    """Alias ``X = strawberry.scalar(...)`` to the type of its first argument."""
    # TODO: add proper support for NewType
    _first_argument_type_alias_hook(ctx)
def add_static_method_to_class(
    api: Union[SemanticAnalyzerPluginInterface, CheckerPluginInterface],
    cls: ClassDef,
    name: str,
    args: List[Argument],
    return_type: Type,
    tvar_def: Optional[TypeVarType] = None,
) -> None:
    """Adds a static method

    Edited add_method_to_class to incorporate static method logic
    https://github.com/python/mypy/blob/9c05d3d19/mypy/plugins/common.py

    Args:
        api: semantic-analyzer or type-checker plugin interface.
        cls: class definition receiving the method.
        name: name of the generated static method.
        args: fully typed arguments (each must carry a type annotation).
        return_type: return type of the generated method.
        tvar_def: optional type variable scoped to the signature.
    """
    info = cls.info
    # First remove any previously generated methods with the same name
    # to avoid clashes and problems in the semantic analyzer.
    if name in info.names:
        sym = info.names[name]
        if sym.plugin_generated and isinstance(sym.node, FuncDef):
            cls.defs.body.remove(sym.node)
    # For compat with mypy < 0.93
    if MypyVersion.VERSION < Decimal("0.93"):
        function_type = api.named_type("__builtins__.function")
    else:
        if isinstance(api, SemanticAnalyzerPluginInterface):
            function_type = api.named_type("builtins.function")
        else:
            function_type = api.named_generic_type("builtins.function", [])
    arg_types, arg_names, arg_kinds = [], [], []
    for arg in args:
        assert arg.type_annotation, "All arguments must be fully typed."
        arg_types.append(arg.type_annotation)
        arg_names.append(arg.variable.name)
        arg_kinds.append(arg.kind)
    signature = CallableType(
        arg_types, arg_kinds, arg_names, return_type, function_type
    )
    if tvar_def:
        signature.variables = [tvar_def]
    # The body is a bare "pass"; only the signature matters to the checker.
    func = FuncDef(name, args, Block([PassStmt()]))
    func.is_static = True
    func.info = info
    func.type = set_callable_name(signature, func)
    func._fullname = f"{info.fullname}.{name}"
    func.line = info.line
    # NOTE: we would like the plugin generated node to dominate, but we still
    # need to keep any existing definitions so they get semantically analyzed.
    if name in info.names:
        # Get a nice unique name instead.
        r_name = get_unique_redefinition_name(name, info.names)
        info.names[r_name] = info.names[name]
    info.names[name] = SymbolTableNode(MDEF, func, plugin_generated=True)
    info.defn.defs.body.append(func)
def strawberry_pydantic_class_callback(ctx: ClassDefContext) -> None:
    """Synthesize members for strawberry's pydantic-conversion decorators.

    Adds a permissive ``__init__(**kwargs)``, a ``to_pydantic`` method whose
    arguments are the pydantic fields missing from the strawberry type, and a
    static ``from_pydantic`` constructor returning the decorated class.
    """
    # in future we want to have a proper pydantic plugin, but for now
    # let's fallback to **kwargs for __init__, some resources are here:
    # https://github.com/samuelcolvin/pydantic/blob/master/pydantic/mypy.py
    # >>> model_index = ctx.cls.decorators[0].arg_names.index("model")
    # >>> model_name = ctx.cls.decorators[0].args[model_index].name
    # >>> model_type = ctx.api.named_type("UserModel")
    # >>> model_type = ctx.api.lookup(model_name, Context())
    model_expression = _get_argument(call=ctx.reason, name="model")
    if model_expression is None:
        ctx.api.fail("model argument in decorator failed to be parsed", ctx.reason)
    else:
        # Add __init__
        init_args = [
            Argument(Var("kwargs"), AnyType(TypeOfAny.explicit), None, ARG_STAR2)
        ]
        add_method(ctx, "__init__", init_args, NoneType())
        model_type = cast(Instance, _get_type_for_expr(model_expression, ctx.api))
        # these are the fields that the user added to the strawberry type
        new_strawberry_fields: Set[str] = set()
        # TODO: think about inheritance for strawberry?
        for stmt in ctx.cls.defs.body:
            if isinstance(stmt, AssignmentStmt):
                lhs = cast(NameExpr, stmt.lvalues[0])
                new_strawberry_fields.add(lhs.name)
        pydantic_fields: Set[PydanticModelField] = set()
        try:
            # Field metadata is stored on the model's TypeInfo by the
            # pydantic mypy plugin.
            for _name, data in model_type.type.metadata[PYDANTIC_METADATA_KEY][
                "fields"
            ].items():
                field = PydanticModelField.deserialize(ctx.cls.info, data)
                pydantic_fields.add(field)
        except KeyError:
            # this will happen if the user didn't add the pydantic plugin
            # AND is using the pydantic conversion decorator
            ctx.api.fail(
                "Pydantic plugin not installed,"
                " please add pydantic.mypy your mypy.ini plugins",
                ctx.reason,
            )
        potentially_missing_fields: Set[PydanticModelField] = {
            f for f in pydantic_fields if f.name not in new_strawberry_fields
        }
        """
        Need to check if all_fields=True from the pydantic decorator
        There is no way to real check that Literal[True] was used
        We just check if the strawberry type is missing all the fields
        This means that the user is using all_fields=True
        """
        is_all_fields: bool = len(potentially_missing_fields) == len(pydantic_fields)
        missing_pydantic_fields: Set[PydanticModelField] = (
            potentially_missing_fields if not is_all_fields else set()
        )
        # Add the default to_pydantic if undefined by the user
        if "to_pydantic" not in ctx.cls.info.names:
            add_method(
                ctx,
                "to_pydantic",
                args=[
                    f.to_argument(
                        # TODO: use_alias should depend on config?
                        info=model_type.type,
                        typed=True,
                        force_optional=False,
                        use_alias=True,
                    )
                    for f in missing_pydantic_fields
                ],
                return_type=model_type,
            )
        # Add from_pydantic
        model_argument = Argument(
            variable=Var(name="instance", type=model_type),
            type_annotation=model_type,
            initializer=None,
            kind=ARG_OPT,
        )
        add_static_method_to_class(
            ctx.api,
            ctx.cls,
            name="from_pydantic",
            args=[model_argument],
            return_type=fill_typevars(ctx.cls.info),
        )
def is_dataclasses_field_or_strawberry_field(expr: Expression) -> bool:
    """Return ``True`` when *expr* is a dataclasses/strawberry field-factory call."""
    if not isinstance(expr, CallExpr):
        return False
    callee = expr.callee
    if isinstance(callee, RefExpr) and callee.fullname in (
        "dataclasses.field",
        "strawberry.field.field",
        "strawberry.mutation.mutation",
        "strawberry.federation.field",
        "strawberry.federation.field.field",
    ):
        return True
    if isinstance(callee, MemberExpr) and isinstance(callee.expr, NameExpr):
        # Covers the ``strawberry.field(...)`` / ``strawberry.mutation(...)``
        # attribute-access spelling.
        return (
            callee.name in {"field", "mutation"}
            and callee.expr.name == "strawberry"
        )
    return False
def _collect_field_args(
    ctx: ClassDefContext, expr: Expression
) -> Tuple[bool, Dict[str, Expression]]:
    """Return ``(is_field_call, keyword_args)`` for a field()/mutation() call.

    The first element tells whether *expr* is a dataclasses/strawberry field
    factory call; the second maps keyword names to argument expressions.
    Positional arguments are reported as an error and yield ``(False, {})``.
    """
    if not is_dataclasses_field_or_strawberry_field(expr):
        return False, {}
    call = cast(CallExpr, expr)
    collected: Dict[str, Expression] = {}
    for name, arg in zip(call.arg_names, call.args):
        if name is None:
            ctx.api.fail(
                '"field()" or "mutation()" only takes keyword arguments', call
            )
            return False, {}
        collected[name] = arg
    return True, collected
# Custom dataclass transformer that knows about strawberry.field, we cannot
# extend the mypy one as it might be compiled by mypyc and we'd get this error
# >>> TypeError: interpreted classes cannot inherit from compiled
# Original copy from
# https://github.com/python/mypy/blob/5253f7c0/mypy/plugins/dataclasses.py
# Name of the synthetic TypeVar used as the self type of generated comparison
# methods (__lt__ etc.) so "other" matches the subclass, not the base.
SELF_TVAR_NAME: Final = "_DT"
class CustomDataclassTransformer:
def __init__(self, ctx: ClassDefContext) -> None:
self._ctx = ctx
def transform(self) -> None:
"""Apply all the necessary transformations to the underlying
dataclass so as to ensure it is fully type checked according
to the rules in PEP 557.
"""
ctx = self._ctx
info = self._ctx.cls.info
attributes = self.collect_attributes()
if attributes is None:
# Some definitions are not ready, defer() should be already called.
return
for attr in attributes:
if attr.type is None:
ctx.api.defer()
return
decorator_arguments = {
"init": _get_decorator_bool_argument(self._ctx, "init", True),
"eq": _get_decorator_bool_argument(self._ctx, "eq", True),
"order": _get_decorator_bool_argument(self._ctx, "order", False),
"frozen": _get_decorator_bool_argument(self._ctx, "frozen", False),
}
# If there are no attributes, it may be that the semantic analyzer has not
# processed them yet. In order to work around this, we can simply skip
# generating __init__ if there are no attributes, because if the user
# truly did not define any, then the object default __init__ with an
# empty signature will be present anyway.
if (
decorator_arguments["init"]
and (
"__init__" not in info.names or info.names["__init__"].plugin_generated
)
and attributes
):
args = [info] if MypyVersion.VERSION >= Decimal("1.0") else []
add_method(
ctx,
"__init__",
args=[
attr.to_argument(*args) for attr in attributes if attr.is_in_init
],
return_type=NoneType(),
)
if (
decorator_arguments["eq"]
and info.get("__eq__") is None
or decorator_arguments["order"]
):
# Type variable for self types in generated methods.
obj_type = ctx.api.named_type("__builtins__.object")
self_tvar_expr = TypeVarExpr(
SELF_TVAR_NAME, info.fullname + "." + SELF_TVAR_NAME, [], obj_type
)
info.names[SELF_TVAR_NAME] = SymbolTableNode(MDEF, self_tvar_expr)
# Add <, >, <=, >=, but only if the class has an eq method.
if decorator_arguments["order"]:
if not decorator_arguments["eq"]:
ctx.api.fail("eq must be True if order is True", ctx.cls)
for method_name in ["__lt__", "__gt__", "__le__", "__ge__"]:
# Like for __eq__ and __ne__, we want "other" to match
# the self type.
obj_type = ctx.api.named_type("__builtins__.object")
order_tvar_def = TypeVarDef(
SELF_TVAR_NAME,
info.fullname + "." + SELF_TVAR_NAME,
-1,
[],
obj_type,
)
# Backwards compatible with the removal of `TypeVarDef` in mypy 0.920.
if isinstance(order_tvar_def, TypeVarType):
order_other_type = order_tvar_def
else:
order_other_type = TypeVarType(order_tvar_def) # type: ignore
order_return_type = ctx.api.named_type("__builtins__.bool")
order_args = [
Argument(
Var("other", order_other_type), order_other_type, None, ARG_POS
)
]
existing_method = info.get(method_name)
if existing_method is not None and not existing_method.plugin_generated:
assert existing_method.node
ctx.api.fail(
"You may not have a custom %s method when order=True"
% method_name,
existing_method.node,
)
add_method(
ctx,
method_name,
args=order_args,
return_type=order_return_type,
self_type=order_other_type,
tvar_def=order_tvar_def,
)
if decorator_arguments["frozen"]:
self._freeze(attributes)
else:
self._propertize_callables(attributes)
self.reset_init_only_vars(info, attributes)
info.metadata["dataclass"] = {
"attributes": [attr.serialize() for attr in attributes],
"frozen": decorator_arguments["frozen"],
}
def reset_init_only_vars(
self, info: TypeInfo, attributes: List[DataclassAttribute]
) -> None:
"""Remove init-only vars from the class and reset init var declarations."""
for attr in attributes:
if attr.is_init_var:
if attr.name in info.names:
del info.names[attr.name]
else:
# Nodes of superclass InitVars not used in __init__
# cannot be reached.
assert attr.is_init_var
for stmt in info.defn.defs.body:
if isinstance(stmt, AssignmentStmt) and stmt.unanalyzed_type:
lvalue = stmt.lvalues[0]
if isinstance(lvalue, NameExpr) and lvalue.name == attr.name:
# Reset node so that another semantic analysis pass will
# recreate a symbol node for this attribute.
lvalue.node = None
def collect_attributes(self) -> Optional[List[DataclassAttribute]]:
"""Collect all attributes declared in the dataclass and its parents.
All assignments of the form
a: SomeType
b: SomeOtherType = ...
are collected.
"""
# First, collect attributes belonging to the current class.
ctx = self._ctx
cls = self._ctx.cls
attrs: List[DataclassAttribute] = []
known_attrs: Set[str] = set()
for stmt in cls.defs.body:
# Any assignment that doesn't use the new type declaration
# syntax can be ignored out of hand.
if not (isinstance(stmt, AssignmentStmt) and stmt.new_syntax):
continue
# a: int, b: str = 1, 'foo' is not supported syntax so we
# don't have to worry about it.
lhs = stmt.lvalues[0]
if not isinstance(lhs, NameExpr):
continue
sym = cls.info.names.get(lhs.name)
if sym is None:
# This name is likely blocked by a star import. We don't need
# to defer because defer() is already called by mark_incomplete().
continue
node = sym.node
if isinstance(node, PlaceholderNode):
# This node is not ready yet.
return None
assert isinstance(node, Var)
# x: ClassVar[int] is ignored by dataclasses.
if node.is_classvar:
continue
# x: InitVar[int] is turned into x: int and is removed from the class.
is_init_var = False
node_type = get_proper_type(node.type)
if (
isinstance(node_type, Instance)
and node_type.type.fullname == "dataclasses.InitVar"
):
is_init_var = True
node.type = node_type.args[0]
has_field_call, field_args = _collect_field_args(ctx, stmt.rvalue)
is_in_init_param = field_args.get("init")
if is_in_init_param is None:
is_in_init = True
else:
is_in_init = bool(ctx.api.parse_bool(is_in_init_param))
# fields with a resolver are never put in the __init__ method
if "resolver" in field_args:
is_in_init = False
has_default = False
# Ensure that something like x: int = field() is rejected
# after an attribute with a default.
if has_field_call:
has_default = "default" in field_args or "default_factory" in field_args
# All other assignments are already type checked.
elif not isinstance(stmt.rvalue, TempNode):
has_default = True
if not has_default:
# Make all non-default attributes implicit because they are de-facto set
# on self in the generated __init__(), not in the class body.
sym.implicit = True
known_attrs.add(lhs.name)
params = dict(
name=lhs.name,
is_in_init=is_in_init,
is_init_var=is_init_var,
has_default=has_default,
line=stmt.line,
column=stmt.column,
type=sym.type,
)
# Support the addition of `info` in mypy 0.800 and `kw_only` in mypy 0.920
# without breaking backwards compatibility.
if MypyVersion.VERSION >= Decimal("0.800"):
params["info"] = cls.info
if MypyVersion.VERSION >= Decimal("0.920"):
params["kw_only"] = True
if MypyVersion.VERSION >= Decimal("1.1"):
params["alias"] = None
attribute = DataclassAttribute(**params)
attrs.append(attribute)
# Next, collect attributes belonging to any class in the MRO
# as long as those attributes weren't already collected. This
# makes it possible to overwrite attributes in subclasses.
# copy() because we potentially modify all_attrs below and if
# this code requires debugging we'll have unmodified attrs laying around.
all_attrs = attrs.copy()
for info in cls.info.mro[1:-1]:
if "dataclass" not in info.metadata:
continue
super_attrs = []
# Each class depends on the set of attributes in its dataclass ancestors.
ctx.api.add_plugin_dependency(make_wildcard_trigger(info.fullname))
for data in info.metadata["dataclass"]["attributes"]:
name: str = data["name"]
if name not in known_attrs:
attr = DataclassAttribute.deserialize(info, data, ctx.api)
attr.expand_typevar_from_subtype(ctx.cls.info)
known_attrs.add(name)
super_attrs.append(attr)
elif all_attrs:
# How early in the attribute list an attribute appears is
# determined by the reverse MRO, not simply MRO.
# See https://docs.python.org/3/library/dataclasses.html#inheritance
# for details.
for attr in all_attrs:
if attr.name == name:
all_attrs.remove(attr)
super_attrs.append(attr)
break
all_attrs = super_attrs + all_attrs
return all_attrs
def _freeze(self, attributes: List[DataclassAttribute]) -> None:
    """Emulate a frozen class by flagging every attribute as a property.

    Attributes already present in the class symbol table are marked in
    place; missing ones get a freshly synthesized ``Var`` registered
    under the class.
    """
    type_info = self._ctx.cls.info
    for attribute in attributes:
        existing = type_info.names.get(attribute.name)
        if existing is None:
            # Not in the symbol table yet: build a Var for it.
            # `to_var` grew a required `current_info` argument in mypy 1.0.
            if MypyVersion.VERSION >= Decimal("1.0"):
                new_var = attribute.to_var(current_info=type_info)
            else:
                new_var = attribute.to_var()  # type: ignore
            new_var.info = type_info
            new_var.is_property = True
            new_var._fullname = type_info.fullname + "." + new_var.name
            type_info.names[new_var.name] = SymbolTableNode(MDEF, new_var)
        else:
            node = existing.node
            assert isinstance(node, Var)
            node.is_property = True
def _propertize_callables(self, attributes: List[DataclassAttribute]) -> None:
    """Turn every callable-typed attribute into a @property method.

    Without this the typechecker assumes that
    ``my_dataclass_instance.callable_attr(foo)`` will receive a bound
    ``self`` argument, which it does not.
    """
    type_info = self._ctx.cls.info
    callable_attributes = (
        attribute
        for attribute in attributes
        if isinstance(get_proper_type(attribute.type), CallableType)
    )
    for attribute in callable_attributes:
        # `to_var` grew a required `current_info` argument in mypy 1.0.
        if MypyVersion.VERSION >= Decimal("1.0"):
            new_var = attribute.to_var(current_info=type_info)
        else:
            new_var = attribute.to_var()  # type: ignore
        new_var.info = type_info
        new_var.is_property = True
        new_var.is_settable_property = True
        new_var._fullname = type_info.fullname + "." + new_var.name
        type_info.names[new_var.name] = SymbolTableNode(MDEF, new_var)
def custom_dataclass_class_maker_callback(ctx: ClassDefContext) -> None:
    """Class-decorator hook that applies dataclass semantics to strawberry types."""
    CustomDataclassTransformer(ctx).transform()
class StrawberryPlugin(Plugin):
    """Mypy plugin entry points for strawberry-graphql.

    Each ``get_*_hook`` method routes a fully-qualified name to the
    matching strawberry hook; the ``_is_*`` predicates below encode the
    (sometimes fuzzy) name matching needed when mypy runs with
    ``follow_imports`` disabled.
    """

    def get_dynamic_class_hook(
        self, fullname: str
    ) -> Optional[Callable[[DynamicClassDefContext], None]]:
        # TODO: investigate why we need this instead of `strawberry.union.union` on CI
        # we have the same issue in the other hooks
        dispatch = (
            (self._is_strawberry_union, union_hook),
            (self._is_strawberry_enum, enum_hook),
            (self._is_strawberry_scalar, scalar_hook),
            (self._is_strawberry_create_type, create_type_hook),
        )
        for predicate, hook in dispatch:
            if predicate(fullname):
                return hook
        return None

    def get_function_hook(
        self, fullname: str
    ) -> Optional[Callable[[FunctionContext], Type]]:
        return strawberry_field_hook if self._is_strawberry_field(fullname) else None

    def get_type_analyze_hook(self, fullname: str) -> Union[Callable[..., Type], None]:
        if not self._is_strawberry_lazy_type(fullname):
            return None
        return lazy_type_analyze_callback

    def get_class_decorator_hook(
        self, fullname: str
    ) -> Optional[Callable[[ClassDefContext], None]]:
        if self._is_strawberry_decorator(fullname):
            return custom_dataclass_class_maker_callback
        if self._is_strawberry_pydantic_decorator(fullname):
            return strawberry_pydantic_class_callback
        return None

    def _is_strawberry_union(self, fullname: str) -> bool:
        if fullname == "strawberry.union.union":
            return True
        return fullname.endswith("strawberry.union")

    def _is_strawberry_field(self, fullname: str) -> bool:
        exact_names = (
            "strawberry.field.field",
            "strawberry.mutation.mutation",
            "strawberry.federation.field",
        )
        if fullname in exact_names:
            return True
        # str.endswith accepts a tuple of candidate suffixes.
        return fullname.endswith(
            (
                "strawberry.field",
                "strawberry.mutation",
                "strawberry.federation.field",
            )
        )

    def _is_strawberry_enum(self, fullname: str) -> bool:
        if fullname == "strawberry.enum.enum":
            return True
        return fullname.endswith("strawberry.enum")

    def _is_strawberry_scalar(self, fullname: str) -> bool:
        if fullname == "strawberry.custom_scalar.scalar":
            return True
        return fullname.endswith("strawberry.scalar")

    def _is_strawberry_lazy_type(self, fullname: str) -> bool:
        return fullname == "strawberry.lazy_type.LazyType"

    def _is_strawberry_decorator(self, fullname: str) -> bool:
        known_decorator_paths = (
            "strawberry.object_type.type",
            "strawberry.federation.type",
            "strawberry.federation.object_type.type",
            "strawberry.federation.input",
            "strawberry.federation.object_type.input",
            "strawberry.federation.interface",
            "strawberry.federation.object_type.interface",
            "strawberry.schema_directive.schema_directive",
            "strawberry.federation.schema_directive",
            "strawberry.federation.schema_directive.schema_directive",
            "strawberry.object_type.input",
            "strawberry.object_type.interface",
        )
        if any(path in fullname for path in known_decorator_paths):
            return True
        # in some cases `fullpath` is not what we would expect, this usually
        # happens when `follow_imports` are disabled in mypy when you get a path
        # that looks likes `some_module.types.strawberry.type`
        return fullname.endswith(
            (
                "strawberry.type",
                "strawberry.federation.type",
                "strawberry.input",
                "strawberry.interface",
                "strawberry.schema_directive",
                "strawberry.federation.schema_directive",
            )
        )

    def _is_strawberry_create_type(self, fullname: str) -> bool:
        # using endswith(.create_type) is not ideal as there might be
        # other function called like that, but it's the best we can do
        # when follow-imports is set to "skip". Hopefully in the future
        # we can remove our custom hook for create type
        if fullname == "strawberry.tools.create_type.create_type":
            return True
        return fullname.endswith(".create_type")

    def _is_strawberry_pydantic_decorator(self, fullname: str) -> bool:
        known_decorator_paths = (
            "strawberry.experimental.pydantic.object_type.type",
            "strawberry.experimental.pydantic.object_type.input",
            "strawberry.experimental.pydantic.object_type.interface",
            "strawberry.experimental.pydantic.error_type",
        )
        if any(path in fullname for path in known_decorator_paths):
            return True
        # in some cases `fullpath` is not what we would expect, this usually
        # happens when `follow_imports` are disabled in mypy when you get a path
        # that looks likes `some_module.types.strawberry.type`
        return fullname.endswith(
            (
                "strawberry.experimental.pydantic.type",
                "strawberry.experimental.pydantic.input",
                "strawberry.experimental.pydantic.error_type",
            )
        )
def plugin(version: str) -> typing.Type[StrawberryPlugin]:
    """Mypy plugin entry point: record the running mypy version and return the plugin class.

    If the version string cannot be parsed, fall back to the oldest
    supported version and warn the user.
    """
    parsed = VERSION_RE.match(version)
    if parsed is None:
        MypyVersion.VERSION = FALLBACK_VERSION
        warnings.warn(
            f"Mypy version {version} could not be parsed. Reverting to v0.800",
            stacklevel=1,
        )
    else:
        MypyVersion.VERSION = Decimal(".".join(parsed.groups()))
    return StrawberryPlugin
Subsets and Splits