| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6 to 201) | class_span (dict) | source (stringlengths, 21 to 2.38M) | target (stringlengths, 1 to 96) |
|---|---|---|---|---|---|
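Each row below pairs a masked class definition (`source`, with the class name replaced by `____`) with the name to recover (`target`); `class_span` appears to give character offsets of the class within the file at `path`. A minimal sketch of how a row might be consumed, assuming those semantics (the `Row`/`ClassSpan` types and the `unmask` helper are illustrative, not part of the dataset):

```python
from typing import TypedDict


class ClassSpan(TypedDict):
    # Assumed meaning: character offsets of the class within the original file.
    start: int
    end: int


class Row(TypedDict):
    language: str
    repo: str
    path: str
    class_span: ClassSpan
    source: str
    target: str


def unmask(row: Row) -> str:
    """Recover the class definition by filling the ____ placeholder with the target name."""
    return row["source"].replace("____", row["target"], 1)


# Example built from the doocs__leetcode row below (source abbreviated here).
example: Row = {
    "language": "python",
    "repo": "doocs__leetcode",
    "path": "solution/0400-0499/0448.Find All Numbers Disappeared in an Array/Solution.py",
    "class_span": {"start": 0, "end": 172},
    "source": "class ____:\n    def findDisappearedNumbers(self, nums):\n        ...",
    "target": "Solution",
}
print(unmask(example).splitlines()[0])  # -> class Solution:
```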
python
|
ray-project__ray
|
python/ray/llm/_internal/serve/config_generator/utils/prompt.py
|
{
"start": 44,
"end": 251
}
|
class ____(Prompt):
@classmethod
def ask(cls, prompt: str, **kwargs):
# Automatically apply bold style to the BoldPrompt
return Prompt.ask(f"[bold]{prompt}[/bold]", **kwargs)
|
BoldPrompt
|
python
|
hyperopt__hyperopt
|
hyperopt/rdists.py
|
{
"start": 901,
"end": 2283
}
|
class ____(scipy_lognorm_gen):
def __init__(self, mu, sigma):
self.mu_ = mu
self.s_ = sigma
scipy_lognorm_gen.__init__(self)
# I still don't understand what scipy stats objects are doing
# re: this stuff
del self.__dict__["_parse_args"]
del self.__dict__["_parse_args_stats"]
del self.__dict__["_parse_args_rvs"]
def _parse_args(self, *args, **kwargs):
assert not args, args
assert not kwargs, kwargs
args = (self.s_,)
loc = 0
scale = np.exp(self.mu_)
return args, loc, scale
def qtable_pmf(x, q, qlow, xs, ps):
qx = np.round(np.atleast_1d(x).astype(float) / q) * q
is_multiple = np.isclose(qx, x)
ix = np.round((qx - qlow) / q).astype(int)
is_inbounds = np.logical_and(ix >= 0, ix < len(ps))
oks = np.logical_and(is_multiple, is_inbounds)
rval = np.zeros_like(qx)
rval[oks] = np.asarray(ps)[ix[oks]]
if isinstance(x, np.ndarray):
return rval.reshape(x.shape)
return float(rval[0])
def qtable_logpmf(x, q, qlow, xs, ps):
p = qtable_pmf(np.atleast_1d(x), q, qlow, xs, ps)
# -- this if/else avoids np warning about underflow
rval = np.zeros_like(p)
rval[p == 0] = -np.inf
rval[p != 0] = np.log(p[p != 0])
if isinstance(x, np.ndarray):
return rval
return float(rval[0])
|
lognorm_gen
|
python
|
scrapy__scrapy
|
tests/CrawlerRunner/explicit_default_reactor.py
|
{
"start": 157,
"end": 544
}
|
class ____(Spider):
name = "no_request"
custom_settings = {
"TWISTED_REACTOR": None,
}
async def start(self):
return
yield
def main(reactor):
configure_logging(
{"LOG_FORMAT": "%(levelname)s: %(message)s", "LOG_LEVEL": "DEBUG"}
)
runner = CrawlerRunner()
return runner.crawl(NoRequestsSpider)
react(main)
|
NoRequestsSpider
|
python
|
mlflow__mlflow
|
mlflow/telemetry/events.py
|
{
"start": 6088,
"end": 6353
}
|
class ____(Event):
name: str = "create_webhook"
@classmethod
def parse(cls, arguments: dict[str, Any]) -> dict[str, Any] | None:
events = arguments.get("events") or []
return {"events": [str(event) for event in events]}
|
CreateWebhookEvent
|
python
|
great-expectations__great_expectations
|
great_expectations/experimental/metric_repository/metrics.py
|
{
"start": 7012,
"end": 7259
}
|
class ____(MetricRepositoryBaseModel):
"""Collection of Metric objects produced during the same execution run."""
data_asset_id: Union[uuid.UUID, None] = Field(description="Data asset id", default=None)
metrics: Sequence[Metric]
|
MetricRun
|
python
|
doocs__leetcode
|
solution/0400-0499/0448.Find All Numbers Disappeared in an Array/Solution.py
|
{
"start": 0,
"end": 172
}
|
class ____:
def findDisappearedNumbers(self, nums: List[int]) -> List[int]:
s = set(nums)
return [x for x in range(1, len(nums) + 1) if x not in s]
|
Solution
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builder_test/packages/builder_and_mixins/package.py
|
{
"start": 814,
"end": 929
}
|
class ____(BuilderMixin, generic.GenericBuilder):
def install(self, pkg, spec, prefix):
pass
|
GenericBuilder
|
python
|
numba__numba
|
numba/core/serialize.py
|
{
"start": 5406,
"end": 6284
}
|
class ____:
"""Wrap a callable object to be pickled by path to workaround limitation
in pickling due to non-pickleable objects in function non-locals.
Note:
- Do not use this as a decorator.
- Wrapped object must be a global that exists in its parent module and can
be imported by `from the_module import the_object`.
Usage:
>>> def my_fn(x):
>>> ...
>>> wrapped_fn = PickleCallableByPath(my_fn)
>>> # refer to `wrapped_fn` instead of `my_fn`
"""
def __init__(self, fn):
self._fn = fn
def __call__(self, *args, **kwargs):
return self._fn(*args, **kwargs)
def __reduce__(self):
return type(self)._rebuild, (self._fn.__module__, self._fn.__name__,)
@classmethod
def _rebuild(cls, modname, fn_path):
return cls(getattr(sys.modules[modname], fn_path))
|
PickleCallableByPath
|
python
|
django__django
|
tests/admin_filters/tests.py
|
{
"start": 851,
"end": 1495
}
|
class ____(SimpleListFilter):
def lookups(self, request, model_admin):
return (
("the 80s", "the 1980's"),
("the 90s", "the 1990's"),
("the 00s", "the 2000's"),
("other", "other decades"),
)
def queryset(self, request, queryset):
decade = self.value()
if decade == "the 80s":
return queryset.filter(year__gte=1980, year__lte=1989)
if decade == "the 90s":
return queryset.filter(year__gte=1990, year__lte=1999)
if decade == "the 00s":
return queryset.filter(year__gte=2000, year__lte=2009)
|
DecadeListFilter
|
python
|
ray-project__ray
|
rllib/offline/estimators/tests/test_ope.py
|
{
"start": 6409,
"end": 11234
}
|
class ____(unittest.TestCase):
"""Compilation and learning tests for the Fitted-Q Evaluation model"""
@classmethod
def setUpClass(cls) -> None:
ray.init()
env = CliffWalkingWallEnv()
cls.policy = CliffWalkingWallPolicy(
observation_space=env.observation_space,
action_space=env.action_space,
config={},
)
cls.gamma = 0.99
# Collect single episode under optimal policy
obs_batch = []
new_obs = []
actions = []
action_prob = []
rewards = []
terminateds = []
truncateds = []
obs, info = env.reset()
terminated = truncated = False
while not terminated and not truncated:
obs_batch.append(obs)
act, _, extra = cls.policy.compute_single_action(obs)
actions.append(act)
action_prob.append(extra["action_prob"])
obs, rew, terminated, truncated, _ = env.step(act)
new_obs.append(obs)
rewards.append(rew)
terminateds.append(terminated)
truncateds.append(truncated)
cls.batch = SampleBatch(
obs=obs_batch,
actions=actions,
action_prob=action_prob,
rewards=rewards,
terminateds=terminateds,
truncateds=truncateds,
new_obs=new_obs,
)
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_fqe_compilation_and_stopping(self):
"""Compilation tests for FQETorchModel.
(1) Check that it does not modify the underlying batch during training
(2) Check that the stopping criteria from FQE are working correctly
(3) Check that using fqe._compute_action_probs equals brute force
iterating over all actions with policy.compute_log_likelihoods
"""
fqe = FQETorchModel(
policy=self.policy,
gamma=self.gamma,
)
tmp_batch = copy.deepcopy(self.batch)
losses = fqe.train(self.batch)
# Make sure FQETorchModel.train() does not modify the batch
check(tmp_batch, self.batch)
# Make sure FQE stopping criteria are respected
assert len(losses) == fqe.n_iters or losses[-1] < fqe.min_loss_threshold, (
f"FQE.train() terminated early in {len(losses)} steps with final loss"
f"{losses[-1]} for n_iters: {fqe.n_iters} and "
f"min_loss_threshold: {fqe.min_loss_threshold}"
)
# Test fqe._compute_action_probs against "brute force" method
# of computing log_prob for each possible action individually
# using policy.compute_log_likelihoods
obs = torch.tensor(self.batch["obs"], device=fqe.device)
action_probs = fqe._compute_action_probs(obs)
action_probs = convert_to_numpy(action_probs)
tmp_probs = []
for act in range(fqe.policy.action_space.n):
tmp_actions = np.zeros_like(self.batch["actions"]) + act
log_probs = self.policy.compute_log_likelihoods(
actions=tmp_actions,
obs_batch=self.batch["obs"],
)
tmp_probs.append(np.exp(log_probs))
tmp_probs = np.stack(tmp_probs).T
check(action_probs, tmp_probs, decimals=3)
def test_fqe_optimal_convergence(self):
"""Test that FQE converges to the true Q-values for an optimal trajectory
self.batch is deterministic since it is collected under a CliffWalkingWallPolicy
with epsilon = 0.0; check that FQE converges to the true Q-values for self.batch
"""
# If self.batch["rewards"] =
# [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 10],
# and gamma = 0.99, the discounted returns i.e. optimal Q-values are as follows:
q_values = np.zeros(len(self.batch["rewards"]), dtype=float)
q_values[-1] = self.batch["rewards"][-1]
for t in range(len(self.batch["rewards"]) - 2, -1, -1):
q_values[t] = self.batch["rewards"][t] + self.gamma * q_values[t + 1]
print(q_values)
q_model_config = {
"polyak_coef": 1.0,
"model_config": {
"fcnet_hiddens": [],
"activation": "linear",
},
"lr": 0.01,
"n_iters": 5000,
}
fqe = FQETorchModel(
policy=self.policy,
gamma=self.gamma,
**q_model_config,
)
losses = fqe.train(self.batch)
print(losses[-10:])
estimates = fqe.estimate_v(self.batch)
print(estimates)
check(estimates, q_values, decimals=1)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
TestFQE
|
python
|
apache__airflow
|
providers/slack/tests/unit/slack/hooks/test_slack.py
|
{
"start": 22745,
"end": 24208
}
|
class ____:
@pytest.fixture
def mock_get_conn(self):
with mock.patch(
"airflow.providers.slack.hooks.slack.get_async_connection", new_callable=mock.AsyncMock
) as m:
m.return_value = Connection(
conn_id=SLACK_API_DEFAULT_CONN_ID,
conn_type=CONN_TYPE,
password=MOCK_SLACK_API_TOKEN,
)
yield m
@pytest.mark.asyncio
@mock.patch("airflow.providers.slack.hooks.slack.AsyncWebClient")
async def test_get_async_client(self, mock_client, mock_get_conn):
"""Test get_async_client creates AsyncWebClient with correct params."""
hook = SlackHook(slack_conn_id=SLACK_API_DEFAULT_CONN_ID)
await hook.get_async_client()
mock_get_conn.assert_called()
mock_client.assert_called_once_with(token=MOCK_SLACK_API_TOKEN, logger=mock.ANY)
@pytest.mark.asyncio
@mock.patch("airflow.providers.slack.hooks.slack.AsyncWebClient.api_call", new_callable=mock.AsyncMock)
async def test_async_call(self, mock_api_call, mock_get_conn):
"""Test async_call is called correctly."""
hook = SlackHook(slack_conn_id=SLACK_API_DEFAULT_CONN_ID)
test_api_json = {"channel": "test_channel"}
await hook.async_call("chat.postMessage", json=test_api_json)
mock_get_conn.assert_called()
mock_api_call.assert_called_with("chat.postMessage", json=test_api_json)
|
TestSlackHookAsync
|
python
|
joke2k__faker
|
faker/providers/person/sw/__init__.py
|
{
"start": 44,
"end": 8089
}
|
class ____(PersonProvider):
"""
A Faker provider for generating fake Swahili names.
"""
formats = (
"{{first_name_male}} {{last_name_male}}",
"{{first_name_male}} {{last_name_male}}",
"{{first_name_male}} {{last_name_male}}",
"{{first_name_male}} {{last_name_male}}",
"{{first_name_male}} {{last_name_male}} {{last_name_male}}",
"{{first_name_female}} {{last_name_female}}",
"{{first_name_female}} {{last_name_female}}",
"{{first_name_female}} {{last_name_female}}",
"{{first_name_female}} {{last_name_female}}",
"{{first_name_female}} {{last_name_female}} {{last_name_female}}",
"{{prefix_male}} {{first_name_male}} {{last_name_male}}",
"{{prefix_female}} {{first_name_female}} {{last_name_female}}",
"{{prefix_male}} {{first_name_male}} {{last_name_male}}",
"{{prefix_female}} {{first_name_female}} {{last_name_female}}",
)
# first names sourced from:
# 1. https://www.behindthename.com/submit/names/gender/masculine/usage/swahili
# 2. https://github.com/faker-js/faker/blob/next/src/locales/yo_NG/person/male_first_name.ts
first_names_male = (
"Abdu",
"Aijuka",
"Amri",
"Andwele",
"Angalia",
"Angavu",
"Anoni",
"Asani",
"Asanti",
"Athumani",
"Azizi",
"Bahari",
"Bale",
"Balinda",
"Beshte",
"Bibuwa",
"Boma",
"Cheusi",
"Chuki",
"Dai",
"Daudi",
"Duma",
"Dunia",
"Ëakumbu",
"Ekundu",
"Eliakimu",
"Enzi",
"Evance",
"Fahari",
"Fanaka",
"Faraja",
"Hadithi",
"Hamis",
"Harambee",
"Hekima",
"Isaya",
"Issack",
"Ituri",
"Jalia",
"Jangwa",
"Jelani",
"Jua",
"Jumaane",
"Justiniani",
"Kaombwe",
"Kashangaki",
"Kenyangi",
"Khamani",
"Khamisi",
"Kiapo",
"Kiburi",
"Kijana",
"Kijani",
"Kimbilio",
"Kinubi",
"Kipenzi",
"Kiume",
"Kondo",
"Konradi",
"Kovu",
"Kurunzi",
"Kusiima",
"Makini",
"Makunga",
"Makuu",
"Matunda",
"Mavuno",
"Mohamedi",
"Mulele",
"Mwezi",
"Ngamia",
"Ngeni",
"Ntimi",
"Nuhu",
"Nuriat",
"Nwabudike",
"Osogo",
"Pambe",
"Pelaji",
"Popobawa",
"Pumbaa",
"Rashidi",
"Reshoni",
"Risasi",
"Rua",
"Rubani",
"Ruhiu",
"Rungo",
"Sabari",
"Sadaka",
"Sadiki",
"Safari",
"Samweli",
"Seif",
"Shida",
"Sifa",
"Siku",
"Takatifu",
"Thabiti",
"Tisa",
"Tufani",
"Tukufu",
"Ushindi",
"Usiku",
"Uzima",
"Wamwema",
"Yakobo",
"Yohana",
"Yohane",
"Zahur",
"Zende",
"Zuba",
"Zuhri",
"Zwatie",
)
first_names_female = (
"Abigaili",
"Adhra",
"Adia",
"Adimu",
"Akumu",
"Almasi",
"Amani",
"Amondi",
"Anasa",
"Angalia",
"Arusi",
"Asali",
"Asanti",
"Asatira",
"Asmini",
"Atiena",
"Bahari",
"Boma",
"Busara",
"Chaniya",
"Chausiki",
"Chipukizi",
"Chuki",
"Dainess",
"Dalili",
"Enzi",
"Evance",
"Fahari",
"Faisa",
"Fanaka",
"Faraja",
"Farhiya",
"Farijika",
"Gethera",
"Goma",
"Haiba",
"Halisi",
"Hanja",
"Hashiki",
"Hatima",
"Hawehindi",
"Hekima",
"Hidaya",
"Hodari",
"Humaiya",
"Imany",
"Imara",
"Itanya",
"Jahi",
"Jana",
"Jasiri",
"Jina",
"Jua",
"Kaluwa",
"Kaombwe",
"Karama",
"Kaskazi",
"Kiah",
"Kibafupia",
"Kibibi",
"Kiburi",
"Kijana",
"Kimya",
"Kinaya",
"Kiojah",
"Kipenzi",
"Kipepeo",
"Kisima",
"Kiwara",
"Kuchanua",
"Kweli",
"Lailati",
"Laini",
"Madaha",
"Madini",
"Madoa",
"Mahali",
"Maisha",
"Majani",
"Makini",
"Maliza",
"Marini",
"Marjani",
"Matunda",
"Maua",
"Misuli",
"Mkarkara",
"Mrihani",
"Muhima",
"Musila",
"Mwamini",
"Mwasaa",
"Najuma",
"Naki",
"Nashipie",
"Nasra",
"Nathari",
"Nayfa",
"Nelah",
"Niara",
"Nigesa",
"Njozi",
"Nula",
"Nyasi",
"Nyoka",
"Nyoni",
"Nyota",
"Nyuki",
"Opwonya",
"Panya",
"Paskalia",
"Reshoni",
"Rua",
"Sabari",
"Sadao",
"Safari",
"Safiri",
"Sarabi",
"Sarafina",
"Sauti",
"Serafina",
"Shani",
"Shawana",
"Shida",
"Sifa",
"Siku",
"Skolastika",
"Sungara",
"Swala",
"Tambika",
"Tamu",
"Ta-tanisha",
"Tisa",
"Tuere",
"Tufani",
"Udeera",
"Ujamaa",
"Umande",
"Umoja",
"Uzima",
"Waceera",
"Wamwema",
"Waridi",
"Waseme",
"Yasinta",
"Zahnya",
"Zaituni",
"Zumaridi",
"Zuwena",
)
first_names = first_names_male + first_names_female
# last names sourced from :
# 1.https://www.familyeducation.com/baby-names/surname/origin/kenyan
last_names_male = (
"Abwao",
"Adamu",
"Baharia",
"Dhadho",
"Fuli",
"Hassani",
"Juma",
"Kahinu",
"Kimachu",
"Kitumaini",
"Madhubuti",
"Magombo",
"Mathenge",
"Msuya",
"Naomi",
"Nazari",
"Rikke",
"Sayyid",
"Simba",
"Sinema",
"Wario",
"Yudas",
"Abdi",
"Ali",
"Akinyi",
"Anyango",
"Juma",
"Kamau",
"Kibet",
"Kimani",
"Maina",
"Mwangi",
"Obama",
"Ochieng",
"Onyango",
"Otieno",
"Mohamed",
"Hassan",
"Wafula",
"Wanjala",
"Atieno",
"Kariuki",
"Kimutai",
"Kipkorir",
"Kipkirui",
"Kipkemei",
"Kiplagat",
"Kiprono",
"Kipsang",
"Kiptoo",
"Kipruto",
"Mumbi",
"Muthoni",
"Njeri",
"Njoroge",
"Odhiambo",
"Omondi",
"Owuor",
"Wanijiku",
"Wambui",
"Abdullahi",
"Adan",
"Ahmed",
"Auma",
"Barasa",
"Hussein",
"Ibrahim",
"John",
"Mutai",
"Omar",
"Ouma",
"Waweru",
)
# last names are not sex dependent
last_names_female = last_names_male
last_names = last_names_male + last_names_female
prefixes_female = (
"Mrs.",
"Ms.",
"Dr.",
"Bi.",
"Mama",
"Bibi",
"Madam",
"Chief",
"Dkt.",
"Mheshimiwa",
"Mwalimu",
"Mtukufu",
"Malkia",
"Mwanamke",
)
prefixes_male = (
"Mr.",
"Dr.",
"Bwana",
"Mzee",
"Bw.",
"Dkt.",
"Mheshimiwa",
"Mwalimu",
"Mtukufu",
"Mfalme",
)
|
Provider
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-boarddocs/llama_index/readers/boarddocs/base.py
|
{
"start": 261,
"end": 4337
}
|
class ____(BaseReader):
"""
BoardDocs doc reader.
Read public agendas included on a BoardDocs site.
Args:
site (str): The BoardDocs site you'd like to index, e.g. "ca/redwood"
committee_id (str): The committee on the site you want to index
"""
def __init__(
self,
site: str,
committee_id: str,
) -> None:
"""Initialize with parameters."""
self.site = site
self.committee_id = committee_id
self.base_url = "https://go.boarddocs.com/" + site + "/Board.nsf"
# set up the headers required for the server to answer
self.headers = {
"accept": "application/json, text/javascript, */*; q=0.01",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"sec-ch-ua": (
'"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"'
),
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"macOS"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"x-requested-with": "XMLHttpRequest",
}
super().__init__()
def get_meeting_list(self) -> List[dict]:
"""
Returns a list of meetings for the committee.
Args:
None
Returns:
List[dict]: A list of meetings, each with a meetingID, date, and unid
"""
meeting_list_url = self.base_url + "/BD-GetMeetingsList?open"
data = "current_committee_id=" + self.committee_id
response = requests.post(meeting_list_url, headers=self.headers, data=data)
meetingsData = json.loads(response.text)
return [
{
"meetingID": meeting.get("unique", None),
"date": meeting.get("numberdate", None),
"unid": meeting.get("unid", None),
}
for meeting in meetingsData
]
def process_meeting(
self, meeting_id: str, index_pdfs: bool = True
) -> List[Document]:
"""
Returns documents from the given meeting.
"""
agenda_url = self.base_url + "/PRINT-AgendaDetailed"
# set the meetingID & committee
data = "id=" + meeting_id + "&" + "current_committee_id=" + self.committee_id
# POST the request!
response = requests.post(agenda_url, headers=self.headers, data=data)
# parse the returned HTML
soup = BeautifulSoup(response.content, "html.parser")
agenda_date = soup.find("div", {"class": "print-meeting-date"}).string
agenda_title = soup.find("div", {"class": "print-meeting-name"}).string
[fd.a.get("href") for fd in soup.find_all("div", {"class": "public-file"})]
agenda_data = html2text.html2text(response.text)
# TODO: index the linked PDFs in agenda_files!
docs = []
agenda_doc = Document(
text=agenda_data,
doc_id=meeting_id,
extra_info={
"committee": self.committee_id,
"title": agenda_title,
"date": agenda_date,
"url": agenda_url,
},
)
docs.append(agenda_doc)
return docs
def load_data(
self, meeting_ids: Optional[List[str]] = None, **load_kwargs: Any
) -> List[Document]:
"""
Load all meetings of the committee.
Args:
meeting_ids (List[str]): A list of meeting IDs to load. If None, load all meetings.
"""
# if a list of meetings wasn't provided, enumerate them all
if not meeting_ids:
meeting_ids = [
meeting.get("meetingID") for meeting in self.get_meeting_list()
]
# process all relevant meetings & return the documents
docs = []
for meeting_id in meeting_ids:
docs.extend(self.process_meeting(meeting_id))
return docs
|
BoardDocsReader
|
python
|
great-expectations__great_expectations
|
tests/datasource/fluent/data_asset/test_data_asset.py
|
{
"start": 15956,
"end": 16641
}
|
class ____:
def test_get_returns_id(
self,
unset_gx_env_variables: None,
data_context: AbstractDataContext,
) -> None:
# arrange
resource_name = random_name()
ds = data_context.data_sources.add_pandas(
name=resource_name,
)
asset = ds.add_dataframe_asset(name=resource_name)
batch_def = asset.add_batch_definition_whole_dataframe(name=resource_name)
# act
refetched_batch_def = asset.get_batch_definition(resource_name)
# assert
assert batch_def.id
assert refetched_batch_def.id
assert batch_def.id == refetched_batch_def.id
|
TestGetBatchDefinition
|
python
|
pyinstaller__pyinstaller
|
PyInstaller/building/datastruct.py
|
{
"start": 5226,
"end": 7648
}
|
class ____:
invcnum = 0
def __init__(self):
from PyInstaller.config import CONF
# Get a (per class) unique number to avoid conflicts between toc objects
self.invcnum = self.__class__.invcnum
self.__class__.invcnum += 1
self.tocfilename = os.path.join(CONF['workpath'], '%s-%02d.toc' % (self.__class__.__name__, self.invcnum))
self.tocbasename = os.path.basename(self.tocfilename)
self.dependencies = []
def __postinit__(self):
"""
Check if the target needs to be rebuilt and, if so, re-assemble.
`__postinit__` is to be called at the end of `__init__` of every subclass of Target. `__init__` is meant to
set up the parameters and `__postinit__` checks whether a rebuild is required and, if so, calls `assemble()`.
"""
logger.info("checking %s", self.__class__.__name__)
data = None
last_build = misc.mtime(self.tocfilename)
if last_build == 0:
logger.info("Building %s because %s is non existent", self.__class__.__name__, self.tocbasename)
else:
try:
data = misc.load_py_data_struct(self.tocfilename)
except Exception:
logger.info("Building because %s is bad", self.tocbasename)
else:
# create a dict for easier access
data = dict(zip((g[0] for g in self._GUTS), data))
# assemble if previous data was not found or is outdated
if not data or self._check_guts(data, last_build):
self.assemble()
self._save_guts()
_GUTS = []
def _check_guts(self, data, last_build):
"""
Returns True if rebuild/assemble is required.
"""
if len(data) != len(self._GUTS):
logger.info("Building because %s is bad", self.tocbasename)
return True
for attr, func in self._GUTS:
if func is None:
# no check for this value
continue
if func(attr, data[attr], getattr(self, attr), last_build):
return True
return False
def _save_guts(self):
"""
Save the input parameters and the work-product of this run to maybe avoid regenerating it later.
"""
data = tuple(getattr(self, g[0]) for g in self._GUTS)
misc.save_py_data_struct(self.tocfilename, data)
|
Target
|
python
|
ansible__ansible
|
test/sanity/code-smell/required-and-default-attributes.py
|
{
"start": 75,
"end": 768
}
|
class ____(ast.NodeVisitor):
def __init__(self, path: str) -> None:
self.path = path
def visit_Call(self, node: ast.Call) -> None:
if isinstance(node.func, ast.Name) and node.func.id.endswith("FieldAttribute"):
if len([kw for kw in node.keywords if kw.arg in ("default", "required")]) > 1:
print(f"{self.path}:{node.lineno}:{node.col_offset}: use only one of `default` or `required` with `{node.func.id}`")
def main() -> None:
for path in sys.argv[1:] or sys.stdin.read().splitlines():
tree = ast.parse(pathlib.Path(path).read_text(), path)
CallVisitor(path).visit(tree)
if __name__ == "__main__":
main()
|
CallVisitor
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/spec.py
|
{
"start": 3606,
"end": 7064
}
|
class ____(BaseModel):
"""Config for custom insights"""
class Config:
use_enum_values = True
name: str = Field(
title="Name",
description="The name value of insight",
)
level: str = Field(
title="Level",
description="Chosen level for API",
default="ad",
enum=["ad", "adset", "campaign", "account"],
)
fields: Optional[List[ValidFields]] = Field(
title="Fields",
description="A list of chosen fields for fields parameter",
default=[],
)
breakdowns: Optional[List[ValidBreakdowns]] = Field(
title="Breakdowns",
description="A list of chosen breakdowns for breakdowns",
default=[],
)
action_breakdowns: Optional[List[ValidActionBreakdowns]] = Field(
title="Action Breakdowns",
description="A list of chosen action_breakdowns for action_breakdowns",
default=[],
)
action_report_time: str = Field(
title="Action Report Time",
description=(
"Determines the report time of action stats. For example, if a person saw the ad on Jan 1st "
"but converted on Jan 2nd, when you query the API with action_report_time=impression, you see a conversion on Jan 1st. "
"When you query the API with action_report_time=conversion, you see a conversion on Jan 2nd."
),
default="mixed",
airbyte_hidden=True,
enum=["conversion", "impression", "mixed"],
)
time_increment: Optional[PositiveInt] = Field(
title="Time Increment",
description=(
"Time window in days by which to aggregate statistics. The sync will be chunked into N day intervals, where N is the number of days you specified. "
"For example, if you set this value to 7, then all statistics will be reported as 7-day aggregates by starting from the start_date. If the start and end dates are October 1st and October 30th, then the connector will output 5 records: 01 - 06, 07 - 13, 14 - 20, 21 - 27, and 28 - 30 (3 days only). "
"The minimum allowed value for this field is 1, and the maximum is 89."
),
maximum=89,
minimum=1,
default=1,
)
start_date: Optional[datetime] = Field(
title="Start Date",
description="The date from which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z.",
pattern=DATE_TIME_PATTERN,
examples=["2017-01-25T00:00:00Z"],
)
end_date: Optional[datetime] = Field(
title="End Date",
description=(
"The date until which you'd like to replicate data for this stream, in the format YYYY-MM-DDT00:00:00Z. "
"All data generated between the start date and this end date will be replicated. "
"Not setting this option will result in always syncing the latest data."
),
pattern=DATE_TIME_PATTERN,
examples=["2017-01-26T00:00:00Z"],
)
insights_lookback_window: Optional[PositiveInt] = Field(
title="Custom Insights Lookback Window",
description="The attribution window",
maximum=28,
minimum=1,
default=28,
)
insights_job_timeout: Optional[PositiveInt] = Field(
title="Custom Insights Job Timeout",
description="The insights job timeout",
maximum=60,
minimum=10,
default=60,
)
|
InsightConfig
|
python
|
Textualize__textual
|
tests/test_markdownviewer.py
|
{
"start": 1431,
"end": 3272
}
|
class ____(App[None]):
def __init__(self, markdown_string: str) -> None:
self.markdown_string = markdown_string
super().__init__()
def compose(self) -> ComposeResult:
yield MarkdownViewer(self.markdown_string, open_links=False)
async def on_mount(self) -> None:
self.query_one(MarkdownViewer).show_table_of_contents = False
@pytest.mark.parametrize("link", [0, 1])
async def test_markdown_string_viewer_anchor_link(link: int) -> None:
"""Test https://github.com/Textualize/textual/issues/3094
Also https://github.com/Textualize/textual/pull/3244#issuecomment-1710278718."""
async with MarkdownStringViewerApp(
TEST_MARKDOWN.replace("{{file}}", "")
).run_test() as pilot:
# There's not really anything to test *for* here, but the lack of an
# exception is the win (before the fix this is testing it would have
# been FileNotFoundError).
await pilot.click(Markdown, Offset(2, link))
@pytest.mark.parametrize("text", ["Hey [[/test]]", "[i]Hey there[/i]"])
async def test_headings_that_look_like_they_contain_markup(text: str) -> None:
"""Regression test for https://github.com/Textualize/textual/issues/3689.
Things that look like markup are escaped in markdown headings in the table of contents.
"""
document = f"# {text}"
async with MarkdownStringViewerApp(document).run_test() as pilot:
await pilot.pause()
assert pilot.app.query_one(MD.MarkdownH1)._content == Content(text)
toc_tree = pilot.app.query_one(MD.MarkdownTableOfContents).query_one(Tree)
# The toc label looks like "I {text}" but the I is styled so we drop it.
toc_label = toc_tree.root.children[0].label
_, text_label = toc_label.divide([2])
assert text_label == Text(text)
|
MarkdownStringViewerApp
|
python
|
doocs__leetcode
|
solution/1700-1799/1727.Largest Submatrix With Rearrangements/Solution.py
|
{
"start": 0,
"end": 431
}
|
class ____:
def largestSubmatrix(self, matrix: List[List[int]]) -> int:
for i in range(1, len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j]:
matrix[i][j] = matrix[i - 1][j] + 1
ans = 0
for row in matrix:
row.sort(reverse=True)
for j, v in enumerate(row, 1):
ans = max(ans, j * v)
return ans
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/ernie4_5_moe/modeling_ernie4_5_moe.py
|
{
"start": 2883,
"end": 3668
}
|
class ____(nn.Module):
def __init__(self, config, intermediate_size=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
|
Ernie4_5_MoeMLP
|
python
|
donnemartin__interactive-coding-challenges
|
linked_lists/remove_duplicates/test_remove_duplicates.py
|
{
"start": 18,
"end": 1185
}
|
class ____(unittest.TestCase):
def test_remove_dupes(self, linked_list):
print('Test: Empty list')
linked_list.remove_dupes()
self.assertEqual(linked_list.get_all_data(), [])
print('Test: One element list')
linked_list.insert_to_front(2)
linked_list.remove_dupes()
self.assertEqual(linked_list.get_all_data(), [2])
print('Test: General case, duplicates')
linked_list.insert_to_front(1)
linked_list.insert_to_front(1)
linked_list.insert_to_front(3)
linked_list.insert_to_front(2)
linked_list.insert_to_front(3)
linked_list.insert_to_front(1)
linked_list.insert_to_front(1)
linked_list.remove_dupes()
self.assertEqual(linked_list.get_all_data(), [1, 3, 2])
print('Test: General case, no duplicates')
linked_list.remove_dupes()
self.assertEqual(linked_list.get_all_data(), [1, 3, 2])
print('Success: test_remove_dupes\n')
def main():
test = TestRemoveDupes()
linked_list = MyLinkedList(None)
test.test_remove_dupes(linked_list)
if __name__ == '__main__':
main()
|
TestRemoveDupes
|
python
|
pyca__cryptography
|
src/cryptography/utils.py
|
{
"start": 1994,
"end": 2189
}
|
class ____:
def __init__(self, value: object, message: str, warning_class):
self.value = value
self.message = message
self.warning_class = warning_class
|
_DeprecatedValue
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/tracers/log_stream.py
|
{
"start": 6438,
"end": 25425
}
|
class ____(BaseTracer, _StreamingCallbackHandler):
"""Tracer that streams run logs to a stream."""
def __init__(
self,
*,
auto_close: bool = True,
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
# Schema format is for internal use only.
_schema_format: Literal["original", "streaming_events"] = "streaming_events",
) -> None:
"""A tracer that streams run logs to a stream.
Args:
auto_close: Whether to close the stream when the root run finishes.
include_names: Only include runs from Runnables with matching names.
include_types: Only include runs from Runnables with matching types.
include_tags: Only include runs from Runnables with matching tags.
exclude_names: Exclude runs from Runnables with matching names.
exclude_types: Exclude runs from Runnables with matching types.
exclude_tags: Exclude runs from Runnables with matching tags.
_schema_format: Primarily changes how the inputs and outputs are
handled.
**For internal use only. This API will change.**
- 'original' is the format used by all current tracers.
This format is slightly inconsistent with respect to inputs
and outputs.
- 'streaming_events' is used for supporting streaming events,
for internal usage. It will likely change in the future, or
be deprecated entirely in favor of a dedicated async tracer
for streaming events.
Raises:
ValueError: If an invalid schema format is provided (internal use only).
"""
if _schema_format not in {"original", "streaming_events"}:
msg = (
f"Invalid schema format: {_schema_format}. "
f"Expected one of 'original', 'streaming_events'."
)
raise ValueError(msg)
super().__init__(_schema_format=_schema_format)
self.auto_close = auto_close
self.include_names = include_names
self.include_types = include_types
self.include_tags = include_tags
self.exclude_names = exclude_names
self.exclude_types = exclude_types
self.exclude_tags = exclude_tags
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
memory_stream = _MemoryStream[RunLogPatch](loop)
self.lock = threading.Lock()
self.send_stream = memory_stream.get_send_stream()
self.receive_stream = memory_stream.get_receive_stream()
self._key_map_by_run_id: dict[UUID, str] = {}
self._counter_map_by_name: dict[str, int] = defaultdict(int)
self.root_id: UUID | None = None
def __aiter__(self) -> AsyncIterator[RunLogPatch]:
"""Iterate over the stream of run logs.
Returns:
An async iterator over the run log patches.
"""
return self.receive_stream.__aiter__()
def send(self, *ops: dict[str, Any]) -> bool:
"""Send a patch to the stream, return False if the stream is closed.
Args:
*ops: The operations to send to the stream.
Returns:
`True` if the patch was sent successfully, `False` if the stream is closed.
"""
# We will likely want to wrap this in try / except at some point
# to handle exceptions that might arise at run time.
# For now we'll let the exception bubble up, and always return
# True on the happy path.
self.send_stream.send_nowait(RunLogPatch(*ops))
return True
async def tap_output_aiter(
self, run_id: UUID, output: AsyncIterator[T]
) -> AsyncIterator[T]:
"""Tap an output async iterator to stream its values to the log.
Args:
run_id: The ID of the run.
output: The output async iterator.
Yields:
The output value.
"""
async for chunk in output:
# root run is handled in .astream_log()
# if we can't find the run silently ignore
# eg. because this run wasn't included in the log
if (
run_id != self.root_id
and (key := self._key_map_by_run_id.get(run_id))
and (
not self.send(
{
"op": "add",
"path": f"/logs/{key}/streamed_output/-",
"value": chunk,
}
)
)
):
break
yield chunk
def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
"""Tap an output async iterator to stream its values to the log.
Args:
run_id: The ID of the run.
output: The output iterator.
Yields:
The output value.
"""
for chunk in output:
# root run is handled in .astream_log()
# if we can't find the run silently ignore
# eg. because this run wasn't included in the log
if (
run_id != self.root_id
and (key := self._key_map_by_run_id.get(run_id))
and (
not self.send(
{
"op": "add",
"path": f"/logs/{key}/streamed_output/-",
"value": chunk,
}
)
)
):
break
yield chunk
def include_run(self, run: Run) -> bool:
"""Check if a Run should be included in the log.
Args:
run: The Run to check.
Returns:
`True` if the run should be included, `False` otherwise.
"""
if run.id == self.root_id:
return False
run_tags = run.tags or []
if (
self.include_names is None
and self.include_types is None
and self.include_tags is None
):
include = True
else:
include = False
if self.include_names is not None:
include = include or run.name in self.include_names
if self.include_types is not None:
include = include or run.run_type in self.include_types
if self.include_tags is not None:
include = include or any(tag in self.include_tags for tag in run_tags)
if self.exclude_names is not None:
include = include and run.name not in self.exclude_names
if self.exclude_types is not None:
include = include and run.run_type not in self.exclude_types
if self.exclude_tags is not None:
include = include and all(tag not in self.exclude_tags for tag in run_tags)
return include
def _persist_run(self, run: Run) -> None:
# This is a legacy method only called once for an entire run tree
# therefore not useful here
pass
def _on_run_create(self, run: Run) -> None:
"""Start a run."""
if self.root_id is None:
self.root_id = run.id
if not self.send(
{
"op": "replace",
"path": "",
"value": RunState(
id=str(run.id),
streamed_output=[],
final_output=None,
logs={},
name=run.name,
type=run.run_type,
),
}
):
return
if not self.include_run(run):
return
# Determine previous index, increment by 1
with self.lock:
self._counter_map_by_name[run.name] += 1
count = self._counter_map_by_name[run.name]
self._key_map_by_run_id[run.id] = (
run.name if count == 1 else f"{run.name}:{count}"
)
entry = LogEntry(
id=str(run.id),
name=run.name,
type=run.run_type,
tags=run.tags or [],
metadata=(run.extra or {}).get("metadata", {}),
start_time=run.start_time.isoformat(timespec="milliseconds"),
streamed_output=[],
streamed_output_str=[],
final_output=None,
end_time=None,
)
if self._schema_format == "streaming_events":
# If using streaming events let's add inputs as well
entry["inputs"] = _get_standardized_inputs(run, self._schema_format)
# Add the run to the stream
self.send(
{
"op": "add",
"path": f"/logs/{self._key_map_by_run_id[run.id]}",
"value": entry,
}
)
def _on_run_update(self, run: Run) -> None:
"""Finish a run."""
try:
index = self._key_map_by_run_id.get(run.id)
if index is None:
return
ops = []
if self._schema_format == "streaming_events":
ops.append(
{
"op": "replace",
"path": f"/logs/{index}/inputs",
"value": _get_standardized_inputs(run, self._schema_format),
}
)
ops.extend(
[
# Replace 'inputs' with final inputs
# This is needed because in many cases the inputs are not
# known until after the run is finished and the entire
# input stream has been processed by the runnable.
{
"op": "add",
"path": f"/logs/{index}/final_output",
# to undo the dumpd done by some runnables / tracer / etc
"value": _get_standardized_outputs(run, self._schema_format),
},
{
"op": "add",
"path": f"/logs/{index}/end_time",
"value": run.end_time.isoformat(timespec="milliseconds")
if run.end_time is not None
else None,
},
]
)
self.send(*ops)
finally:
if run.id == self.root_id and self.auto_close:
self.send_stream.close()
def _on_llm_new_token(
self,
run: Run,
token: str,
chunk: GenerationChunk | ChatGenerationChunk | None,
) -> None:
"""Process new LLM token."""
index = self._key_map_by_run_id.get(run.id)
if index is None:
return
self.send(
{
"op": "add",
"path": f"/logs/{index}/streamed_output_str/-",
"value": token,
},
{
"op": "add",
"path": f"/logs/{index}/streamed_output/-",
"value": chunk.message
if isinstance(chunk, ChatGenerationChunk)
else token,
},
)
def _get_standardized_inputs(
run: Run, schema_format: Literal["original", "streaming_events"]
) -> dict[str, Any] | None:
"""Extract standardized inputs from a run.
Standardizes the inputs based on the type of the runnable used.
Args:
run: Run object
schema_format: The schema format to use.
Returns:
Valid inputs are only dicts. By convention, inputs always represent an
invocation using named arguments.
None means that the input is not yet known!
"""
if schema_format == "original":
msg = (
"Do not assign inputs with original schema drop the key for now."
"When inputs are added to astream_log they should be added with "
"standardized schema for streaming events."
)
raise NotImplementedError(msg)
inputs = load(run.inputs)
if run.run_type in {"retriever", "llm", "chat_model"}:
return inputs
# new style chains
# These nest an additional 'input' key inside the 'inputs' to make sure
# the input is always a dict. We need to unpack and use the inner value.
inputs = inputs["input"]
# We should try to fix this in Runnables and callbacks/tracers
# Runnables should be using a None type here not a placeholder
# dict.
if inputs == {"input": ""}: # Workaround for Runnables not using None
# The input is not known, so we don't assign data['input']
return None
return inputs
def _get_standardized_outputs(
run: Run, schema_format: Literal["original", "streaming_events", "original+chat"]
) -> Any | None:
"""Extract standardized output from a run.
Standardizes the outputs based on the type of the runnable used.
Args:
run: the run object.
schema_format: The schema format to use.
Returns:
An output if returned, otherwise a None
"""
outputs = load(run.outputs)
if schema_format == "original":
if run.run_type == "prompt" and "output" in outputs:
# These were previously dumped before the tracer.
# Now we needn't do anything to them.
return outputs["output"]
# Return the old schema, without standardizing anything
return outputs
if run.run_type in {"retriever", "llm", "chat_model"}:
return outputs
if isinstance(outputs, dict):
return outputs.get("output", None)
return None
@overload
def _astream_log_implementation(
runnable: Runnable[Input, Output],
value: Any,
config: RunnableConfig | None = None,
*,
stream: LogStreamCallbackHandler,
diff: Literal[True] = True,
with_streamed_output_list: bool = True,
**kwargs: Any,
) -> AsyncIterator[RunLogPatch]: ...
@overload
def _astream_log_implementation(
runnable: Runnable[Input, Output],
value: Any,
config: RunnableConfig | None = None,
*,
stream: LogStreamCallbackHandler,
diff: Literal[False],
with_streamed_output_list: bool = True,
**kwargs: Any,
) -> AsyncIterator[RunLog]: ...
async def _astream_log_implementation(
runnable: Runnable[Input, Output],
value: Any,
config: RunnableConfig | None = None,
*,
stream: LogStreamCallbackHandler,
diff: bool = True,
with_streamed_output_list: bool = True,
**kwargs: Any,
) -> AsyncIterator[RunLogPatch] | AsyncIterator[RunLog]:
"""Implementation of astream_log for a given runnable.
The implementation has been factored out (at least temporarily) as both
astream_log and astream_events rely on it.
Args:
runnable: The runnable to run in streaming mode.
value: The input to the runnable.
config: The config to pass to the runnable.
stream: The stream to send the run logs to.
diff: Whether to yield run log patches (True) or full run logs (False).
with_streamed_output_list: Whether to include a list of all streamed
outputs in each patch. If `False`, only the final output will be included
in the patches.
**kwargs: Additional keyword arguments to pass to the runnable.
Raises:
ValueError: If the callbacks in the config are of an unexpected type.
Yields:
The run log patches or states, depending on the value of `diff`.
"""
# Assign the stream handler to the config
config = ensure_config(config)
callbacks = config.get("callbacks")
if callbacks is None:
config["callbacks"] = [stream]
elif isinstance(callbacks, list):
config["callbacks"] = [*callbacks, stream]
elif isinstance(callbacks, BaseCallbackManager):
callbacks = callbacks.copy()
callbacks.add_handler(stream, inherit=True)
config["callbacks"] = callbacks
else:
msg = (
f"Unexpected type for callbacks: {callbacks}."
"Expected None, list or AsyncCallbackManager."
)
raise ValueError(msg)
# Call the runnable in streaming mode,
# add each chunk to the output stream
async def consume_astream() -> None:
try:
prev_final_output: Output | None = None
final_output: Output | None = None
async for chunk in runnable.astream(value, config, **kwargs):
prev_final_output = final_output
if final_output is None:
final_output = chunk
else:
try:
final_output = final_output + chunk # type: ignore[operator]
except TypeError:
prev_final_output = None
final_output = chunk
patches: list[dict[str, Any]] = []
if with_streamed_output_list:
patches.append(
{
"op": "add",
"path": "/streamed_output/-",
# chunk cannot be shared between
# streamed_output and final_output
# otherwise jsonpatch.apply will
# modify both
"value": copy.deepcopy(chunk),
}
)
patches.extend(
{**op, "path": f"/final_output{op['path']}"}
for op in jsonpatch.JsonPatch.from_diff(
prev_final_output, final_output, dumps=dumps
)
)
await stream.send_stream.send(RunLogPatch(*patches))
finally:
await stream.send_stream.aclose()
# Start the runnable in a task, so we can start consuming output
task = asyncio.create_task(consume_astream())
try:
# Yield each chunk from the output stream
if diff:
async for log in stream:
yield log
else:
state = RunLog(state=None) # type: ignore[arg-type]
async for log in stream:
state += log
yield state
finally:
# Wait for the runnable to finish, if not cancelled (eg. by break)
with contextlib.suppress(asyncio.CancelledError):
await task
|
LogStreamCallbackHandler
|
python
|
pytorch__pytorch
|
torch/_inductor/template_heuristics/triton.py
|
{
"start": 1571,
"end": 1772
}
|
class ____(BaseConfig):
"""
Gemm configuration used for most backends (CPU, CUDA)
"""
group_m: int = 8
ConvConfig = BaseConfig
# FlexAttention Configs
@dataclasses.dataclass
|
GemmConfig
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/felix.py
|
{
"start": 454,
"end": 9408
}
|
class ____(RegexLexer):
"""
For `Felix <http://www.felix-lang.org>`_ source code.
.. versionadded:: 1.2
"""
name = 'Felix'
aliases = ['felix', 'flx']
filenames = ['*.flx', '*.flxh']
mimetypes = ['text/x-felix']
preproc = (
'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
)
keywords = (
'_', '_deref', 'all', 'as',
'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
'when', 'whilst', 'with', 'yield',
)
keyword_directives = (
'_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
'package', 'private', 'pod', 'property', 'public', 'publish',
'requires', 'todo', 'virtual', 'use',
)
keyword_declarations = (
'def', 'let', 'ref', 'val', 'var',
)
keyword_types = (
'unit', 'void', 'any', 'bool',
'byte', 'offset',
'address', 'caddress', 'cvaddress', 'vaddress',
'tiny', 'short', 'int', 'long', 'vlong',
'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
'int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float', 'double', 'ldouble',
'complex', 'dcomplex', 'lcomplex',
'imaginary', 'dimaginary', 'limaginary',
'char', 'wchar', 'uchar',
'charp', 'charcp', 'ucharp', 'ucharcp',
'string', 'wstring', 'ustring',
'cont',
'array', 'varray', 'list',
'lvalue', 'opt', 'slice',
)
keyword_constants = (
'false', 'true',
)
operator_words = (
'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
)
name_builtins = (
'_svc', 'while',
)
name_pseudo = (
'root', 'self', 'this',
)
decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'
tokens = {
'root': [
include('whitespace'),
# Keywords
(words(('axiom', 'ctor', 'fun', 'gen', 'proc', 'reduce',
'union'), suffix=r'\b'),
Keyword, 'funcname'),
(words(('class', 'cclass', 'cstruct', 'obj', 'struct'), suffix=r'\b'),
Keyword, 'classname'),
(r'(instance|module|typeclass)\b', Keyword, 'modulename'),
(words(keywords, suffix=r'\b'), Keyword),
(words(keyword_directives, suffix=r'\b'), Name.Decorator),
(words(keyword_declarations, suffix=r'\b'), Keyword.Declaration),
(words(keyword_types, suffix=r'\b'), Keyword.Type),
(words(keyword_constants, suffix=r'\b'), Keyword.Constant),
# Operators
include('operators'),
# Float Literal
# -- Hex Float
(r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
# -- DecimalFloat
(r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
(r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+%s' % decimal_suffixes, Number.Bin),
# -- Octal
(r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),
# Strings
('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
('([rR][cC]?|[cC][rR])"', String, 'dqs'),
("([rR][cC]?|[cC][rR])'", String, 'sqs'),
('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),
# Punctuation
(r'[\[\]{}:(),;?]', Punctuation),
# Labels
(r'[a-zA-Z_]\w*:>', Name.Label),
# Identifiers
(r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
(r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
(r'[a-zA-Z_]\w*', Name),
],
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
include('comment'),
# Preprocessor
(r'#\s*if\s+0', Comment.Preproc, 'if0'),
(r'#', Comment.Preproc, 'macro'),
],
'operators': [
(r'(%s)\b' % '|'.join(operator_words), Operator.Word),
(r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
],
'comment': [
(r'//(.*?)\n', Comment.Single),
(r'/[*]', Comment.Multiline, 'comment2'),
],
'comment2': [
(r'[^/*]', Comment.Multiline),
(r'/[*]', Comment.Multiline, '#push'),
(r'[*]/', Comment.Multiline, '#pop'),
(r'[/*]', Comment.Multiline),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment, '#pop'),
(r'.*?\n', Comment),
],
'macro': [
include('comment'),
(r'(import|include)(\s+)(<[^>]*?>)',
bygroups(Comment.Preproc, Text, String), '#pop'),
(r'(import|include)(\s+)("[^"]*?")',
bygroups(Comment.Preproc, Text, String), '#pop'),
(r"(import|include)(\s+)('[^']*?')",
bygroups(Comment.Preproc, Text, String), '#pop'),
(r'[^/\n]+', Comment.Preproc),
# (r'/[*](.|\n)*?[*]/', Comment),
# (r'//.*?\n', Comment, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'funcname': [
include('whitespace'),
(r'[a-zA-Z_]\w*', Name.Function, '#pop'),
# anonymous functions
(r'(?=\()', Text, '#pop'),
],
'classname': [
include('whitespace'),
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
# anonymous classes
(r'(?=\{)', Text, '#pop'),
],
'modulename': [
include('whitespace'),
(r'\[', Punctuation, ('modulename2', 'tvarlist')),
default('modulename2'),
],
'modulename2': [
include('whitespace'),
(r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
],
'tvarlist': [
include('whitespace'),
include('operators'),
(r'\[', Punctuation, '#push'),
(r'\]', Punctuation, '#pop'),
(r',', Punctuation),
(r'(with|where)\b', Keyword),
(r'[a-zA-Z_]\w*', Name),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
# included here again for raw strings
(r'\\\\|\\"|\\\n', String.Escape),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
# included here again for raw strings
(r"\\\\|\\'|\\\n", String.Escape),
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
|
FelixLexer
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 292692,
"end": 294800
}
|
class ____(sgqlc.types.Input):
"""Specifies the parameters for a `RepositoryRule` object. Only one
of the fields should be specified.
"""
__schema__ = github_schema
__field_names__ = (
"update",
"required_deployments",
"pull_request",
"required_status_checks",
"commit_message_pattern",
"commit_author_email_pattern",
"committer_email_pattern",
"branch_name_pattern",
"tag_name_pattern",
)
update = sgqlc.types.Field("UpdateParametersInput", graphql_name="update")
"""Parameters used for the `update` rule type"""
required_deployments = sgqlc.types.Field(RequiredDeploymentsParametersInput, graphql_name="requiredDeployments")
"""Parameters used for the `required_deployments` rule type"""
pull_request = sgqlc.types.Field(PullRequestParametersInput, graphql_name="pullRequest")
"""Parameters used for the `pull_request` rule type"""
required_status_checks = sgqlc.types.Field(RequiredStatusChecksParametersInput, graphql_name="requiredStatusChecks")
"""Parameters used for the `required_status_checks` rule type"""
commit_message_pattern = sgqlc.types.Field(CommitMessagePatternParametersInput, graphql_name="commitMessagePattern")
"""Parameters used for the `commit_message_pattern` rule type"""
commit_author_email_pattern = sgqlc.types.Field(CommitAuthorEmailPatternParametersInput, graphql_name="commitAuthorEmailPattern")
"""Parameters used for the `commit_author_email_pattern` rule type"""
committer_email_pattern = sgqlc.types.Field(CommitterEmailPatternParametersInput, graphql_name="committerEmailPattern")
"""Parameters used for the `committer_email_pattern` rule type"""
branch_name_pattern = sgqlc.types.Field(BranchNamePatternParametersInput, graphql_name="branchNamePattern")
"""Parameters used for the `branch_name_pattern` rule type"""
tag_name_pattern = sgqlc.types.Field("TagNamePatternParametersInput", graphql_name="tagNamePattern")
"""Parameters used for the `tag_name_pattern` rule type"""
|
RuleParametersInput
|
python
|
huggingface__transformers
|
utils/test_module/custom_feature_extraction.py
|
{
"start": 52,
"end": 117
}
|
class ____(Wav2Vec2FeatureExtractor):
pass
|
CustomFeatureExtractor
|
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-autodoc/target/typehints.py
|
{
"start": 1342,
"end": 1436
}
|
class ____:
def __new__(cls, i):
# type: (int) -> NewComment
pass
|
NewComment
|
python
|
huggingface__transformers
|
src/transformers/models/apertus/modeling_apertus.py
|
{
"start": 18484,
"end": 21934
}
|
class ____(ApertusPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = ApertusModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, ApertusForCausalLM
>>> model = ApertusForCausalLM.from_pretrained("swiss-ai/Apertus-8B")
>>> tokenizer = AutoTokenizer.from_pretrained("swiss-ai/Apertus-8B")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
ApertusForCausalLM
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-maximum-number-of-marked-indices.py
|
{
"start": 453,
"end": 783
}
|
class ____(object):
def maxNumOfMarkedIndices(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
left = 0
for right in xrange(len(nums)):
if nums[right] >= 2*nums[left]:
left += 1
return min(left, len(nums)//2)*2
|
Solution2
|
python
|
huggingface__transformers
|
src/transformers/models/wavlm/modular_wavlm.py
|
{
"start": 23017,
"end": 23251
}
|
class ____(Wav2Vec2ForXVector):
pass
__all__ = [
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
|
WavLMForXVector
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/lib/test_twodim_base.py
|
{
"start": 1682,
"end": 3794
}
|
class ____(TestCase):
def test_basic(self):
assert_equal(
eye(4), array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
)
assert_equal(
eye(4, dtype="f"),
array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], "f"),
)
assert_equal(eye(3) == 1, eye(3, dtype=bool))
def test_diag(self):
assert_equal(
eye(4, k=1), array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]])
)
assert_equal(
eye(4, k=-1),
array([[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]),
)
def test_2d(self):
assert_equal(eye(4, 3), array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]))
assert_equal(eye(3, 4), array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]))
def test_diag2d(self):
assert_equal(eye(3, 4, k=2), array([[0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]]))
assert_equal(
eye(4, 3, k=-2), array([[0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 1, 0]])
)
def test_eye_bounds(self):
assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])
def test_bool(self):
assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
@xpassIfTorchDynamo_np # (reason="TODO: implement order=non-default")
def test_order(self):
mat_c = eye(4, 3, k=-1)
mat_f = eye(4, 3, k=-1, order="F")
assert_equal(mat_c, mat_f)
assert mat_c.flags.c_contiguous
assert not mat_c.flags.f_contiguous
assert not mat_f.flags.c_contiguous
assert mat_f.flags.f_contiguous
|
TestEye
|
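For readers unfamiliar with the `k` offset these tests exercise, a tiny NumPy illustration:

```python
import numpy as np

# k shifts the diagonal of ones: k=1 sits one step above the main diagonal.
print(np.eye(3, 4, k=1))
# [[0. 1. 0. 0.]
#  [0. 0. 1. 0.]
#  [0. 0. 0. 1.]]
```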
python
|
pytorch__pytorch
|
torch/ao/quantization/fx/utils.py
|
{
"start": 2197,
"end": 38784
}
|
class ____:
node_name_to_qconfig: dict[str, QConfigAny]
node_name_to_scope: dict[str, tuple[str, type]]
prepare_custom_config: PrepareCustomConfig
equalization_node_name_to_qconfig: dict[str, Any]
qconfig_mapping: QConfigMapping
is_qat: bool
observed_node_names: set[str]
is_observed_standalone_module: bool = False
standalone_module_input_quantized_idxs: list[int] | None = None
standalone_module_output_quantized_idxs: list[int] | None = None
def node_arg_is_weight(node: Node, arg: Any) -> bool:
"""Returns if node arg is weight"""
weight_index = None
if "target_dtype_info" in node.meta:
weight_index = node.meta["target_dtype_info"].get("weight_index", None)
if (
weight_index is not None
and weight_index < len(node.args)
and node.args[weight_index] is arg
):
return True
return node.kwargs.get("weight") is arg
def node_arg_is_bias(node: Node, arg: Any) -> bool:
"""Returns if node arg is bias"""
bias_index = None
if "target_dtype_info" in node.meta:
bias_index = node.meta["target_dtype_info"].get("bias_index", None)
if (
bias_index is not None
and bias_index < len(node.args)
and node.args[bias_index] is arg
):
return True
return node.kwargs.get("bias") is arg
def get_custom_module_class_keys(
custom_module_mapping: dict[QuantType, dict[type, type]],
) -> list[Any]:
r"""Get all the unique custom module keys in the custom config dict
e.g.
Input:
{
QuantType.STATIC: {
CustomModule1: ObservedCustomModule
},
QuantType.DYNAMIC: {
CustomModule2: DynamicObservedCustomModule
},
QuantType.WEIGHT_ONLY: {
CustomModule3: WeightOnlyObservedCustomModule
},
}
Output:
# extract the keys across all inner STATIC, DYNAMIC, and WEIGHT_ONLY dicts
[CustomModule1, CustomModule2, CustomModule3]
"""
# using set to dedup
float_custom_module_classes: set[Any] = set()
for quant_mode in [QuantType.STATIC, QuantType.DYNAMIC, QuantType.WEIGHT_ONLY]:
quant_mode_custom_module_config = custom_module_mapping.get(quant_mode, {})
quant_mode_custom_module_classes = set(quant_mode_custom_module_config.keys())
float_custom_module_classes |= quant_mode_custom_module_classes
return list(float_custom_module_classes)
def get_linear_prepack_op_for_dtype(dtype):
if dtype == torch.float16:
return torch.ops.quantized.linear_prepack_fp16
elif dtype == torch.qint8:
return torch.ops.quantized.linear_prepack
else:
raise Exception("can't get linear prepack op for dtype:", dtype) # noqa: TRY002
def get_qconv_prepack_op(conv_op: Callable) -> Callable:
prepack_ops = {
torch.nn.functional.conv1d: torch.ops.quantized.conv1d_prepack,
torch.nn.functional.conv2d: torch.ops.quantized.conv2d_prepack,
torch.nn.functional.conv3d: torch.ops.quantized.conv3d_prepack,
torch.nn.functional.conv_transpose1d: torch.ops.quantized.conv_transpose1d_prepack,
torch.nn.functional.conv_transpose2d: torch.ops.quantized.conv_transpose2d_prepack,
torch.nn.functional.conv_transpose3d: torch.ops.quantized.conv_transpose3d_prepack,
}
prepack_op = prepack_ops.get(conv_op)
if prepack_op is None:
raise AssertionError(f"Didn't find prepack op for {conv_op}")
return prepack_op
# Returns a function that can get a new attribute name for module with given
# prefix, for example,
# >> get_new_observer_name = get_new_attr_name_with_prefix('_observer')
# >> new_name = get_new_observer_name(module)
# new_name will be an unused attribute name on module, e.g. `_observer_1`
def get_new_attr_name_with_prefix(prefix: str) -> Callable:
prefix = prefix.replace(".", "_")
def get_new_attr_name(module: torch.nn.Module):
def get_attr_name(i: int):
return prefix + str(i)
i = 0
attr_name = get_attr_name(i)
while hasattr(module, attr_name):
i += 1
attr_name = get_attr_name(i)
return attr_name
return get_new_attr_name
def collect_producer_nodes(node: Node) -> list[Node] | None:
r"""Starting from a target node, trace back until we hit input or
getattr node. This is used to extract the chain of operators
starting from getattr to the target node, for example::
def forward(self, x):
observed = self.observer(self.weight)
return F.linear(x, observed)
collect_producer_nodes(observed) will either return a list of nodes that
produces the observed node or None if we can't extract a self contained
graph without free variables(inputs of the forward function).
"""
nodes = [node]
frontier = [node]
while frontier:
node = frontier.pop()
all_args = list(node.args) + list(node.kwargs.values())
for arg in all_args:
if not isinstance(arg, Node):
continue
if arg.op == "placeholder":
# hit input, can't fold in this case
return None
nodes.append(arg)
if not (arg.op == "call_function" and arg.target is getattr):
frontier.append(arg)
return nodes
def graph_module_from_producer_nodes(
root: GraphModule, producer_nodes: list[Node]
) -> GraphModule:
r"""Construct a graph module from extracted producer nodes
from `collect_producer_nodes` function
Args:
root: the root module for the original graph
producer_nodes: a list of nodes we use to construct the graph
Return:
A graph module constructed from the producer nodes
"""
if len(producer_nodes) == 0:
raise AssertionError("list of producer nodes can not be empty")
# since we traced back from node to getattr
producer_nodes.reverse()
graph = Graph()
env: dict[Any, Any] = {}
def load_arg(a):
return map_arg(a, lambda node: env[node])
for producer_node in producer_nodes:
env[producer_node] = graph.node_copy(producer_node, load_arg)
graph.output(load_arg(producer_nodes[-1]))
graph_module = GraphModule(root, graph)
return graph_module
# TODO: delete
@functools.cache
def assert_and_get_unique_device(module: torch.nn.Module) -> Any:
"""
Returns the unique device for a module, or None if no device is found.
Throws an error if multiple devices are detected.
"""
return _assert_and_get_unique_device(module)
def create_getattr_from_value(
module: torch.nn.Module,
graph: Graph,
prefix: str,
value: Any,
device: torch.device | None = None,
) -> Node:
"""
Given a value of any type, creates a getattr node corresponding to the value and
registers the value as a buffer to the module.
"""
get_new_attr_name = get_new_attr_name_with_prefix(prefix)
attr_name = get_new_attr_name(module)
if device is None:
device = assert_and_get_unique_device(module)
new_value = (
value.detach().clone()
if isinstance(value, torch.Tensor)
else torch.tensor(value, device=device)
)
module.register_buffer(attr_name, new_value)
# Create get_attr with value
attr_node = graph.create_node("get_attr", attr_name)
return attr_node
def all_node_args_have_no_tensors(
node: Node, modules: dict[str, torch.nn.Module], cache: dict[Node, bool]
) -> bool:
"""
If we know for sure that all of this node's args have no
tensors (are primitives), return True. If we either
find a tensor or are not sure, return False. Note: this
function is not exact.
"""
if cache and node in cache:
return cache[node]
result = False # will be overwritten
if not isinstance(node, Node):
result = True
elif node.op == "placeholder":
result = False
elif node.op == "call_module":
if not isinstance(node.target, str):
raise AssertionError("node.target must be a string for call_module nodes")
if _is_activation_post_process(modules[node.target]):
result = all_node_args_have_no_tensors(node.args[0], modules, cache) # type: ignore[arg-type]
elif node.op == "call_module":
result = False
elif node.op == "call_function" and node.target is operator.getitem:
result = all_node_args_have_no_tensors(node.args[0], modules, cache) # type: ignore[arg-type]
elif node.op == "get_attr":
result = False
elif node.target is getattr and node.args[1] in ["ndim", "shape"]:
# x1 = x0.ndim
result = True
elif node.op == "call_method" and node.target == "size":
# x1 = x0.size(0)
result = True
else:
found_one_tensor = False
for arg in node.args:
if isinstance(arg, list):
for list_el in arg:
if isinstance(list_el, Node):
this_list_el_args_have_no_tensors = (
all_node_args_have_no_tensors(list_el, modules, cache)
)
found_one_tensor = found_one_tensor or (
not this_list_el_args_have_no_tensors
)
# If found_one_tensor is True, there is no point in
# recursing further as the end result will always
# be True.
# TODO(future PR): remove this entire function and
# change to dtype inference without recursion.
if found_one_tensor:
result = not found_one_tensor
if cache:
cache[node] = result
return result
elif isinstance(arg, int):
pass
else:
if isinstance(arg, Node):
this_arg_args_have_no_tensors = all_node_args_have_no_tensors(
arg, modules, cache
)
found_one_tensor = found_one_tensor or (
not this_arg_args_have_no_tensors
)
# If found_one_tensor is True, there is no point in
# recursing further as the end result will always
# be True.
# TODO(future PR): remove this entire function and
# change to dtype inference without recursion.
if found_one_tensor:
result = not found_one_tensor
if cache:
cache[node] = result
return result
else:
found_one_tensor = True
result = not found_one_tensor
if cache:
cache[node] = result
return result
def all_node_args_except_first(node: Node) -> list[int]:
"""
Returns all node arg indices after first
"""
return list(range(1, len(node.args)))
def return_arg_list(arg_indices: list[int]) -> Callable[[Node], list[int]]:
"""
Constructs a function that takes a node as arg and returns the arg_indices
that are valid for node.args
"""
def arg_indices_func(node: Node) -> list[int]:
return [i for i in arg_indices if i < len(node.args)]
return arg_indices_func
NodeInfo = namedtuple("NodeInfo", "op target")
# this dict identifies which indices of a node are non tensors
# so that they can be propagated correctly since inserting observers
# for them would cause errors
NON_OBSERVABLE_ARG_DICT: dict[
NodeInfo, dict[type | torch.dtype, Callable[[Node], list[int]]]
] = {
NodeInfo("call_method", "masked_fill"): {
torch.bool: return_arg_list([1]),
float: return_arg_list([2]),
},
NodeInfo("call_method", "permute"): {int: all_node_args_except_first},
NodeInfo("call_method", "repeat"): {int: all_node_args_except_first},
NodeInfo("call_method", "reshape"): {int: all_node_args_except_first},
NodeInfo("call_method", "size"): {int: return_arg_list([1])},
NodeInfo("call_method", "transpose"): {int: all_node_args_except_first},
NodeInfo("call_method", torch.transpose): {int: all_node_args_except_first},
NodeInfo("call_method", "unsqueeze"): {int: return_arg_list([1])},
NodeInfo("call_method", "unsqueeze_"): {int: return_arg_list([1])},
NodeInfo("call_method", torch.unsqueeze): {int: return_arg_list([1])},
NodeInfo("call_method", "view"): {int: all_node_args_except_first},
}
EMPTY_ARG_DICT: dict[type | torch.dtype, Callable[[Node], list[int]]] = {}
def get_non_observable_arg_indexes_and_types(
node: Node,
) -> dict[type | torch.dtype, Callable[[Node], list[int]]]:
"""
Returns a dict with of non float tensor types as keys and values which correspond to a
function to retrieve the list (which takes the node as an argument)
"""
info = NodeInfo(node.op, node.target)
return NON_OBSERVABLE_ARG_DICT.get(info, EMPTY_ARG_DICT)
def maybe_get_next_module(
node: Node,
modules: dict[str, nn.Module],
target_module_type: type[nn.Module] | None = None,
target_functional_type: Any = None,
) -> Node | None:
"""Gets the next module that matches what is needed in
is_target_module_type if it exists
Args:
node: The node whose users we want to look at
target_module_type: Module type that we want to check
target_functional_type: Functional type that we want to check
"""
for user in node.users:
if (
user.op == "call_module"
and target_module_type is not None
and isinstance(modules[str(user.target)], target_module_type)
):
return user
elif (
user.op == "call_function"
and target_functional_type is not None
and user.target == target_functional_type
):
return user
return None
def create_node_from_old_node_preserve_meta(
quantized_graph: Graph,
create_node_args: tuple[Any, ...],
old_node: Node,
) -> Node:
"""
Creates `new_node` and copies the necessary metadata to it from `old_node`.
"""
new_node = quantized_graph.create_node(*create_node_args)
new_node.stack_trace = old_node.stack_trace
return new_node
def get_skipped_module_name_and_classes(
prepare_custom_config: PrepareCustomConfig, is_standalone_module: bool
) -> tuple[list[str], list[type[Any]]]:
skipped_module_names = copy.copy(prepare_custom_config.non_traceable_module_names)
skipped_module_classes = copy.copy(
prepare_custom_config.non_traceable_module_classes
)
if not is_standalone_module:
# standalone module and custom module config are applied in top level module
skipped_module_names += list(
prepare_custom_config.standalone_module_names.keys()
)
skipped_module_classes += list(
prepare_custom_config.standalone_module_classes.keys()
)
skipped_module_classes += get_custom_module_class_keys(
prepare_custom_config.float_to_observed_mapping
)
return skipped_module_names, skipped_module_classes
def _is_custom_module_lstm(
node: Node,
named_modules: dict[str, torch.nn.Module],
qconfig: QConfigAny = None,
# QuantizeHandler, but we cannot include the type here due to circular imports
qhandler: Any | None = None,
) -> bool:
"""
Return whether this refers to the custom module LSTM flow.
"""
mod = _get_module(node, named_modules)
if qconfig is not None and qhandler is not None:
if not isinstance(
qhandler, torch.ao.quantization.fx.quantize_handler.QuantizeHandler
): # type: ignore[attr-defined]
raise AssertionError("qhandler must be a QuantizeHandler when provided")
return (
isinstance(mod, torch.nn.LSTM)
and activation_is_statically_quantized(qconfig)
and qhandler.is_custom_module()
)
else:
return isinstance(mod, torch.ao.nn.quantizable.LSTM)
def _is_custom_module_mha(
node: Node,
named_modules: dict[str, torch.nn.Module],
qconfig: QConfigAny = None,
# QuantizeHandler, but we cannot include the type here due to circular imports
qhandler: Any | None = None,
) -> bool:
"""
Return whether this refers to the custom module MultiheadAttention flow.
"""
mod = _get_module(node, named_modules)
if qconfig is not None and qhandler is not None:
if not isinstance(
qhandler, torch.ao.quantization.fx.quantize_handler.QuantizeHandler
): # type: ignore[attr-defined]
raise AssertionError("qhandler must be a QuantizeHandler when provided")
return (
isinstance(mod, torch.nn.MultiheadAttention)
and activation_is_statically_quantized(qconfig)
and qhandler.is_custom_module()
)
else:
return isinstance(mod, torch.ao.nn.quantizable.MultiheadAttention)
def _get_module(
node: Node, named_modules: dict[str, torch.nn.Module]
) -> torch.nn.Module | None:
"""
If `node` refers to a call_module node, return the module, else None.
"""
if node.op == "call_module" and str(node.target) in named_modules:
return named_modules[str(node.target)]
else:
return None
def _insert_dequant_stub(
node: Node,
model: torch.nn.Module,
named_modules: dict[str, torch.nn.Module],
graph: Graph,
) -> Node:
"""
Attach a `DeQuantStub` to the model and create a node that calls this
`DeQuantStub` on the output of `node`, similar to how observers are inserted.
"""
prefix = "dequant_stub_"
get_new_dequant_stub_name = get_new_attr_name_with_prefix(prefix)
dequant_stub_name = get_new_dequant_stub_name(model)
dequant_stub = DeQuantStub()
setattr(model, dequant_stub_name, dequant_stub)
named_modules[dequant_stub_name] = dequant_stub
with graph.inserting_after(node):
return graph.call_module(dequant_stub_name, (node,))
def _insert_dequant_stubs_for_custom_module_lstm_output(
node: Node,
model: torch.nn.Module,
named_modules: dict[str, torch.nn.Module],
graph: Graph,
) -> Node:
"""
Insert DeQuantStubs after each internal output node of custom module LSTM.
Custom module LSTM outputs are nested tuples of the structure (output, (hidden0, hidden1)),
Since we cannot dequantize a tuple as a whole, we must first break down the tuple into its
components through `getitem`. This function transforms the graph as follows:
(1) Split the LSTM node into (output, (hidden0, hidden1))
(2) Insert a DeQuantStub after each internal node
(3) Recombine the DeQuantStubs into the same structure as before
(4) Reroute all consumers of the original LSTM node and its sub-nodes
(e.g. lstm[0])
Before:
lstm_output
|
v
original_user(s)
After:
lstm_output
/ \\
/ (getitem) \\
/ \\
v v
output hidden
| / \\
(DeQuantStub) (getitem)
| / \\
v v v
output_dq hidden0 hidden1
| | |
| (DeQuantStub) (DeQuantStub)
| | |
| v v
| hidden0_dq hidden1_dq
| \\ /
| (tuple)
| \\ /
| v v
| hidden_dq
\\ /
\\ (tuple) /
v v
lstm_output_dq
|
v
original_user(s)
For step (4), reroute all users of the original LSTM node(s) as follows:
lstm_output -> lstm_output_dq
lstm_output[0] -> output_dq
lstm_output[1] -> hidden_dq
lstm_output[1][0] -> hidden0_dq
lstm_output[1][1] -> hidden1_dq
Return the node `lstm_output_dq`.
"""
# (1) Split the LSTM node into (output, (hidden0, hidden1))
# (2) Insert a DeQuantStub after each internal node
with graph.inserting_after(node):
output = graph.call_function(operator.getitem, (node, 0))
output_dq = _insert_dequant_stub(output, model, named_modules, graph)
with graph.inserting_after(output_dq):
hidden = graph.call_function(operator.getitem, (node, 1))
with graph.inserting_after(hidden):
hidden0 = graph.call_function(operator.getitem, (hidden, 0))
hidden0_dq = _insert_dequant_stub(hidden0, model, named_modules, graph)
with graph.inserting_after(hidden0_dq):
hidden1 = graph.call_function(operator.getitem, (hidden, 1))
hidden1_dq = _insert_dequant_stub(hidden1, model, named_modules, graph)
# (3) Recombine the DeQuantStubs into the same structure as before
with graph.inserting_after(hidden1_dq):
hidden_dq = graph.call_function(tuple, ([hidden0_dq, hidden1_dq],))
with graph.inserting_after(hidden_dq):
lstm_output_dq = graph.call_function(tuple, ([output_dq, hidden_dq],))
# (4) Reroute all consumers of the original LSTM node and its sub-nodes
for user in list(node.users.keys()):
if user != output and user != hidden:
user.replace_input_with(node, lstm_output_dq)
# The getitem and tuple nodes we added here may interfere with reference quantized
# pattern matching, so we need to redirect the consumers of internal nodes to the
# corresponding nodes with DeQuantStubs (e.g. lstm_output_dq[0] -> output_dq) attached,
# in order to preserve reference patterns like "dequantize - consumer - quantize".
_reroute_tuple_getitem_pattern(graph)
return lstm_output_dq
def _maybe_get_custom_module_lstm_from_node_arg(
arg: Node,
named_modules: dict[str, torch.nn.Module],
) -> Node | None:
"""
Given an argument of a node, if the argument refers to the path through which the node
is a consumer of custom module LSTM, return the custom module LSTM node, or None otherwise.
This is used to determine whether a node is a consumer of custom module LSTM, and, if so,
skip inserting input observers for this node. This is because custom module LSTM produces
quantized outputs, so inserting an input observer for the consumer of custom module LSTM
would unnecessarily quantize the outputs again.
lstm -> consumer
In practice, however, custom module LSTM outputs a tuple (output, (hidden0, hidden1)) with
DeQuantStubs attached to each internal node (see `_insert_dequant_stubs_for_custom_module_lstm_output`).
This tuple can be consumed in one of four ways:
lstm -> getitem -> DeQuantStub -> consumer # consume lstm[0]
lstm -> getitem -> getitem -> DeQuantStub -> tuple -> consumer # consume lstm[1]
lstm -> getitem -> getitem -> DeQuantStub -> consumer # consume lstm[1][0] or lstm[1][1]
lstm -> getitem -> DeQuantStub -> tuple -> consumer # consume lstm
Thus, we must match against the above patterns instead of simply checking the parent node
to determine whether this node is a consumer of a custom module LSTM.
"""
def match_dq(a):
return isinstance(_get_module(a, named_modules), DeQuantStub)
def match_lstm(a):
return _is_custom_module_lstm(a, named_modules)
def match_getitem(a):
return a.op == "call_function" and a.target is operator.getitem
def match_tuple(a):
return a.op == "call_function" and a.target is tuple
def _match_pattern(match_pattern: list[Callable]) -> Node | None:
"""
Traverse up the graph and match the args one by one.
If there is a match, return the last matched node, or None otherwise.
"""
a = arg
for i, match in enumerate(match_pattern):
if not match(a):
return None
# Match next arg, for tuple the arg is a tuple of a list, e.g. ([dq_1, other_node],)
if i < len(match_pattern) - 1:
if match is match_tuple:
a = a.args[0][0] # type: ignore[assignment,index]
else:
a = a.args[0] # type: ignore[assignment]
# pyrefly: ignore [bad-return]
return a
all_match_patterns = [
[match_dq, match_getitem, match_lstm],
[match_tuple, match_dq, match_getitem, match_getitem, match_lstm],
[match_dq, match_getitem, match_getitem, match_lstm],
[match_tuple, match_dq, match_getitem, match_lstm],
]
for p in all_match_patterns:
matched_node = _match_pattern(p)
if matched_node is not None:
return matched_node
return None
def _reroute_tuple_getitem_pattern(graph: Graph):
"""
Search for patterns where N consecutive `tuple` call_function nodes are followed by
N consecutive `getitem` call_function nodes that are "reverses" of the `tuple` nodes.
If we find this pattern, reroute the consumers of the last `getitem` to skip these
N `tuple` and `getitem` nodes.
Before:
a b c
| \\ /
\\ tuple
\\ /
tuple
|
getitem(1)
|
getitem(0)
|
d
After:
b
|
d
"""
def find_patterns(
node: Node,
index_stack: list[int],
current_pattern: list[Node],
matched_patterns: list[list[Node]],
seen: set[tuple[Node, tuple[int, ...]]],
):
"""
Traverse the graph recursively to match for the N-tuple - N-getitem patterns,
starting at the given node.
We use a stack to keep track of the expected `getitem` indices, since these are
reversed from the `tuple` indices. In the above example, the stack after
(b -> tuple -> tuple) will be [0, 1], which will be popped by getitem(1) first
and then by getitem(0).
TODO: traverse upwards from the output and handle the case when tuple is not a
separate node, e.g. graph.call_function(operator.getitem, args=(a, (b, c)))
"""
if len(index_stack) == 0 and len(current_pattern) > 0:
matched_patterns.append(copy.copy(current_pattern))
current_pattern.clear()
# Avoid duplicating work
state = (node, tuple(index_stack))
if state in seen:
return
seen.add(state)
# Iterate through users of this node to find tuple/getitem nodes to match
for user in node.users:
if user.op == "call_function" and user.target is tuple:
for i, user_arg in enumerate(user.args[0]): # type: ignore[arg-type]
if user_arg == node:
index_stack.append(i)
current_pattern.append(user)
find_patterns(
user, index_stack, current_pattern, matched_patterns, seen
)
elif user.op == "call_function" and user.target is operator.getitem:
if len(index_stack) > 0:
if user.args[1] == index_stack[-1]:
index_stack.pop()
current_pattern.append(user)
find_patterns(
user, index_stack, current_pattern, matched_patterns, seen
)
return matched_patterns
# Collect all matched patterns
matched_patterns: list[list[Node]] = []
seen: set[tuple[Node, tuple[int, ...]]] = set() # (node, index_stack)
for node in graph.nodes:
find_patterns(node, [], [], matched_patterns, seen)
# For each pattern, redirect all consumers of the last getitem node to the correct input
# of the first tuple node
for pattern in matched_patterns:
first_tuple = pattern[0]
last_getitem = pattern[-1]
if not (first_tuple.op == "call_function" and first_tuple.target is tuple):
raise AssertionError(
"first tuple node must be a call_function with target tuple"
)
if not (
last_getitem.op == "call_function"
and last_getitem.target is operator.getitem
):
raise AssertionError(
"last getitem node must be a call_function with target operator.getitem"
)
last_getitem_index = last_getitem.args[1]
new_input = first_tuple.args[0][last_getitem_index] # type: ignore[index]
for user in list(last_getitem.users.keys()):
user.replace_input_with(last_getitem, new_input) # type: ignore[arg-type]
def _get_observer_from_activation_post_process(
activation_post_process: ObserverBase | FakeQuantizeBase,
) -> ObserverBase:
"""
If `activation_post_process` is an observer, return the observer.
If `activation_post_process` is a fake quantize, return the internal observer.
"""
if isinstance(activation_post_process, ObserverBase):
return activation_post_process
else:
if not isinstance(activation_post_process, FakeQuantizeBase):
raise AssertionError(
"activation_post_process must be an ObserverBase or FakeQuantizeBase"
)
return activation_post_process.activation_post_process # type: ignore[return-value]
def _qconfig_satisfies_dtype_config_constraints(
qconfig: QConfigAny,
dtype_with_constraints: DTypeWithConstraints,
is_activation: bool = True,
) -> bool:
"""
Return whether `qconfig` satisfies the following constraints from the backend,
specified through the activation and weight DTypeWithConstraints.
1. QConfig specified a quantization range that falls within the backend's, if any
2. QConfig specified a min scale value that is >= the backend's, if any
3. QConfig specified a FixedQParamsObserver or FixedQParamsFakeQuantize that has
scale and zero point that match the backend's, if any
If `is_activation` is True, we check `qconfig.activation`, else we check `qconfig.weight`.
If `qconfig` or `dtype_with_constraints.dtype` is None, or the dtypes do not match, return True.
"""
# TODO: log warnings only when the user enabled a debug flag
def _activation_post_process_satisfies_dtype_config_constraints(
activation_post_process: ObserverBase | FakeQuantizeBase,
dtype_with_constraints: DTypeWithConstraints,
debug_string: str,
) -> bool:
observer = _get_observer_from_activation_post_process(activation_post_process)
app_quant_min = getattr(observer, "quant_min", None)
app_quant_max = getattr(observer, "quant_max", None)
# TODO: for now, just use the existing eps value as scale_min. In the future, we should
# resolve the differences between the two, either by renaming eps or some other way
app_scale_min = getattr(observer, "eps", None)
backend_quant_min = dtype_with_constraints.quant_min_lower_bound
backend_quant_max = dtype_with_constraints.quant_max_upper_bound
backend_scale_min = dtype_with_constraints.scale_min_lower_bound
backend_scale_exact_match = dtype_with_constraints.scale_exact_match
backend_zero_point_exact_match = dtype_with_constraints.zero_point_exact_match
# check quantization ranges
if backend_quant_min is not None and backend_quant_max is not None:
if app_quant_min is None or app_quant_max is None:
warnings.warn(
f"QConfig {debug_string} must specify 'quant_min' and 'quant_max', ignoring {qconfig}",
stacklevel=2,
)
return False
elif app_quant_min < backend_quant_min or app_quant_max > backend_quant_max:
warnings.warn(
f"QConfig {debug_string} quantization range must fall within the backend's:\n"
f"QConfig range = ({app_quant_min}, {app_quant_max}), "
f"BackendConfig range = ({backend_quant_min}, {backend_quant_max}), "
f"ignoring {qconfig}",
stacklevel=2,
)
return False
# check scale min
if backend_scale_min is not None:
if app_scale_min is None:
warnings.warn(
f"QConfig {debug_string} must specify 'eps', ignoring {qconfig}",
stacklevel=2,
)
return False
if app_scale_min < backend_scale_min:
warnings.warn(
f"QConfig {debug_string} eps ({app_scale_min}) must be greater than or equal to "
f"the backend's min scale value ({backend_scale_min}), ignoring {qconfig}",
stacklevel=2,
)
return False
# check fixed scale and zero point
if (
backend_scale_exact_match is not None
and backend_zero_point_exact_match is not None
):
# For tests only, accept the following qconfigs for now
# TODO: handle fp16 qconfigs properly
for accepted_qconfig in [float16_static_qconfig, float16_dynamic_qconfig]:
if qconfig_equals(qconfig, accepted_qconfig):
return True
suggestion_str = (
"Please use torch.ao.quantization.get_default_qconfig_mapping or "
"torch.ao.quantization.get_default_qat_qconfig_mapping. Example:\n"
' qconfig_mapping = get_default_qconfig_mapping("fbgemm")\n'
" model = prepare_fx(model, qconfig_mapping, example_inputs)"
)
if not isinstance(
activation_post_process, FixedQParamsObserver
) and not isinstance(activation_post_process, FixedQParamsFakeQuantize):
warnings.warn(
f"QConfig must specify a FixedQParamsObserver or a FixedQParamsFakeQuantize "
f"for fixed qparams ops, ignoring {qconfig}.\n{suggestion_str}",
stacklevel=2,
)
return False
if (
observer.scale != backend_scale_exact_match
or observer.zero_point != backend_zero_point_exact_match
):
warnings.warn(
f"QConfig fixed scale ({observer.scale}) and zero point ({observer.zero_point}) "
f"do not match the backend's ({backend_scale_exact_match} and {backend_zero_point_exact_match}), "
f"ignoring {qconfig}.\n{suggestion_str}",
stacklevel=2,
)
return False
return True
if qconfig is None or dtype_with_constraints.dtype is None:
return True
activation_post_process_ctr = (
qconfig.activation if is_activation else qconfig.weight
)
debug_string = "activation" if is_activation else "weight"
satisfies_constraints = True
if activation_post_process_ctr is not None:
activation_post_process = activation_post_process_ctr()
if not _is_activation_post_process(activation_post_process):
raise AssertionError(
"activation_post_process must be an activation post process"
)
# If dtypes don't match, don't check the activation_post_process and return True early
if activation_post_process.dtype != dtype_with_constraints.dtype:
return True
satisfies_constraints = (
_activation_post_process_satisfies_dtype_config_constraints(
activation_post_process, dtype_with_constraints, debug_string
)
)
return satisfies_constraints
|
ObservedGraphModuleAttrs
|
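One helper from the file above, `get_new_attr_name_with_prefix`, is easy to exercise in isolation. A usage sketch, assuming the function is importable from `torch.ao.quantization.fx.utils` as the file path suggests:

```python
import torch.nn as nn
from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix  # path inferred from the record above

get_name = get_new_attr_name_with_prefix("_observer_")
m = nn.Module()
setattr(m, get_name(m), nn.Identity())  # registers "_observer_0"
setattr(m, get_name(m), nn.Identity())  # next free name: "_observer_1"
print(hasattr(m, "_observer_0"), hasattr(m, "_observer_1"))  # True True
```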
python
|
pallets__quart
|
src/quart/cli.py
|
{
"start": 6843,
"end": 9487
}
|
class ____:
def __init__(
self,
app_import_path: str | None = None,
create_app: Callable[..., Quart] | None = None,
set_debug_flag: bool = True,
) -> None:
self.app_import_path = app_import_path
self.create_app = create_app
self.data: dict[Any, Any] = {}
self.set_debug_flag = set_debug_flag
self._loaded_app: Quart | None = None
def load_app(self) -> Quart:
if self._loaded_app is not None:
return self._loaded_app
if self.create_app is not None:
app = self.create_app()
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, maxsplit=1) + [None]
)[:2]
import_name = prepare_import(path)
app = locate_app(import_name, name)
else:
import_name = prepare_import("app.py")
app = locate_app(import_name, None)
if not app:
raise NoAppException(
"Could not locate a Quart application. Use the"
" 'quart --app' option, 'QUART_APP' environment"
" variable, or an 'app.py' file in the"
" current directory."
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(fn: Callable | None = None) -> Callable:
# decorator was used with parenthesis
if fn is None:
return with_appcontext
@click.pass_context
def decorator(__ctx: click.Context, *args: Any, **kwargs: Any) -> Any:
async def _inner() -> Any:
async with __ctx.ensure_object(ScriptInfo).load_app().app_context():
try:
return __ctx.invoke(fn, *args, **kwargs)
except RuntimeError as error:
if (
error.args[0]
== "Cannot run the event loop while another loop is running"
):
click.echo(
"The appcontext cannot be used with a command that"
" runs an event loop. See quart#361 for more details"
)
raise
return asyncio.run(_inner())
return functools.update_wrapper(decorator, fn)
|
ScriptInfo
|
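For context, `with_appcontext` defined in this module is normally stacked under `click.command` so the command body runs inside the application context. A plausible usage sketch; the command name and body are invented:

```python
import click
from quart.cli import with_appcontext

@click.command("seed-db")
@with_appcontext
def seed_db():
    # Runs inside `app.app_context()`, so app-bound extensions are available here.
    click.echo("seeding...")
```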
python
|
xlwings__xlwings
|
tests/test_sheet.py
|
{
"start": 81,
"end": 1791
}
|
class ____(TestBase):
def test_active(self):
self.assertEqual(self.wb2.sheets.active.name, self.wb2.sheets[0].name)
def test_index(self):
self.assertEqual(self.wb1.sheets[0].name, self.wb1.sheets(1).name)
def test_len(self):
self.assertEqual(len(self.wb1.sheets), 3)
def del_sheet(self):
name = self.wb1.sheets[0].name
del self.wb1.sheets[0]
self.assertEqual(len(self.wb1.sheets), 2)
self.assertFalse(self.wb1.sheets[0].name, name)
def test_iter(self):
for ix, sht in enumerate(self.wb1.sheets):
self.assertEqual(self.wb1.sheets[ix].name, sht.name)
def test_add(self):
self.wb1.sheets.add()
self.assertEqual(len(self.wb1.sheets), 4)
def test_add_before(self):
new_sheet = self.wb1.sheets.add(before="Sheet1")
self.assertEqual(self.wb1.sheets[0].name, new_sheet.name)
def test_add_after(self):
self.wb1.sheets.add(after=len(self.wb1.sheets))
self.assertEqual(
self.wb1.sheets[(len(self.wb1.sheets) - 1)].name,
self.wb1.sheets.active.name,
)
self.wb1.sheets.add(after=1)
self.assertEqual(self.wb1.sheets[1].name, self.wb1.sheets.active.name)
def test_add_default(self):
current_index = self.wb1.sheets.active.index
self.wb1.sheets.add()
self.assertEqual(self.wb1.sheets.active.index, current_index)
def test_add_named(self):
self.wb1.sheets.add("test", before=1)
self.assertEqual(self.wb1.sheets[0].name, "test")
def test_add_name_already_taken(self):
with self.assertRaises(Exception):
self.wb1.sheets.add("Sheet1")
|
TestSheets
|
python
|
pypa__setuptools
|
setuptools/tests/test_config_discovery.py
|
{
"start": 15237,
"end": 22580
}
|
class ____:
def _simulate_package_with_data_files(self, tmp_path, src_root):
files = [
f"{src_root}/proj/__init__.py",
f"{src_root}/proj/file1.txt",
f"{src_root}/proj/nested/file2.txt",
]
_populate_project_dir(tmp_path, files, {})
manifest = """
global-include *.py *.txt
"""
(tmp_path / "MANIFEST.in").write_text(DALS(manifest), encoding="utf-8")
EXAMPLE_SETUPCFG = """
[metadata]
name = proj
version = 42
[options]
include_package_data = True
"""
EXAMPLE_PYPROJECT = """
[project]
name = "proj"
version = "42"
"""
PYPROJECT_PACKAGE_DIR = """
[tool.setuptools]
package-dir = {"" = "src"}
"""
@pytest.mark.parametrize(
("src_root", "files"),
[
(".", {"setup.cfg": DALS(EXAMPLE_SETUPCFG)}),
(".", {"pyproject.toml": DALS(EXAMPLE_PYPROJECT)}),
("src", {"setup.cfg": DALS(EXAMPLE_SETUPCFG)}),
("src", {"pyproject.toml": DALS(EXAMPLE_PYPROJECT)}),
(
"src",
{
"setup.cfg": DALS(EXAMPLE_SETUPCFG)
+ DALS(
"""
packages = find:
package_dir =
=src
[options.packages.find]
where = src
"""
)
},
),
(
"src",
{
"pyproject.toml": DALS(EXAMPLE_PYPROJECT)
+ DALS(
"""
[tool.setuptools]
package-dir = {"" = "src"}
"""
)
},
),
],
)
def test_include_package_data(self, tmp_path, src_root, files):
"""
Make sure auto-discovery does not affect package include_package_data.
See issue #3196.
"""
jaraco.path.build(files, prefix=str(tmp_path))
self._simulate_package_with_data_files(tmp_path, src_root)
expected = {
os.path.normpath(f"{src_root}/proj/file1.txt").replace(os.sep, "/"),
os.path.normpath(f"{src_root}/proj/nested/file2.txt").replace(os.sep, "/"),
}
_run_build(tmp_path)
sdist_files = get_sdist_members(next(tmp_path.glob("dist/*.tar.gz")))
print("~~~~~ sdist_members ~~~~~")
print('\n'.join(sdist_files))
assert sdist_files >= expected
wheel_files = get_wheel_members(next(tmp_path.glob("dist/*.whl")))
print("~~~~~ wheel_members ~~~~~")
print('\n'.join(wheel_files))
orig_files = {f.replace("src/", "").replace("lib/", "") for f in expected}
assert wheel_files >= orig_files
def test_compatible_with_numpy_configuration(tmp_path):
files = [
"dir1/__init__.py",
"dir2/__init__.py",
"file.py",
]
_populate_project_dir(tmp_path, files, {})
dist = Distribution({})
dist.configuration = object()
dist.set_defaults()
assert dist.py_modules is None
assert dist.packages is None
def test_name_discovery_doesnt_break_cli(tmpdir_cwd):
jaraco.path.build({"pkg.py": ""})
dist = Distribution({})
dist.script_args = ["--name"]
dist.set_defaults()
dist.parse_command_line() # <-- no exception should be raised here.
assert dist.get_name() == "pkg"
def test_preserve_explicit_name_with_dynamic_version(tmpdir_cwd, monkeypatch):
"""According to #3545 it seems that ``name`` discovery is running,
even when the project already explicitly sets it.
This seems to be related to parsing of dynamic versions (via ``attr`` directive),
which requires the auto-discovery of ``package_dir``.
"""
files = {
"src": {
"pkg": {"__init__.py": "__version__ = 42\n"},
},
"pyproject.toml": DALS(
"""
[project]
name = "myproj" # purposefully different from package name
dynamic = ["version"]
[tool.setuptools.dynamic]
version = {"attr" = "pkg.__version__"}
"""
),
}
jaraco.path.build(files)
dist = Distribution({})
orig_analyse_name = dist.set_defaults.analyse_name
def spy_analyse_name():
# We can check if name discovery was triggered by ensuring the original
# name remains instead of the package name.
orig_analyse_name()
assert dist.get_name() == "myproj"
monkeypatch.setattr(dist.set_defaults, "analyse_name", spy_analyse_name)
dist.parse_config_files()
assert dist.get_version() == "42"
assert set(dist.packages) == {"pkg"}
def _populate_project_dir(root, files, options):
# NOTE: Currently pypa/build will refuse to build the project if no
# `pyproject.toml` or `setup.py` is found. So it is impossible to do
# completely "config-less" projects.
basic = {
"setup.py": "import setuptools\nsetuptools.setup()",
"README.md": "# Example Package",
"LICENSE": "Copyright (c) 2018",
}
jaraco.path.build(basic, prefix=root)
_write_setupcfg(root, options)
paths = (root / f for f in files)
for path in paths:
path.parent.mkdir(exist_ok=True, parents=True)
path.touch()
def _write_setupcfg(root, options):
if not options:
print("~~~~~ **NO** setup.cfg ~~~~~")
return
setupcfg = ConfigParser()
setupcfg.add_section("options")
for key, value in options.items():
if key == "packages.find":
setupcfg.add_section(f"options.{key}")
setupcfg[f"options.{key}"].update(value)
elif isinstance(value, list):
setupcfg["options"][key] = ", ".join(value)
elif isinstance(value, dict):
str_value = "\n".join(f"\t{k} = {v}" for k, v in value.items())
setupcfg["options"][key] = "\n" + str_value
else:
setupcfg["options"][key] = str(value)
with open(root / "setup.cfg", "w", encoding="utf-8") as f:
setupcfg.write(f)
print("~~~~~ setup.cfg ~~~~~")
print((root / "setup.cfg").read_text(encoding="utf-8"))
def _run_build(path, *flags):
cmd = [sys.executable, "-m", "build", "--no-isolation", *flags, str(path)]
return run(cmd, env={'DISTUTILS_DEBUG': ''})
def _get_dist(dist_path, attrs):
root = "/".join(os.path.split(dist_path)) # POSIX-style
script = dist_path / 'setup.py'
if script.exists():
with Path(dist_path):
dist = cast(
Distribution,
distutils.core.run_setup("setup.py", {}, stop_after="init"),
)
else:
dist = Distribution(attrs)
dist.src_root = root
dist.script_name = "setup.py"
with Path(dist_path):
dist.parse_config_files()
dist.set_defaults()
return dist
def _run_sdist_programatically(dist_path, attrs):
dist = _get_dist(dist_path, attrs)
cmd = sdist(dist)
cmd.ensure_finalized()
assert cmd.distribution.packages or cmd.distribution.py_modules
with quiet(), Path(dist_path):
cmd.run()
return dist, cmd
|
TestWithPackageData
|
python
|
davidhalter__parso
|
parso/python/tree.py
|
{
"start": 22997,
"end": 23756
}
|
class ____(Flow):
type = 'with_stmt'
__slots__ = ()
def get_defined_names(self, include_setitem=False):
"""
        Returns a list of `Name` that the with statement defines. The
defined names are set after `as`.
"""
names = []
for with_item in self.children[1:-2:2]:
# Check with items for 'as' names.
if with_item.type == 'with_item':
names += _defined_names(with_item.children[2], include_setitem)
return names
def get_test_node_from_name(self, name):
node = name.search_ancestor("with_item")
if node is None:
raise ValueError('The name is not actually part of a with statement.')
return node.children[0]
|
WithStmt
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingFalsy1.py
|
{
"start": 4034,
"end": 4083
}
|
class ____(TypedDict):
d1: NotRequired[int]
|
TD2
|
python
|
zarr-developers__zarr-python
|
tests/test_dtype/test_npy/test_bool.py
|
{
"start": 157,
"end": 1064
}
|
class ____(BaseTestZDType):
test_cls = Bool
valid_dtype = (np.dtype(np.bool_),)
invalid_dtype = (
np.dtype(np.int8),
np.dtype(np.float64),
np.dtype(np.uint16),
)
valid_json_v2 = ({"name": "|b1", "object_codec_id": None},)
valid_json_v3 = ("bool",)
invalid_json_v2 = (
"|b1",
"bool",
"|f8",
)
invalid_json_v3 = (
"|b1",
"|f8",
{"name": "bool", "configuration": {"endianness": "little"}},
)
scalar_v2_params = ((Bool(), True), (Bool(), False))
scalar_v3_params = ((Bool(), True), (Bool(), False))
cast_value_params = (
(Bool(), "true", np.True_),
(Bool(), True, np.True_),
(Bool(), False, np.False_),
(Bool(), np.True_, np.True_),
(Bool(), np.False_, np.False_),
)
invalid_scalar_params = (None,)
item_size_params = (Bool(),)
|
TestBool
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/core/converter_test.py
|
{
"start": 1998,
"end": 4618
}
|
class ____(converter_testing.TestCase):
def test_get_definition_directive_basic(self):
directive_key = object
def f():
a = 1
return a
_, node, ctx = self.transform(f, (), include_ast=True)
symbol_a = node.body[1].value
defs, = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
defs.directives[directive_key] = {
'test_arg': parser.parse_expression('foo'),
'other_arg': parser.parse_expression('bar'),
}
c = TestConverter(ctx)
value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
None)
self.assertEqual(value.id, 'foo')
def test_get_definition_directive_default(self):
directive_key = object
def f():
a = 1
return a
_, node, ctx = self.transform(f, (), include_ast=True)
symbol_a = node.body[1].value
c = TestConverter(ctx)
value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
parser.parse_expression('default'))
self.assertEqual(value.id, 'default')
def test_get_definition_directive_multiple_consistent(self):
directive_key = object
def f():
a = 1
if a:
a = 2
return a
_, node, ctx = self.transform(f, (), include_ast=True)
symbol_a = node.body[2].value
defs = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
defs[0].directives[directive_key] = {
'test_arg': parser.parse_expression('foo'),
'other_arg': parser.parse_expression('bar'),
}
defs[1].directives[directive_key] = {
'test_arg': parser.parse_expression('foo'),
'other_arg': parser.parse_expression('baz'),
}
c = TestConverter(ctx)
value = c.get_definition_directive(symbol_a, directive_key, 'test_arg',
None)
self.assertEqual(value.id, 'foo')
def test_get_definition_directive_multiple_inconsistent(self):
directive_key = object
def f():
a = 1
if a:
a = 2
return a
_, node, ctx = self.transform(f, (), include_ast=True)
symbol_a = node.body[2].value
defs = anno.getanno(symbol_a, anno.Static.ORIG_DEFINITIONS)
defs[0].directives[directive_key] = {
'test_arg': parser.parse_expression('foo'),
}
defs[1].directives[directive_key] = {
'test_arg': parser.parse_expression('bar'),
}
c = TestConverter(ctx)
with self.assertRaises(ValueError):
c.get_definition_directive(symbol_a, directive_key, 'test_arg', None)
if __name__ == '__main__':
test.main()
|
ConverterBaseTest
|
python
|
scrapy__scrapy
|
tests/test_downloader_handlers.py
|
{
"start": 4227,
"end": 4928
}
|
class ____:
def setup_method(self):
crawler = get_crawler()
self.s3reqh = build_from_crawler(
S3DownloadHandler,
crawler,
httpdownloadhandler=HttpDownloadHandlerMock,
# anon=True, # implicit
)
self.download_request = self.s3reqh.download_request
self.spider = DefaultSpider()
def test_anon_request(self):
req = Request("s3://aws-publicdatasets/")
httpreq = self.download_request(req, self.spider)
assert hasattr(self.s3reqh, "anon")
assert self.s3reqh.anon
assert httpreq.url == "http://aws-publicdatasets.s3.amazonaws.com/"
@pytest.mark.requires_botocore
|
TestS3Anon
|
python
|
davidhalter__jedi
|
test/completion/usages.py
|
{
"start": 3631,
"end": 3965
}
|
class ____(object):
#< 4 (0,4), (23,18), (25,13)
base_class = 1
#< 4 (0,4),
class_var = 1
#< 8 (0,8),
def base_method(self):
#< 13 (0,13), (20,13)
self.base_var = 1
#< 13 (0,13),
self.instance_var = 1
#< 8 (0,8),
def just_a_method(self): pass
#< 20 (0,16), (-18,6)
|
Super
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/vertex_ai.py
|
{
"start": 5773,
"end": 5974
}
|
class ____(BaseGoogleLink):
"""Helper class for constructing Vertex AI Endpoint link."""
name = "Endpoint"
key = "endpoint_conf"
format_str = VERTEX_AI_ENDPOINT_LINK
|
VertexAIEndpointLink
|
python
|
huggingface__transformers
|
src/transformers/models/prompt_depth_anything/modeling_prompt_depth_anything.py
|
{
"start": 2780,
"end": 3973
}
|
class ____(nn.Module):
"""
ResidualConvUnit, pre-activate residual unit.
Args:
config (`[PromptDepthAnythingConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config):
super().__init__()
self.activation1 = nn.ReLU()
self.convolution1 = nn.Conv2d(
config.fusion_hidden_size,
config.fusion_hidden_size,
kernel_size=3,
stride=1,
padding=1,
bias=True,
)
self.activation2 = nn.ReLU()
self.convolution2 = nn.Conv2d(
config.fusion_hidden_size,
config.fusion_hidden_size,
kernel_size=3,
stride=1,
padding=1,
bias=True,
)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
residual = hidden_state
hidden_state = self.activation1(hidden_state)
hidden_state = self.convolution1(hidden_state)
hidden_state = self.activation2(hidden_state)
hidden_state = self.convolution2(hidden_state)
return hidden_state + residual
|
PromptDepthAnythingPreActResidualLayer
|
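The layer above reads only `fusion_hidden_size` from its config, so it can be smoke-tested on its own. A sketch that assumes the class is importable from the module path shown and that a bare namespace object is an acceptable stand-in for the real config:

```python
from types import SimpleNamespace

import torch
from transformers.models.prompt_depth_anything.modeling_prompt_depth_anything import (
    PromptDepthAnythingPreActResidualLayer,  # import path inferred from the record above
)

config = SimpleNamespace(fusion_hidden_size=8)  # hypothetical minimal config
layer = PromptDepthAnythingPreActResidualLayer(config)
x = torch.randn(1, 8, 32, 32)
print(layer(x).shape)  # torch.Size([1, 8, 32, 32]); 3x3, stride-1, pad-1 convs keep the spatial size
```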
python
|
ray-project__ray
|
python/ray/util/queue.py
|
{
"start": 196,
"end": 262
}
|
class ____(queue.Empty):
pass
@PublicAPI(stability="beta")
|
Empty
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_accounts_stream.py
|
{
"start": 514,
"end": 9287
}
|
class ____(BaseTest):
stream_name = "accounts"
def read_stream(
self,
stream_name: str,
sync_mode: SyncMode,
config: Dict[str, Any],
stream_data_file: Optional[str] = None,
state: Optional[Dict[str, Any]] = None,
expecting_exception: bool = False,
) -> EntrypointOutput:
catalog = CatalogBuilder().with_stream(stream_name, sync_mode).build()
return read_helper(
config=config,
catalog=catalog,
state=state,
expecting_exception=expecting_exception,
)
def test_read_accounts_tax_certificate_data(self):
http_mocker = self.http_mocker
http_mocker.post(
RequestBuilder(resource="User/Query").with_body('{"UserId": null}').build(),
HttpResponse(json.dumps(find_template("user_query", __file__)), 200),
)
http_mocker.post(
RequestBuilder(resource="Accounts/Search")
.with_body(
b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}'
)
.build(),
HttpResponse(json.dumps(find_template("accounts_search", __file__)), 200),
)
# Our account doesn't have configured Tax certificate.
output = self.read_stream(self.stream_name, SyncMode.full_refresh, self._config)
assert output.records[0].record.data["TaxCertificate"] == {
"Status": "Active",
"TaxCertificateBlobContainerName": "Test Container Name",
"TaxCertificates": [{"key": "test_key", "value": "test_value"}],
}
def test_read_linked_agencies_data(self):
"""
Test reading linked agencies data from the accounts stream.
We are manually putting the data in CustomerInfo field through a transformation
to keep it backward compatible with the SOAP response.
"""
http_mocker = self.http_mocker
http_mocker.post(
RequestBuilder(resource="User/Query").with_body('{"UserId": null}').build(),
HttpResponse(json.dumps(find_template("user_query", __file__)), 200),
)
http_mocker.post(
RequestBuilder(resource="Accounts/Search")
.with_body(
b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}'
)
.build(),
HttpResponse(json.dumps(find_template("accounts_search_with_linked_agencies", __file__)), 200),
)
# Our account doesn't have configured Tax certificate.
output = self.read_stream(self.stream_name, SyncMode.full_refresh, self._config)
assert output.records[0].record.data["LinkedAgencies"] == {
"CustomerInfo": [
{
"Id": 123456789,
"Name": "Ramp (MCC)",
}
]
}
def test_read_accounts_with_account_names_predicate(self):
"""
Test reading accounts data with account_names predicate in the config.
This tests the ListPartitionRouter that processes the account_names configuration.
"""
# Use a config with account_names predicate
config_with_account_names = {
**self._config,
"account_names": [{"operator": "Equals", "name": "Airbyte"}, {"operator": "Contains", "name": "demo"}],
}
http_mocker = self.http_mocker
http_mocker.post(
RequestBuilder(resource="User/Query").with_body('{"UserId": null}').build(),
HttpResponse(json.dumps(find_template("user_query", __file__)), 200),
)
# Mock the first Accounts/Search request with Equals operator for "Airbyte"
http_mocker.post(
RequestBuilder(resource="Accounts/Search")
.with_body(
b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}, {"Field": "AccountName", "Operator": "Equals", "Value": "Airbyte"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}'
)
.build(),
HttpResponse(json.dumps(find_template("accounts_search_equals_airbyte", __file__)), 200),
)
# Mock the second Accounts/Search request with Contains operator for "demo"
http_mocker.post(
RequestBuilder(resource="Accounts/Search")
.with_body(
b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}, {"Field": "AccountName", "Operator": "Contains", "Value": "demo"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}'
)
.build(),
HttpResponse(json.dumps(find_template("accounts_search_contains_demo", __file__)), 200),
)
# Read the accounts stream with account_names predicate
output = self.read_stream(self.stream_name, SyncMode.full_refresh, config_with_account_names)
assert len(output.records) == 2
account_names = [record.record.data["Name"] for record in output.records]
assert "Airbyte" in account_names
assert any("demo" in name.lower() for name in account_names)
def test_read_accounts_with_account_names_predicate_that_returns_duplicated_records(self):
"""
Test reading accounts data with account_names predicate in the config.
In this scenario two predicates cause to get the same record twice and we need to filter.
"""
# Use a config with account_names predicate
config_with_account_names = {
**self._config,
"account_names": [
{"operator": "Equals", "name": "Airbyte"},
{"operator": "Contains", "name": "demo"},
{"operator": "Contains", "name": "account"},
],
}
http_mocker = self.http_mocker
http_mocker.post(
RequestBuilder(resource="User/Query").with_body('{"UserId": null}').build(),
HttpResponse(json.dumps(find_template("user_query", __file__)), 200),
)
# Mock the first Accounts/Search request with Equals operator for "Airbyte"
http_mocker.post(
RequestBuilder(resource="Accounts/Search")
.with_body(
b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}, {"Field": "AccountName", "Operator": "Equals", "Value": "Airbyte"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}'
)
.build(),
HttpResponse(json.dumps(find_template("accounts_search_equals_airbyte", __file__)), 200),
)
# Mock the second Accounts/Search request with Contains operator for "demo"
http_mocker.post(
RequestBuilder(resource="Accounts/Search")
.with_body(
b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}, {"Field": "AccountName", "Operator": "Contains", "Value": "demo"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}'
)
.build(),
HttpResponse(json.dumps(find_template("accounts_search_contains_demo", __file__)), 200),
)
# Mock the second Accounts/Search request with Contains operator for "account"
http_mocker.post(
RequestBuilder(resource="Accounts/Search")
.with_body(
b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}, {"Field": "AccountName", "Operator": "Contains", "Value": "account"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}'
)
.build(),
# We return same response as the above mock to simulate the same record being returned twice.
HttpResponse(json.dumps(find_template("accounts_search_contains_demo", __file__)), 200),
)
# Read the accounts stream with account_names predicate
output = self.read_stream(self.stream_name, SyncMode.full_refresh, config_with_account_names)
assert len(output.records) == 2
account_names = [record.record.data["Name"] for record in output.records]
assert "Airbyte" in account_names
assert any("demo" in name.lower() for name in account_names)
assert any("account" in name.lower() for name in account_names)
|
TestAccountsStream
|
python
|
hynek__structlog
|
tests/processors/test_renderers.py
|
{
"start": 8660,
"end": 10666
}
|
class ____:
def test_renders_json(self, event_dict):
"""
Renders a predictable JSON string.
"""
rv = JSONRenderer(sort_keys=True)(None, None, event_dict)
assert (
r'{"a": "<A(\\o/)>", "b": [3, 4], "x": 7, '
r'"y": "test", "z": '
r"[1, 2]}"
) == rv
def test_FallbackEncoder_handles_ThreadLocalDictWrapped_dicts(self):
"""
Our fallback handling handles properly ThreadLocalDictWrapper values.
"""
with pytest.deprecated_call():
d = wrap_dict(dict)
s = json.dumps(d({"a": 42}), default=_json_fallback_handler)
assert '{"a": 42}' == s
def test_FallbackEncoder_falls_back(self):
"""
The fallback handler uses repr if it doesn't know the type.
"""
s = json.dumps(
{"date": datetime.date(1980, 3, 25)},
default=_json_fallback_handler,
)
assert '{"date": "datetime.date(1980, 3, 25)"}' == s
def test_serializer(self):
"""
A custom serializer is used if specified.
"""
jr = JSONRenderer(serializer=lambda obj, **kw: {"a": 42})
obj = object()
assert {"a": 42} == jr(None, None, obj)
def test_custom_fallback(self):
"""
A custom fallback handler can be used.
"""
jr = JSONRenderer(default=lambda x: repr(x)[::-1])
d = {"date": datetime.date(1980, 3, 25)}
assert '{"date": ")52 ,3 ,0891(etad.emitetad"}' == jr(None, None, d)
@pytest.mark.skipif(simplejson is None, reason="simplejson is missing.")
def test_simplejson(self, event_dict):
"""
Integration test with simplejson.
"""
jr = JSONRenderer(serializer=simplejson.dumps)
assert {
"a": "<A(\\o/)>",
"b": [3, 4],
"x": 7,
"y": "test",
"z": [1, 2],
} == json.loads(jr(None, None, event_dict))
|
TestJSONRenderer
|
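The `default=` fallback these tests rely on can be reproduced by calling the processor the same way the tests do. A small sketch:

```python
import datetime

from structlog.processors import JSONRenderer

renderer = JSONRenderer(default=str)  # fall back to str() for types json.dumps can't handle
print(renderer(None, None, {"event": "hi", "when": datetime.date(1980, 3, 25)}))
# {"event": "hi", "when": "1980-03-25"}
```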
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_kbkdf_vectors.py
|
{
"start": 292,
"end": 487
}
|
class ____:
test_kbkdfctr = generate_kbkdf_counter_mode_test(
load_nist_kbkdf_vectors,
os.path.join("KDF"),
["nist-800-108-KBKDF-CTR.txt"],
)
|
TestCounterKDFCounterMode
|
python
|
walkccc__LeetCode
|
solutions/2596. Check Knight Tour Configuration/2596.py
|
{
"start": 0,
"end": 821
}
|
class ____:
def checkValidGrid(self, grid: list[list[int]]) -> bool:
if grid[0][0] != 0:
return False
DIRS = ((1, 2), (2, 1), (2, -1), (1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2))
n = len(grid)
i = 0
j = 0
def nextGrid(i: int, j: int, target: int) -> tuple[int, int]:
"""
Returns (x, y), where grid[x][y] == target if (i, j) can reach target.
"""
for dx, dy in DIRS:
x = i + dx
y = j + dy
if x < 0 or x >= n or y < 0 or y >= n:
continue
if grid[x][y] == target:
return (x, y)
return (-1, -1)
for target in range(1, n * n):
x, y = nextGrid(i, j, target)
if x == -1 and y == -1:
return False
# Move (x, y) to (i, j).
i = x
j = y
return True
|
Solution
|
python
|
python-excel__xlrd
|
xlrd/sheet.py
|
{
"start": 1604,
"end": 95597
}
|
class ____(BaseObject):
"""
Contains the data for one worksheet.
In the cell access functions, ``rowx`` is a row index, counting from
zero, and ``colx`` is a column index, counting from zero.
Negative values for row/column indexes and slice positions are supported in
the expected fashion.
For information about cell types and cell values, refer to the documentation
of the :class:`Cell` class.
.. warning::
You don't instantiate this class yourself. You access :class:`Sheet`
objects via the :class:`~xlrd.book.Book` object that
was returned when you called :func:`xlrd.open_workbook`.
"""
#: Name of sheet.
name = ''
#: A reference to the :class:`~xlrd.book.Book` object to which this sheet
#: belongs.
#:
#: Example usage: ``some_sheet.book.datemode``
book = None
#: Number of rows in sheet. A row index is in ``range(thesheet.nrows)``.
nrows = 0
#: Nominal number of columns in sheet. It is one more than the maximum
#: column index found, ignoring trailing empty cells.
#: See also the ``ragged_rows`` parameter to :func:`~xlrd.open_workbook`
#: and :meth:`~xlrd.sheet.Sheet.row_len`.
ncols = 0
#: The map from a column index to a :class:`Colinfo` object. Often there is
#: an entry in ``COLINFO`` records for all column indexes in ``range(257)``.
#:
#: .. note::
#: xlrd ignores the entry for the non-existent
#: 257th column.
#:
#: On the other hand, there may be no entry for unused columns.
#:
#: .. versionadded:: 0.6.1
#:
#: Populated only if ``open_workbook(..., formatting_info=True)``
colinfo_map = {}
#: The map from a row index to a :class:`Rowinfo` object.
#:
    #: .. note::
#: It is possible to have missing entries -- at least one source of
#: XLS files doesn't bother writing ``ROW`` records.
#:
#: .. versionadded:: 0.6.1
#:
#: Populated only if ``open_workbook(..., formatting_info=True)``
rowinfo_map = {}
#: List of address ranges of cells containing column labels.
#: These are set up in Excel by Insert > Name > Labels > Columns.
#:
#: .. versionadded:: 0.6.0
#:
#: How to deconstruct the list:
#:
#: .. code-block:: python
#:
#: for crange in thesheet.col_label_ranges:
#: rlo, rhi, clo, chi = crange
#: for rx in xrange(rlo, rhi):
#: for cx in xrange(clo, chi):
#: print "Column label at (rowx=%d, colx=%d) is %r" \
    #:                 % (rx, cx, thesheet.cell_value(rx, cx))
col_label_ranges = []
#: List of address ranges of cells containing row labels.
#: For more details, see :attr:`col_label_ranges`.
#:
#: .. versionadded:: 0.6.0
row_label_ranges = []
#: List of address ranges of cells which have been merged.
#: These are set up in Excel by Format > Cells > Alignment, then ticking
#: the "Merge cells" box.
#:
#: .. note::
#: The upper limits are exclusive: i.e. ``[2, 3, 7, 9]`` only
#: spans two cells.
#:
#: .. note:: Extracted only if ``open_workbook(..., formatting_info=True)``
#:
#: .. versionadded:: 0.6.1
#:
#: How to deconstruct the list:
#:
#: .. code-block:: python
#:
#: for crange in thesheet.merged_cells:
#: rlo, rhi, clo, chi = crange
#: for rowx in xrange(rlo, rhi):
#: for colx in xrange(clo, chi):
#: # cell (rlo, clo) (the top left one) will carry the data
#: # and formatting info; the remainder will be recorded as
#: # blank cells, but a renderer will apply the formatting info
#: # for the top left cell (e.g. border, pattern) to all cells in
#: # the range.
merged_cells = []
#: Mapping of ``(rowx, colx)`` to list of ``(offset, font_index)`` tuples.
#: The offset defines where in the string the font begins to be used.
#: Offsets are expected to be in ascending order.
#: If the first offset is not zero, the meaning is that the cell's ``XF``'s
#: font should be used from offset 0.
#:
#: This is a sparse mapping. There is no entry for cells that are not
#: formatted with rich text.
#:
#: How to use:
#:
#: .. code-block:: python
#:
#: runlist = thesheet.rich_text_runlist_map.get((rowx, colx))
#: if runlist:
#: for offset, font_index in runlist:
#: # do work here.
#: pass
#:
#: .. versionadded:: 0.7.2
#:
#: Populated only if ``open_workbook(..., formatting_info=True)``
rich_text_runlist_map = {}
#: Default column width from ``DEFCOLWIDTH`` record, else ``None``.
#: From the OOo docs:
#:
#: Column width in characters, using the width of the zero character
#: from default font (first FONT record in the file). Excel adds some
#: extra space to the default width, depending on the default font and
#: default font size. The algorithm how to exactly calculate the resulting
#: column width is not known.
#: Example: The default width of 8 set in this record results in a column
#: width of 8.43 using Arial font with a size of 10 points.
#:
#: For the default hierarchy, refer to the :class:`Colinfo` class.
#:
#: .. versionadded:: 0.6.1
defcolwidth = None
#: Default column width from ``STANDARDWIDTH`` record, else ``None``.
#:
#: From the OOo docs:
#:
#: Default width of the columns in 1/256 of the width of the zero
#: character, using default font (first FONT record in the file).
#:
#: For the default hierarchy, refer to the :class:`Colinfo` class.
#:
#: .. versionadded:: 0.6.1
standardwidth = None
#: Default value to be used for a row if there is
#: no ``ROW`` record for that row.
#: From the *optional* ``DEFAULTROWHEIGHT`` record.
default_row_height = None
#: Default value to be used for a row if there is
#: no ``ROW`` record for that row.
#: From the *optional* ``DEFAULTROWHEIGHT`` record.
default_row_height_mismatch = None
#: Default value to be used for a row if there is
#: no ``ROW`` record for that row.
#: From the *optional* ``DEFAULTROWHEIGHT`` record.
default_row_hidden = None
#: Default value to be used for a row if there is
#: no ``ROW`` record for that row.
#: From the *optional* ``DEFAULTROWHEIGHT`` record.
default_additional_space_above = None
#: Default value to be used for a row if there is
#: no ``ROW`` record for that row.
#: From the *optional* ``DEFAULTROWHEIGHT`` record.
default_additional_space_below = None
#: Visibility of the sheet:
#: ::
#:
#: 0 = visible
#: 1 = hidden (can be unhidden by user -- Format -> Sheet -> Unhide)
#: 2 = "very hidden" (can be unhidden only by VBA macro).
visibility = 0
#: A 256-element tuple corresponding to the contents of the GCW record for
#: this sheet. If no such record, treat as all bits zero.
#: Applies to BIFF4-7 only. See docs of the :class:`Colinfo` class for
#: discussion.
gcw = (0, ) * 256
#: A list of :class:`Hyperlink` objects corresponding to ``HLINK`` records
#: found in the worksheet.
#:
#: .. versionadded:: 0.7.2
hyperlink_list = []
#: A sparse mapping from ``(rowx, colx)`` to an item in
#: :attr:`~xlrd.sheet.Sheet.hyperlink_list`.
#: Cells not covered by a hyperlink are not mapped.
#: It is possible using the Excel UI to set up a hyperlink that
#: covers a larger-than-1x1 rectangle of cells.
#: Hyperlink rectangles may overlap (Excel doesn't check).
#: When a multiply-covered cell is clicked on, the hyperlink that is
#: activated
#: (and the one that is mapped here) is the last in
#: :attr:`~xlrd.sheet.Sheet.hyperlink_list`.
#:
#: .. versionadded:: 0.7.2
hyperlink_map = {}
#: A sparse mapping from ``(rowx, colx)`` to a :class:`Note` object.
#: Cells not containing a note ("comment") are not mapped.
#:
#: .. versionadded:: 0.7.2
cell_note_map = {}
#: Number of columns in left pane (frozen panes; for split panes, see
#: comments in code)
vert_split_pos = 0
#: Number of rows in top pane (frozen panes; for split panes, see comments
#: in code)
horz_split_pos = 0
#: Index of first visible row in bottom frozen/split pane
horz_split_first_visible = 0
#: Index of first visible column in right frozen/split pane
vert_split_first_visible = 0
#: Frozen panes: ignore it. Split panes: explanation and diagrams in
#: OOo docs.
split_active_pane = 0
#: Boolean specifying if a ``PANE`` record was present, ignore unless you're
#: ``xlutils.copy``
has_pane_record = 0
#: A list of the horizontal page breaks in this sheet.
#: Breaks are tuples in the form
#: ``(index of row after break, start col index, end col index)``.
#:
#: Populated only if ``open_workbook(..., formatting_info=True)``
#:
#: .. versionadded:: 0.7.2
horizontal_page_breaks = []
#: A list of the vertical page breaks in this sheet.
#: Breaks are tuples in the form
#: ``(index of col after break, start row index, end row index)``.
#:
#: Populated only if ``open_workbook(..., formatting_info=True)``
#:
#: .. versionadded:: 0.7.2
vertical_page_breaks = []
def __init__(self, book, position, name, number):
self.book = book
self.biff_version = book.biff_version
self._position = position
self.logfile = book.logfile
self.bt = array('B', [XL_CELL_EMPTY])
self.bf = array('h', [-1])
self.name = name
self.number = number
self.verbosity = book.verbosity
self.formatting_info = book.formatting_info
self.ragged_rows = book.ragged_rows
if self.ragged_rows:
self.put_cell = self.put_cell_ragged
else:
self.put_cell = self.put_cell_unragged
self._xf_index_to_xl_type_map = book._xf_index_to_xl_type_map
self.nrows = 0 # actual, including possibly empty cells
self.ncols = 0
self._maxdatarowx = -1 # highest rowx containing a non-empty cell
self._maxdatacolx = -1 # highest colx containing a non-empty cell
self._dimnrows = 0 # as per DIMENSIONS record
self._dimncols = 0
self._cell_values = []
self._cell_types = []
self._cell_xf_indexes = []
self.defcolwidth = None
self.standardwidth = None
self.default_row_height = None
self.default_row_height_mismatch = 0
self.default_row_hidden = 0
self.default_additional_space_above = 0
self.default_additional_space_below = 0
self.colinfo_map = {}
self.rowinfo_map = {}
self.col_label_ranges = []
self.row_label_ranges = []
self.merged_cells = []
self.rich_text_runlist_map = {}
self.horizontal_page_breaks = []
self.vertical_page_breaks = []
self._xf_index_stats = [0, 0, 0, 0]
self.visibility = book._sheet_visibility[number] # from BOUNDSHEET record
for attr, defval in _WINDOW2_options:
setattr(self, attr, defval)
self.first_visible_rowx = 0
self.first_visible_colx = 0
self.gridline_colour_index = 0x40
self.gridline_colour_rgb = None # pre-BIFF8
self.hyperlink_list = []
self.hyperlink_map = {}
self.cell_note_map = {}
# Values calculated by xlrd to predict the mag factors that
# will actually be used by Excel to display your worksheet.
# Pass these values to xlwt when writing XLS files.
# Warning 1: Behaviour of OOo Calc and Gnumeric has been observed to differ from Excel's.
# Warning 2: A value of zero means almost exactly what it says. Your sheet will be
# displayed as a very tiny speck on the screen. xlwt will reject attempts to set
# a mag_factor that is not (10 <= mag_factor <= 400).
self.cooked_page_break_preview_mag_factor = 60
self.cooked_normal_view_mag_factor = 100
# Values (if any) actually stored on the XLS file
self.cached_page_break_preview_mag_factor = 0 # default (60%), from WINDOW2 record
self.cached_normal_view_mag_factor = 0 # default (100%), from WINDOW2 record
self.scl_mag_factor = None # from SCL record
self._ixfe = None # BIFF2 only
self._cell_attr_to_xfx = {} # BIFF2.0 only
if self.biff_version >= 80:
self.utter_max_rows = 65536
else:
self.utter_max_rows = 16384
self.utter_max_cols = 256
self._first_full_rowx = -1
# self._put_cell_exceptions = 0
# self._put_cell_row_widenings = 0
# self._put_cell_rows_appended = 0
# self._put_cell_cells_appended = 0
def cell(self, rowx, colx):
"""
:class:`Cell` object in the given row and column.
"""
if self.formatting_info:
xfx = self.cell_xf_index(rowx, colx)
else:
xfx = None
return Cell(
self._cell_types[rowx][colx],
self._cell_values[rowx][colx],
xfx,
)
def cell_value(self, rowx, colx):
"Value of the cell in the given row and column."
return self._cell_values[rowx][colx]
def cell_type(self, rowx, colx):
"""
Type of the cell in the given row and column.
Refer to the documentation of the :class:`Cell` class.
"""
return self._cell_types[rowx][colx]
def cell_xf_index(self, rowx, colx):
"""
XF index of the cell in the given row and column.
This is an index into :attr:`~xlrd.book.Book.xf_list`.
.. versionadded:: 0.6.1
"""
self.req_fmt_info()
xfx = self._cell_xf_indexes[rowx][colx]
if xfx > -1:
self._xf_index_stats[0] += 1
return xfx
# Check for a row xf_index
try:
xfx = self.rowinfo_map[rowx].xf_index
if xfx > -1:
self._xf_index_stats[1] += 1
return xfx
except KeyError:
pass
# Check for a column xf_index
try:
xfx = self.colinfo_map[colx].xf_index
if xfx == -1: xfx = 15
self._xf_index_stats[2] += 1
return xfx
except KeyError:
# If all else fails, 15 is used as hardwired global default xf_index.
self._xf_index_stats[3] += 1
return 15
def row_len(self, rowx):
"""
Returns the effective number of cells in the given row. For use with
``open_workbook(ragged_rows=True)`` which is likely to produce rows
with fewer than :attr:`~Sheet.ncols` cells.
.. versionadded:: 0.7.2
"""
return len(self._cell_values[rowx])
def row(self, rowx):
"""
Returns a sequence of the :class:`Cell` objects in the given row.
"""
return [
self.cell(rowx, colx)
for colx in xrange(len(self._cell_values[rowx]))
]
def __getitem__(self, item):
"""
Takes either rowindex or (rowindex, colindex) as an index,
and returns either row or cell respectively.
"""
try:
rowix, colix = item
except TypeError:
# it's not a tuple (or of right size), let's try indexing as is
# if this is a problem, let this error propagate back
return self.row(item)
else:
return self.cell(rowix, colix)
def get_rows(self):
"Returns a generator for iterating through each row."
return (self.row(index) for index in range(self.nrows))
# makes `for row in sheet` natural and intuitive
__iter__ = get_rows
def row_types(self, rowx, start_colx=0, end_colx=None):
"""
Returns a slice of the types of the cells in the given row.
"""
if end_colx is None:
return self._cell_types[rowx][start_colx:]
return self._cell_types[rowx][start_colx:end_colx]
def row_values(self, rowx, start_colx=0, end_colx=None):
"""
Returns a slice of the values of the cells in the given row.
"""
if end_colx is None:
return self._cell_values[rowx][start_colx:]
return self._cell_values[rowx][start_colx:end_colx]
def row_slice(self, rowx, start_colx=0, end_colx=None):
"""
Returns a slice of the :class:`Cell` objects in the given row.
"""
nc = len(self._cell_values[rowx])
if start_colx < 0:
start_colx += nc
if start_colx < 0:
start_colx = 0
if end_colx is None or end_colx > nc:
end_colx = nc
elif end_colx < 0:
end_colx += nc
return [
self.cell(rowx, colx)
for colx in xrange(start_colx, end_colx)
]
def col_slice(self, colx, start_rowx=0, end_rowx=None):
"""
Returns a slice of the :class:`Cell` objects in the given column.
"""
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self.cell(rowx, colx)
for rowx in xrange(start_rowx, end_rowx)
]
def col_values(self, colx, start_rowx=0, end_rowx=None):
"""
Returns a slice of the values of the cells in the given column.
"""
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_values[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
def col_types(self, colx, start_rowx=0, end_rowx=None):
"""
Returns a slice of the types of the cells in the given column.
"""
nr = self.nrows
if start_rowx < 0:
start_rowx += nr
if start_rowx < 0:
start_rowx = 0
if end_rowx is None or end_rowx > nr:
end_rowx = nr
elif end_rowx < 0:
end_rowx += nr
return [
self._cell_types[rowx][colx]
for rowx in xrange(start_rowx, end_rowx)
]
col = col_slice
# === Following methods are used in building the worksheet.
# === They are not part of the API.
def tidy_dimensions(self):
if self.verbosity >= 3:
fprintf(
self.logfile,
"tidy_dimensions: nrows=%d ncols=%d \n",
self.nrows, self.ncols,
)
if 1 and self.merged_cells:
nr = nc = 0
umaxrows = self.utter_max_rows
umaxcols = self.utter_max_cols
for crange in self.merged_cells:
rlo, rhi, clo, chi = crange
if not (0 <= rlo < rhi <= umaxrows) or not (0 <= clo < chi <= umaxcols):
fprintf(self.logfile,
"*** WARNING: sheet #%d (%r), MERGEDCELLS bad range %r\n",
self.number, self.name, crange)
if rhi > nr: nr = rhi
if chi > nc: nc = chi
if nc > self.ncols:
self.ncols = nc
self._first_full_rowx = -2
if nr > self.nrows:
# we put one empty cell at (nr-1,0) to make sure
# we have the right number of rows. The ragged rows
# will sort out the rest if needed.
self.put_cell(nr-1, 0, XL_CELL_EMPTY, UNICODE_LITERAL(''), -1)
if (self.verbosity >= 1 and
(self.nrows != self._dimnrows or self.ncols != self._dimncols)):
fprintf(
self.logfile,
"NOTE *** sheet %d (%r): DIMENSIONS R,C = %d,%d should be %d,%d\n",
self.number,
self.name,
self._dimnrows,
self._dimncols,
self.nrows,
self.ncols,
)
if not self.ragged_rows:
# fix ragged rows
ncols = self.ncols
s_cell_types = self._cell_types
s_cell_values = self._cell_values
s_cell_xf_indexes = self._cell_xf_indexes
s_fmt_info = self.formatting_info
# for rowx in xrange(self.nrows):
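            # _first_full_rowx == -2 means the "rows from here on are already full
            # width" optimisation is off, so every row must be checked and padded.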
if self._first_full_rowx == -2:
ubound = self.nrows
else:
ubound = self._first_full_rowx
for rowx in xrange(ubound):
trow = s_cell_types[rowx]
rlen = len(trow)
nextra = ncols - rlen
if nextra > 0:
s_cell_values[rowx][rlen:] = [UNICODE_LITERAL('')] * nextra
trow[rlen:] = self.bt * nextra
if s_fmt_info:
s_cell_xf_indexes[rowx][rlen:] = self.bf * nextra
def put_cell_ragged(self, rowx, colx, ctype, value, xf_index):
if ctype is None:
# we have a number, so look up the cell type
ctype = self._xf_index_to_xl_type_map[xf_index]
assert 0 <= colx < self.utter_max_cols
assert 0 <= rowx < self.utter_max_rows
fmt_info = self.formatting_info
try:
nr = rowx + 1
if self.nrows < nr:
scta = self._cell_types.append
scva = self._cell_values.append
scxa = self._cell_xf_indexes.append
bt = self.bt
bf = self.bf
for _unused in xrange(self.nrows, nr):
scta(bt * 0)
scva([])
if fmt_info:
scxa(bf * 0)
self.nrows = nr
types_row = self._cell_types[rowx]
values_row = self._cell_values[rowx]
if fmt_info:
fmt_row = self._cell_xf_indexes[rowx]
ltr = len(types_row)
if colx >= self.ncols:
self.ncols = colx + 1
num_empty = colx - ltr
if not num_empty:
# most common case: colx == previous colx + 1
# self._put_cell_cells_appended += 1
types_row.append(ctype)
values_row.append(value)
if fmt_info:
fmt_row.append(xf_index)
return
if num_empty > 0:
num_empty += 1
# self._put_cell_row_widenings += 1
# types_row.extend(self.bt * num_empty)
# values_row.extend([UNICODE_LITERAL('')] * num_empty)
# if fmt_info:
# fmt_row.extend(self.bf * num_empty)
types_row[ltr:] = self.bt * num_empty
values_row[ltr:] = [UNICODE_LITERAL('')] * num_empty
if fmt_info:
fmt_row[ltr:] = self.bf * num_empty
types_row[colx] = ctype
values_row[colx] = value
if fmt_info:
fmt_row[colx] = xf_index
except:
print("put_cell", rowx, colx, file=self.logfile)
raise
def put_cell_unragged(self, rowx, colx, ctype, value, xf_index):
if ctype is None:
# we have a number, so look up the cell type
ctype = self._xf_index_to_xl_type_map[xf_index]
# assert 0 <= colx < self.utter_max_cols
# assert 0 <= rowx < self.utter_max_rows
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
if self.formatting_info:
self._cell_xf_indexes[rowx][colx] = xf_index
except IndexError:
# print >> self.logfile, "put_cell extending", rowx, colx
# self.extend_cells(rowx+1, colx+1)
# self._put_cell_exceptions += 1
nr = rowx + 1
nc = colx + 1
assert 1 <= nc <= self.utter_max_cols
assert 1 <= nr <= self.utter_max_rows
if nc > self.ncols:
self.ncols = nc
# The row self._first_full_rowx and all subsequent rows
# are guaranteed to have length == self.ncols. Thus the
# "fix ragged rows" section of the tidy_dimensions method
# doesn't need to examine them.
if nr < self.nrows:
# cell data is not in non-descending row order *AND*
# self.ncols has been bumped up.
# This very rare case ruins this optimisation.
self._first_full_rowx = -2
elif rowx > self._first_full_rowx > -2:
self._first_full_rowx = rowx
if nr <= self.nrows:
# New cell is in an existing row, so extend that row (if necessary).
# Note that nr < self.nrows means that the cell data
# is not in ascending row order!!
trow = self._cell_types[rowx]
nextra = self.ncols - len(trow)
if nextra > 0:
# self._put_cell_row_widenings += 1
trow.extend(self.bt * nextra)
if self.formatting_info:
self._cell_xf_indexes[rowx].extend(self.bf * nextra)
self._cell_values[rowx].extend([UNICODE_LITERAL('')] * nextra)
else:
scta = self._cell_types.append
scva = self._cell_values.append
scxa = self._cell_xf_indexes.append
fmt_info = self.formatting_info
nc = self.ncols
bt = self.bt
bf = self.bf
for _unused in xrange(self.nrows, nr):
# self._put_cell_rows_appended += 1
scta(bt * nc)
scva([UNICODE_LITERAL('')] * nc)
if fmt_info:
scxa(bf * nc)
self.nrows = nr
# === end of code from extend_cells()
try:
self._cell_types[rowx][colx] = ctype
self._cell_values[rowx][colx] = value
if self.formatting_info:
self._cell_xf_indexes[rowx][colx] = xf_index
except:
print("put_cell", rowx, colx, file=self.logfile)
raise
except:
print("put_cell", rowx, colx, file=self.logfile)
raise
# === Methods after this line neither know nor care about how cells are stored.
def read(self, bk):
global rc_stats
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
blah_rows = DEBUG or self.verbosity >= 4
blah_formulas = 0 and blah
r1c1 = 0
oldpos = bk._position
bk._position = self._position
XL_SHRFMLA_ETC_ETC = (
XL_SHRFMLA, XL_ARRAY, XL_TABLEOP, XL_TABLEOP2,
XL_ARRAY2, XL_TABLEOP_B2,
)
self_put_cell = self.put_cell
local_unpack = unpack
bk_get_record_parts = bk.get_record_parts
bv = self.biff_version
fmt_info = self.formatting_info
do_sst_rich_text = fmt_info and bk._rich_text_runlist_map
rowinfo_sharing_dict = {}
txos = {}
eof_found = 0
while 1:
# if DEBUG: print "SHEET.READ: about to read from position %d" % bk._position
rc, data_len, data = bk_get_record_parts()
# if rc in rc_stats:
# rc_stats[rc] += 1
# else:
# rc_stats[rc] = 1
# if DEBUG: print "SHEET.READ: op 0x%04x, %d bytes %r" % (rc, data_len, data)
if rc == XL_NUMBER:
# [:14] in following stmt ignores extraneous rubbish at end of record.
# Sample file testEON-8.xls supplied by Jan Kraus.
rowx, colx, xf_index, d = local_unpack('<HHHd', data[:14])
# if xf_index == 0:
# fprintf(self.logfile,
# "NUMBER: r=%d c=%d xfx=%d %f\n", rowx, colx, xf_index, d)
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_LABELSST:
rowx, colx, xf_index, sstindex = local_unpack('<HHHi', data)
# print "LABELSST", rowx, colx, sstindex, bk._sharedstrings[sstindex]
self_put_cell(rowx, colx, XL_CELL_TEXT, bk._sharedstrings[sstindex], xf_index)
if do_sst_rich_text:
runlist = bk._rich_text_runlist_map.get(sstindex)
if runlist:
self.rich_text_runlist_map[(rowx, colx)] = runlist
elif rc == XL_LABEL:
rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
if bv < BIFF_FIRST_UNICODE:
strg = unpack_string(data, 6, bk.encoding or bk.derive_encoding(), lenlen=2)
else:
strg = unpack_unicode(data, 6, lenlen=2)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
elif rc == XL_RSTRING:
rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
if bv < BIFF_FIRST_UNICODE:
strg, pos = unpack_string_update_pos(data, 6, bk.encoding or bk.derive_encoding(), lenlen=2)
nrt = BYTES_ORD(data[pos])
pos += 1
runlist = []
for _unused in xrange(nrt):
runlist.append(unpack('<BB', data[pos:pos+2]))
pos += 2
assert pos == len(data)
else:
strg, pos = unpack_unicode_update_pos(data, 6, lenlen=2)
nrt = unpack('<H', data[pos:pos+2])[0]
pos += 2
runlist = []
for _unused in xrange(nrt):
runlist.append(unpack('<HH', data[pos:pos+4]))
pos += 4
assert pos == len(data)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
self.rich_text_runlist_map[(rowx, colx)] = runlist
elif rc == XL_RK:
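                # An RK value packs a number into 4 bytes: bit 0 means "divide the
                # result by 100", bit 1 means the top 30 bits are a signed integer
                # (otherwise they are the high 30 bits of an IEEE double).
                # unpack_RK() reverses this encoding.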
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
d = unpack_RK(data[6:10])
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_MULRK:
mulrk_row, mulrk_first = local_unpack('<HH', data[0:4])
mulrk_last, = local_unpack('<H', data[-2:])
pos = 4
for colx in xrange(mulrk_first, mulrk_last+1):
xf_index, = local_unpack('<H', data[pos:pos+2])
d = unpack_RK(data[pos+2:pos+6])
pos += 6
self_put_cell(mulrk_row, colx, None, d, xf_index)
elif rc == XL_ROW:
# Version 0.6.0a3: ROW records are just not worth using (for memory allocation).
# Version 0.6.1: now used for formatting info.
if not fmt_info: continue
rowx, bits1, bits2 = local_unpack('<H4xH4xi', data[0:16])
if not(0 <= rowx < self.utter_max_rows):
print("*** NOTE: ROW record has row index %d; "
"should have 0 <= rowx < %d -- record ignored!"
% (rowx, self.utter_max_rows), file=self.logfile)
continue
key = (bits1, bits2)
r = rowinfo_sharing_dict.get(key)
if r is None:
rowinfo_sharing_dict[key] = r = Rowinfo()
# Using upkbits() is far too slow on a file
# with 30 sheets each with 10K rows :-(
# upkbits(r, bits1, (
# ( 0, 0x7FFF, 'height'),
# (15, 0x8000, 'has_default_height'),
# ))
# upkbits(r, bits2, (
# ( 0, 0x00000007, 'outline_level'),
# ( 4, 0x00000010, 'outline_group_starts_ends'),
# ( 5, 0x00000020, 'hidden'),
# ( 6, 0x00000040, 'height_mismatch'),
# ( 7, 0x00000080, 'has_default_xf_index'),
# (16, 0x0FFF0000, 'xf_index'),
# (28, 0x10000000, 'additional_space_above'),
# (29, 0x20000000, 'additional_space_below'),
# ))
# So:
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.outline_level = bits2 & 7
r.outline_group_starts_ends = (bits2 >> 4) & 1
r.hidden = (bits2 >> 5) & 1
r.height_mismatch = (bits2 >> 6) & 1
r.has_default_xf_index = (bits2 >> 7) & 1
r.xf_index = (bits2 >> 16) & 0xfff
r.additional_space_above = (bits2 >> 28) & 1
r.additional_space_below = (bits2 >> 29) & 1
if not r.has_default_xf_index:
r.xf_index = -1
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print('ROW', rowx, bits1, bits2, file=self.logfile)
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc in XL_FORMULA_OPCODES: # 06, 0206, 0406
# DEBUG = 1
# if DEBUG: print "FORMULA: rc: 0x%04x data: %r" % (rc, data)
if bv >= 50:
rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
elif bv >= 30:
rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
else: # BIFF2
rowx, colx, cell_attr, result_str, flags = local_unpack('<HH3s8sB', data[0:16])
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx)
if blah_formulas: # testing formula dumper
#### XXXX FIXME
fprintf(self.logfile, "FORMULA: rowx=%d colx=%d\n", rowx, colx)
fmlalen = local_unpack("<H", data[20:22])[0]
decompile_formula(bk, data[22:], fmlalen, FMLA_TYPE_CELL,
browx=rowx, bcolx=colx, blah=1, r1c1=r1c1)
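                # A FORMULA result ending in 0xFFFF is not a number: the first byte
                # then selects string (0), boolean (1), error (2) or empty string (3).
                # Otherwise the 8 result bytes are an IEEE double.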
if result_str[6:8] == b"\xFF\xFF":
first_byte = BYTES_ORD(result_str[0])
if first_byte == 0:
# need to read next record (STRING)
gotstring = 0
# if flags & 8:
if 1: # "flags & 8" applies only to SHRFMLA
# actually there's an optional SHRFMLA or ARRAY etc record to skip over
rc2, data2_len, data2 = bk.get_record_parts()
if rc2 == XL_STRING or rc2 == XL_STRING_B2:
gotstring = 1
elif rc2 == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data2[:14])
if blah_formulas:
fprintf(self.logfile, "ARRAY: %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, array_flags)
# dump_formula(bk, data2[14:], tokslen, bv, reldelta=0, blah=1)
elif rc2 == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data2[:10])
if blah_formulas:
fprintf(self.logfile, "SHRFMLA (sub): %d %d %d %d %d\n",
row1x, rownx, col1x, colnx, nfmlas)
decompile_formula(bk, data2[10:], tokslen, FMLA_TYPE_SHARED,
blah=1, browx=rowx, bcolx=colx, r1c1=r1c1)
elif rc2 not in XL_SHRFMLA_ETC_ETC:
raise XLRDError(
"Expected SHRFMLA, ARRAY, TABLEOP* or STRING record; found 0x%04x" % rc2)
# if DEBUG: print "gotstring:", gotstring
# now for the STRING record
if not gotstring:
rc2, _unused_len, data2 = bk.get_record_parts()
if rc2 not in (XL_STRING, XL_STRING_B2):
raise XLRDError("Expected STRING record; found 0x%04x" % rc2)
# if DEBUG: print "STRING: data=%r BIFF=%d cp=%d" % (data2, self.biff_version, bk.encoding)
strg = self.string_record_contents(data2)
self.put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
# if DEBUG: print "FORMULA strg %r" % strg
elif first_byte == 1:
# boolean formula result
value = BYTES_ORD(result_str[2])
self_put_cell(rowx, colx, XL_CELL_BOOLEAN, value, xf_index)
elif first_byte == 2:
# Error in cell
value = BYTES_ORD(result_str[2])
self_put_cell(rowx, colx, XL_CELL_ERROR, value, xf_index)
elif first_byte == 3:
# empty ... i.e. empty (zero-length) string, NOT an empty cell.
self_put_cell(rowx, colx, XL_CELL_TEXT, "", xf_index)
else:
raise XLRDError("unexpected special case (0x%02x) in FORMULA" % first_byte)
else:
# it is a number
d = local_unpack('<d', result_str)[0]
self_put_cell(rowx, colx, None, d, xf_index)
elif rc == XL_BOOLERR:
rowx, colx, xf_index, value, is_err = local_unpack('<HHHBB', data[:8])
# Note OOo Calc 2.0 writes 9-byte BOOLERR records.
# OOo docs say 8. Excel writes 8.
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR", rowx, colx, xf_index, value, is_err
self_put_cell(rowx, colx, cellty, value, xf_index)
elif rc == XL_COLINFO:
if not fmt_info: continue
c = Colinfo()
first_colx, last_colx, c.width, c.xf_index, flags \
= local_unpack("<HHHHH", data[:10])
#### Colinfo.width is denominated in 256ths of a character,
#### *not* in characters.
if not(0 <= first_colx <= last_colx <= 256):
# Note: 256 instead of 255 is a common mistake.
# We silently ignore the non-existing 257th column in that case.
print("*** NOTE: COLINFO record has first col index %d, last %d; "
"should have 0 <= first <= last <= 255 -- record ignored!"
% (first_colx, last_colx), file=self.logfile)
del c
continue
upkbits(c, flags, (
( 0, 0x0001, 'hidden'),
( 1, 0x0002, 'bit1_flag'),
# *ALL* colinfos created by Excel in "default" cases are 0x0002!!
# Maybe it's "locked" by analogy with XFProtection data.
( 8, 0x0700, 'outline_level'),
(12, 0x1000, 'collapsed'),
))
for colx in xrange(first_colx, last_colx+1):
if colx > 255: break # Excel does 0 to 256 inclusive
self.colinfo_map[colx] = c
if 0:
fprintf(self.logfile,
"**COL %d %d %d\n",
self.number, colx, c.xf_index)
if blah:
fprintf(
self.logfile,
"COLINFO sheet #%d cols %d-%d: wid=%d xf_index=%d flags=0x%04x\n",
self.number, first_colx, last_colx, c.width, c.xf_index, flags,
)
c.dump(self.logfile, header='===')
elif rc == XL_DEFCOLWIDTH:
self.defcolwidth, = local_unpack("<H", data[:2])
if 0: print('DEFCOLWIDTH', self.defcolwidth, file=self.logfile)
elif rc == XL_STANDARDWIDTH:
if data_len != 2:
print('*** ERROR *** STANDARDWIDTH', data_len, repr(data), file=self.logfile)
self.standardwidth, = local_unpack("<H", data[:2])
if 0: print('STANDARDWIDTH', self.standardwidth, file=self.logfile)
elif rc == XL_GCW:
if not fmt_info: continue # useless w/o COLINFO
assert data_len == 34
assert data[0:2] == b"\x20\x00"
iguff = unpack("<8i", data[2:34])
gcw = []
for bits in iguff:
for j in xrange(32):
gcw.append(bits & 1)
bits >>= 1
self.gcw = tuple(gcw)
if 0:
showgcw = "".join(map(lambda x: "F "[x], gcw)).rstrip().replace(' ', '.')
print("GCW:", showgcw, file=self.logfile)
elif rc == XL_BLANK:
if not fmt_info: continue
rowx, colx, xf_index = local_unpack('<HHH', data[:6])
# if 0: print >> self.logfile, "BLANK", rowx, colx, xf_index
self_put_cell(rowx, colx, XL_CELL_BLANK, '', xf_index)
elif rc == XL_MULBLANK: # 00BE
if not fmt_info: continue
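                # A MULBLANK record is rowx, first colx, one xf index per blank cell,
                # then last colx -- all 2-byte fields, which is what the assertion
                # nitems == mul_last + 4 - mul_first checks.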
nitems = data_len >> 1
result = local_unpack("<%dH" % nitems, data)
rowx, mul_first = result[:2]
mul_last = result[-1]
# print >> self.logfile, "MULBLANK", rowx, mul_first, mul_last, data_len, nitems, mul_last + 4 - mul_first
assert nitems == mul_last + 4 - mul_first
pos = 2
for colx in xrange(mul_first, mul_last + 1):
self_put_cell(rowx, colx, XL_CELL_BLANK, '', result[pos])
pos += 1
elif rc == XL_DIMENSION or rc == XL_DIMENSION2:
if data_len == 0:
# Four zero bytes after some other record. See github issue 64.
continue
# if data_len == 10:
# Was crashing on BIFF 4.0 file w/o the two trailing unused bytes.
# Reported by Ralph Heimburger.
if bv < 80:
dim_tuple = local_unpack('<HxxH', data[2:8])
else:
dim_tuple = local_unpack('<ixxH', data[4:12])
self.nrows, self.ncols = 0, 0
self._dimnrows, self._dimncols = dim_tuple
if bv in (21, 30, 40) and self.book.xf_list and not self.book._xf_epilogue_done:
self.book.xf_epilogue()
if blah:
fprintf(
self.logfile,
"sheet %d(%r) DIMENSIONS: ncols=%d nrows=%d\n",
self.number, self.name, self._dimncols, self._dimnrows
)
elif rc == XL_HLINK:
self.handle_hlink(data)
elif rc == XL_QUICKTIP:
self.handle_quicktip(data)
elif rc == XL_EOF:
DEBUG = 0
if DEBUG: print("SHEET.READ: EOF", file=self.logfile)
eof_found = 1
break
elif rc == XL_OBJ:
# handle SHEET-level objects; note there's a separate Book.handle_obj
saved_obj = self.handle_obj(data)
if saved_obj: saved_obj_id = saved_obj.id
else: saved_obj_id = None
elif rc == XL_MSO_DRAWING:
self.handle_msodrawingetc(rc, data_len, data)
elif rc == XL_TXO:
txo = self.handle_txo(data)
if txo and saved_obj_id:
txos[saved_obj_id] = txo
saved_obj_id = None
elif rc == XL_NOTE:
self.handle_note(data, txos)
elif rc == XL_FEAT11:
self.handle_feat11(data)
elif rc in bofcodes: ##### EMBEDDED BOF #####
version, boftype = local_unpack('<HH', data[0:4])
if boftype != 0x20: # embedded chart
print("*** Unexpected embedded BOF (0x%04x) at offset %d: version=0x%04x type=0x%04x"
% (rc, bk._position - data_len - 4, version, boftype), file=self.logfile)
while 1:
code, data_len, data = bk.get_record_parts()
if code == XL_EOF:
break
if DEBUG: print("---> found EOF", file=self.logfile)
elif rc == XL_COUNTRY:
bk.handle_country(data)
elif rc == XL_LABELRANGES:
pos = 0
pos = unpack_cell_range_address_list_update_pos(
self.row_label_ranges, data, pos, bv, addr_size=8,
)
pos = unpack_cell_range_address_list_update_pos(
self.col_label_ranges, data, pos, bv, addr_size=8,
)
assert pos == data_len
elif rc == XL_ARRAY:
row1x, rownx, col1x, colnx, array_flags, tokslen = \
local_unpack("<HHBBBxxxxxH", data[:14])
if blah_formulas:
print("ARRAY:", row1x, rownx, col1x, colnx, array_flags, file=self.logfile)
# dump_formula(bk, data[14:], tokslen, bv, reldelta=0, blah=1)
elif rc == XL_SHRFMLA:
row1x, rownx, col1x, colnx, nfmlas, tokslen = \
local_unpack("<HHBBxBH", data[:10])
if blah_formulas:
print("SHRFMLA (main):", row1x, rownx, col1x, colnx, nfmlas, file=self.logfile)
decompile_formula(bk, data[10:], tokslen, FMLA_TYPE_SHARED,
blah=1, browx=rowx, bcolx=colx, r1c1=r1c1)
elif rc == XL_CONDFMT:
if not fmt_info: continue
assert bv >= 80
num_CFs, needs_recalc, browx1, browx2, bcolx1, bcolx2 = \
unpack("<6H", data[0:12])
if self.verbosity >= 1:
fprintf(
self.logfile,
"\n*** WARNING: Ignoring CONDFMT (conditional formatting) record\n"
"*** in Sheet %d (%r).\n"
"*** %d CF record(s); needs_recalc_or_redraw = %d\n"
"*** Bounding box is %s\n",
self.number, self.name, num_CFs, needs_recalc,
rangename2d(browx1, browx2+1, bcolx1, bcolx2+1),
)
olist = [] # updated by the function
pos = unpack_cell_range_address_list_update_pos(
olist, data, 12, bv, addr_size=8)
# print >> self.logfile, repr(result), len(result)
if self.verbosity >= 1:
fprintf(
self.logfile,
"*** %d individual range(s):\n"
"*** %s\n",
len(olist),
", ".join(rangename2d(*coords) for coords in olist),
)
elif rc == XL_CF:
if not fmt_info: continue
cf_type, cmp_op, sz1, sz2, flags = unpack("<BBHHi", data[0:10])
font_block = (flags >> 26) & 1
bord_block = (flags >> 28) & 1
patt_block = (flags >> 29) & 1
if self.verbosity >= 1:
fprintf(
self.logfile,
"\n*** WARNING: Ignoring CF (conditional formatting) sub-record.\n"
"*** cf_type=%d, cmp_op=%d, sz1=%d, sz2=%d, flags=0x%08x\n"
"*** optional data blocks: font=%d, border=%d, pattern=%d\n",
cf_type, cmp_op, sz1, sz2, flags,
font_block, bord_block, patt_block,
)
# hex_char_dump(data, 0, data_len, fout=self.logfile)
pos = 12
if font_block:
(font_height, font_options, weight, escapement, underline,
font_colour_index, two_bits, font_esc, font_underl) = unpack("<64x i i H H B 3x i 4x i i i 18x", data[pos:pos+118])
font_style = (two_bits > 1) & 1
posture = (font_options > 1) & 1
font_canc = (two_bits > 7) & 1
cancellation = (font_options > 7) & 1
if self.verbosity >= 1:
fprintf(
self.logfile,
"*** Font info: height=%d, weight=%d, escapement=%d,\n"
"*** underline=%d, colour_index=%d, esc=%d, underl=%d,\n"
"*** style=%d, posture=%d, canc=%d, cancellation=%d\n",
font_height, weight, escapement, underline,
font_colour_index, font_esc, font_underl,
font_style, posture, font_canc, cancellation,
)
pos += 118
if bord_block:
pos += 8
if patt_block:
pos += 4
fmla1 = data[pos:pos+sz1]
pos += sz1
if blah and sz1:
fprintf(self.logfile, "*** formula 1:\n")
dump_formula(bk, fmla1, sz1, bv, reldelta=0, blah=1)
fmla2 = data[pos:pos+sz2]
pos += sz2
assert pos == data_len
if blah and sz2:
fprintf(self.logfile, "*** formula 2:\n")
dump_formula(bk, fmla2, sz2, bv, reldelta=0, blah=1)
elif rc == XL_DEFAULTROWHEIGHT:
if data_len == 4:
bits, self.default_row_height = unpack("<HH", data[:4])
elif data_len == 2:
self.default_row_height, = unpack("<H", data)
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is 2, "
"should be 4; assuming BIFF2 format\n")
else:
bits = 0
fprintf(self.logfile,
"*** WARNING: DEFAULTROWHEIGHT record len is %d, "
"should be 4; ignoring this record\n",
data_len)
self.default_row_height_mismatch = bits & 1
self.default_row_hidden = (bits >> 1) & 1
self.default_additional_space_above = (bits >> 2) & 1
self.default_additional_space_below = (bits >> 3) & 1
elif rc == XL_MERGEDCELLS:
if not fmt_info: continue
pos = unpack_cell_range_address_list_update_pos(
self.merged_cells, data, 0, bv, addr_size=8)
if blah:
fprintf(self.logfile,
"MERGEDCELLS: %d ranges\n", (pos - 2) // 8)
assert pos == data_len, \
"MERGEDCELLS: pos=%d data_len=%d" % (pos, data_len)
elif rc == XL_WINDOW2:
if bv >= 80 and data_len >= 14:
(
options,
self.first_visible_rowx, self.first_visible_colx,
self.gridline_colour_index,
self.cached_page_break_preview_mag_factor,
self.cached_normal_view_mag_factor
) = unpack("<HHHHxxHH", data[:14])
else:
assert bv >= 30 # BIFF3-7
(
options,
self.first_visible_rowx, self.first_visible_colx,
) = unpack("<HHH", data[:6])
self.gridline_colour_rgb = unpack("<BBB", data[6:9])
self.gridline_colour_index = nearest_colour_index(
self.book.colour_map, self.gridline_colour_rgb, debug=0)
# options -- Bit, Mask, Contents:
# 0 0001H 0 = Show formula results 1 = Show formulas
# 1 0002H 0 = Do not show grid lines 1 = Show grid lines
# 2 0004H 0 = Do not show sheet headers 1 = Show sheet headers
# 3 0008H 0 = Panes are not frozen 1 = Panes are frozen (freeze)
# 4 0010H 0 = Show zero values as empty cells 1 = Show zero values
# 5 0020H 0 = Manual grid line colour 1 = Automatic grid line colour
# 6 0040H 0 = Columns from left to right 1 = Columns from right to left
# 7 0080H 0 = Do not show outline symbols 1 = Show outline symbols
# 8 0100H 0 = Keep splits if pane freeze is removed 1 = Remove splits if pane freeze is removed
# 9 0200H 0 = Sheet not selected 1 = Sheet selected (BIFF5-BIFF8)
# 10 0400H 0 = Sheet not visible 1 = Sheet visible (BIFF5-BIFF8)
# 11 0800H 0 = Show in normal view 1 = Show in page break preview (BIFF8)
# The freeze flag specifies, if a following PANE record (6.71) describes unfrozen or frozen panes.
for attr, _unused_defval in _WINDOW2_options:
setattr(self, attr, options & 1)
options >>= 1
elif rc == XL_SCL:
num, den = unpack("<HH", data)
result = 0
if den:
result = (num * 100) // den
if not(10 <= result <= 400):
if DEBUG or self.verbosity >= 0:
print(
"WARNING *** SCL rcd sheet %d: should have 0.1 <= num/den <= 4; got %d/%d"
% (self.number, num, den),
file=self.logfile,
)
result = 100
self.scl_mag_factor = result
elif rc == XL_PANE:
(
self.vert_split_pos,
self.horz_split_pos,
self.horz_split_first_visible,
self.vert_split_first_visible,
self.split_active_pane,
) = unpack("<HHHHB", data[:9])
self.has_pane_record = 1
elif rc == XL_HORIZONTALPAGEBREAKS:
if not fmt_info: continue
num_breaks, = local_unpack("<H", data[:2])
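                # Each break entry is 2 bytes (row index only) before BIFF8 and
                # 6 bytes (row, start col, end col) in BIFF8, hence the size check.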
assert num_breaks * (2 + 4 * (bv >= 80)) + 2 == data_len
pos = 2
if bv < 80:
while pos < data_len:
self.horizontal_page_breaks.append((local_unpack("<H", data[pos:pos+2])[0], 0, 255))
pos += 2
else:
while pos < data_len:
self.horizontal_page_breaks.append(local_unpack("<HHH", data[pos:pos+6]))
pos += 6
elif rc == XL_VERTICALPAGEBREAKS:
if not fmt_info: continue
num_breaks, = local_unpack("<H", data[:2])
assert num_breaks * (2 + 4 * (bv >= 80)) + 2 == data_len
pos = 2
if bv < 80:
while pos < data_len:
self.vertical_page_breaks.append((local_unpack("<H", data[pos:pos+2])[0], 0, 65535))
pos += 2
else:
while pos < data_len:
self.vertical_page_breaks.append(local_unpack("<HHH", data[pos:pos+6]))
pos += 6
#### all of the following are for BIFF <= 4W
elif bv <= 45:
if rc == XL_FORMAT or rc == XL_FORMAT2:
bk.handle_format(data, rc)
elif rc == XL_FONT or rc == XL_FONT_B3B4:
bk.handle_font(data)
elif rc == XL_STYLE:
if not self.book._xf_epilogue_done:
self.book.xf_epilogue()
bk.handle_style(data)
elif rc == XL_PALETTE:
bk.handle_palette(data)
elif rc == XL_BUILTINFMTCOUNT:
bk.handle_builtinfmtcount(data)
elif rc == XL_XF4 or rc == XL_XF3 or rc == XL_XF2: #### N.B. not XL_XF
bk.handle_xf(data)
elif rc == XL_DATEMODE:
bk.handle_datemode(data)
elif rc == XL_CODEPAGE:
bk.handle_codepage(data)
elif rc == XL_FILEPASS:
bk.handle_filepass(data)
elif rc == XL_WRITEACCESS:
bk.handle_writeaccess(data)
elif rc == XL_IXFE:
self._ixfe = local_unpack('<H', data)[0]
elif rc == XL_NUMBER_B2:
rowx, colx, cell_attr, d = local_unpack('<HH3sd', data)
self_put_cell(rowx, colx, None, d, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_INTEGER:
rowx, colx, cell_attr, d = local_unpack('<HH3sH', data)
self_put_cell(rowx, colx, None, float(d), self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_LABEL_B2:
rowx, colx, cell_attr = local_unpack('<HH3s', data[0:7])
strg = unpack_string(data, 7, bk.encoding or bk.derive_encoding(), lenlen=1)
self_put_cell(rowx, colx, XL_CELL_TEXT, strg, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_BOOLERR_B2:
rowx, colx, cell_attr, value, is_err = local_unpack('<HH3sBB', data)
cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
# if DEBUG: print "XL_BOOLERR_B2", rowx, colx, cell_attr, value, is_err
self_put_cell(rowx, colx, cellty, value, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_BLANK_B2:
if not fmt_info: continue
rowx, colx, cell_attr = local_unpack('<HH3s', data[:7])
self_put_cell(rowx, colx, XL_CELL_BLANK, '', self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
elif rc == XL_EFONT:
bk.handle_efont(data)
elif rc == XL_ROW_B2:
if not fmt_info: continue
rowx, bits1, bits2 = local_unpack('<H4xH2xB', data[0:11])
if not(0 <= rowx < self.utter_max_rows):
print("*** NOTE: ROW_B2 record has row index %d; "
"should have 0 <= rowx < %d -- record ignored!"
% (rowx, self.utter_max_rows), file=self.logfile)
continue
if not (bits2 & 1): # has_default_xf_index is false
xf_index = -1
elif data_len == 18:
# Seems the XF index in the cell_attr is dodgy
xfx = local_unpack('<H', data[16:18])[0]
xf_index = self.fixed_BIFF2_xfindex(cell_attr=None, rowx=rowx, colx=-1, true_xfx=xfx)
else:
cell_attr = data[13:16]
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx=-1)
key = (bits1, bits2, xf_index)
r = rowinfo_sharing_dict.get(key)
if r is None:
rowinfo_sharing_dict[key] = r = Rowinfo()
r.height = bits1 & 0x7fff
r.has_default_height = (bits1 >> 15) & 1
r.has_default_xf_index = bits2 & 1
r.xf_index = xf_index
# r.outline_level = 0 # set in __init__
# r.outline_group_starts_ends = 0 # set in __init__
# r.hidden = 0 # set in __init__
# r.height_mismatch = 0 # set in __init__
# r.additional_space_above = 0 # set in __init__
# r.additional_space_below = 0 # set in __init__
self.rowinfo_map[rowx] = r
if 0 and r.xf_index > -1:
fprintf(self.logfile,
"**ROW %d %d %d\n",
self.number, rowx, r.xf_index)
if blah_rows:
print('ROW_B2', rowx, bits1, file=self.logfile)
r.dump(self.logfile,
header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
elif rc == XL_COLWIDTH: # BIFF2 only
if not fmt_info: continue
first_colx, last_colx, width\
= local_unpack("<BBH", data[:4])
if not(first_colx <= last_colx):
print("*** NOTE: COLWIDTH record has first col index %d, last %d; "
"should have first <= last -- record ignored!"
% (first_colx, last_colx), file=self.logfile)
continue
for colx in xrange(first_colx, last_colx+1):
if colx in self.colinfo_map:
c = self.colinfo_map[colx]
else:
c = Colinfo()
self.colinfo_map[colx] = c
c.width = width
if blah:
fprintf(
self.logfile,
"COLWIDTH sheet #%d cols %d-%d: wid=%d\n",
self.number, first_colx, last_colx, width,
)
elif rc == XL_COLUMNDEFAULT: # BIFF2 only
if not fmt_info: continue
first_colx, last_colx = local_unpack("<HH", data[:4])
#### Warning OOo docs wrong; first_colx <= colx < last_colx
if blah:
fprintf(
self.logfile,
"COLUMNDEFAULT sheet #%d cols in range(%d, %d)\n",
self.number, first_colx, last_colx,
)
if not(0 <= first_colx < last_colx <= 256):
print("*** NOTE: COLUMNDEFAULT record has first col index %d, last %d; "
"should have 0 <= first < last <= 256"
% (first_colx, last_colx), file=self.logfile)
last_colx = min(last_colx, 256)
for colx in xrange(first_colx, last_colx):
offset = 4 + 3 * (colx - first_colx)
cell_attr = data[offset:offset+3]
xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx=-1, colx=colx)
if colx in self.colinfo_map:
c = self.colinfo_map[colx]
else:
c = Colinfo()
self.colinfo_map[colx] = c
c.xf_index = xf_index
elif rc == XL_WINDOW2_B2: # BIFF 2 only
attr_names = ("show_formulas", "show_grid_lines", "show_sheet_headers",
"panes_are_frozen", "show_zero_values")
for attr, char in zip(attr_names, data[0:5]):
setattr(self, attr, int(char != b'\0'))
(
self.first_visible_rowx, self.first_visible_colx,
self.automatic_grid_line_colour,
) = unpack("<HHB", data[5:10])
self.gridline_colour_rgb = unpack("<BBB", data[10:13])
self.gridline_colour_index = nearest_colour_index(
self.book.colour_map, self.gridline_colour_rgb, debug=0)
else:
# if DEBUG: print "SHEET.READ: Unhandled record type %02x %d bytes %r" % (rc, data_len, data)
pass
if not eof_found:
raise XLRDError("Sheet %d (%r) missing EOF record"
% (self.number, self.name))
self.tidy_dimensions()
self.update_cooked_mag_factors()
bk._position = oldpos
return 1
def string_record_contents(self, data):
bv = self.biff_version
bk = self.book
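        # BIFF2 STRING records store the character count in 1 byte; BIFF3+ use 2 bytes.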
lenlen = (bv >= 30) + 1
nchars_expected = unpack("<" + "BH"[lenlen - 1], data[:lenlen])[0]
offset = lenlen
if bv < 80:
enc = bk.encoding or bk.derive_encoding()
nchars_found = 0
result = UNICODE_LITERAL("")
while 1:
if bv >= 80:
flag = BYTES_ORD(data[offset]) & 1
enc = ("latin_1", "utf_16_le")[flag]
offset += 1
chunk = unicode(data[offset:], enc)
result += chunk
nchars_found += len(chunk)
if nchars_found == nchars_expected:
return result
if nchars_found > nchars_expected:
msg = ("STRING/CONTINUE: expected %d chars, found %d"
% (nchars_expected, nchars_found))
raise XLRDError(msg)
rc, _unused_len, data = bk.get_record_parts()
if rc != XL_CONTINUE:
raise XLRDError(
"Expected CONTINUE record; found record-type 0x%04X" % rc)
offset = 0
def update_cooked_mag_factors(self):
# Cached values are used ONLY for the non-active view mode.
# When the user switches to the non-active view mode,
# if the cached value for that mode is not valid,
# Excel pops up a window which says:
# "The number must be between 10 and 400. Try again by entering a number in this range."
# When the user hits OK, it drops into the non-active view mode
# but uses the magn from the active mode.
# NOTE: definition of "valid" depends on mode ... see below
blah = DEBUG or self.verbosity > 0
if self.show_in_page_break_preview:
if self.scl_mag_factor is None: # no SCL record
self.cooked_page_break_preview_mag_factor = 100 # Yes, 100, not 60, NOT a typo
else:
self.cooked_page_break_preview_mag_factor = self.scl_mag_factor
zoom = self.cached_normal_view_mag_factor
if not (10 <= zoom <=400):
if blah:
print(
"WARNING *** WINDOW2 rcd sheet %d: Bad cached_normal_view_mag_factor: %d"
% (self.number, self.cached_normal_view_mag_factor),
file=self.logfile,
)
zoom = self.cooked_page_break_preview_mag_factor
self.cooked_normal_view_mag_factor = zoom
else:
# normal view mode
if self.scl_mag_factor is None: # no SCL record
self.cooked_normal_view_mag_factor = 100
else:
self.cooked_normal_view_mag_factor = self.scl_mag_factor
zoom = self.cached_page_break_preview_mag_factor
if not zoom:
# VALID, defaults to 60
zoom = 60
elif not (10 <= zoom <= 400):
if blah:
print(
"WARNING *** WINDOW2 rcd sheet %r: Bad cached_page_break_preview_mag_factor: %r"
% (self.number, self.cached_page_break_preview_mag_factor),
file=self.logfile,
)
zoom = self.cooked_normal_view_mag_factor
self.cooked_page_break_preview_mag_factor = zoom
def fixed_BIFF2_xfindex(self, cell_attr, rowx, colx, true_xfx=None):
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
if self.biff_version == 21:
if self.book.xf_list:
if true_xfx is not None:
xfx = true_xfx
else:
xfx = BYTES_ORD(cell_attr[0]) & 0x3F
if xfx == 0x3F:
if self._ixfe is None:
raise XLRDError("BIFF2 cell record has XF index 63 but no preceding IXFE record.")
xfx = self._ixfe
                        # The OOo docs could be read as saying that each cell record is
                        # preceded immediately by its own IXFE record. Empirical evidence
                        # is that (sensibly) an IXFE record applies to all following cell
                        # records until another IXFE comes along.
return xfx
# Have either Excel 2.0, or broken 2.1 w/o XF records -- same effect.
self.biff_version = self.book.biff_version = 20
#### check that XF slot in cell_attr is zero
xfx_slot = BYTES_ORD(cell_attr[0]) & 0x3F
assert xfx_slot == 0
xfx = self._cell_attr_to_xfx.get(cell_attr)
if xfx is not None:
return xfx
if blah:
fprintf(self.logfile, "New cell_attr %r at (%r, %r)\n", cell_attr, rowx, colx)
if not self.book.xf_list:
for xfx in xrange(16):
self.insert_new_BIFF20_xf(cell_attr=b"\x40\x00\x00", style=xfx < 15)
xfx = self.insert_new_BIFF20_xf(cell_attr=cell_attr)
return xfx
def insert_new_BIFF20_xf(self, cell_attr, style=0):
DEBUG = 0
blah = DEBUG or self.verbosity >= 2
book = self.book
xfx = len(book.xf_list)
xf = self.fake_XF_from_BIFF20_cell_attr(cell_attr, style)
xf.xf_index = xfx
book.xf_list.append(xf)
if blah:
xf.dump(self.logfile, header="=== Faked XF %d ===" % xfx, footer="======")
if xf.format_key not in book.format_map:
if xf.format_key:
msg = "ERROR *** XF[%d] unknown format key (%d, 0x%04x)\n"
fprintf(self.logfile, msg,
xf.xf_index, xf.format_key, xf.format_key)
fmt = Format(xf.format_key, FUN, UNICODE_LITERAL("General"))
book.format_map[xf.format_key] = fmt
book.format_list.append(fmt)
cellty_from_fmtty = {
FNU: XL_CELL_NUMBER,
FUN: XL_CELL_NUMBER,
FGE: XL_CELL_NUMBER,
FDT: XL_CELL_DATE,
FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
}
fmt = book.format_map[xf.format_key]
cellty = cellty_from_fmtty[fmt.type]
self._xf_index_to_xl_type_map[xf.xf_index] = cellty
self._cell_attr_to_xfx[cell_attr] = xfx
return xfx
def fake_XF_from_BIFF20_cell_attr(self, cell_attr, style=0):
from .formatting import XF, XFAlignment, XFBorder, XFBackground, XFProtection
xf = XF()
xf.alignment = XFAlignment()
xf.alignment.indent_level = 0
xf.alignment.shrink_to_fit = 0
xf.alignment.text_direction = 0
xf.border = XFBorder()
xf.border.diag_up = 0
xf.border.diag_down = 0
xf.border.diag_colour_index = 0
xf.border.diag_line_style = 0 # no line
xf.background = XFBackground()
xf.protection = XFProtection()
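        # The three BIFF2 cell attribute bytes hold: the XF index slot plus the
        # locked/hidden protection bits; the font index (top 2 bits) and format
        # key (low 6 bits); and the horizontal alignment, border and shading flags.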
(prot_bits, font_and_format, halign_etc) = unpack('<BBB', cell_attr)
xf.format_key = font_and_format & 0x3F
xf.font_index = (font_and_format & 0xC0) >> 6
upkbits(xf.protection, prot_bits, (
(6, 0x40, 'cell_locked'),
(7, 0x80, 'formula_hidden'),
))
xf.alignment.hor_align = halign_etc & 0x07
for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')):
if halign_etc & mask:
colour_index, line_style = 8, 1 # black, thin
else:
colour_index, line_style = 0, 0 # none, none
setattr(xf.border, side + '_colour_index', colour_index)
setattr(xf.border, side + '_line_style', line_style)
bg = xf.background
if halign_etc & 0x80:
bg.fill_pattern = 17
else:
bg.fill_pattern = 0
bg.background_colour_index = 9 # white
bg.pattern_colour_index = 8 # black
xf.parent_style_index = (0x0FFF, 0)[style]
xf.alignment.vert_align = 2 # bottom
xf.alignment.rotation = 0
attr_stems = [
'format',
'font',
'alignment',
'border',
'background',
'protection',
]
for attr_stem in attr_stems:
attr = "_" + attr_stem + "_flag"
setattr(xf, attr, 1)
return xf
def req_fmt_info(self):
if not self.formatting_info:
raise XLRDError("Feature requires open_workbook(..., formatting_info=True)")
def computed_column_width(self, colx):
"""
Determine column display width.
:param colx:
Index of the queried column, range 0 to 255.
Note that it is possible to find out the width that will be used to
display columns with no cell information e.g. column IV (colx=255).
:return:
The column width that will be used for displaying
the given column by Excel, in units of 1/256th of the width of a
standard character (the digit zero in the first font).
.. versionadded:: 0.6.1
"""
self.req_fmt_info()
if self.biff_version >= 80:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
if self.standardwidth is not None:
return self.standardwidth
elif self.biff_version >= 40:
if self.gcw[colx]:
if self.standardwidth is not None:
return self.standardwidth
else:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
elif self.biff_version == 30:
colinfo = self.colinfo_map.get(colx, None)
if colinfo is not None:
return colinfo.width
# All roads lead to Rome and the DEFCOLWIDTH ...
if self.defcolwidth is not None:
return self.defcolwidth * 256
return 8 * 256 # 8 is what Excel puts in a DEFCOLWIDTH record
def handle_hlink(self, data):
# DEBUG = 1
if DEBUG: print("\n=== hyperlink ===", file=self.logfile)
record_size = len(data)
h = Hyperlink()
h.frowx, h.lrowx, h.fcolx, h.lcolx, guid0, dummy, options = unpack('<HHHH16s4si', data[:32])
assert guid0 == b"\xD0\xC9\xEA\x79\xF9\xBA\xCE\x11\x8C\x82\x00\xAA\x00\x4B\xA9\x0B"
assert dummy == b"\x02\x00\x00\x00"
if DEBUG: print("options: %08X" % options, file=self.logfile)
offset = 32
def get_nul_terminated_unicode(buf, ofs):
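            # The stored length counts UTF-16 code units (2 bytes each) including
            # the trailing NUL, which the [:-1] slice strips off.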
nb = unpack('<L', buf[ofs:ofs+4])[0] * 2
ofs += 4
uc = unicode(buf[ofs:ofs+nb], 'UTF-16le')[:-1]
ofs += nb
return uc, ofs
if options & 0x14: # has a description
h.desc, offset = get_nul_terminated_unicode(data, offset)
if options & 0x80: # has a target
h.target, offset = get_nul_terminated_unicode(data, offset)
if (options & 1) and not (options & 0x100): # HasMoniker and not MonikerSavedAsString
# an OLEMoniker structure
clsid, = unpack('<16s', data[offset:offset + 16])
if DEBUG: fprintf(self.logfile, "clsid=%r\n", clsid)
offset += 16
if clsid == b"\xE0\xC9\xEA\x79\xF9\xBA\xCE\x11\x8C\x82\x00\xAA\x00\x4B\xA9\x0B":
# E0H C9H EAH 79H F9H BAH CEH 11H 8CH 82H 00H AAH 00H 4BH A9H 0BH
# URL Moniker
h.type = UNICODE_LITERAL('url')
nbytes = unpack('<L', data[offset:offset + 4])[0]
offset += 4
h.url_or_path = unicode(data[offset:offset + nbytes], 'UTF-16le')
if DEBUG: fprintf(self.logfile, "initial url=%r len=%d\n", h.url_or_path, len(h.url_or_path))
endpos = h.url_or_path.find('\x00')
if DEBUG: print("endpos=%d" % endpos, file=self.logfile)
h.url_or_path = h.url_or_path[:endpos]
true_nbytes = 2 * (endpos + 1)
offset += true_nbytes
extra_nbytes = nbytes - true_nbytes
extra_data = data[offset:offset + extra_nbytes]
offset += extra_nbytes
if DEBUG:
fprintf(
self.logfile,
"url=%r\nextra=%r\nnbytes=%d true_nbytes=%d extra_nbytes=%d\n",
h.url_or_path, extra_data, nbytes, true_nbytes, extra_nbytes,
)
assert extra_nbytes in (24, 0)
elif clsid == b"\x03\x03\x00\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46":
# file moniker
h.type = UNICODE_LITERAL('local file')
uplevels, nbytes = unpack("<Hi", data[offset:offset + 6])
offset += 6
shortpath = b"..\\" * uplevels + data[offset:offset + nbytes - 1] #### BYTES, not unicode
if DEBUG: fprintf(self.logfile, "uplevels=%d shortpath=%r\n", uplevels, shortpath)
offset += nbytes
offset += 24 # OOo: "unknown byte sequence"
# above is version 0xDEAD + 20 reserved zero bytes
sz = unpack('<i', data[offset:offset + 4])[0]
if DEBUG: print("sz=%d" % sz, file=self.logfile)
offset += 4
if sz:
xl = unpack('<i', data[offset:offset + 4])[0]
offset += 4
offset += 2 # "unknown byte sequence" MS: 0x0003
extended_path = unicode(data[offset:offset + xl], 'UTF-16le') # not zero-terminated
offset += xl
h.url_or_path = extended_path
else:
h.url_or_path = shortpath
#### MS KLUDGE WARNING ####
# The "shortpath" is bytes encoded in the **UNKNOWN** creator's "ANSI" encoding.
else:
fprintf(self.logfile, "*** unknown clsid %r\n", clsid)
elif options & 0x163 == 0x103: # UNC
h.type = UNICODE_LITERAL('unc')
h.url_or_path, offset = get_nul_terminated_unicode(data, offset)
elif options & 0x16B == 8:
h.type = UNICODE_LITERAL('workbook')
else:
h.type = UNICODE_LITERAL('unknown')
if options & 0x8: # has textmark
h.textmark, offset = get_nul_terminated_unicode(data, offset)
if DEBUG:
h.dump(header="... object dump ...")
print("offset=%d record_size=%d" % (offset, record_size))
extra_nbytes = record_size - offset
if extra_nbytes > 0:
fprintf(
self.logfile,
"*** WARNING: hyperlink at R%dC%d has %d extra data bytes: %s\n",
h.frowx + 1,
h.fcolx + 1,
extra_nbytes,
REPR(data[-extra_nbytes:]),
)
# Seen: b"\x00\x00" also b"A\x00", b"V\x00"
elif extra_nbytes < 0:
raise XLRDError("Bug or corrupt file, send copy of input file for debugging")
self.hyperlink_list.append(h)
for rowx in xrange(h.frowx, h.lrowx+1):
for colx in xrange(h.fcolx, h.lcolx+1):
self.hyperlink_map[rowx, colx] = h
def handle_quicktip(self, data):
rcx, frowx, lrowx, fcolx, lcolx = unpack('<5H', data[:10])
assert rcx == XL_QUICKTIP
assert self.hyperlink_list
h = self.hyperlink_list[-1]
assert (frowx, lrowx, fcolx, lcolx) == (h.frowx, h.lrowx, h.fcolx, h.lcolx)
assert data[-2:] == b'\x00\x00'
h.quicktip = unicode(data[10:-2], 'utf_16_le')
def handle_msodrawingetc(self, recid, data_len, data):
if not OBJ_MSO_DEBUG:
return
DEBUG = 1
if self.biff_version < 80:
return
o = MSODrawing()
pos = 0
while pos < data_len:
tmp, fbt, cb = unpack('<HHI', data[pos:pos+8])
ver = tmp & 0xF
inst = (tmp >> 4) & 0xFFF
if ver == 0xF:
ndb = 0 # container
else:
ndb = cb
if DEBUG:
hex_char_dump(data, pos, ndb + 8, base=0, fout=self.logfile)
fprintf(self.logfile,
"fbt:0x%04X inst:%d ver:0x%X cb:%d (0x%04X)\n",
fbt, inst, ver, cb, cb)
if fbt == 0xF010: # Client Anchor
assert ndb == 18
(o.anchor_unk,
o.anchor_colx_lo, o.anchor_rowx_lo,
o.anchor_colx_hi, o.anchor_rowx_hi) = unpack('<Hiiii', data[pos+8:pos+8+ndb])
elif fbt == 0xF011: # Client Data
# must be followed by an OBJ record
assert cb == 0
assert pos + 8 == data_len
else:
pass
pos += ndb + 8
else:
# didn't break out of while loop
assert pos == data_len
if DEBUG:
o.dump(self.logfile, header="=== MSODrawing ===", footer= " ")
def handle_obj(self, data):
if self.biff_version < 80:
return None
o = MSObj()
data_len = len(data)
pos = 0
if OBJ_MSO_DEBUG:
fprintf(self.logfile, "... OBJ record len=%d...\n", data_len)
while pos < data_len:
ft, cb = unpack('<HH', data[pos:pos+4])
if OBJ_MSO_DEBUG:
fprintf(self.logfile, "pos=%d ft=0x%04X cb=%d\n", pos, ft, cb)
hex_char_dump(data, pos, cb + 4, base=0, fout=self.logfile)
if pos == 0 and not (ft == 0x15 and cb == 18):
if self.verbosity:
fprintf(self.logfile, "*** WARNING Ignoring antique or corrupt OBJECT record\n")
return None
if ft == 0x15: # ftCmo ... s/b first
assert pos == 0
o.type, o.id, option_flags = unpack('<HHH', data[pos+4:pos+10])
upkbits(o, option_flags, (
( 0, 0x0001, 'locked'),
( 4, 0x0010, 'printable'),
( 8, 0x0100, 'autofilter'), # not documented in Excel 97 dev kit
( 9, 0x0200, 'scrollbar_flag'), # not documented in Excel 97 dev kit
(13, 0x2000, 'autofill'),
(14, 0x4000, 'autoline'),
))
elif ft == 0x00:
if data[pos:data_len] == b'\0' * (data_len - pos):
# ignore "optional reserved" data at end of record
break
msg = "Unexpected data at end of OBJECT record"
fprintf(self.logfile, "*** ERROR %s\n" % msg)
hex_char_dump(data, pos, data_len - pos, base=0, fout=self.logfile)
raise XLRDError(msg)
elif ft == 0x0C: # Scrollbar
values = unpack('<5H', data[pos+8:pos+18])
for value, tag in zip(values, ('value', 'min', 'max', 'inc', 'page')):
setattr(o, 'scrollbar_' + tag, value)
elif ft == 0x0D: # "Notes structure" [used for cell comments]
# not documented in Excel 97 dev kit
if OBJ_MSO_DEBUG: fprintf(self.logfile, "*** OBJ record has ft==0x0D 'notes' structure\n")
elif ft == 0x13: # list box data
if o.autofilter: # non standard exit. NOT documented
break
else:
pass
pos += cb + 4
else:
# didn't break out of while loop
pass
if OBJ_MSO_DEBUG:
o.dump(self.logfile, header="=== MSOBj ===", footer= " ")
return o
def handle_note(self, data, txos):
if OBJ_MSO_DEBUG:
fprintf(self.logfile, '... NOTE record ...\n')
hex_char_dump(data, 0, len(data), base=0, fout=self.logfile)
o = Note()
data_len = len(data)
if self.biff_version < 80:
o.rowx, o.colx, expected_bytes = unpack('<HHH', data[:6])
nb = len(data) - 6
assert nb <= expected_bytes
pieces = [data[6:]]
expected_bytes -= nb
while expected_bytes > 0:
rc2, data2_len, data2 = self.book.get_record_parts()
assert rc2 == XL_NOTE
dummy_rowx, nb = unpack('<H2xH', data2[:6])
assert dummy_rowx == 0xFFFF
assert nb == data2_len - 6
pieces.append(data2[6:])
expected_bytes -= nb
assert expected_bytes == 0
enc = self.book.encoding or self.book.derive_encoding()
o.text = unicode(b''.join(pieces), enc)
o.rich_text_runlist = [(0, 0)]
o.show = 0
o.row_hidden = 0
o.col_hidden = 0
o.author = UNICODE_LITERAL('')
o._object_id = None
self.cell_note_map[o.rowx, o.colx] = o
return
# Excel 8.0+
o.rowx, o.colx, option_flags, o._object_id = unpack('<4H', data[:8])
o.show = (option_flags >> 1) & 1
o.row_hidden = (option_flags >> 7) & 1
o.col_hidden = (option_flags >> 8) & 1
# XL97 dev kit book says NULL [sic] bytes padding between string count and string data
# to ensure that string is word-aligned. Appears to be nonsense.
o.author, endpos = unpack_unicode_update_pos(data, 8, lenlen=2)
# There is a random/undefined byte after the author string (not counted in the
# string length).
# Issue 4 on github: Google Spreadsheet doesn't write the undefined byte.
assert (data_len - endpos) in (0, 1)
if OBJ_MSO_DEBUG:
o.dump(self.logfile, header="=== Note ===", footer= " ")
txo = txos.get(o._object_id)
if txo:
o.text = txo.text
o.rich_text_runlist = txo.rich_text_runlist
self.cell_note_map[o.rowx, o.colx] = o
def handle_txo(self, data):
if self.biff_version < 80:
return
o = MSTxo()
fmt = '<HH6sHHH'
fmtsize = calcsize(fmt)
option_flags, o.rot, controlInfo, cchText, cbRuns, o.ifntEmpty = unpack(fmt, data[:fmtsize])
o.fmla = data[fmtsize:]
upkbits(o, option_flags, (
( 3, 0x000E, 'horz_align'),
( 6, 0x0070, 'vert_align'),
( 9, 0x0200, 'lock_text'),
(14, 0x4000, 'just_last'),
(15, 0x8000, 'secret_edit'),
))
totchars = 0
o.text = UNICODE_LITERAL('')
while totchars < cchText:
rc2, data2_len, data2 = self.book.get_record_parts()
assert rc2 == XL_CONTINUE
if OBJ_MSO_DEBUG:
hex_char_dump(data2, 0, data2_len, base=0, fout=self.logfile)
nb = BYTES_ORD(data2[0]) # 0 means latin1, 1 means utf_16_le
nchars = data2_len - 1
if nb:
assert nchars % 2 == 0
nchars //= 2
utext, endpos = unpack_unicode_update_pos(data2, 0, known_len=nchars)
assert endpos == data2_len
o.text += utext
totchars += nchars
o.rich_text_runlist = []
totruns = 0
while totruns < cbRuns: # counts of BYTES, not runs
rc3, data3_len, data3 = self.book.get_record_parts()
# print totruns, cbRuns, rc3, data3_len, repr(data3)
assert rc3 == XL_CONTINUE
assert data3_len % 8 == 0
for pos in xrange(0, data3_len, 8):
run = unpack('<HH4x', data3[pos:pos+8])
o.rich_text_runlist.append(run)
totruns += 8
# remove trailing entries that point to the end of the string
while o.rich_text_runlist and o.rich_text_runlist[-1][0] == cchText:
del o.rich_text_runlist[-1]
if OBJ_MSO_DEBUG:
o.dump(self.logfile, header="=== MSTxo ===", footer= " ")
print(o.rich_text_runlist, file=self.logfile)
return o
def handle_feat11(self, data):
if not OBJ_MSO_DEBUG:
return
# rt: Record type; this matches the BIFF rt in the first two bytes of the record; =0872h
# grbitFrt: FRT cell reference flag (see table below for details)
# Ref0: Range reference to a worksheet cell region if grbitFrt=1 (bitFrtRef). Otherwise blank.
# isf: Shared feature type index =5 for Table
# fHdr: =0 since this is for feat not feat header
# reserved0: Reserved for future use =0 for Table
# cref: Count of ref ranges this feature is on
# cbFeatData: Count of byte for the current feature data.
# reserved1: =0 currently not used
# Ref1: Repeat of Ref0. UNDOCUMENTED
rt, grbitFrt, Ref0, isf, fHdr, reserved0, cref, cbFeatData, reserved1, Ref1 = unpack('<HH8sHBiHiH8s', data[0:35])
assert reserved0 == 0
assert reserved1 == 0
assert isf == 5
assert rt == 0x872
assert fHdr == 0
assert Ref1 == Ref0
print(self.logfile, "FEAT11: grbitFrt=%d Ref0=%r cref=%d cbFeatData=%d\n", grbitFrt, Ref0, cref, cbFeatData)
# lt: Table data source type:
# =0 for Excel Worksheet Table =1 for read-write SharePoint linked List
# =2 for XML mapper Table =3 for Query Table
# idList: The ID of the Table (unique per worksheet)
# crwHeader: How many header/title rows the Table has at the top
# crwTotals: How many total rows the Table has at the bottom
# idFieldNext: Next id to try when assigning a unique id to a new field
# cbFSData: The size of the Fixed Data portion of the Table data structure.
# rupBuild: the rupBuild that generated the record
# unusedShort: UNUSED short that can be used later. The value is reserved during round-tripping.
# listFlags: Collection of bit flags: (see listFlags' bit setting table below for detail.)
# lPosStmCache: Table data stream position of cached data
# cbStmCache: Count of bytes of cached data
# cchStmCache: Count of characters of uncompressed cached data in the stream
# lem: Table edit mode (see List (Table) Editing Mode (lem) setting table below for details.)
# rgbHashParam: Hash value for SharePoint Table
# cchName: Count of characters in the Table name string rgbName
(lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData,
rupBuild, unusedShort, listFlags, lPosStmCache, cbStmCache,
cchStmCache, lem, rgbHashParam, cchName) = unpack('<iiiiiiHHiiiii16sH', data[35:35+66])
print("lt=%d idList=%d crwHeader=%d crwTotals=%d idFieldNext=%d cbFSData=%d\n"
"rupBuild=%d unusedShort=%d listFlags=%04X lPosStmCache=%d cbStmCache=%d\n"
"cchStmCache=%d lem=%d rgbHashParam=%r cchName=%d" % (
lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData,
rupBuild, unusedShort,listFlags, lPosStmCache, cbStmCache,
cchStmCache, lem, rgbHashParam, cchName), file=self.logfile)
def __repr__(self):
return "Sheet {:>2}:<{}>".format(self.number, self.name)
|
Sheet
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_settings.py
|
{
"start": 8997,
"end": 11362
}
|
class ____(TestCase):
"""Real nasty edge case here.
In #2160, if ``example`` is after ``given`` but before ``settings``,
it will be completely ignored.
If we set phases to only ``explicit``, the test case will never be called!
We have to run an assertion outside of the test case itself.
"""
@counts_calls
def call_target(self):
pass
@given(st.booleans())
@example(True)
@settings(phases=[Phase.explicit])
# counts_calls is not thread safe (modifying global f.calls attr)
@skipif_threading
def test_example_explicit(self, x):
self.call_target()
def tearDown(self):
# In #2160, this is 0.
assert self.call_target.calls == 1
def test_setattr_on_settings_singleton_is_error():
# https://github.com/pandas-dev/pandas/pull/22679#issuecomment-420750921
# Should be setting attributes on settings.default, not settings!
with pytest.raises(AttributeError):
settings.max_examples = 10
def test_deadline_given_none():
x = settings(deadline=None).deadline
assert x is None
def test_deadline_given_valid_int():
x = settings(deadline=1000).deadline
assert isinstance(x, datetime.timedelta)
assert x.days == 0
assert x.seconds == 1
assert x.microseconds == 0
def test_deadline_given_valid_float():
x = settings(deadline=2050.25).deadline
assert isinstance(x, datetime.timedelta)
assert x.days == 0
assert x.seconds == 2
assert x.microseconds == 50250
def test_deadline_given_valid_timedelta():
x = settings(deadline=datetime.timedelta(days=1, microseconds=15030000)).deadline
assert isinstance(x, datetime.timedelta)
assert x.days == 1
assert x.seconds == 15
assert x.microseconds == 30000
@pytest.mark.parametrize(
"x",
[
0,
-0.7,
-1,
86400000000000000.2,
datetime.timedelta(microseconds=-1),
datetime.timedelta(0),
],
)
def test_invalid_deadline(x):
with pytest.raises(InvalidArgument):
settings(deadline=x)
@pytest.mark.parametrize("value", ["always"])
def test_can_not_set_print_blob_to_non_print_settings(value):
with pytest.raises(InvalidArgument):
settings(print_blob=value)
settings_step_count = 1
@settings(stateful_step_count=settings_step_count)
|
TestGivenExampleSettingsExplicitCalled
|
python
|
walkccc__LeetCode
|
solutions/2847. Smallest Number With Given Digit Product/2847.py
|
{
"start": 0,
"end": 284
}
|
class ____:
def smallestNumber(self, n: int) -> str:
if n <= 9:
return str(n)
ans = []
for divisor in range(9, 1, -1):
while n % divisor == 0:
ans.append(str(divisor))
n //= divisor
return '-1' if n > 1 else ''.join(reversed(ans))
|
Solution
|
python
|
google__pytype
|
pytype/overlays/named_tuple.py
|
{
"start": 20276,
"end": 28195
}
|
class ____(abstract.InterpreterClass):
"""Named tuple classes."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Store the original properties, to output to pyi files.
self.props = None
self.generated_members = None
def instantiate(self, node, container=None):
# For all generic typevars T, add a type parameter alias between cls.T and
# path.to.module.cls.T
inst = super().instantiate(node, container)
for ival in inst.data:
cls = ival.cls
long = cls.full_name
for t in cls.template:
short = t.scope
param = t.name
ival.instance_type_parameters.add_alias(
f"{short}.{param}", f"{long}.{param}", lambda x, y, z: x or y
)
return inst
def _build_namedtuple(props, node, ctx):
"""Build an InterpreterClass representing the namedtuple."""
# TODO(mdemello): Fix this to support late types.
if props.fields and props.fields[0].typ:
field_types_union = abstract.Union([f.typ for f in props.fields], ctx)
else:
field_types_union = ctx.convert.unsolvable
members = {f.name: f.typ.instantiate(node) for f in props.fields}
# NOTE: We add the full list of private methods to all namedtuples.
# Technically collections.namedtuple has a smaller set.
# collections.namedtuple has: __dict__, __slots__ and _fields.
# typing.NamedTuple adds: _field_types, __annotations__ and _field_defaults.
# __slots__ and _fields are tuples containing the names of the fields.
slots = tuple(ctx.convert.build_string(node, f.name) for f in props.fields)
members["__slots__"] = ctx.convert.build_tuple(node, slots)
members["_fields"] = ctx.convert.build_tuple(node, slots)
odict = _DictBuilder(ctx)
# __dict__ and _field_defaults are both dicts of
# { field_name: field_type_instance }
# The field types may refer back to the class being built.
with ctx.allow_recursive_convert():
field_dict_cls = odict.make(field_types_union)
members["__dict__"] = field_dict_cls.instantiate(node)
members["_field_defaults"] = field_dict_cls.instantiate(node)
# _field_types and __annotations__ are both dicts of
# { field_name: field_type }
# Note that ctx.make_class will take care of adding the __annotations__
# member.
field_types_cls = odict.make(ctx.convert.type_type)
members["_field_types"] = field_types_cls.instantiate(node)
# __new__
# We set the bound on this TypeParameter later. This gives __new__ the
# signature: def __new__(cls: Type[_Tname], ...) -> _Tname, i.e. the same
# signature that visitor.CreateTypeParametersForSignatures would create.
# This allows subclasses of the NamedTuple to get the correct type from
# their constructors.
# The TypeParameter name is built from the class name and field names to avoid
# name clashes with other namedtuples.
cls_type_param_name = (
visitors.CreateTypeParametersForSignatures.PREFIX
+ escape.pack_namedtuple(props.name, [f.name for f in props.fields])
)
cls_type_param = abstract.TypeParameter(cls_type_param_name, ctx, bound=None)
cls_type = abstract.ParameterizedClass(
ctx.convert.type_type, {abstract_utils.T: cls_type_param}, ctx
)
params = [Param(f.name, f.typ) for f in props.fields]
# The parameter types may refer back to the class being built.
with ctx.allow_recursive_convert():
members["__new__"] = overlay_utils.make_method(
ctx,
node,
name="__new__",
self_param=Param("cls", cls_type),
params=params,
return_type=cls_type_param,
)
# __init__
members["__init__"] = overlay_utils.make_method(
ctx, node, name="__init__", varargs=Param("args"), kwargs=Param("kwargs")
)
heterogeneous_tuple_type_params = dict(enumerate(f.typ for f in props.fields))
heterogeneous_tuple_type_params[abstract_utils.T] = field_types_union
# Representation of the to-be-created NamedTuple as a typing.Tuple.
heterogeneous_tuple_type = abstract.TupleClass(
ctx.convert.tuple_type, heterogeneous_tuple_type_params, ctx
)
# _make
# _make is a classmethod, so it needs to be wrapped by
# special_builtins.ClassMethodInstance.
# Like __new__, it uses the _Tname TypeVar.
sized_cls = ctx.convert.lookup_value("typing", "Sized")
iterable_type = abstract.ParameterizedClass(
ctx.convert.lookup_value("typing", "Iterable"),
{abstract_utils.T: field_types_union},
ctx,
)
cls_type = abstract.ParameterizedClass(
ctx.convert.type_type, {abstract_utils.T: cls_type_param}, ctx
)
len_type = abstract.CallableClass(
ctx.convert.lookup_value("typing", "Callable"),
{
0: sized_cls,
abstract_utils.ARGS: sized_cls,
abstract_utils.RET: ctx.convert.int_type,
},
ctx,
)
params = [
Param("iterable", iterable_type),
Param("new").unsolvable(ctx, node),
Param("len", len_type).unsolvable(ctx, node),
]
make = overlay_utils.make_method(
ctx,
node,
name="_make",
params=params,
self_param=Param("cls", cls_type),
return_type=cls_type_param,
)
make_args = function.Args(posargs=(make,))
_, members["_make"] = ctx.special_builtins["classmethod"].call(
node, None, make_args
)
# _replace
# Like __new__, it uses the _Tname TypeVar. We have to annotate the `self`
# param to make sure the TypeVar is substituted correctly.
members["_replace"] = overlay_utils.make_method(
ctx,
node,
name="_replace",
self_param=Param("self", cls_type_param),
return_type=cls_type_param,
kwargs=Param("kwds", field_types_union),
)
# __getnewargs__
members["__getnewargs__"] = overlay_utils.make_method(
ctx, node, name="__getnewargs__", return_type=heterogeneous_tuple_type
)
# __getstate__
members["__getstate__"] = overlay_utils.make_method(
ctx, node, name="__getstate__"
)
# _asdict
members["_asdict"] = overlay_utils.make_method(
ctx, node, name="_asdict", return_type=field_dict_cls
)
# Finally, make the class.
cls_dict = abstract.Dict(ctx)
cls_dict.update(node, members)
# Enforces type checking like Tuple[...]
superclass_of_new_type = heterogeneous_tuple_type.to_variable(node)
if props.bases:
final_bases = []
for base in props.bases:
if any(b.full_name == "typing.NamedTuple" for b in base.data):
final_bases.append(superclass_of_new_type)
else:
final_bases.append(base)
else:
final_bases = [superclass_of_new_type]
# This NamedTuple is being created via a function call. We manually
# construct an annotated_locals entry for it so that __annotations__ is
# initialized properly for the generated class.
ctx.vm.annotated_locals[props.name] = {
f.name: abstract_utils.Local(node, None, f.typ, None, ctx)
for f in props.fields
}
cls_props = class_mixin.ClassBuilderProperties(
name_var=ctx.convert.build_string(node, props.name),
bases=final_bases,
class_dict_var=cls_dict.to_variable(node),
class_type=NamedTupleClass,
)
node, cls_var = ctx.make_class(node, cls_props)
cls = cls_var.data[0]
# Now that the class has been made, we can complete the TypeParameter used
# by __new__, _make and _replace.
cls_type_param.bound = cls
# set __new__.__defaults__
defaults = [f.default for f in props.fields if f.default is not None]
defaults = ctx.convert.build_tuple(node, defaults)
node, new_attr = ctx.attribute_handler.get_attribute(node, cls, "__new__")
new_attr = abstract_utils.get_atomic_value(new_attr)
node = ctx.attribute_handler.set_attribute(
node, new_attr, "__defaults__", defaults
)
# Store the original properties
cls.props = props
cls.generated_members = set(members.keys()) - {x.name for x in props.fields}
ctx.vm.trace_classdef(cls_var)
return node, cls_var
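# Illustrative sketch (standard library only, not pytype API): the members
# synthesized above mirror what collections.namedtuple provides at runtime,
# e.g. _fields, _make, _replace and _asdict, plus defaults on __new__.
def _demo_namedtuple_members():
    import collections
    Point = collections.namedtuple("Point", ["x", "y"], defaults=[0])
    assert Point._fields == ("x", "y")
    p = Point._make([1, 2])                  # classmethod modelled by "_make" above
    assert p._replace(y=5) == Point(1, 5)    # "_replace" keeps the tuple type
    assert p._asdict() == {"x": 1, "y": 2}   # "_asdict" returns a dict of fields
    assert Point(1) == Point(1, 0)           # defaults end up on __new__.__defaults__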
|
NamedTupleClass
|
python
|
doocs__leetcode
|
solution/1200-1299/1220.Count Vowels Permutation/Solution.py
|
{
"start": 0,
"end": 400
}
|
class ____:
def countVowelPermutation(self, n: int) -> int:
f = [1] * 5
mod = 10**9 + 7
for _ in range(n - 1):
g = [0] * 5
g[0] = (f[1] + f[2] + f[4]) % mod
g[1] = (f[0] + f[2]) % mod
g[2] = (f[1] + f[3]) % mod
g[3] = f[2]
g[4] = (f[2] + f[3]) % mod
f = g
return sum(f) % mod
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/1857. Largest Color Value in a Directed Graph/1857.py
|
{
"start": 0,
"end": 853
}
|
class ____:
def largestPathValue(self, colors: str, edges: list[list[int]]) -> int:
n = len(colors)
ans = 0
processed = 0
graph = [[] for _ in range(n)]
inDegrees = [0] * n
q = collections.deque()
count = [[0] * 26 for _ in range(n)]
# Build the graph.
for u, v in edges:
graph[u].append(v)
inDegrees[v] += 1
# Topological sort (Kahn's algorithm).
for i, degree in enumerate(inDegrees):
if degree == 0:
q.append(i)
while q:
u = q.popleft()
processed += 1
count[u][ord(colors[u]) - ord('a')] += 1
ans = max(ans, count[u][ord(colors[u]) - ord('a')])
for v in graph[u]:
for i in range(26):
count[v][i] = max(count[v][i], count[u][i])
inDegrees[v] -= 1
if inDegrees[v] == 0:
q.append(v)
return ans if processed == n else -1
|
Solution
|
python
|
doocs__leetcode
|
solution/3100-3199/3149.Find the Minimum Cost Array Permutation/Solution.py
|
{
"start": 0,
"end": 889
}
|
class ____:
def findPermutation(self, nums: List[int]) -> List[int]:
# dfs(mask, pre): minimum cost to finish the permutation, where `mask` is the
# set of indices already placed and `pre` is the index placed last.
@cache
def dfs(mask: int, pre: int) -> int:
if mask == (1 << n) - 1:
return abs(pre - nums[0])
res = inf
for cur in range(1, n):
if mask >> cur & 1 ^ 1:
res = min(res, abs(pre - nums[cur]) + dfs(mask | 1 << cur, cur))
return res
def g(mask: int, pre: int):
ans.append(pre)
if mask == (1 << n) - 1:
return
res = dfs(mask, pre)
for cur in range(1, n):
if mask >> cur & 1 ^ 1:
if abs(pre - nums[cur]) + dfs(mask | 1 << cur, cur) == res:
g(mask | 1 << cur, cur)
break
n = len(nums)
ans = []
g(1, 0)
return ans
|
Solution
|
python
|
ansible__ansible
|
lib/ansible/module_utils/_internal/_datatag/__init__.py
|
{
"start": 23385,
"end": 29945
}
|
class ____(AnsibleSerializable):
__slots__ = _NO_INSTANCE_STORAGE
_native_type: t.ClassVar[type]
_item_source: t.ClassVar[t.Optional[t.Callable]] = None
_tagged_type_map: t.ClassVar[t.Dict[type, t.Type['AnsibleTaggedObject']]] = {}
_tagged_collection_types: t.ClassVar[t.Set[t.Type[c.Collection]]] = set()
_collection_types: t.ClassVar[t.Set[t.Type[c.Collection]]] = set()
_empty_tags_as_native: t.ClassVar[bool] = True # by default, untag will revert to the native type when no tags remain
_subclasses_native_type: t.ClassVar[bool] = True # by default, tagged types are assumed to subclass the type they augment
_ansible_tags_mapping: _AnsibleTagsMapping | _EmptyROInternalTagsMapping = _EMPTY_INTERNAL_TAGS_MAPPING
"""
Efficient internal storage of tags, indexed by tag type.
Contains no more than one instance of each tag type.
This is defined as a class attribute to support type hinting and documentation.
It is overwritten with an instance attribute during instance creation.
The instance attribute slot is provided by the derived type.
"""
def __init_subclass__(cls, **kwargs) -> None:
super().__init_subclass__(**kwargs)
try:
init_class = cls._init_class # type: ignore[attr-defined]
except AttributeError:
pass
else:
init_class()
if not cls._subclasses_native_type:
return # NOTE: When not subclassing a native type, the derived type must set cls._native_type itself and cls._empty_tags_as_native to False.
try:
# Subclasses of tagged types will already have a native type set and won't need to detect it.
# Special types which do not subclass a native type can also have their native type already set.
# Automatic item source selection is only implemented for types that don't set _native_type.
cls._native_type
except AttributeError:
# Direct subclasses of native types won't have cls._native_type set, so detect the native type.
cls._native_type = cls.__bases__[0]
# Detect the item source if not already set.
if cls._item_source is None and is_non_scalar_collection_type(cls._native_type):
cls._item_source = cls._native_type.__iter__ # type: ignore[attr-defined]
# Use a collection specific factory for types with item sources.
if cls._item_source:
cls._instance_factory = cls._instance_factory_collection # type: ignore[method-assign]
new_type_direct_subclass = cls.__mro__[1]
conflicting_impl = AnsibleTaggedObject._tagged_type_map.get(new_type_direct_subclass)
if conflicting_impl:
raise TypeError(f'Cannot define type {cls.__name__!r} since {conflicting_impl.__name__!r} already extends {new_type_direct_subclass.__name__!r}.')
AnsibleTaggedObject._tagged_type_map[new_type_direct_subclass] = cls
if is_non_scalar_collection_type(cls):
AnsibleTaggedObject._tagged_collection_types.add(cls)
AnsibleTaggedObject._collection_types.update({cls, new_type_direct_subclass})
def _native_copy(self) -> t.Any:
"""
Returns a copy of the current instance as its native Python type.
Any dynamic access behaviors that apply to this instance will be used during creation of the copy.
In the case of a container type, this is a shallow copy.
Recursive calls to native_copy are the responsibility of the caller.
"""
return self._native_type(self) # pylint: disable=abstract-class-instantiated
@classmethod
def _instance_factory(cls, value: t.Any, tags_mapping: _AnsibleTagsMapping) -> t.Self:
# There's no way to indicate cls is callable with a single arg without defining a useless __init__.
instance = cls(value) # type: ignore[call-arg]
instance._ansible_tags_mapping = tags_mapping
return instance
@staticmethod
def _get_tagged_type(value_type: type) -> type[AnsibleTaggedObject]:
tagged_type: t.Optional[type[AnsibleTaggedObject]]
if issubclass(value_type, AnsibleTaggedObject):
tagged_type = value_type
else:
tagged_type = AnsibleTaggedObject._tagged_type_map.get(value_type)
if not tagged_type:
raise NotTaggableError(value_type)
return tagged_type
def _as_dict(self) -> t.Dict[str, t.Any]:
return dict(
value=self._native_copy(),
tags=list(self._ansible_tags_mapping.values()),
)
@classmethod
def _from_dict(cls: t.Type[_TAnsibleTaggedObject], d: t.Dict[str, t.Any]) -> _TAnsibleTaggedObject:
return AnsibleTagHelper.tag(**d)
@classmethod
def _instance_factory_collection(
cls,
value: t.Any,
tags_mapping: _AnsibleTagsMapping,
) -> t.Self:
if type(value) in AnsibleTaggedObject._collection_types:
# use the underlying iterator to avoid access/iteration side effects (e.g. templating/wrapping on Lazy subclasses)
instance = cls(cls._item_source(value)) # type: ignore[call-arg,misc]
else:
# this is used when the value is a generator
instance = cls(value) # type: ignore[call-arg]
instance._ansible_tags_mapping = tags_mapping
return instance
def _copy_collection(self) -> AnsibleTaggedObject:
"""
Return a shallow copy of this instance, which must be a collection.
This uses the underlying iterator to avoid access/iteration side effects (e.g. templating/wrapping on Lazy subclasses).
"""
return AnsibleTagHelper.tag_copy(self, type(self)._item_source(self), value_type=type(self)) # type: ignore[misc]
@classmethod
def _new(cls, value: t.Any, *args, **kwargs) -> t.Self:
if type(value) is _AnsibleTagsMapping: # pylint: disable=unidiomatic-typecheck
self = cls._native_type.__new__(cls, *args, **kwargs)
self._ansible_tags_mapping = value
return self
return cls._native_type.__new__(cls, value, *args, **kwargs)
def _reduce(self, reduced: t.Union[str, tuple[t.Any, ...]]) -> tuple:
if type(reduced) is not tuple: # pylint: disable=unidiomatic-typecheck
raise TypeError()
updated: list[t.Any] = list(reduced)
updated[1] = (self._ansible_tags_mapping,) + updated[1]
return tuple(updated)
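# Minimal standalone sketch of the pattern used above (not the real ansible
# API): a str subclass that still behaves like the native type but carries
# extra tag data in a slot, similar to how AnsibleTaggedObject stores
# _ansible_tags_mapping on tagged subclasses of native types.
class _DemoTaggedStr(str):
    __slots__ = ("_tags",)
    def __new__(cls, value, tags=()):
        self = super().__new__(cls, value)
        self._tags = tuple(tags)
        return self
# _DemoTaggedStr("hello", tags=("trusted",)) still compares equal to "hello",
# and converting back to the native type is effectively str(instance).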
|
AnsibleTaggedObject
|
python
|
pdm-project__pdm
|
src/pdm/models/caches.py
|
{
"start": 6535,
"end": 10138
}
|
class ____:
"""Caches wheels so we do not need to rebuild them.
Wheels are only cached when the URL contains egg-info or is a VCS repository
with an *immutable* revision. There might be more than one wheel built for
one sdist; the one with the most preferred tag will be returned.
"""
def __init__(self, directory: Path | str) -> None:
self.directory = Path(directory)
self.ephemeral_directory = Path(create_tracked_tempdir(prefix="pdm-wheel-cache-"))
def _get_candidates(self, path: Path) -> Iterable[Path]:
if not path.exists():
return
for candidate in path.iterdir():
if candidate.name.endswith(".whl"):
yield candidate
def _get_path_parts(self, link: Link, env_spec: EnvSpec) -> tuple[str, ...]:
hash_key = {
"url": link.url_without_fragment,
# target env participates in the hash key to handle cases
# where the sdist produces different wheels on different Pythons, and
# the differences are not encoded in compatibility tags.
"env_spec": env_spec.as_dict(),
}
if link.subdirectory:
hash_key["subdirectory"] = link.subdirectory
if link.hash and link.hash_name:
hash_key[link.hash_name] = link.hash
hashed = hashlib.sha224(
json.dumps(hash_key, sort_keys=True, separators=(",", ":"), ensure_ascii=True).encode("utf-8")
).hexdigest()
return (hashed[:2], hashed[2:4], hashed[4:6], hashed[6:])
def get_path_for_link(self, link: Link, env_spec: EnvSpec) -> Path:
parts = self._get_path_parts(link, env_spec)
return self.directory.joinpath(*parts)
def get_ephemeral_path_for_link(self, link: Link, env_spec: EnvSpec) -> Path:
parts = self._get_path_parts(link, env_spec)
return self.ephemeral_directory.joinpath(*parts)
def get(self, link: Link, project_name: str | None, env_spec: EnvSpec) -> Path | None:
if not project_name:
return None
canonical_name = canonicalize_name(project_name)
candidate = self._get_from_path(self.get_path_for_link(link, env_spec), canonical_name, env_spec)
if candidate is not None:
return candidate
return self._get_from_path(self.get_ephemeral_path_for_link(link, env_spec), canonical_name, env_spec)
def _get_from_path(self, path: Path, canonical_name: str, env_spec: EnvSpec) -> Path | None:
max_compatible_candidate: tuple[tuple[int, ...], Path | None] = ((-1, -1, -1, -1), None)
for candidate in self._get_candidates(path):
try:
name, *_ = parse_wheel_filename(candidate.name)
except ValueError:
logger.debug("Ignoring invalid cached wheel %s", candidate.name)
continue
if canonical_name != canonicalize_name(name):
logger.debug(
"Ignoring cached wheel %s with invalid project name %s, expected: %s",
candidate.name,
name,
canonical_name,
)
continue
compat = env_spec.wheel_compatibility(candidate.name)
if compat is None:
continue
if compat > max_compatible_candidate[0]:
max_compatible_candidate = (compat, candidate)
return max_compatible_candidate[1]
@lru_cache(maxsize=None)
def get_wheel_cache(directory: Path | str) -> WheelCache:
return WheelCache(directory)
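# Standalone sketch (stdlib only, not pdm API): reproduces the bucket layout
# used by _get_path_parts above -- a sha224 over a canonical JSON key, split
# into 2/2/2/remainder path components. The key contents here are illustrative.
def _demo_cache_bucket(url: str, env_spec: dict) -> tuple:
    import hashlib
    import json
    hash_key = {"url": url, "env_spec": env_spec}
    hashed = hashlib.sha224(
        json.dumps(hash_key, sort_keys=True, separators=(",", ":"), ensure_ascii=True).encode("utf-8")
    ).hexdigest()
    return (hashed[:2], hashed[2:4], hashed[4:6], hashed[6:])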
|
WheelCache
|
python
|
python-openxml__python-docx
|
src/docx/image/gif.py
|
{
"start": 97,
"end": 1118
}
|
class ____(BaseImageHeader):
"""Image header parser for GIF images.
Note that the GIF format does not support resolution (DPI) information. Both
horizontal and vertical DPI default to 72.
"""
@classmethod
def from_stream(cls, stream):
"""Return |Gif| instance having header properties parsed from GIF image in
`stream`."""
px_width, px_height = cls._dimensions_from_stream(stream)
return cls(px_width, px_height, 72, 72)
@property
def content_type(self):
"""MIME content type for this image, unconditionally `image/gif` for GIF
images."""
return MIME_TYPE.GIF
@property
def default_ext(self):
"""Default filename extension, always 'gif' for GIF images."""
return "gif"
@classmethod
def _dimensions_from_stream(cls, stream):
stream.seek(6)
bytes_ = stream.read(4)
struct = Struct("<HH")
px_width, px_height = struct.unpack(bytes_)
return px_width, px_height
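# Standalone sketch (stdlib only): builds a minimal GIF header in memory and
# performs the same parse as _dimensions_from_stream above -- the logical
# screen width and height are two little-endian uint16 values at offset 6.
def _demo_gif_dimensions():
    import io
    from struct import Struct
    stream = io.BytesIO(b"GIF89a" + Struct("<HH").pack(640, 480))
    stream.seek(6)
    px_width, px_height = Struct("<HH").unpack(stream.read(4))
    assert (px_width, px_height) == (640, 480)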
|
Gif
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/ruff/RUF033.py
|
{
"start": 1018,
"end": 1203
}
|
class ____:
bar = "should've used attrs"
def __post_init__(self, bar: str = "ahhh", baz: str = "hmm") -> None: ...
# https://github.com/astral-sh/ruff/issues/18950
@dataclass
|
Foo
|
python
|
doocs__leetcode
|
solution/1000-1099/1021.Remove Outermost Parentheses/Solution.py
|
{
"start": 0,
"end": 367
}
|
class ____:
def removeOuterParentheses(self, s: str) -> str:
ans = []
cnt = 0
for c in s:
if c == '(':
cnt += 1
if cnt > 1:
ans.append(c)
else:
cnt -= 1
if cnt > 0:
ans.append(c)
return ''.join(ans)
|
Solution
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/inheritance/test_basic.py
|
{
"start": 42571,
"end": 44722
}
|
class ____(fixtures.MappedTest):
"""test a scenario where joined table inheritance might be
confused as an eagerly loaded joined table."""
@classmethod
def define_tables(cls, metadata):
Table(
"a_table",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
Column("type", String(30), nullable=False),
Column("parent_id", Integer, ForeignKey("a_table.id")),
)
Table(
"b_table",
metadata,
Column("id", Integer, ForeignKey("a_table.id"), primary_key=True),
Column("b_data", String(50)),
)
def test_adapt_stringency(self):
b_table, a_table = self.tables.b_table, self.tables.a_table
class A(ComparableEntity):
pass
class B(A):
pass
self.mapper_registry.map_imperatively(
A,
a_table,
polymorphic_on=a_table.c.type,
polymorphic_identity="A",
properties={"children": relationship(A, order_by=a_table.c.name)},
)
self.mapper_registry.map_imperatively(
B,
b_table,
inherits=A,
polymorphic_identity="B",
properties={
"b_derived": column_property(b_table.c.b_data + "DATA")
},
)
sess = fixture_session()
b1 = B(id=1, name="b1", b_data="i")
sess.add(b1)
sess.flush()
b2 = B(id=2, name="b2", b_data="l", parent_id=1)
sess.add(b2)
sess.flush()
bid = b1.id
sess.expunge_all()
node = sess.query(B).filter(B.id == bid).all()[0]
eq_(node, B(id=1, name="b1", b_data="i"))
eq_(node.children[0], B(id=2, name="b2", b_data="l"))
sess.expunge_all()
node = (
sess.query(B)
.options(joinedload(B.children))
.filter(B.id == bid)
.all()[0]
)
eq_(node, B(id=1, name="b1", b_data="i"))
eq_(node.children[0], B(id=2, name="b2", b_data="l"))
|
EagerTargetingTest
|
python
|
Textualize__rich
|
rich/text.py
|
{
"start": 2976,
"end": 47534
}
|
class ____(JupyterMixin):
"""Text with color / style.
Args:
text (str, optional): Default unstyled text. Defaults to "".
style (Union[str, Style], optional): Base style for text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to None.
spans (List[Span], optional): A list of predefined style spans. Defaults to None.
"""
__slots__ = [
"_text",
"style",
"justify",
"overflow",
"no_wrap",
"end",
"tab_size",
"_spans",
"_length",
]
def __init__(
self,
text: str = "",
style: Union[str, Style] = "",
*,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = None,
end: str = "\n",
tab_size: Optional[int] = None,
spans: Optional[List[Span]] = None,
) -> None:
sanitized_text = strip_control_codes(text)
self._text = [sanitized_text]
self.style = style
self.justify: Optional["JustifyMethod"] = justify
self.overflow: Optional["OverflowMethod"] = overflow
self.no_wrap = no_wrap
self.end = end
self.tab_size = tab_size
self._spans: List[Span] = spans or []
self._length: int = len(sanitized_text)
def __len__(self) -> int:
return self._length
def __bool__(self) -> bool:
return bool(self._length)
def __str__(self) -> str:
return self.plain
def __repr__(self) -> str:
return f"<text {self.plain!r} {self._spans!r} {self.style!r}>"
def __add__(self, other: Any) -> "Text":
if isinstance(other, (str, Text)):
result = self.copy()
result.append(other)
return result
return NotImplemented
def __eq__(self, other: object) -> bool:
if not isinstance(other, Text):
return NotImplemented
return self.plain == other.plain and self._spans == other._spans
def __contains__(self, other: object) -> bool:
if isinstance(other, str):
return other in self.plain
elif isinstance(other, Text):
return other.plain in self.plain
return False
def __getitem__(self, slice: Union[int, slice]) -> "Text":
def get_text_at(offset: int) -> "Text":
_Span = Span
text = Text(
self.plain[offset],
spans=[
_Span(0, 1, style)
for start, end, style in self._spans
if end > offset >= start
],
end="",
)
return text
if isinstance(slice, int):
return get_text_at(slice)
else:
start, stop, step = slice.indices(len(self.plain))
if step == 1:
lines = self.divide([start, stop])
return lines[1]
else:
# This would be a bit of work to implement efficiently
# For now, its not required
raise TypeError("slices with step!=1 are not supported")
@property
def cell_len(self) -> int:
"""Get the number of cells required to render this text."""
return cell_len(self.plain)
@property
def markup(self) -> str:
"""Get console markup to render this Text.
Returns:
str: A string potentially creating markup tags.
"""
from .markup import escape
output: List[str] = []
plain = self.plain
markup_spans = [
(0, False, self.style),
*((span.start, False, span.style) for span in self._spans),
*((span.end, True, span.style) for span in self._spans),
(len(plain), True, self.style),
]
markup_spans.sort(key=itemgetter(0, 1))
position = 0
append = output.append
for offset, closing, style in markup_spans:
if offset > position:
append(escape(plain[position:offset]))
position = offset
if style:
append(f"[/{style}]" if closing else f"[{style}]")
markup = "".join(output)
return markup
@classmethod
def from_markup(
cls,
text: str,
*,
style: Union[str, Style] = "",
emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
end: str = "\n",
) -> "Text":
"""Create Text instance from markup.
Args:
text (str): A string containing console markup.
style (Union[str, Style], optional): Base style for text. Defaults to "".
emoji (bool, optional): Also render emoji code. Defaults to True.
emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
Returns:
Text: A Text instance with markup rendered.
"""
from .markup import render
rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant)
rendered_text.justify = justify
rendered_text.overflow = overflow
rendered_text.end = end
return rendered_text
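# Illustrative usage (hedged sketch, using only the API defined in this class):
# console markup becomes plain text plus style spans.
#
#     text = Text.from_markup("[bold red]Hello[/bold red] world")
#     text.plain      # -> "Hello world"
#     text.spans      # -> a single Span covering "Hello" with style "bold red"
#     text.markup     # round-trips back to console markup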
@classmethod
def from_ansi(
cls,
text: str,
*,
style: Union[str, Style] = "",
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = None,
end: str = "\n",
tab_size: Optional[int] = 8,
) -> "Text":
"""Create a Text object from a string containing ANSI escape codes.
Args:
text (str): A string containing escape codes.
style (Union[str, Style], optional): Base style for text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
"""
from .ansi import AnsiDecoder
joiner = Text(
"\n",
justify=justify,
overflow=overflow,
no_wrap=no_wrap,
end=end,
tab_size=tab_size,
style=style,
)
decoder = AnsiDecoder()
result = joiner.join(line for line in decoder.decode(text))
return result
@classmethod
def styled(
cls,
text: str,
style: StyleType = "",
*,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
) -> "Text":
"""Construct a Text instance with a pre-applied styled. A style applied in this way won't be used
to pad the text when it is justified.
Args:
text (str): A string containing console markup.
style (Union[str, Style]): Style to apply to the text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
Returns:
Text: A text instance with a style applied to the entire string.
"""
styled_text = cls(text, justify=justify, overflow=overflow)
styled_text.stylize(style)
return styled_text
@classmethod
def assemble(
cls,
*parts: Union[str, "Text", Tuple[str, StyleType]],
style: Union[str, Style] = "",
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = None,
end: str = "\n",
tab_size: int = 8,
meta: Optional[Dict[str, Any]] = None,
) -> "Text":
"""Construct a text instance by combining a sequence of strings with optional styles.
The positional arguments should be either strings, or a tuple of string + style.
Args:
style (Union[str, Style], optional): Base style for text. Defaults to "".
justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
end (str, optional): Character to end text with. Defaults to "\\\\n".
tab_size (int): Number of spaces per tab. Defaults to 8.
meta (Dict[str, Any], optional): Meta data to apply to text, or None for no meta data. Defaults to None.
Returns:
Text: A new text instance.
"""
text = cls(
style=style,
justify=justify,
overflow=overflow,
no_wrap=no_wrap,
end=end,
tab_size=tab_size,
)
append = text.append
_Text = Text
for part in parts:
if isinstance(part, (_Text, str)):
append(part)
else:
append(*part)
if meta:
text.apply_meta(meta)
return text
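# Illustrative usage (hedged sketch): assemble combines plain strings and
# (string, style) pairs into one Text without writing console markup:
#
#     status = Text.assemble(("WARN ", "bold yellow"), "disk almost full")
#     status.plain    # -> "WARN disk almost full"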
@property
def plain(self) -> str:
"""Get the text as a single string."""
if len(self._text) != 1:
self._text[:] = ["".join(self._text)]
return self._text[0]
@plain.setter
def plain(self, new_text: str) -> None:
"""Set the text to a new value."""
if new_text != self.plain:
sanitized_text = strip_control_codes(new_text)
self._text[:] = [sanitized_text]
old_length = self._length
self._length = len(sanitized_text)
if old_length > self._length:
self._trim_spans()
@property
def spans(self) -> List[Span]:
"""Get a reference to the internal list of spans."""
return self._spans
@spans.setter
def spans(self, spans: List[Span]) -> None:
"""Set spans."""
self._spans = spans[:]
def blank_copy(self, plain: str = "") -> "Text":
"""Return a new Text instance with copied metadata (but not the string or spans)."""
copy_self = Text(
plain,
style=self.style,
justify=self.justify,
overflow=self.overflow,
no_wrap=self.no_wrap,
end=self.end,
tab_size=self.tab_size,
)
return copy_self
def copy(self) -> "Text":
"""Return a copy of this instance."""
copy_self = Text(
self.plain,
style=self.style,
justify=self.justify,
overflow=self.overflow,
no_wrap=self.no_wrap,
end=self.end,
tab_size=self.tab_size,
)
copy_self._spans[:] = self._spans
return copy_self
def stylize(
self,
style: Union[str, Style],
start: int = 0,
end: Optional[int] = None,
) -> None:
"""Apply a style to the text, or a portion of the text.
Args:
style (Union[str, Style]): Style instance or style definition to apply.
start (int): Start offset (negative indexing is supported). Defaults to 0.
end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
"""
if style:
length = len(self)
if start < 0:
start = length + start
if end is None:
end = length
if end < 0:
end = length + end
if start >= length or end <= start:
# Span not in text or not valid
return
self._spans.append(Span(start, min(length, end), style))
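# Illustrative usage (hedged sketch): stylize records a Span over part of the
# plain text; negative offsets count from the end, as handled above:
#
#     text = Text("Hello, World!")
#     text.stylize("bold magenta", 0, 5)    # styles "Hello"
#     text.stylize("underline", -6)         # styles "World!"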
def stylize_before(
self,
style: Union[str, Style],
start: int = 0,
end: Optional[int] = None,
) -> None:
"""Apply a style to the text, or a portion of the text. Styles will be applied before other styles already present.
Args:
style (Union[str, Style]): Style instance or style definition to apply.
start (int): Start offset (negative indexing is supported). Defaults to 0.
end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
"""
if style:
length = len(self)
if start < 0:
start = length + start
if end is None:
end = length
if end < 0:
end = length + end
if start >= length or end <= start:
# Span not in text or not valid
return
self._spans.insert(0, Span(start, min(length, end), style))
def apply_meta(
self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None
) -> None:
"""Apply metadata to the text, or a portion of the text.
Args:
meta (Dict[str, Any]): A dict of meta information.
start (int): Start offset (negative indexing is supported). Defaults to 0.
end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
"""
style = Style.from_meta(meta)
self.stylize(style, start=start, end=end)
def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Text":
"""Apply event handlers (used by Textual project).
Example:
>>> from rich.text import Text
>>> text = Text("hello world")
>>> text.on(click="view.toggle('world')")
Args:
meta (Dict[str, Any]): Mapping of meta information.
**handlers: Keyword args are prefixed with "@" to define handlers.
Returns:
Text: Self is returned so that methods may be chained.
"""
meta = {} if meta is None else meta
meta.update({f"@{key}": value for key, value in handlers.items()})
self.stylize(Style.from_meta(meta))
return self
def remove_suffix(self, suffix: str) -> None:
"""Remove a suffix if it exists.
Args:
suffix (str): Suffix to remove.
"""
if self.plain.endswith(suffix):
self.right_crop(len(suffix))
def get_style_at_offset(self, console: "Console", offset: int) -> Style:
"""Get the style of a character at give offset.
Args:
console (~Console): Console where text will be rendered.
offset (int): Offset into text (negative indexing supported).
Returns:
Style: A Style instance.
"""
# TODO: This is a little inefficient, it is only used by full justify
if offset < 0:
offset = len(self) + offset
get_style = console.get_style
style = get_style(self.style).copy()
for start, end, span_style in self._spans:
if end > offset >= start:
style += get_style(span_style, default="")
return style
def extend_style(self, spaces: int) -> None:
"""Extend the Text given number of spaces where the spaces have the same style as the last character.
Args:
spaces (int): Number of spaces to add to the Text.
"""
if spaces <= 0:
return
spans = self.spans
new_spaces = " " * spaces
if spans:
end_offset = len(self)
self._spans[:] = [
span.extend(spaces) if span.end >= end_offset else span
for span in spans
]
self._text.append(new_spaces)
self._length += spaces
else:
self.plain += new_spaces
def highlight_regex(
self,
re_highlight: Union[Pattern[str], str],
style: Optional[Union[GetStyleCallable, StyleType]] = None,
*,
style_prefix: str = "",
) -> int:
"""Highlight text with a regular expression, where group names are
translated to styles.
Args:
re_highlight (Union[re.Pattern, str]): A regular expression object or string.
style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable
which accepts the matched text and returns a style. Defaults to None.
style_prefix (str, optional): Optional prefix to add to style group names.
Returns:
int: Number of regex matches
"""
count = 0
append_span = self._spans.append
_Span = Span
plain = self.plain
if isinstance(re_highlight, str):
re_highlight = re.compile(re_highlight)
for match in re_highlight.finditer(plain):
get_span = match.span
if style:
start, end = get_span()
match_style = style(plain[start:end]) if callable(style) else style
if match_style is not None and end > start:
append_span(_Span(start, end, match_style))
count += 1
for name in match.groupdict().keys():
start, end = get_span(name)
if start != -1 and end > start:
append_span(_Span(start, end, f"{style_prefix}{name}"))
return count
def highlight_words(
self,
words: Iterable[str],
style: Union[str, Style],
*,
case_sensitive: bool = True,
) -> int:
"""Highlight words with a style.
Args:
words (Iterable[str]): Words to highlight.
style (Union[str, Style]): Style to apply.
case_sensitive (bool, optional): Enable case sensitive matching. Defaults to True.
Returns:
int: Number of words highlighted.
"""
re_words = "|".join(re.escape(word) for word in words)
add_span = self._spans.append
count = 0
_Span = Span
for match in re.finditer(
re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE
):
start, end = match.span(0)
add_span(_Span(start, end, style))
count += 1
return count
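# Illustrative usage (hedged sketch): both highlighters only append spans and
# return the number of matches found:
#
#     text = Text("SELECT name FROM users")
#     text.highlight_words(["SELECT", "FROM"], "bold blue")    # -> 2
#     text.highlight_regex(r"\busers\b", "italic")             # -> 1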
def rstrip(self) -> None:
"""Strip whitespace from end of text."""
self.plain = self.plain.rstrip()
def rstrip_end(self, size: int) -> None:
"""Remove whitespace beyond a certain width at the end of the text.
Args:
size (int): The desired size of the text.
"""
text_length = len(self)
if text_length > size:
excess = text_length - size
whitespace_match = _re_whitespace.search(self.plain)
if whitespace_match is not None:
whitespace_count = len(whitespace_match.group(0))
self.right_crop(min(whitespace_count, excess))
def set_length(self, new_length: int) -> None:
"""Set new length of the text, clipping or padding is required."""
length = len(self)
if length != new_length:
if length < new_length:
self.pad_right(new_length - length)
else:
self.right_crop(length - new_length)
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> Iterable[Segment]:
tab_size: int = console.tab_size if self.tab_size is None else self.tab_size
justify = self.justify or options.justify or DEFAULT_JUSTIFY
overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW
lines = self.wrap(
console,
options.max_width,
justify=justify,
overflow=overflow,
tab_size=tab_size or 8,
no_wrap=pick_bool(self.no_wrap, options.no_wrap, False),
)
all_lines = Text("\n").join(lines)
yield from all_lines.render(console, end=self.end)
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
text = self.plain
lines = text.splitlines()
max_text_width = max(cell_len(line) for line in lines) if lines else 0
words = text.split()
min_text_width = (
max(cell_len(word) for word in words) if words else max_text_width
)
return Measurement(min_text_width, max_text_width)
def render(self, console: "Console", end: str = "") -> Iterable["Segment"]:
"""Render the text as Segments.
Args:
console (Console): Console instance.
end (Optional[str], optional): Optional end character.
Returns:
Iterable[Segment]: Result of render that may be written to the console.
"""
_Segment = Segment
text = self.plain
if not self._spans:
yield Segment(text)
if end:
yield _Segment(end)
return
get_style = partial(console.get_style, default=Style.null())
enumerated_spans = list(enumerate(self._spans, 1))
style_map = {index: get_style(span.style) for index, span in enumerated_spans}
style_map[0] = get_style(self.style)
spans = [
(0, False, 0),
*((span.start, False, index) for index, span in enumerated_spans),
*((span.end, True, index) for index, span in enumerated_spans),
(len(text), True, 0),
]
spans.sort(key=itemgetter(0, 1))
stack: List[int] = []
stack_append = stack.append
stack_pop = stack.remove
style_cache: Dict[Tuple[Style, ...], Style] = {}
style_cache_get = style_cache.get
combine = Style.combine
def get_current_style() -> Style:
"""Construct current style from stack."""
styles = tuple(style_map[_style_id] for _style_id in sorted(stack))
cached_style = style_cache_get(styles)
if cached_style is not None:
return cached_style
current_style = combine(styles)
style_cache[styles] = current_style
return current_style
for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]):
if leaving:
stack_pop(style_id)
else:
stack_append(style_id)
if next_offset > offset:
yield _Segment(text[offset:next_offset], get_current_style())
if end:
yield _Segment(end)
def join(self, lines: Iterable["Text"]) -> "Text":
"""Join text together with this instance as the separator.
Args:
lines (Iterable[Text]): An iterable of Text instances to join.
Returns:
Text: A new text instance containing the joined text.
"""
new_text = self.blank_copy()
def iter_text() -> Iterable["Text"]:
if self.plain:
for last, line in loop_last(lines):
yield line
if not last:
yield self
else:
yield from lines
extend_text = new_text._text.extend
append_span = new_text._spans.append
extend_spans = new_text._spans.extend
offset = 0
_Span = Span
for text in iter_text():
extend_text(text._text)
if text.style:
append_span(_Span(offset, offset + len(text), text.style))
extend_spans(
_Span(offset + start, offset + end, style)
for start, end, style in text._spans
)
offset += len(text)
new_text._length = offset
return new_text
def expand_tabs(self, tab_size: Optional[int] = None) -> None:
"""Converts tabs to spaces.
Args:
tab_size (int, optional): Size of tabs. Defaults to 8.
"""
if "\t" not in self.plain:
return
if tab_size is None:
tab_size = self.tab_size
if tab_size is None:
tab_size = 8
new_text: List[Text] = []
append = new_text.append
for line in self.split("\n", include_separator=True):
if "\t" not in line.plain:
append(line)
else:
cell_position = 0
parts = line.split("\t", include_separator=True)
for part in parts:
if part.plain.endswith("\t"):
part._text[-1] = part._text[-1][:-1] + " "
cell_position += part.cell_len
tab_remainder = cell_position % tab_size
if tab_remainder:
spaces = tab_size - tab_remainder
part.extend_style(spaces)
cell_position += spaces
else:
cell_position += part.cell_len
append(part)
result = Text("").join(new_text)
self._text = [result.plain]
self._length = len(self.plain)
self._spans[:] = result._spans
def truncate(
self,
max_width: int,
*,
overflow: Optional["OverflowMethod"] = None,
pad: bool = False,
) -> None:
"""Truncate text if it is longer that a given width.
Args:
max_width (int): Maximum number of characters in text.
overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None, to use self.overflow.
pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False.
"""
_overflow = overflow or self.overflow or DEFAULT_OVERFLOW
if _overflow != "ignore":
length = cell_len(self.plain)
if length > max_width:
if _overflow == "ellipsis":
self.plain = set_cell_size(self.plain, max_width - 1) + "…"
else:
self.plain = set_cell_size(self.plain, max_width)
if pad and length < max_width:
spaces = max_width - length
self._text = [f"{self.plain}{' ' * spaces}"]
self._length = len(self.plain)
def _trim_spans(self) -> None:
"""Remove or modify any spans that are over the end of the text."""
max_offset = len(self.plain)
_Span = Span
self._spans[:] = [
(
span
if span.end < max_offset
else _Span(span.start, min(max_offset, span.end), span.style)
)
for span in self._spans
if span.start < max_offset
]
def pad(self, count: int, character: str = " ") -> None:
"""Pad left and right with a given number of characters.
Args:
count (int): Width of padding.
character (str): The character to pad with. Must be a string of length 1.
"""
assert len(character) == 1, "Character must be a string of length 1"
if count:
pad_characters = character * count
self.plain = f"{pad_characters}{self.plain}{pad_characters}"
_Span = Span
self._spans[:] = [
_Span(start + count, end + count, style)
for start, end, style in self._spans
]
def pad_left(self, count: int, character: str = " ") -> None:
"""Pad the left with a given character.
Args:
count (int): Number of characters to pad.
character (str, optional): Character to pad with. Defaults to " ".
"""
assert len(character) == 1, "Character must be a string of length 1"
if count:
self.plain = f"{character * count}{self.plain}"
_Span = Span
self._spans[:] = [
_Span(start + count, end + count, style)
for start, end, style in self._spans
]
def pad_right(self, count: int, character: str = " ") -> None:
"""Pad the right with a given character.
Args:
count (int): Number of characters to pad.
character (str, optional): Character to pad with. Defaults to " ".
"""
assert len(character) == 1, "Character must be a string of length 1"
if count:
self.plain = f"{self.plain}{character * count}"
def align(self, align: AlignMethod, width: int, character: str = " ") -> None:
"""Align text to a given width.
Args:
align (AlignMethod): One of "left", "center", or "right".
width (int): Desired width.
character (str, optional): Character to pad with. Defaults to " ".
"""
self.truncate(width)
excess_space = width - cell_len(self.plain)
if excess_space:
if align == "left":
self.pad_right(excess_space, character)
elif align == "center":
left = excess_space // 2
self.pad_left(left, character)
self.pad_right(excess_space - left, character)
else:
self.pad_left(excess_space, character)
def append(
self, text: Union["Text", str], style: Optional[Union[str, "Style"]] = None
) -> "Text":
"""Add text with an optional style.
Args:
text (Union[Text, str]): A str or Text to append.
style (str, optional): A style name. Defaults to None.
Returns:
Text: Returns self for chaining.
"""
if not isinstance(text, (str, Text)):
raise TypeError("Only str or Text can be appended to Text")
if len(text):
if isinstance(text, str):
sanitized_text = strip_control_codes(text)
self._text.append(sanitized_text)
offset = len(self)
text_length = len(sanitized_text)
if style:
self._spans.append(Span(offset, offset + text_length, style))
self._length += text_length
elif isinstance(text, Text):
_Span = Span
if style is not None:
raise ValueError(
"style must not be set when appending Text instance"
)
text_length = self._length
if text.style:
self._spans.append(
_Span(text_length, text_length + len(text), text.style)
)
self._text.append(text.plain)
self._spans.extend(
_Span(start + text_length, end + text_length, style)
for start, end, style in text._spans.copy()
)
self._length += len(text)
return self
def append_text(self, text: "Text") -> "Text":
"""Append another Text instance. This method is more performant than Text.append, but
only works for Text.
Args:
text (Text): The Text instance to append to this instance.
Returns:
Text: Returns self for chaining.
"""
_Span = Span
text_length = self._length
if text.style:
self._spans.append(_Span(text_length, text_length + len(text), text.style))
self._text.append(text.plain)
self._spans.extend(
_Span(start + text_length, end + text_length, style)
for start, end, style in text._spans.copy()
)
self._length += len(text)
return self
def append_tokens(
self, tokens: Iterable[Tuple[str, Optional[StyleType]]]
) -> "Text":
"""Append iterable of str and style. Style may be a Style instance or a str style definition.
Args:
tokens (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style.
Returns:
Text: Returns self for chaining.
"""
append_text = self._text.append
append_span = self._spans.append
_Span = Span
offset = len(self)
for content, style in tokens:
content = strip_control_codes(content)
append_text(content)
if style:
append_span(_Span(offset, offset + len(content), style))
offset += len(content)
self._length = offset
return self
def copy_styles(self, text: "Text") -> None:
"""Copy styles from another Text instance.
Args:
text (Text): A Text instance to copy styles from, must be the same length.
"""
self._spans.extend(text._spans)
def split(
self,
separator: str = "\n",
*,
include_separator: bool = False,
allow_blank: bool = False,
) -> Lines:
"""Split rich text into lines, preserving styles.
Args:
separator (str, optional): String to split on. Defaults to "\\n".
include_separator (bool, optional): Include the separator in the lines. Defaults to False.
allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False.
Returns:
List[RichText]: A list of rich text, one per line of the original.
"""
assert separator, "separator must not be empty"
text = self.plain
if separator not in text:
return Lines([self.copy()])
if include_separator:
lines = self.divide(
match.end() for match in re.finditer(re.escape(separator), text)
)
else:
def flatten_spans() -> Iterable[int]:
for match in re.finditer(re.escape(separator), text):
start, end = match.span()
yield start
yield end
lines = Lines(
line for line in self.divide(flatten_spans()) if line.plain != separator
)
if not allow_blank and text.endswith(separator):
lines.pop()
return lines
def divide(self, offsets: Iterable[int]) -> Lines:
"""Divide text into a number of lines at given offsets.
Args:
offsets (Iterable[int]): Offsets used to divide text.
Returns:
Lines: New RichText instances between offsets.
"""
_offsets = list(offsets)
if not _offsets:
return Lines([self.copy()])
text = self.plain
text_length = len(text)
divide_offsets = [0, *_offsets, text_length]
line_ranges = list(zip(divide_offsets, divide_offsets[1:]))
style = self.style
justify = self.justify
overflow = self.overflow
_Text = Text
new_lines = Lines(
_Text(
text[start:end],
style=style,
justify=justify,
overflow=overflow,
)
for start, end in line_ranges
)
if not self._spans:
return new_lines
_line_appends = [line._spans.append for line in new_lines._lines]
line_count = len(line_ranges)
_Span = Span
for span_start, span_end, style in self._spans:
lower_bound = 0
upper_bound = line_count
start_line_no = (lower_bound + upper_bound) // 2
while True:
line_start, line_end = line_ranges[start_line_no]
if span_start < line_start:
upper_bound = start_line_no - 1
elif span_start > line_end:
lower_bound = start_line_no + 1
else:
break
start_line_no = (lower_bound + upper_bound) // 2
if span_end < line_end:
end_line_no = start_line_no
else:
end_line_no = lower_bound = start_line_no
upper_bound = line_count
while True:
line_start, line_end = line_ranges[end_line_no]
if span_end < line_start:
upper_bound = end_line_no - 1
elif span_end > line_end:
lower_bound = end_line_no + 1
else:
break
end_line_no = (lower_bound + upper_bound) // 2
for line_no in range(start_line_no, end_line_no + 1):
line_start, line_end = line_ranges[line_no]
new_start = max(0, span_start - line_start)
new_end = min(span_end - line_start, line_end - line_start)
if new_end > new_start:
_line_appends[line_no](_Span(new_start, new_end, style))
return new_lines
def right_crop(self, amount: int = 1) -> None:
"""Remove a number of characters from the end of the text."""
max_offset = len(self.plain) - amount
_Span = Span
self._spans[:] = [
(
span
if span.end < max_offset
else _Span(span.start, min(max_offset, span.end), span.style)
)
for span in self._spans
if span.start < max_offset
]
self._text = [self.plain[:-amount]]
self._length -= amount
def wrap(
self,
console: "Console",
width: int,
*,
justify: Optional["JustifyMethod"] = None,
overflow: Optional["OverflowMethod"] = None,
tab_size: int = 8,
no_wrap: Optional[bool] = None,
) -> Lines:
"""Word wrap the text.
Args:
console (Console): Console instance.
width (int): Number of cells available per line.
justify (str, optional): Justify method: "default", "left", "center", "full", "right". Defaults to "default".
overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None.
tab_size (int, optional): Default tab size. Defaults to 8.
no_wrap (bool, optional): Disable wrapping. Defaults to False.
Returns:
Lines: The wrapped lines.
"""
wrap_justify = justify or self.justify or DEFAULT_JUSTIFY
wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW
no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == "ignore"
lines = Lines()
for line in self.split(allow_blank=True):
if "\t" in line:
line.expand_tabs(tab_size)
if no_wrap:
new_lines = Lines([line])
else:
offsets = divide_line(str(line), width, fold=wrap_overflow == "fold")
new_lines = line.divide(offsets)
for line in new_lines:
line.rstrip_end(width)
if wrap_justify:
new_lines.justify(
console, width, justify=wrap_justify, overflow=wrap_overflow
)
for line in new_lines:
line.truncate(width, overflow=wrap_overflow)
lines.extend(new_lines)
return lines
def fit(self, width: int) -> Lines:
"""Fit the text into a given width by chopping it into lines.
Args:
width (int): Maximum characters in a line.
Returns:
Lines: Lines container.
"""
lines: Lines = Lines()
append = lines.append
for line in self.split():
line.set_length(width)
append(line)
return lines
def detect_indentation(self) -> int:
"""Auto-detect indentation of code.
Returns:
int: Number of spaces used to indent code.
"""
_indentations = {
len(match.group(1))
for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE)
}
try:
indentation = (
reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1
)
except TypeError:
indentation = 1
return indentation
def with_indent_guides(
self,
indent_size: Optional[int] = None,
*,
character: str = "│",
style: StyleType = "dim green",
) -> "Text":
"""Adds indent guide lines to text.
Args:
indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None.
character (str, optional): Character to use for indentation. Defaults to "│".
style (Union[Style, str], optional): Style of indent guides.
Returns:
Text: New text with indentation guides.
"""
_indent_size = self.detect_indentation() if indent_size is None else indent_size
text = self.copy()
text.expand_tabs()
indent_line = f"{character}{' ' * (_indent_size - 1)}"
re_indent = re.compile(r"^( *)(.*)$")
new_lines: List[Text] = []
add_line = new_lines.append
blank_lines = 0
for line in text.split(allow_blank=True):
match = re_indent.match(line.plain)
if not match or not match.group(2):
blank_lines += 1
continue
indent = match.group(1)
full_indents, remaining_space = divmod(len(indent), _indent_size)
new_indent = f"{indent_line * full_indents}{' ' * remaining_space}"
line.plain = new_indent + line.plain[len(new_indent) :]
line.stylize(style, 0, len(new_indent))
if blank_lines:
new_lines.extend([Text(new_indent, style=style)] * blank_lines)
blank_lines = 0
add_line(line)
if blank_lines:
new_lines.extend([Text("", style=style)] * blank_lines)
new_text = text.blank_copy("\n").join(new_lines)
return new_text
if __name__ == "__main__": # pragma: no cover
from rich.console import Console
text = Text(
"""\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"""
)
text.highlight_words(["Lorem"], "bold")
text.highlight_words(["ipsum"], "italic")
console = Console()
console.rule("justify='left'")
console.print(text, style="red")
console.print()
console.rule("justify='center'")
console.print(text, style="green", justify="center")
console.print()
console.rule("justify='right'")
console.print(text, style="blue", justify="right")
console.print()
console.rule("justify='full'")
console.print(text, style="magenta", justify="full")
console.print()
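# Illustrative addition, not part of the original rich source above: a small,
# hedged demo of wrap() and with_indent_guides(), reusing the objects defined
# in the existing __main__ block.
if __name__ == "__main__":  # pragma: no cover
    demo_console = Console()
    demo_console.rule("wrap(width=40, overflow='ellipsis')")
    for wrapped_line in text.wrap(demo_console, 40, overflow="ellipsis"):
        demo_console.print(wrapped_line)
    demo_console.rule("with_indent_guides()")
    snippet = Text("def demo():\n    if True:\n        return 1\n")
    demo_console.print(snippet.with_indent_guides(4))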
|
Text
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/index_object.py
|
{
"start": 5581,
"end": 5818
}
|
class ____:
# GH 13166
def setup(self):
N = 100_000
a = np.arange(N, dtype=np.float64)
self.ind = Index(a * 4.8000000418824129e-08)
def time_get_loc(self):
self.ind.get_loc(0)
|
Float64IndexMethod
|
python
|
encode__django-rest-framework
|
tests/test_views.py
|
{
"start": 2841,
"end": 3889
}
|
class ____(TestCase):
def setUp(self):
self.DEFAULT_HANDLER = api_settings.EXCEPTION_HANDLER
def exception_handler(exc, request):
return Response('Error!', status=status.HTTP_400_BAD_REQUEST)
api_settings.EXCEPTION_HANDLER = exception_handler
def tearDown(self):
api_settings.EXCEPTION_HANDLER = self.DEFAULT_HANDLER
def test_class_based_view_exception_handler(self):
view = ErrorView.as_view()
request = factory.get('/', content_type='application/json')
response = view(request)
expected = 'Error!'
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data == expected
def test_function_based_view_exception_handler(self):
view = error_view
request = factory.get('/', content_type='application/json')
response = view(request)
expected = 'Error!'
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.data == expected
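# Illustrative addition, not part of the DRF test module above: outside of tests,
# the handler exercised here is normally registered through settings rather than
# by patching api_settings directly. A hedged sketch of that configuration; the
# dotted path below is a hypothetical project module and belongs in settings.py.
REST_FRAMEWORK = {
    "EXCEPTION_HANDLER": "myproject.utils.custom_exception_handler",
}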
|
TestCustomExceptionHandler
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/templates.py
|
{
"start": 60303,
"end": 60784
}
|
class ____(DelegatingLexer):
"""
Subclass of the `DjangoLexer` that highlights unlexed data with the
`YamlLexer`.
Commonly used in Saltstack salt states.
.. versionadded:: 2.0
"""
name = 'YAML+Jinja'
aliases = ['yaml+jinja', 'salt', 'sls']
filenames = ['*.sls']
mimetypes = ['text/x-yaml+jinja', 'text/x-sls']
def __init__(self, **options):
super(YamlJinjaLexer, self).__init__(YamlLexer, DjangoLexer, **options)
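# Illustrative addition, not part of the vendored pygments module above: a hedged
# sketch of running this lexer through pygments' standard highlight API. The class
# name is the one referenced in the super() call above, and the imports assume the
# regular `pygments` package layout rather than the vendored one.
if __name__ == "__main__":
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sls_source = "install_nginx:\n  pkg.installed:\n    - name: {{ pillar['nginx_pkg'] }}\n"
    print(highlight(sls_source, YamlJinjaLexer(), TerminalFormatter()))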
|
YamlJinjaLexer
|
python
|
tornadoweb__tornado
|
tornado/web.py
|
{
"start": 100933,
"end": 101467
}
|
class ____(RequestHandler):
"""Generates an error response with ``status_code`` for all requests."""
def initialize(self, status_code: int) -> None:
self.set_status(status_code)
def prepare(self) -> None:
raise HTTPError(self._status_code)
def check_xsrf_cookie(self) -> None:
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
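# Illustrative addition, not part of tornado itself: a hedged sketch of wiring
# this handler into an Application so that a fixed route always returns 410.
# The class name below is taken from this row's `target` value; tornado also
# uses this handler internally via Application's `default_handler_class` hook.
if __name__ == "__main__":
    import tornado.web
    app = tornado.web.Application(
        [(r"/gone", ErrorHandler, dict(status_code=410))],
    )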
|
ErrorHandler
|
python
|
huggingface__transformers
|
tests/models/clap/test_modeling_clap.py
|
{
"start": 10017,
"end": 14060
}
|
class ____:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
scope=None,
projection_hidden_act="relu",
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.projection_hidden_act = projection_hidden_act
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return ClapTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
projection_hidden_act=self.projection_hidden_act,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = ClapTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_with_projection(self, config, input_ids, input_mask):
model = ClapTextModelWithProjection(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.text_embeds.shape, (self.batch_size, self.projection_dim))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
|
ClapTextModelTester
|
python
|
realpython__materials
|
build-a-django-content-aggregator/source_code_final/podcasts/models.py
|
{
"start": 31,
"end": 417
}
|
class ____(models.Model):
title = models.CharField(max_length=200)
description = models.TextField()
pub_date = models.DateTimeField()
link = models.URLField()
image = models.URLField()
podcast_name = models.CharField(max_length=100)
guid = models.CharField(max_length=50)
def __str__(self) -> str:
return f"{self.podcast_name}: {self.title}"
|
Episode
|
python
|
apache__airflow
|
providers/apache/pinot/tests/unit/apache/pinot/hooks/test_pinot.py
|
{
"start": 7164,
"end": 7452
}
|
class ____:
def test_exception_when_overriding_cmd_path(self):
with pytest.raises(RuntimeError):
PinotAdminHook(cmd_path="some_path.sh")
def test_exception_when_keeping_cmd_path(self):
PinotAdminHook(cmd_path="pinot-admin.sh")
|
TestPinotAdminHookCreation
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/multimethod_base/package.py
|
{
"start": 188,
"end": 628
}
|
class ____(Package):
"""This is a base class for the Multimethod test case.
It tests whether multimethod properly invokes methods in a base
class when subclass multi-methods do not match.
"""
homepage = "http://www.example.com/"
url = "http://www.example.com/example-1.0.tar.gz"
def base_method(self):
return "base_method"
def diamond_inheritance(self):
return "base_package"
|
MultimethodBase
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/elements/media_test.py
|
{
"start": 1185,
"end": 1254
}
|
class ____(Enum):
AUDIO = "audio"
VIDEO = "video"
|
MockMediaKind
|
python
|
getsentry__sentry
|
src/sentry/db/models/base.py
|
{
"start": 1426,
"end": 13224
}
|
class ____(models.Model):
class Meta:
abstract = True
__relocation_scope__: RelocationScope | set[RelocationScope]
__relocation_dependencies__: set[str]
# Some models have a globally unique identifier, like a UUID. This should be a set of one or
# more fields, none of which are foreign keys, that are `unique=True` or `unique_together` for
# an entire Sentry instance.
__relocation_custom_ordinal__: list[str] | None = None
objects: ClassVar[BaseManager[Self]] = BaseManager()
update = update
def __getstate__(self) -> dict[str, Any]:
d = self.__dict__.copy()
# we can't serialize weakrefs
d.pop("_Model__data", None)
return d
def __hash__(self) -> int:
# Django decided that it shouldn't let us hash objects even though they have
# memory addresses. We need that behavior, so let's revert.
if self.pk:
return models.Model.__hash__(self)
return id(self)
def __reduce__(
self,
) -> tuple[Callable[[int], models.Model], tuple[tuple[str, str]], Mapping[str, Any]]:
reduced = super().__reduce__()
assert isinstance(reduced, tuple), reduced
(model_unpickle, stuff, _) = reduced
return (model_unpickle, stuff, self.__getstate__())
def __setstate__(self, state: Mapping[str, Any]) -> None:
self.__dict__.update(state)
def _get_relational_field(self, field_name: str) -> models.ForeignKey:
ret = self._meta.get_field(field_name)
if not isinstance(ret, models.ForeignKey):
raise TypeError(f"expected {field_name=} to be ForeignKey")
return ret
def set_cached_field_value(self, field_name: str, value: Any) -> None:
# Explicitly set a field's cached value.
# This only works for relational fields, and is useful when
# you already have the value and can therefore use this
# to populate Django's cache before accessing the attribute
# and triggering a duplicate, unnecessary query.
self._get_relational_field(field_name).set_cached_value(self, value)
def get_cached_field_value(self, field_name: str) -> Any:
# Get a relational field's cached value.
# It's recommended to only use this in testing code,
# for when you would like to inspect the cache.
# In production, you should guard `model.field` with an
# `if model.is_field_cached`.
name = self._get_relational_field(field_name).cache_name
return self._state.fields_cache.get(name, None)
def delete_cached_field_value(self, field_name: str) -> None:
name = self._get_relational_field(field_name).cache_name
if name in self._state.fields_cache:
del self._state.fields_cache[name]
def is_field_cached(self, field_name: str) -> bool:
# Ask if a relational field has a cached value.
name = self._get_relational_field(field_name).cache_name
return name in self._state.fields_cache
def get_relocation_scope(self) -> RelocationScope:
"""
Retrieves the `RelocationScope` for a `Model` subclass. It generally just forwards `__relocation_scope__`, but some models have instance-specific logic for deducing the scope.
"""
if isinstance(self.__relocation_scope__, set):
raise ValueError(
"Must define `get_relocation_scope` override if using multiple relocation scopes."
)
return self.__relocation_scope__
@classmethod
def get_relocation_ordinal_fields(cls, _json_model: Any) -> list[str] | None:
"""
Retrieves the custom ordinal fields for models that may be re-used at import time (that is,
the `write_relocation_import()` method may return an `ImportKind` besides
`ImportKind.Inserted`). In such cases, we want an ordering of models by a globally unique
value that is not the `pk`, to ensure that merged and inserted models are still ordered
correctly with respect to one another.
"""
if cls.__relocation_custom_ordinal__ is None:
return None
return cls.__relocation_custom_ordinal__
@classmethod
def get_possible_relocation_scopes(cls) -> set[RelocationScope]:
"""
Retrieves the `RelocationScope` for a `Model` subclass. It always returns a set, to account for models that support multiple scopes on a situational, per-instance basis.
"""
return (
cls.__relocation_scope__
if isinstance(cls.__relocation_scope__, set)
else {cls.__relocation_scope__}
)
@classmethod
def query_for_relocation_export(cls, q: models.Q, pk_map: PrimaryKeyMap) -> models.Q:
"""
Create a custom query for performing exports. This is useful when we can't use the usual
method of filtering by foreign keys of already-seen models, and allows us to export a
smaller subset of data than "all models of this kind".
The `q` argument represents the existing query. This method should modify that query, then return it.
"""
model_name = get_model_name(cls)
model_relations = dependencies()[model_name]
# Create a filter for each possible FK reference to constrain the amount of data being sent
# over from the database. We only want models where every FK field references into a model
# whose PK we've already exported (or `NULL`, if the FK field is nullable).
for field_name, foreign_field in model_relations.foreign_keys.items():
foreign_field_model_name = get_model_name(foreign_field.model)
matched_fks = set(pk_map.get_pks(foreign_field_model_name))
matched_fks_query = dict()
matched_fks_query[field_name + "__in"] = matched_fks
if foreign_field.nullable:
match_on_null_query = dict()
match_on_null_query[field_name + "__isnull"] = True
q &= models.Q(**matched_fks_query) | models.Q(**match_on_null_query)
else:
q &= models.Q(**matched_fks_query)
return q
@classmethod
def sanitize_relocation_json(
cls, json: Any, sanitizer: Sanitizer, model_name: NormalizedModelName | None = None
) -> None:
"""
Takes the export JSON representation of this model, and "sanitizes" any data that might be
PII or otherwise user-specific. The JSON is modified in-place to avoid extra copies.
This function operates on the JSON form, rather than the Django model instance, for two
reasons: 1. we want the ability to sanitize exported JSON without first deserializing it,
and 2. to avoid risky situations where a model is modified in-place and then saved to the
production database by some far flung code that touches it later.
"""
model_name = get_model_name(cls) if model_name is None else model_name
fields = cls._meta.get_fields()
field_names = [f.name for f in fields]
str_field_types = [models.CharField, models.TextField]
sensitive_words = ["password", "token", "secret"]
# All `models.CharField` fields called "slug" and "name" can be auto-sanitized as strings.
if "name" in field_names and "slug" in field_names:
sanitizer.set_name_and_slug_pair(
json, SanitizableField(model_name, "name"), SanitizableField(model_name, "slug")
)
elif "name" in field_names:
sanitizer.set_name(json, SanitizableField(model_name, "name"))
for f in fields:
# Auto-sanitize all `models.DateTimeField` fields on this class.
if isinstance(f, models.DateTimeField):
sanitizer.set_datetime(json, SanitizableField(model_name, f.name))
# Auto-sanitize all `models.EmailField` fields on this class.
if isinstance(f, models.EmailField):
sanitizer.set_email(json, SanitizableField(model_name, f.name))
# Auto-sanitize all IP Address fields.
if isinstance(f, models.IPAddressField) or isinstance(f, models.GenericIPAddressField):
sanitizer.set_ip(json, SanitizableField(model_name, f.name))
# Auto-sanitize all URL fields.
if isinstance(f, models.URLField) or f.name.endswith("url") or f.name.endswith("uri"):
sanitizer.set_url(json, SanitizableField(model_name, f.name))
# Auto-sanitize all UUID fields.
if (
isinstance(f, models.UUIDField)
or isinstance(f, UUIDField)
or f.name.endswith("guid")
or f.name.endswith("uuid")
):
sanitizer.set_uuid(json, SanitizableField(model_name, f.name))
# Auto-sanitize all string fields that contain any sensitive words in their name.
is_str_field_type = next(filter(lambda t: isinstance(f, t), str_field_types), None)
contains_sensitive_word = next(filter(lambda w: w in f.name, sensitive_words), None)
if is_str_field_type and contains_sensitive_word:
sanitizer.set_string(json, SanitizableField(model_name, f.name))
return None
def normalize_before_relocation_import(
self, pk_map: PrimaryKeyMap, _s: ImportScope, _f: ImportFlags
) -> int | None:
"""
A helper function that normalizes a deserialized model. Note that this modifies the model in
place, so it should generally be done immediately prior to a companion
`write_relocation_import()` method, to avoid data skew or corrupted local state. The method
returns the old `pk` that was replaced, or `None` if normalization failed.
The primary reason this function is left as a standalone, rather than being folded into
`write_relocation_import`, is that it is often useful to adjust just the normalization logic
by itself without affecting the writing logic.
Overrides should take care NOT to push the updated changes to the database (ie, no calls to
`.save()` or `.update()`), as this functionality is delegated to the
`write_relocation_import()` method.
The default normalization logic merely replaces foreign keys with their new values from the
provided `pk_map`.
"""
deps = dependencies()
model_name = get_model_name(self)
for field, model_relation in deps[model_name].foreign_keys.items():
field_id = field if field.endswith("_id") else f"{field}_id"
fk = getattr(self, field_id, None)
if fk is not None:
new_fk = pk_map.get_pk(get_model_name(model_relation.model), fk)
if new_fk is None:
return None
setattr(self, field_id, new_fk)
old_pk = self.pk
self.pk = None
return old_pk
def write_relocation_import(
self, _s: ImportScope, _f: ImportFlags
) -> tuple[int, ImportKind] | None:
"""
Writes a deserialized model to the database. If this write is successful, this method will
return a tuple of the new `pk` and the `ImportKind` (ie, whether we created a new model or
re-used an existing one).
Overrides of this method can throw either `django.core.exceptions.ValidationError` or
`rest_framework.serializers.ValidationError`.
This function should only be executed after `normalize_before_relocation_import()` has fired
and returned a not-null `old_pk` input.
"""
self.save(force_insert=True)
return (self.pk, ImportKind.Inserted)
|
BaseModel
|
python
|
pytorch__pytorch
|
test/package/package_a/fake_interface.py
|
{
"start": 520,
"end": 800
}
|
class ____(torch.nn.Module):
"""A *different* module that implements ModuleInterface."""
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 * inp2 + 1
def forward(self, input: Tensor) -> Tensor:
return self.one(input, input + 1)
|
NewModule
|
python
|
lepture__authlib
|
authlib/common/errors.py
|
{
"start": 50,
"end": 743
}
|
class ____(Exception):
"""Base Exception for all errors in Authlib."""
#: short-string error code
error = None
#: long-string to describe this error
description = ""
#: web page that describes this error
uri = None
def __init__(self, error=None, description=None, uri=None):
if error is not None:
self.error = error
if description is not None:
self.description = description
if uri is not None:
self.uri = uri
message = f"{self.error}: {self.description}"
super().__init__(message)
def __repr__(self):
return f'<{self.__class__.__name__} "{self.error}">'
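# Illustrative addition, not part of authlib: a hedged sketch of defining and
# raising a concrete error on top of the base class above (the base class name
# is taken from this row's `target` value).
if __name__ == "__main__":
    class InvalidTokenDemoError(AuthlibBaseError):
        error = "invalid_token"
        description = "The access token is expired or revoked."
    try:
        raise InvalidTokenDemoError()
    except AuthlibBaseError as exc:
        print(repr(exc))  # prints the class name and error code, e.g. InvalidTokenDemoError "invalid_token"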
|
AuthlibBaseError
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 184490,
"end": 185918
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"pull_request_id",
"base_ref_name",
"title",
"body",
"state",
"maintainer_can_modify",
"assignee_ids",
"milestone_id",
"label_ids",
"project_ids",
"client_mutation_id",
)
pull_request_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="pullRequestId"
)
base_ref_name = sgqlc.types.Field(String, graphql_name="baseRefName")
title = sgqlc.types.Field(String, graphql_name="title")
body = sgqlc.types.Field(String, graphql_name="body")
state = sgqlc.types.Field(PullRequestUpdateState, graphql_name="state")
maintainer_can_modify = sgqlc.types.Field(
Boolean, graphql_name="maintainerCanModify"
)
assignee_ids = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="assigneeIds"
)
milestone_id = sgqlc.types.Field(ID, graphql_name="milestoneId")
label_ids = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="labelIds"
)
project_ids = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(ID)), graphql_name="projectIds"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
|
UpdatePullRequestInput
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/data_flow_ops.py
|
{
"start": 25337,
"end": 29058
}
|
class ____(QueueBase):
"""A queue implementation that dequeues elements in a random order.
See `tf.queue.QueueBase` for a description of the methods on
this class.
"""
def __init__(self,
capacity,
min_after_dequeue,
dtypes,
shapes=None,
names=None,
seed=None,
shared_name=None,
name="random_shuffle_queue"):
"""Create a queue that dequeues elements in a random order.
A `RandomShuffleQueue` has bounded capacity; supports multiple
concurrent producers and consumers; and provides exactly-once
delivery.
A `RandomShuffleQueue` holds a list of up to `capacity`
elements. Each element is a fixed-length tuple of tensors whose
dtypes are described by `dtypes`, and whose shapes are optionally
described by the `shapes` argument.
If the `shapes` argument is specified, each component of a queue
element must have the respective fixed shape. If it is
unspecified, different queue elements may have different shapes,
but the use of `dequeue_many` is disallowed.
The `min_after_dequeue` argument allows the caller to specify a
minimum number of elements that will remain in the queue after a
`dequeue` or `dequeue_many` operation completes, to ensure a
minimum level of mixing of elements. This invariant is maintained
by blocking those operations until sufficient elements have been
enqueued. The `min_after_dequeue` argument is ignored after the
queue has been closed.
Args:
capacity: An integer. The upper bound on the number of elements
that may be stored in this queue.
min_after_dequeue: An integer (described above).
dtypes: A list of `DType` objects. The length of `dtypes` must equal
the number of tensors in each queue element.
shapes: (Optional.) A list of fully-defined `TensorShape` objects
with the same length as `dtypes`, or `None`.
names: (Optional.) A list of string naming the components in the queue
with the same length as `dtypes`, or `None`. If specified the dequeue
methods return a dictionary with the names as keys.
seed: A Python integer. Used to create a random seed. See
`tf.compat.v1.set_random_seed`
for behavior.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name for the queue operation.
"""
dtypes = _as_type_list(dtypes)
shapes = _as_shape_list(shapes, dtypes)
names = _as_name_list(names, dtypes)
seed1, seed2 = random_seed.get_seed(seed)
if seed1 is None and seed2 is None:
seed1, seed2 = 0, 0
elif seed is None and shared_name is not None:
# This means that graph seed is provided but op seed is not provided.
# If shared_name is also provided, make seed2 depend only on the graph
# seed and shared_name. (seed2 from get_seed() is generally dependent on
# the id of the last op created.)
string = (str(seed1) + shared_name).encode("utf-8")
seed2 = int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
queue_ref = gen_data_flow_ops.random_shuffle_queue_v2(
component_types=dtypes,
shapes=shapes,
capacity=capacity,
min_after_dequeue=min_after_dequeue,
seed=seed1,
seed2=seed2,
shared_name=_shared_name(shared_name),
name=name)
super(RandomShuffleQueue, self).__init__(dtypes, shapes, names, queue_ref)
@tf_export("queue.FIFOQueue", v1=["queue.FIFOQueue", "FIFOQueue"])
@deprecation.deprecated_endpoints("FIFOQueue")
|
RandomShuffleQueue
|
python
|
pytorch__pytorch
|
torch/ao/nn/intrinsic/quantized/modules/conv_add.py
|
{
"start": 241,
"end": 2325
}
|
class ____(nnq.Conv2d):
r"""
A ConvAdd2d module is a fused module of Conv2d and Add
We adopt the same interface as :class:`torch.ao.nn.quantized.Conv2d`.
Attributes:
Same as torch.ao.nn.quantized.Conv2d
"""
_FLOAT_MODULE = torch.ao.nn.intrinsic.ConvAdd2d # type: ignore[assignment]
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
device=None,
dtype=None,
):
super().__init__(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
padding_mode=padding_mode,
device=device,
dtype=dtype,
)
def forward(self, input, extra_input): # type: ignore[override]
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
if self.padding_mode != "zeros":
_reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
input = F.pad(
input, _reversed_padding_repeated_twice, mode=self.padding_mode
)
return torch.ops.quantized.conv2d_add(
input, extra_input, self._packed_params, self.scale, self.zero_point
)
def _get_name(self):
return "QuantizedConvAdd2d"
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False): # type: ignore[override]
return super().from_float(
mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
@classmethod
def from_reference(cls, ref_qconv, output_scale, output_zero_point):
return super().from_reference(ref_qconv[0], output_scale, output_zero_point)
|
ConvAdd2d
|
python
|
getsentry__sentry
|
src/sentry/discover/migrations/0002_link_migrated_explore_query_in_discover.py
|
{
"start": 222,
"end": 1760
}
|
class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("discover", "0001_move_discover_models"),
("explore", "0006_add_changed_reason_field_explore"),
]
operations = [
migrations.AddField(
model_name="discoversavedquery",
name="explore_query",
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="explore.exploresavedquery",
),
),
]
|
Migration
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/transfers/gdrive_to_local.py
|
{
"start": 1109,
"end": 3700
}
|
class ____(BaseOperator):
"""
Writes a Google Drive file into local Storage.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleDriveToLocalOperator`
:param output_file: Path to downloaded file
:param folder_id: The folder id of the folder in which the Google Drive file resides
:param file_name: The name of the file residing in Google Drive
:param gcp_conn_id: The GCP connection ID to use when fetching connection info.
:param drive_id: Optional. The id of the shared Google Drive in which the file resides.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"output_file",
"folder_id",
"file_name",
"drive_id",
"impersonation_chain",
)
def __init__(
self,
*,
output_file: str,
file_name: str,
folder_id: str,
drive_id: str | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.output_file = output_file
self.folder_id = folder_id
self.drive_id = drive_id
self.file_name = file_name
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
self.log.info("Executing download: %s into %s", self.file_name, self.output_file)
gdrive_hook = GoogleDriveHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
file_metadata = gdrive_hook.get_file_id(
folder_id=self.folder_id, file_name=self.file_name, drive_id=self.drive_id
)
with open(self.output_file, "wb") as file:
gdrive_hook.download_file(file_id=file_metadata["id"], file_handle=file)
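# Illustrative addition, not part of the provider module above: a hedged sketch
# of using the operator inside a DAG. The operator name comes from this row's
# `target` value; the folder id and connection id are placeholders, and the DAG
# keyword arguments assume a recent Airflow 2.x API.
if __name__ == "__main__":
    import datetime
    from airflow import DAG
    with DAG(
        dag_id="gdrive_download_demo",
        start_date=datetime.datetime(2024, 1, 1),
        schedule=None,
    ) as dag:
        download_report = GoogleDriveToLocalOperator(
            task_id="download_report",
            folder_id="0A1234567890",  # placeholder Drive folder id
            file_name="report.csv",
            output_file="/tmp/report.csv",
            gcp_conn_id="google_cloud_default",
        )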
|
GoogleDriveToLocalOperator
|
python
|
pypa__virtualenv
|
src/virtualenv/util/error.py
|
{
"start": 52,
"end": 323
}
|
class ____(RuntimeError):
"""Failed a process call."""
def __init__(self, code, out, err, cmd) -> None:
super().__init__(code, out, err, cmd)
self.code = code
self.out = out
self.err = err
self.cmd = cmd
|
ProcessCallFailedError
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-docker/tests/test_containers.py
|
{
"start": 1012,
"end": 1495
}
|
class ____:
async def test_logs_kwargs(self, mock_docker_host: MagicMock):
logs_kwargs = dict(container_id="42")
with disable_run_logger():
logs = await get_docker_container_logs.fn(
docker_host=mock_docker_host, **logs_kwargs
)
assert logs == "here are logs"
client = mock_docker_host.get_client()
client.__enter__.return_value.containers.get.assert_called_once_with("42")
|
TestGetDockerContainerLogs
|
python
|
pydantic__pydantic
|
tests/mypy/modules/plugin_fail_baseConfig.py
|
{
"start": 3455,
"end": 3639
}
|
class ____(BaseModel):
x: int = Field(..., alias='y')
class Config: # type: ignore[pydantic-alias]
alias_generator = lambda x: x + '_' # noqa E731
|
AliasGeneratorModel2
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_L.py
|
{
"start": 8572,
"end": 9864
}
|
class ____(Benchmark):
r"""
Levy13 objective function.
This class defines the Levy13 [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Levy13}}(x) = \left(x_{1} -1\right)^{2} \left[\sin^{2}
\left(3 \pi x_{2}\right) + 1\right] + \left(x_{2}
- 1\right)^{2} \left[\sin^{2}\left(2 \pi x_{2}\right)
+ 1\right] + \sin^{2}\left(3 \pi x_{1}\right)
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 1]`
.. [1] Mishra, S. Some new test functions for global optimization and
performance of repulsive particle swarm method.
Munich Personal RePEc Archive, 2006, 2718
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [[1 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
u = sin(3 * pi * x[0]) ** 2
v = (x[0] - 1) ** 2 * (1 + (sin(3 * pi * x[1])) ** 2)
w = (x[1] - 1) ** 2 * (1 + (sin(2 * pi * x[1])) ** 2)
return u + v + w
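# Illustrative addition, not part of the benchmark suite: a standalone, hedged
# check of the docstring's formula at its stated global optimum x = [1, 1],
# using plain numpy instead of the Benchmark machinery.
if __name__ == "__main__":
    from numpy import pi, sin
    x0, x1 = 1.0, 1.0
    u = sin(3 * pi * x0) ** 2
    v = (x0 - 1) ** 2 * (1 + sin(3 * pi * x1) ** 2)
    w = (x1 - 1) ** 2 * (1 + sin(2 * pi * x1) ** 2)
    print(u + v + w)  # ~0.0, matching fglob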
|
Levy13
|
python
|
celery__celery
|
t/unit/contrib/test_abortable.py
|
{
"start": 75,
"end": 1394
}
|
class ____:
def setup_method(self):
@self.app.task(base=AbortableTask, shared=False)
def abortable():
return True
self.abortable = abortable
def test_async_result_is_abortable(self):
result = self.abortable.apply_async()
tid = result.id
assert isinstance(
self.abortable.AsyncResult(tid), AbortableAsyncResult)
def test_is_not_aborted(self):
self.abortable.push_request()
try:
result = self.abortable.apply_async()
tid = result.id
assert not self.abortable.is_aborted(task_id=tid)
finally:
self.abortable.pop_request()
def test_is_aborted_not_abort_result(self):
self.abortable.AsyncResult = self.app.AsyncResult
self.abortable.push_request()
try:
self.abortable.request.id = 'foo'
assert not self.abortable.is_aborted()
finally:
self.abortable.pop_request()
def test_abort_yields_aborted(self):
self.abortable.push_request()
try:
result = self.abortable.apply_async()
result.abort()
tid = result.id
assert self.abortable.is_aborted(task_id=tid)
finally:
self.abortable.pop_request()
|
test_AbortableTask
|
python
|
numpy__numpy
|
numpy/polynomial/tests/test_laguerre.py
|
{
"start": 12532,
"end": 15042
}
|
class ____:
def test_lagfit(self):
def f(x):
return x * (x - 1) * (x - 2)
# Test exceptions
assert_raises(ValueError, lag.lagfit, [1], [1], -1)
assert_raises(TypeError, lag.lagfit, [[1]], [1], 0)
assert_raises(TypeError, lag.lagfit, [], [1], 0)
assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0)
assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0)
assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0)
assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, lag.lagfit, [1], [1], [-1,])
assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, lag.lagfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = lag.lagfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(lag.lagval(x, coef3), y)
coef3 = lag.lagfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(lag.lagval(x, coef3), y)
#
coef4 = lag.lagfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(lag.lagval(x, coef4), y)
coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(lag.lagval(x, coef4), y)
#
coef2d = lag.lagfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = lag.lagfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex values x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(lag.lagfit(x, x, 1), [1, -1])
assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1])
|
TestFitting
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/legacy_tf_layers/convolutional.py
|
{
"start": 19421,
"end": 29027
}
|
class ____(keras_layers.Conv3D, base.Layer):
"""3D convolution layer (e.g. spatial convolution over volumes).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Args:
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth,
height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
dilation_rate: An integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
"""
def __init__(self, filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Conv3D, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name, **kwargs)
def conv3d(inputs,
filters,
kernel_size,
strides=(1, 1, 1),
padding='valid',
data_format='channels_last',
dilation_rate=(1, 1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the 3D convolution layer.
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Args:
inputs: Tensor input.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of 3 integers, specifying the
depth, height and width of the 3D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the convolution along the depth,
height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
dilation_rate: An integer or tuple/list of 3 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: A string, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
warnings.warn('`tf.layers.conv3d` is deprecated and '
'will be removed in a future version. '
'Please Use `tf.keras.layers.Conv3D` instead.')
layer = Conv3D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs)
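# Illustrative addition, not part of the legacy tf.layers module above: a hedged
# sketch of the replacement that the deprecation warning points to.
if __name__ == "__main__":
    import tensorflow as tf
    conv = tf.keras.layers.Conv3D(filters=8, kernel_size=3, padding="valid")
    volume = tf.zeros([1, 16, 16, 16, 1])  # (batch, depth, height, width, channels)
    print(conv(volume).shape)  # (1, 14, 14, 14, 8)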
|
Conv3D
|
python
|
keras-team__keras
|
keras/src/trainers/data_adapters/torch_data_loader_adapter_test.py
|
{
"start": 329,
"end": 7436
}
|
class ____(testing.TestCase):
def test_basic_dataloader(self):
x = torch.normal(2, 3, size=(34, 4))
y = torch.normal(1, 3, size=(34, 2))
ds = torch.utils.data.TensorDataset(x, y)
dataloader = torch.utils.data.DataLoader(ds, batch_size=16)
adapter = TorchDataLoaderAdapter(dataloader)
self.assertEqual(adapter.num_batches, 3)
self.assertEqual(adapter.batch_size, 16)
self.assertEqual(adapter.has_partial_batch, True)
self.assertEqual(adapter.partial_batch_size, 2)
if backend.backend() == "numpy":
it = adapter.get_numpy_iterator()
expected_class = np.ndarray
elif backend.backend() == "tensorflow":
it = adapter.get_tf_dataset()
expected_class = tf.Tensor
elif backend.backend() == "jax":
it = adapter.get_jax_iterator()
expected_class = np.ndarray
elif backend.backend() == "torch":
it = adapter.get_torch_dataloader()
expected_class = torch.Tensor
for i, batch in enumerate(it):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, expected_class)
self.assertIsInstance(by, expected_class)
self.assertEqual(bx.dtype, by.dtype)
self.assertContainsExactSubsequence(str(bx.dtype), "float32")
if i < 2:
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
else:
self.assertEqual(bx.shape, (2, 4))
self.assertEqual(by.shape, (2, 2))
@parameterized.named_parameters(
named_product(batch_size=[None, 3], implements_len=[True, False])
)
def test_dataloader_iterable_dataset(self, batch_size, implements_len):
class TestIterableDataset(torch.utils.data.IterableDataset):
def __init__(self):
self.x = torch.normal(2, 3, size=(16, 4))
self.y = torch.normal(1, 3, size=(16, 2))
def __iter__(self):
for _ in range(10):
yield (self.x, self.y)
class TestIterableDatasetWithLen(TestIterableDataset):
def __len__(self):
return 10
ds = (
TestIterableDatasetWithLen()
if implements_len
else TestIterableDataset()
)
dataloader = torch.utils.data.DataLoader(ds, batch_size=batch_size)
adapter = TorchDataLoaderAdapter(dataloader)
if implements_len and batch_size:
self.assertEqual(adapter.num_batches, math.ceil(10 / batch_size))
self.assertEqual(adapter.batch_size, batch_size)
self.assertEqual(adapter.has_partial_batch, True)
self.assertEqual(adapter.partial_batch_size, 10 % batch_size)
elif implements_len:
self.assertEqual(adapter.num_batches, 10)
self.assertEqual(adapter.batch_size, None)
self.assertEqual(adapter.has_partial_batch, None)
self.assertEqual(adapter.partial_batch_size, None)
else:
self.assertIsNone(adapter.num_batches)
self.assertEqual(adapter.batch_size, batch_size)
self.assertIsNone(adapter.has_partial_batch)
self.assertIsNone(adapter.partial_batch_size)
if backend.backend() == "numpy":
it = adapter.get_numpy_iterator()
expected_class = np.ndarray
elif backend.backend() == "tensorflow":
it = adapter.get_tf_dataset()
expected_class = tf.Tensor
elif backend.backend() == "jax":
it = adapter.get_jax_iterator()
expected_class = np.ndarray
elif backend.backend() == "torch":
it = adapter.get_torch_dataloader()
expected_class = torch.Tensor
batch_count = 0
for i, batch in enumerate(it):
batch_count += 1
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, expected_class)
self.assertIsInstance(by, expected_class)
self.assertEqual(bx.dtype, by.dtype)
self.assertContainsExactSubsequence(str(bx.dtype), "float32")
if batch_size:
if i < 3:
self.assertEqual(bx.shape, (batch_size, 16, 4))
self.assertEqual(by.shape, (batch_size, 16, 2))
else:
self.assertEqual(bx.shape, (10 % batch_size, 16, 4))
self.assertEqual(by.shape, (10 % batch_size, 16, 2))
else:
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
if batch_size:
self.assertEqual(batch_count, math.ceil(10 / batch_size))
else:
self.assertEqual(batch_count, 10)
def test_with_different_shapes(self):
x = (
[np.ones([4], "float32")] * 16
+ [np.ones([5], "float32")] * 16
+ [np.ones([6], "float32")] * 2
)
y = np.ones((34, 2), "float32")
ds = torch.utils.data.StackDataset(x, y)
dataloader = torch.utils.data.DataLoader(ds, batch_size=16)
adapter = TorchDataLoaderAdapter(dataloader)
self.assertEqual(adapter.num_batches, 3)
self.assertEqual(adapter.batch_size, 16)
self.assertEqual(adapter.has_partial_batch, True)
self.assertEqual(adapter.partial_batch_size, 2)
if backend.backend() == "numpy":
it = adapter.get_numpy_iterator()
elif backend.backend() == "tensorflow":
it = adapter.get_tf_dataset()
elif backend.backend() == "jax":
it = adapter.get_jax_iterator()
elif backend.backend() == "torch":
it = adapter.get_torch_dataloader()
for i, batch in enumerate(it):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertEqual(bx.dtype, by.dtype)
self.assertContainsExactSubsequence(str(bx.dtype), "float32")
if i == 0:
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
elif i == 1:
self.assertEqual(bx.shape, (16, 5))
self.assertEqual(by.shape, (16, 2))
else:
self.assertEqual(bx.shape, (2, 6))
self.assertEqual(by.shape, (2, 2))
@parameterized.named_parameters(named_product(num_workers=[0, 2]))
def test_builtin_prefetch(self, num_workers):
x = torch.normal(2, 3, size=(34, 4))
y = torch.normal(1, 3, size=(34, 2))
ds = torch.utils.data.TensorDataset(x, y)
dataloader = torch.utils.data.DataLoader(
ds, batch_size=16, num_workers=num_workers
)
adapter = TorchDataLoaderAdapter(dataloader)
if num_workers > 0:
self.assertTrue(adapter.builtin_prefetch)
else:
self.assertFalse(adapter.builtin_prefetch)
|
TestTorchDataLoaderAdapter
|
python
|
huggingface__transformers
|
src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py
|
{
"start": 29063,
"end": 29778
}
|
class ____(SiglipMultiheadAttentionPoolingHead):
def __init__(self, config: Phi4MultimodalVisionConfig):
super().__init__(config)
self.mlp = Phi4MultimodalVisionMLP(config)
def forward(self, hidden_state, attention_mask):
batch_size = hidden_state.shape[0]
probe = self.probe.repeat(batch_size, 1, 1)
hidden_state = self.attention(
query=probe, key=hidden_state, value=hidden_state, key_padding_mask=~attention_mask
)[0]
residual = hidden_state
hidden_state = self.layernorm(hidden_state)
hidden_state = residual + self.mlp(hidden_state)
return hidden_state[:, 0]
|
Phi4MultimodalVisionMultiheadAttentionPoolingHead
|
python
|
getsentry__sentry
|
src/sentry/monitors/endpoints/project_monitor_environment_details.py
|
{
"start": 798,
"end": 2496
}
|
class ____(
ProjectMonitorEnvironmentEndpoint, MonitorEnvironmentDetailsMixin
):
publish_status = {
"DELETE": ApiPublishStatus.EXPERIMENTAL,
"PUT": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.CRONS
@extend_schema(
operation_id="Update a Monitor Environment for a Project",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
MonitorParams.MONITOR_ID_OR_SLUG,
MonitorParams.ENVIRONMENT,
],
responses={
200: MonitorSerializer,
400: RESPONSE_BAD_REQUEST,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def put(self, request: Request, project, monitor, monitor_environment) -> Response:
"""
Update a monitor environment.
"""
return self.update_monitor_environment(request, project, monitor, monitor_environment)
@extend_schema(
operation_id="Delete a Monitor Environment for a Project",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
MonitorParams.MONITOR_ID_OR_SLUG,
MonitorParams.ENVIRONMENT,
],
responses={
202: RESPONSE_ACCEPTED,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def delete(self, request: Request, project, monitor, monitor_environment) -> Response:
return self.delete_monitor_environment(request, project, monitor, monitor_environment)
|
ProjectMonitorEnvironmentDetailsEndpoint
|
python
|
Textualize__textual
|
tests/css/test_parse.py
|
{
"start": 35584,
"end": 36290
}
|
class ____:
@pytest.mark.parametrize(
"valid_align", ["left", "start", "center", "right", "end", "justify"]
)
def test_text_align(self, valid_align):
css = f"#foo {{ text-align: {valid_align} }}"
stylesheet = Stylesheet()
stylesheet.add_source(css)
assert stylesheet.rules[0].styles.text_align == valid_align
def test_text_align_invalid(self):
css = "#foo { text-align: invalid-value; }"
stylesheet = Stylesheet()
with pytest.raises(StylesheetParseError):
stylesheet.add_source(css)
stylesheet.parse()
rules = stylesheet._parse_rules(css, "foo")
assert rules[0].errors
|
TestParseTextAlign
|
python
|
getsentry__sentry
|
src/sentry/grouping/component.py
|
{
"start": 12944,
"end": 14192
}
|
class ____(BaseGroupingComponent[ExceptionGroupingComponentChildren]):
id: str = "exception"
frame_counts: Counter[str]
def __init__(
self,
values: Sequence[ExceptionGroupingComponentChildren] | None = None,
hint: str | None = None,
contributes: bool | None = None,
frame_counts: Counter[str] | None = None,
):
super().__init__(hint=hint, contributes=contributes, values=values)
self.frame_counts = frame_counts or Counter()
@property
def key(self) -> str:
return _get_exception_component_key(self)
def as_dict(self) -> dict[str, Any]:
"""
Convert to a dictionary, first rearranging the values so they show up in the order we want
in grouping info.
TODO: Once we're fully transitioned off of the `newstyle:2023-01-11` config, this method can
be deleted
"""
ordered_values: Any = []
for component_id in ["type", "value", "ns_error", "stacktrace"]:
subcomponent = self.get_subcomponent(component_id)
if subcomponent:
ordered_values.append(subcomponent)
self.values = ordered_values
return super().as_dict()
|
ExceptionGroupingComponent
|
python
|
PrefectHQ__prefect
|
src/prefect/events/actions.py
|
{
"start": 7163,
"end": 7298
}
|
class ____(WorkQueueAction):
"""Resumes a Work Queue"""
type: Literal["resume-work-queue"] = "resume-work-queue"
|
ResumeWorkQueue
|
python
|
mlflow__mlflow
|
mlflow/server/graphql/autogenerated_graphql_schema.py
|
{
"start": 3876,
"end": 4107
}
|
class ____(graphene.ObjectType):
root_uri = graphene.String()
files = graphene.List(graphene.NonNull(MlflowFileInfo))
next_page_token = graphene.String()
apiError = graphene.Field(ApiError)
|
MlflowListArtifactsResponse
|
python
|
hynek__structlog
|
tests/test_dev.py
|
{
"start": 21374,
"end": 22507
}
|
class ____:
def test_wrong_name(self):
"""
Do nothing if name is not exception.
"""
assert {} == dev.set_exc_info(None, "foo", {})
@pytest.mark.parametrize("ei", [False, None, ()])
def test_already_set(self, ei):
"""
Do nothing if exc_info is already set.
"""
assert {"exc_info": ei} == dev.set_exc_info(
None, "foo", {"exc_info": ei}
)
def test_set_it(self):
"""
Set exc_info to True if its not set and if the method name is
exception.
"""
assert {"exc_info": True} == dev.set_exc_info(None, "exception", {})
@pytest.mark.skipif(dev.rich is not None, reason="Needs missing Rich.")
def test_rich_traceback_formatter_no_rich():
"""
Trying to use RichTracebackFormatter without Rich should raise an helpful
error.
"""
with pytest.raises(
ModuleNotFoundError,
match="RichTracebackFormatter requires Rich to be installed",
):
dev.rich_traceback(StringIO(), sys.exc_info())
@pytest.mark.skipif(dev.rich is None, reason="Needs Rich.")
|
TestSetExcInfo
|
python
|
python__mypy
|
mypyc/transform/copy_propagation.py
|
{
"start": 3062,
"end": 3435
}
|
class ____(IRTransform):
def __init__(self, builder: LowLevelIRBuilder, map: dict[Value, Value]) -> None:
super().__init__(builder)
self.op_map.update(map)
self.removed = set(map)
def visit_assign(self, op: Assign) -> Value | None:
if op.dest in self.removed:
return None
return self.add(op)
|
CopyPropagationTransform
|
python
|
ansible__ansible
|
lib/ansible/errors/__init__.py
|
{
"start": 10042,
"end": 10553
}
|
class ____(AnsibleTemplateError):
"""
Raised when the result of a template operation was the Omit singleton. This exception purposely does
not derive from AnsibleError to avoid elision of the traceback, since uncaught errors of this type always
indicate a bug.
"""
_default_message = "A template was resolved to an Omit scalar."
_default_help_text = "Callers must be prepared to handle this value. This is most likely a bug in the code requesting templating."
|
AnsibleValueOmittedError
|
python
|
readthedocs__readthedocs.org
|
readthedocs/proxito/tests/test_full.py
|
{
"start": 32011,
"end": 57871
}
|
class ____(BaseDocServing):
# Test that robots.txt and sitemap.xml work
def tearDown(self):
super().tearDown()
# Cleanup cache to avoid throttling on tests
cache.clear()
@mock.patch.object(BuildMediaFileSystemStorageTest, "exists")
def test_default_robots_txt(self, storage_exists):
storage_exists.return_value = False
self.project.versions.update(active=True, built=True)
response = self.client.get(
reverse("robots_txt"), headers={"host": "project.readthedocs.io"}
)
self.assertEqual(response.status_code, 200)
expected = dedent(
"""
User-agent: *
Disallow: # Allow everything
Sitemap: https://project.readthedocs.io/sitemap.xml
"""
).lstrip()
self.assertContains(response, expected)
@mock.patch.object(BuildMediaFileSystemStorageTest, "exists")
def test_default_robots_txt_disallow_hidden_versions(self, storage_exists):
storage_exists.return_value = False
self.project.versions.update(active=True, built=True)
fixture.get(
Version,
project=self.project,
slug="hidden",
active=True,
hidden=True,
privacy_level=PUBLIC,
)
fixture.get(
Version,
project=self.project,
slug="hidden-2",
active=True,
hidden=True,
privacy_level=PUBLIC,
)
fixture.get(
Version,
project=self.project,
slug="hidden-and-inactive",
active=False,
hidden=True,
privacy_level=PUBLIC,
)
fixture.get(
Version,
project=self.project,
slug="hidden-and-private",
active=False,
hidden=True,
privacy_level=PRIVATE,
)
response = self.client.get(
reverse("robots_txt"), headers={"host": "project.readthedocs.io"}
)
self.assertEqual(response.status_code, 200)
expected = dedent(
"""
User-agent: *
Disallow: /en/hidden-2/ # Hidden version
Disallow: /en/hidden/ # Hidden version
Sitemap: https://project.readthedocs.io/sitemap.xml
"""
).lstrip()
self.assertContains(response, expected)
@mock.patch.object(BuildMediaFileSystemStorageTest, "exists")
def test_default_robots_txt_private_version(self, storage_exists):
storage_exists.return_value = False
self.project.versions.update(
active=True, built=True, privacy_level=constants.PRIVATE
)
response = self.client.get(
reverse("robots_txt"), headers={"host": "project.readthedocs.io"}
)
self.assertEqual(response.status_code, 404)
def test_custom_robots_txt(self):
self.project.versions.update(active=True, built=True)
response = self.client.get(
reverse("robots_txt"), headers={"host": "project.readthedocs.io"}
)
self.assertEqual(
response["x-accel-redirect"],
"/proxito/media/html/project/latest/robots.txt",
)
def test_custom_robots_txt_private_version(self):
self.project.versions.update(
active=True, built=True, privacy_level=constants.PRIVATE
)
response = self.client.get(
reverse("robots_txt"), headers={"host": "project.readthedocs.io"}
)
self.assertEqual(response.status_code, 404)
def test_directory_indexes(self):
self.project.versions.update(active=True, built=True)
get(
HTMLFile,
project=self.project,
version=self.version,
path="index-exists/index.html",
name="index.html",
)
# Confirm we've serving from storage for the `index-exists/index.html` file
response = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/latest/index-exists"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response["location"],
"/en/latest/index-exists/",
)
def test_versioned_no_slash(self):
self.project.versions.update(active=True, built=True)
get(
HTMLFile,
project=self.project,
version=self.version,
path="index.html",
name="index.html",
)
response = self.client.get(
reverse("proxito_404_handler", kwargs={"proxito_path": "/en/latest"}),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response["location"],
"/en/latest/",
)
def test_directory_indexes_get_args(self):
self.project.versions.update(active=True, built=True)
get(
HTMLFile,
project=self.project,
version=self.version,
path="index-exists/index.html",
name="index.html",
)
# Confirm we've serving from storage for the `index-exists/index.html` file
response = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/latest/index-exists"},
)
+ "?foo=bar",
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response["location"],
"/en/latest/index-exists/?foo=bar",
)
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_404_storage_serves_custom_404_sphinx(self, storage_open):
self.project.versions.update(active=True, built=True)
fancy_version = fixture.get(
Version,
slug="fancy-version",
privacy_level=constants.PUBLIC,
active=True,
built=True,
project=self.project,
documentation_type=SPHINX,
)
get(
HTMLFile,
project=self.project,
version=fancy_version,
path="404.html",
name="404.html",
)
response = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/fancy-version/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 404)
storage_open.assert_called_once_with("html/project/fancy-version/404.html")
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_404_index_redirect_skips_not_built_versions(self, storage_open):
self.version.built = False
self.version.save()
get(
HTMLFile,
project=self.project,
version=self.version,
path="foo/index.html",
name="index.html",
)
response = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/latest/foo"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 404)
storage_open.assert_not_called()
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_custom_404_skips_not_built_versions(self, storage_open):
self.version.built = False
self.version.save()
fancy_version = fixture.get(
Version,
slug="fancy-version",
privacy_level=constants.PUBLIC,
active=True,
built=False,
project=self.project,
)
get(
HTMLFile,
project=self.project,
version=self.version,
path="404.html",
name="404.html",
)
get(
HTMLFile,
project=self.project,
version=fancy_version,
path="404.html",
name="404.html",
)
response = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/fancy-version/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 404)
storage_open.assert_not_called()
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_custom_404_doesnt_exist_in_storage(self, storage_open):
storage_open.side_effect = FileNotFoundError
get(
HTMLFile,
project=self.project,
version=self.version,
path="404.html",
name="404.html",
)
response = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/latest/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 404)
storage_open.assert_called_once_with("html/project/latest/404.html")
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_404_storage_serves_custom_404_sphinx_single_html(self, storage_open):
self.project.versions.update(active=True, built=True)
fancy_version = fixture.get(
Version,
slug="fancy-version",
privacy_level=constants.PUBLIC,
active=True,
built=True,
project=self.project,
documentation_type=SPHINX_SINGLEHTML,
)
get(
HTMLFile,
project=self.project,
version=fancy_version,
path="404.html",
name="404.html",
)
response = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/fancy-version/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 404)
storage_open.assert_called_once_with("html/project/fancy-version/404.html")
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_404_storage_serves_custom_404_sphinx_htmldir(self, storage_open):
self.project.versions.update(active=True, built=True)
fancy_version = fixture.get(
Version,
slug="fancy-version",
privacy_level=constants.PUBLIC,
active=True,
built=True,
project=self.project,
documentation_type=SPHINX_HTMLDIR,
)
get(
HTMLFile,
project=self.project,
version=fancy_version,
path="404.html",
name="404.html",
)
response = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/fancy-version/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 404)
storage_open.assert_called_once_with("html/project/fancy-version/404.html")
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_404_storage_serves_custom_404_mkdocs(self, storage_open):
self.project.versions.update(active=True, built=True)
fancy_version = fixture.get(
Version,
slug="fancy-version",
privacy_level=constants.PUBLIC,
active=True,
built=True,
project=self.project,
documentation_type=MKDOCS,
)
get(
HTMLFile,
project=self.project,
version=fancy_version,
path="404.html",
name="404.html",
)
response = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/fancy-version/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 404)
storage_open.assert_called_once_with("html/project/fancy-version/404.html")
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_404_all_paths_checked_sphinx(self, storage_open):
self.project.versions.update(active=True, built=True)
fancy_version = fixture.get(
Version,
slug="fancy-version",
privacy_level=constants.PUBLIC,
active=True,
built=True,
project=self.project,
documentation_type=SPHINX,
)
latest = self.project.versions.get(slug=LATEST)
latest.documentation_type = SPHINX
latest.save()
r = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/fancy-version/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(r.status_code, 404)
storage_open.assert_not_called()
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_404_all_paths_checked_sphinx_single_html(self, storage_open):
self.project.versions.update(active=True, built=True)
fancy_version = fixture.get(
Version,
slug="fancy-version",
privacy_level=constants.PUBLIC,
active=True,
built=True,
project=self.project,
documentation_type=SPHINX_SINGLEHTML,
)
latest = self.project.versions.get(slug=LATEST)
latest.documentation_type = SPHINX_SINGLEHTML
latest.save()
r = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/fancy-version/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(r.status_code, 404)
storage_open.assert_not_called()
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_404_all_paths_checked_sphinx_html_dir(self, storage_open):
self.project.versions.update(active=True, built=True)
fancy_version = fixture.get(
Version,
slug="fancy-version",
privacy_level=constants.PUBLIC,
active=True,
built=True,
project=self.project,
documentation_type=SPHINX_HTMLDIR,
)
latest = self.project.versions.get(slug=LATEST)
latest.documentation_type = SPHINX_HTMLDIR
latest.save()
r = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/fancy-version/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(r.status_code, 404)
storage_open.assert_not_called()
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_404_all_paths_checked_mkdocs(self, storage_open):
self.project.versions.update(active=True, built=True)
fancy_version = fixture.get(
Version,
slug="fancy-version",
privacy_level=constants.PUBLIC,
active=True,
built=True,
project=self.project,
documentation_type=MKDOCS,
)
latest = self.project.versions.get(slug=LATEST)
latest.documentation_type = MKDOCS
latest.save()
r = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/fancy-version/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(r.status_code, 404)
storage_open.assert_not_called()
@mock.patch.object(BuildMediaFileSystemStorageTest, "open")
def test_404_all_paths_checked_default_version_different_doc_type(
self, storage_open
):
self.project.versions.update(active=True, built=True)
fancy_version = fixture.get(
Version,
slug="fancy-version",
privacy_level=constants.PUBLIC,
active=True,
built=True,
project=self.project,
documentation_type=SPHINX,
)
latest = self.project.versions.get(slug=LATEST)
latest.documentation_type = SPHINX_HTMLDIR
latest.save()
r = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/en/fancy-version/not-found"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(r.status_code, 404)
storage_open.assert_not_called()
def test_sitemap_xml(self):
self.project.versions.update(active=True)
private_version = fixture.get(
Version,
privacy_level=constants.PRIVATE,
project=self.project,
)
not_translated_public_version = fixture.get(
Version,
identifier="not-translated-version",
verbose_name="not-translated-version",
slug="not-translated-version",
privacy_level=constants.PUBLIC,
project=self.project,
active=True,
)
stable_version = fixture.get(
Version,
identifier="stable",
verbose_name="stable",
slug="stable",
privacy_level=constants.PUBLIC,
project=self.project,
active=True,
)
# This is a EXTERNAL Version
external_version = fixture.get(
Version,
identifier="pr-version",
verbose_name="pr-version",
slug="pr-9999",
project=self.project,
active=True,
type=EXTERNAL,
)
hidden_version = fixture.get(
Version,
identifier="hidden-version",
verbose_name="hidden-version",
slug="hidden-version",
privacy_level=constants.PUBLIC,
project=self.project,
active=True,
hidden=True,
)
# This also creates a Version `latest` Automatically for this project
translation = fixture.get(
Project,
main_language_project=self.project,
language="translation-es",
privacy_level=constants.PUBLIC,
)
translation.versions.update(privacy_level=constants.PUBLIC)
# sitemap hreflang should follow correct format.
# ref: https://en.wikipedia.org/wiki/Hreflang#Common_Mistakes
hreflang_test_translation_project = fixture.get(
Project,
main_language_project=self.project,
language="zh_CN",
privacy_level=constants.PUBLIC,
)
hreflang_test_translation_project.versions.update(
privacy_level=constants.PUBLIC,
)
response = self.client.get(
reverse("sitemap_xml"), headers={"host": "project.readthedocs.io"}
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response["Content-Type"], "application/xml")
for version in self.project.versions(manager=INTERNAL).filter(
privacy_level=constants.PUBLIC,
hidden=False,
):
self.assertContains(
response,
self.project.get_docs_url(
version_slug=version.slug,
lang_slug=self.project.language,
),
)
# PRIVATE version should not appear here
self.assertNotContains(
response,
self.project.get_docs_url(
version_slug=private_version.slug,
lang_slug=self.project.language,
),
)
# Hidden version should not appear here
self.assertNotContains(
response,
self.project.get_docs_url(
version_slug=hidden_version.slug,
lang_slug=self.project.language,
),
)
# The `translation` project doesn't have a version named `not-translated-version`
# so, the sitemap should not have a doc url for
# `not-translated-version` with `translation-es` language.
# ie: http://project.readthedocs.io/translation-es/not-translated-version/
self.assertNotContains(
response,
self.project.get_docs_url(
version_slug=not_translated_public_version.slug,
lang_slug=translation.language,
),
)
# hreflang should use hyphen instead of underscore
# in language and country value. (zh_CN should be zh-CN)
self.assertContains(response, "zh-CN")
# External Versions should not be in the sitemap_xml.
self.assertNotContains(
response,
self.project.get_docs_url(
version_slug=external_version.slug,
lang_slug=self.project.language,
),
)
# Check if STABLE version has 'priority of 1 and changefreq of weekly.
self.assertEqual(
response.context["versions"][0]["loc"],
self.project.get_docs_url(
version_slug=stable_version.slug,
lang_slug=self.project.language,
),
)
self.assertEqual(response.context["versions"][0]["priority"], 1)
self.assertEqual(response.context["versions"][0]["changefreq"], "weekly")
# Check if LATEST version has priority of 0.9 and changefreq of daily.
self.assertEqual(
response.context["versions"][1]["loc"],
self.project.get_docs_url(
version_slug="latest",
lang_slug=self.project.language,
),
)
self.assertEqual(response.context["versions"][1]["priority"], 0.9)
self.assertEqual(response.context["versions"][1]["changefreq"], "daily")
def test_sitemap_all_private_versions(self):
self.project.versions.update(
active=True, built=True, privacy_level=constants.PRIVATE
)
response = self.client.get(
reverse("sitemap_xml"), headers={"host": "project.readthedocs.io"}
)
self.assertEqual(response.status_code, 404)
@mock.patch(
"readthedocs.proxito.views.mixins.staticfiles_storage",
new=StaticFileSystemStorageTest(),
)
def test_serve_static_files(self):
resp = self.client.get(
reverse(
"proxito_static_files",
args=["javascript/readthedocs-doc-embed.js"],
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp.headers["x-accel-redirect"],
"/proxito-static/media/javascript/readthedocs-doc-embed.js",
)
self.assertEqual(
resp.headers["Cache-Tag"], "project,project:rtd-staticfiles,rtd-staticfiles"
)
@mock.patch(
"readthedocs.proxito.views.mixins.staticfiles_storage",
new=StaticFileSystemStorageTest(),
)
def test_serve_static_files_internal_nginx_redirect_always_appended(self):
"""Test for #11080."""
resp = self.client.get(
reverse(
"proxito_static_files",
args=["proxito-static/javascript/readthedocs-doc-embed.js"],
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp.headers["x-accel-redirect"],
"/proxito-static/media/proxito-static/javascript/readthedocs-doc-embed.js",
)
self.assertEqual(
resp.headers["Cache-Tag"], "project,project:rtd-staticfiles,rtd-staticfiles"
)
@mock.patch("readthedocs.proxito.views.mixins.staticfiles_storage")
def test_serve_invalid_static_file(self, staticfiles_storage):
staticfiles_storage.url.side_effect = Exception
paths = ["../", "foo/../bar"]
for path in paths:
resp = self.client.get(
reverse(
"proxito_static_files",
args=[path],
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(resp.status_code, 404)
def test_404_download(self):
response = self.client.get(
reverse(
"proxito_404_handler",
kwargs={"proxito_path": "/_/downloads/en/latest/pdf/"},
),
headers={"host": "project.readthedocs.io"},
)
self.assertEqual(response.status_code, 404)
@override_settings(
ALLOW_PRIVATE_REPOS=True,
PUBLIC_DOMAIN="dev.readthedocs.io",
PUBLIC_DOMAIN_USES_HTTPS=True,
RTD_DEFAULT_FEATURES=dict([RTDProductFeature(type=TYPE_CNAME, value=2).to_item()]),
)
# We are overriding the storage class instead of using RTD_BUILD_MEDIA_STORAGE,
# since the setting is evaluated just once (first test to use the storage
# backend will set it for the whole test suite).
@mock.patch(
"readthedocs.proxito.views.mixins.staticfiles_storage",
new=StaticFileSystemStorageTest(),
)
|
TestAdditionalDocViews
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typedDict13.py
|
{
"start": 904,
"end": 1026
}
|
class ____(ParentE, total=False):
# This should generate an error because "x" is Required in the parent.
x: int
|
ChildE
|
python
|
ray-project__ray
|
rllib/algorithms/dqn/dqn.py
|
{
"start": 24984,
"end": 37031
}
|
class ____(Algorithm):
@classmethod
@override(Algorithm)
def get_default_config(cls) -> DQNConfig:
return DQNConfig()
@classmethod
@override(Algorithm)
def get_default_policy_class(
cls, config: AlgorithmConfig
) -> Optional[Type[Policy]]:
if config["framework"] == "torch":
return DQNTorchPolicy
else:
return DQNTFPolicy
@override(Algorithm)
def setup(self, config: AlgorithmConfig) -> None:
super().setup(config)
if self.config.enable_env_runner_and_connector_v2 and self.env_runner_group:
if self.env_runner is None:
self._module_is_stateful = self.env_runner_group.foreach_env_runner(
lambda er: er.module.is_stateful(),
remote_worker_ids=[1],
local_env_runner=False,
)[0]
else:
self._module_is_stateful = self.env_runner.module.is_stateful()
@override(Algorithm)
def training_step(self) -> None:
"""DQN training iteration function.
Each training iteration, we:
- Sample (MultiAgentBatch) from workers.
- Store new samples in replay buffer.
- Sample training batch (MultiAgentBatch) from replay buffer.
- Learn on training batch.
- Update remote workers' new policy weights.
- Update target network every `target_network_update_freq` sample steps.
- Return all collected metrics for the iteration.
Returns:
The results dict from executing the training iteration.
"""
# Old API stack (Policy, RolloutWorker, Connector).
if not self.config.enable_env_runner_and_connector_v2:
return self._training_step_old_api_stack()
# New API stack (RLModule, Learner, EnvRunner, ConnectorV2).
return self._training_step_new_api_stack()
def _training_step_new_api_stack(self):
# Alternate between storing and sampling and training.
store_weight, sample_and_train_weight = calculate_rr_weights(self.config)
# Run multiple sampling + storing to buffer iterations.
for _ in range(store_weight):
with self.metrics.log_time((TIMERS, ENV_RUNNER_SAMPLING_TIMER)):
# Sample in parallel from workers.
episodes, env_runner_results = synchronous_parallel_sample(
worker_set=self.env_runner_group,
concat=True,
sample_timeout_s=self.config.sample_timeout_s,
_uses_new_env_runners=True,
_return_metrics=True,
)
# Reduce EnvRunner metrics over the n EnvRunners.
self.metrics.aggregate(env_runner_results, key=ENV_RUNNER_RESULTS)
# Add the sampled experiences to the replay buffer.
with self.metrics.log_time((TIMERS, REPLAY_BUFFER_ADD_DATA_TIMER)):
self.local_replay_buffer.add(episodes)
if self.config.count_steps_by == "agent_steps":
current_ts = sum(
self.metrics.peek(
(ENV_RUNNER_RESULTS, NUM_AGENT_STEPS_SAMPLED_LIFETIME), default={}
).values()
)
else:
current_ts = self.metrics.peek(
(ENV_RUNNER_RESULTS, NUM_ENV_STEPS_SAMPLED_LIFETIME), default=0
)
# If enough experiences have been sampled start training.
if current_ts >= self.config.num_steps_sampled_before_learning_starts:
# Run multiple sample-from-buffer and update iterations.
for _ in range(sample_and_train_weight):
# Sample a list of episodes used for learning from the replay buffer.
with self.metrics.log_time((TIMERS, REPLAY_BUFFER_SAMPLE_TIMER)):
episodes = self.local_replay_buffer.sample(
num_items=self.config.total_train_batch_size,
n_step=self.config.n_step,
# In case an `EpisodeReplayBuffer` is used we need to provide
# the sequence length.
batch_length_T=(
self._module_is_stateful
* self.config.model_config.get("max_seq_len", 0)
),
lookback=int(self._module_is_stateful),
# TODO (simon): Implement `burn_in_len` in SAC and remove this
# if-else clause.
min_batch_length_T=self.config.burn_in_len
if hasattr(self.config, "burn_in_len")
else 0,
gamma=self.config.gamma,
beta=self.config.replay_buffer_config.get("beta"),
sample_episodes=True,
)
# Get the replay buffer metrics.
replay_buffer_results = self.local_replay_buffer.get_metrics()
self.metrics.aggregate(
[replay_buffer_results], key=REPLAY_BUFFER_RESULTS
)
# Perform an update on the buffer-sampled train batch.
with self.metrics.log_time((TIMERS, LEARNER_UPDATE_TIMER)):
learner_results = self.learner_group.update(
episodes=episodes,
timesteps={
NUM_ENV_STEPS_SAMPLED_LIFETIME: (
self.metrics.peek(
(ENV_RUNNER_RESULTS, NUM_ENV_STEPS_SAMPLED_LIFETIME)
)
),
NUM_AGENT_STEPS_SAMPLED_LIFETIME: (
self.metrics.peek(
(
ENV_RUNNER_RESULTS,
NUM_AGENT_STEPS_SAMPLED_LIFETIME,
)
)
),
},
)
# Isolate TD-errors from result dicts (we should not log these to
# disk or WandB, they might be very large).
td_errors = defaultdict(list)
for res in learner_results:
for module_id, module_results in res.items():
if TD_ERROR_KEY in module_results:
td_errors[module_id].extend(
convert_to_numpy(
module_results.pop(TD_ERROR_KEY).peek()
)
)
td_errors = {
module_id: {TD_ERROR_KEY: np.concatenate(s, axis=0)}
for module_id, s in td_errors.items()
}
self.metrics.aggregate(learner_results, key=LEARNER_RESULTS)
# Update replay buffer priorities.
with self.metrics.log_time((TIMERS, REPLAY_BUFFER_UPDATE_PRIOS_TIMER)):
update_priorities_in_episode_replay_buffer(
replay_buffer=self.local_replay_buffer,
td_errors=td_errors,
)
# Update weights and global_vars - after learning on the local worker -
# on all remote workers.
with self.metrics.log_time((TIMERS, SYNCH_WORKER_WEIGHTS_TIMER)):
modules_to_update = set(learner_results[0].keys()) - {ALL_MODULES}
# NOTE: the new API stack does not use global vars.
self.env_runner_group.sync_weights(
from_worker_or_learner_group=self.learner_group,
policies=modules_to_update,
global_vars=None,
inference_only=True,
)
def _training_step_old_api_stack(self) -> ResultDict:
"""Training step for the old API stack.
More specifically this training step relies on `RolloutWorker`.
"""
train_results = {}
# We alternate between storing new samples and sampling and training
store_weight, sample_and_train_weight = calculate_rr_weights(self.config)
for _ in range(store_weight):
# Sample (MultiAgentBatch) from workers.
with self._timers[SAMPLE_TIMER]:
new_sample_batch: SampleBatchType = synchronous_parallel_sample(
worker_set=self.env_runner_group,
concat=True,
sample_timeout_s=self.config.sample_timeout_s,
)
# Return early if all our workers failed.
if not new_sample_batch:
return {}
# Update counters
self._counters[NUM_AGENT_STEPS_SAMPLED] += new_sample_batch.agent_steps()
self._counters[NUM_ENV_STEPS_SAMPLED] += new_sample_batch.env_steps()
# Store new samples in replay buffer.
self.local_replay_buffer.add(new_sample_batch)
global_vars = {
"timestep": self._counters[NUM_ENV_STEPS_SAMPLED],
}
# Update target network every `target_network_update_freq` sample steps.
cur_ts = self._counters[
(
NUM_AGENT_STEPS_SAMPLED
if self.config.count_steps_by == "agent_steps"
else NUM_ENV_STEPS_SAMPLED
)
]
if cur_ts > self.config.num_steps_sampled_before_learning_starts:
for _ in range(sample_and_train_weight):
# Sample training batch (MultiAgentBatch) from replay buffer.
train_batch = sample_min_n_steps_from_buffer(
self.local_replay_buffer,
self.config.total_train_batch_size,
count_by_agent_steps=self.config.count_steps_by == "agent_steps",
)
# Postprocess batch before we learn on it
post_fn = self.config.get("before_learn_on_batch") or (lambda b, *a: b)
train_batch = post_fn(train_batch, self.env_runner_group, self.config)
# Learn on training batch.
# Use simple optimizer (only for multi-agent or tf-eager; all other
# cases should use the multi-GPU optimizer, even if only using 1 GPU)
if self.config.get("simple_optimizer") is True:
train_results = train_one_step(self, train_batch)
else:
train_results = multi_gpu_train_one_step(self, train_batch)
# Update replay buffer priorities.
update_priorities_in_replay_buffer(
self.local_replay_buffer,
self.config,
train_batch,
train_results,
)
last_update = self._counters[LAST_TARGET_UPDATE_TS]
if cur_ts - last_update >= self.config.target_network_update_freq:
to_update = self.env_runner.get_policies_to_train()
self.env_runner.foreach_policy_to_train(
lambda p, pid, to_update=to_update: (
pid in to_update and p.update_target()
)
)
self._counters[NUM_TARGET_UPDATES] += 1
self._counters[LAST_TARGET_UPDATE_TS] = cur_ts
# Update weights and global_vars - after learning on the local worker -
# on all remote workers.
with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]:
self.env_runner_group.sync_weights(global_vars=global_vars)
# Return all collected metrics for the iteration.
return train_results
|
DQN
|