diff --git a/lib/python3.10/site-packages/grpc/aio/_call.py b/lib/python3.10/site-packages/grpc/aio/_call.py new file mode 100644 index 0000000000000000000000000000000000000000..24f2090651a8afb5564d8dc01995ec93470f953a --- /dev/null +++ b/lib/python3.10/site-packages/grpc/aio/_call.py @@ -0,0 +1,764 @@ +# Copyright 2019 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Invocation-side implementation of gRPC Asyncio Python.""" + +import asyncio +import enum +from functools import partial +import inspect +import logging +import traceback +from typing import ( + Any, + AsyncIterator, + Generator, + Generic, + Optional, + Tuple, + Union, +) + +import grpc +from grpc import _common +from grpc._cython import cygrpc + +from . import _base_call +from ._metadata import Metadata +from ._typing import DeserializingFunction +from ._typing import DoneCallbackType +from ._typing import EOFType +from ._typing import MetadatumType +from ._typing import RequestIterableType +from ._typing import RequestType +from ._typing import ResponseType +from ._typing import SerializingFunction + +__all__ = "AioRpcError", "Call", "UnaryUnaryCall", "UnaryStreamCall" + +_LOCAL_CANCELLATION_DETAILS = "Locally cancelled by application!" +_GC_CANCELLATION_DETAILS = "Cancelled upon garbage collection!" +_RPC_ALREADY_FINISHED_DETAILS = "RPC already finished." +_RPC_HALF_CLOSED_DETAILS = 'RPC is half closed after calling "done_writing".' +_API_STYLE_ERROR = ( + "The iterator and read/write APIs may not be mixed on a single RPC." +) + +_OK_CALL_REPRESENTATION = ( + '<{} of RPC that terminated with:\n\tstatus = {}\n\tdetails = "{}"\n>' +) + +_NON_OK_CALL_REPRESENTATION = ( + "<{} of RPC that terminated with:\n" + "\tstatus = {}\n" + '\tdetails = "{}"\n' + '\tdebug_error_string = "{}"\n' + ">" +) + +_LOGGER = logging.getLogger(__name__) + + +class AioRpcError(grpc.RpcError): + """An implementation of RpcError to be used by the asynchronous API. + + Raised RpcError is a snapshot of the final status of the RPC, values are + determined. Hence, its methods no longer needs to be coroutines. + """ + + _code: grpc.StatusCode + _details: Optional[str] + _initial_metadata: Optional[Metadata] + _trailing_metadata: Optional[Metadata] + _debug_error_string: Optional[str] + + def __init__( + self, + code: grpc.StatusCode, + initial_metadata: Metadata, + trailing_metadata: Metadata, + details: Optional[str] = None, + debug_error_string: Optional[str] = None, + ) -> None: + """Constructor. + + Args: + code: The status code with which the RPC has been finalized. + details: Optional details explaining the reason of the error. + initial_metadata: Optional initial metadata that could be sent by the + Server. + trailing_metadata: Optional metadata that could be sent by the Server. 
+ """ + + super().__init__() + self._code = code + self._details = details + self._initial_metadata = initial_metadata + self._trailing_metadata = trailing_metadata + self._debug_error_string = debug_error_string + + def code(self) -> grpc.StatusCode: + """Accesses the status code sent by the server. + + Returns: + The `grpc.StatusCode` status code. + """ + return self._code + + def details(self) -> Optional[str]: + """Accesses the details sent by the server. + + Returns: + The description of the error. + """ + return self._details + + def initial_metadata(self) -> Metadata: + """Accesses the initial metadata sent by the server. + + Returns: + The initial metadata received. + """ + return self._initial_metadata + + def trailing_metadata(self) -> Metadata: + """Accesses the trailing metadata sent by the server. + + Returns: + The trailing metadata received. + """ + return self._trailing_metadata + + def debug_error_string(self) -> str: + """Accesses the debug error string sent by the server. + + Returns: + The debug error string received. + """ + return self._debug_error_string + + def _repr(self) -> str: + """Assembles the error string for the RPC error.""" + return _NON_OK_CALL_REPRESENTATION.format( + self.__class__.__name__, + self._code, + self._details, + self._debug_error_string, + ) + + def __repr__(self) -> str: + return self._repr() + + def __str__(self) -> str: + return self._repr() + + def __reduce__(self): + return ( + type(self), + ( + self._code, + self._initial_metadata, + self._trailing_metadata, + self._details, + self._debug_error_string, + ), + ) + + +def _create_rpc_error( + initial_metadata: Metadata, status: cygrpc.AioRpcStatus +) -> AioRpcError: + return AioRpcError( + _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[status.code()], + Metadata.from_tuple(initial_metadata), + Metadata.from_tuple(status.trailing_metadata()), + details=status.details(), + debug_error_string=status.debug_error_string(), + ) + + +class Call: + """Base implementation of client RPC Call object. + + Implements logic around final status, metadata and cancellation. + """ + + _loop: asyncio.AbstractEventLoop + _code: grpc.StatusCode + _cython_call: cygrpc._AioCall + _metadata: Tuple[MetadatumType, ...] 
+ _request_serializer: SerializingFunction + _response_deserializer: DeserializingFunction + + def __init__( + self, + cython_call: cygrpc._AioCall, + metadata: Metadata, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + self._loop = loop + self._cython_call = cython_call + self._metadata = tuple(metadata) + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + + def __del__(self) -> None: + # The '_cython_call' object might be destructed before Call object + if hasattr(self, "_cython_call"): + if not self._cython_call.done(): + self._cancel(_GC_CANCELLATION_DETAILS) + + def cancelled(self) -> bool: + return self._cython_call.cancelled() + + def _cancel(self, details: str) -> bool: + """Forwards the application cancellation reasoning.""" + if not self._cython_call.done(): + self._cython_call.cancel(details) + return True + else: + return False + + def cancel(self) -> bool: + return self._cancel(_LOCAL_CANCELLATION_DETAILS) + + def done(self) -> bool: + return self._cython_call.done() + + def add_done_callback(self, callback: DoneCallbackType) -> None: + cb = partial(callback, self) + self._cython_call.add_done_callback(cb) + + def time_remaining(self) -> Optional[float]: + return self._cython_call.time_remaining() + + async def initial_metadata(self) -> Metadata: + raw_metadata_tuple = await self._cython_call.initial_metadata() + return Metadata.from_tuple(raw_metadata_tuple) + + async def trailing_metadata(self) -> Metadata: + raw_metadata_tuple = ( + await self._cython_call.status() + ).trailing_metadata() + return Metadata.from_tuple(raw_metadata_tuple) + + async def code(self) -> grpc.StatusCode: + cygrpc_code = (await self._cython_call.status()).code() + return _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE[cygrpc_code] + + async def details(self) -> str: + return (await self._cython_call.status()).details() + + async def debug_error_string(self) -> str: + return (await self._cython_call.status()).debug_error_string() + + async def _raise_for_status(self) -> None: + if self._cython_call.is_locally_cancelled(): + raise asyncio.CancelledError() + code = await self.code() + if code != grpc.StatusCode.OK: + raise _create_rpc_error( + await self.initial_metadata(), await self._cython_call.status() + ) + + def _repr(self) -> str: + return repr(self._cython_call) + + def __repr__(self) -> str: + return self._repr() + + def __str__(self) -> str: + return self._repr() + + +class _APIStyle(enum.IntEnum): + UNKNOWN = 0 + ASYNC_GENERATOR = 1 + READER_WRITER = 2 + + +class _UnaryResponseMixin(Call, Generic[ResponseType]): + _call_response: asyncio.Task + + def _init_unary_response_mixin(self, response_task: asyncio.Task): + self._call_response = response_task + + def cancel(self) -> bool: + if super().cancel(): + self._call_response.cancel() + return True + else: + return False + + def __await__(self) -> Generator[Any, None, ResponseType]: + """Wait till the ongoing RPC request finishes.""" + try: + response = yield from self._call_response + except asyncio.CancelledError: + # Even if we caught all other CancelledError, there is still + # this corner case. If the application cancels immediately after + # the Call object is created, we will observe this + # `CancelledError`. + if not self.cancelled(): + self.cancel() + raise + + # NOTE(lidiz) If we raise RpcError in the task, and users doesn't + # 'await' on it. AsyncIO will log 'Task exception was never retrieved'. 
+ # Instead, if we move the exception raising here, the spam stops. + # Unfortunately, there can only be one 'yield from' in '__await__'. So, + # we need to access the private instance variable. + if response is cygrpc.EOF: + if self._cython_call.is_locally_cancelled(): + raise asyncio.CancelledError() + else: + raise _create_rpc_error( + self._cython_call._initial_metadata, + self._cython_call._status, + ) + else: + return response + + +class _StreamResponseMixin(Call): + _message_aiter: AsyncIterator[ResponseType] + _preparation: asyncio.Task + _response_style: _APIStyle + + def _init_stream_response_mixin(self, preparation: asyncio.Task): + self._message_aiter = None + self._preparation = preparation + self._response_style = _APIStyle.UNKNOWN + + def _update_response_style(self, style: _APIStyle): + if self._response_style is _APIStyle.UNKNOWN: + self._response_style = style + elif self._response_style is not style: + raise cygrpc.UsageError(_API_STYLE_ERROR) + + def cancel(self) -> bool: + if super().cancel(): + self._preparation.cancel() + return True + else: + return False + + async def _fetch_stream_responses(self) -> ResponseType: + message = await self._read() + while message is not cygrpc.EOF: + yield message + message = await self._read() + + # If the read operation failed, Core should explain why. + await self._raise_for_status() + + def __aiter__(self) -> AsyncIterator[ResponseType]: + self._update_response_style(_APIStyle.ASYNC_GENERATOR) + if self._message_aiter is None: + self._message_aiter = self._fetch_stream_responses() + return self._message_aiter + + async def _read(self) -> ResponseType: + # Wait for the request being sent + await self._preparation + + # Reads response message from Core + try: + raw_response = await self._cython_call.receive_serialized_message() + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + raise + + if raw_response is cygrpc.EOF: + return cygrpc.EOF + else: + return _common.deserialize( + raw_response, self._response_deserializer + ) + + async def read(self) -> Union[EOFType, ResponseType]: + if self.done(): + await self._raise_for_status() + return cygrpc.EOF + self._update_response_style(_APIStyle.READER_WRITER) + + response_message = await self._read() + + if response_message is cygrpc.EOF: + # If the read operation failed, Core should explain why. + await self._raise_for_status() + return response_message + + +class _StreamRequestMixin(Call): + _metadata_sent: asyncio.Event + _done_writing_flag: bool + _async_request_poller: Optional[asyncio.Task] + _request_style: _APIStyle + + def _init_stream_request_mixin( + self, request_iterator: Optional[RequestIterableType] + ): + self._metadata_sent = asyncio.Event() + self._done_writing_flag = False + + # If user passes in an async iterator, create a consumer Task. 
+ if request_iterator is not None: + self._async_request_poller = self._loop.create_task( + self._consume_request_iterator(request_iterator) + ) + self._request_style = _APIStyle.ASYNC_GENERATOR + else: + self._async_request_poller = None + self._request_style = _APIStyle.READER_WRITER + + def _raise_for_different_style(self, style: _APIStyle): + if self._request_style is not style: + raise cygrpc.UsageError(_API_STYLE_ERROR) + + def cancel(self) -> bool: + if super().cancel(): + if self._async_request_poller is not None: + self._async_request_poller.cancel() + return True + else: + return False + + def _metadata_sent_observer(self): + self._metadata_sent.set() + + async def _consume_request_iterator( + self, request_iterator: RequestIterableType + ) -> None: + try: + if inspect.isasyncgen(request_iterator) or hasattr( + request_iterator, "__aiter__" + ): + async for request in request_iterator: + try: + await self._write(request) + except AioRpcError as rpc_error: + _LOGGER.debug( + ( + "Exception while consuming the" + " request_iterator: %s" + ), + rpc_error, + ) + return + else: + for request in request_iterator: + try: + await self._write(request) + except AioRpcError as rpc_error: + _LOGGER.debug( + ( + "Exception while consuming the" + " request_iterator: %s" + ), + rpc_error, + ) + return + + await self._done_writing() + except: # pylint: disable=bare-except + # Client iterators can raise exceptions, which we should handle by + # cancelling the RPC and logging the client's error. No exceptions + # should escape this function. + _LOGGER.debug( + "Client request_iterator raised exception:\n%s", + traceback.format_exc(), + ) + self.cancel() + + async def _write(self, request: RequestType) -> None: + if self.done(): + raise asyncio.InvalidStateError(_RPC_ALREADY_FINISHED_DETAILS) + if self._done_writing_flag: + raise asyncio.InvalidStateError(_RPC_HALF_CLOSED_DETAILS) + if not self._metadata_sent.is_set(): + await self._metadata_sent.wait() + if self.done(): + await self._raise_for_status() + + serialized_request = _common.serialize( + request, self._request_serializer + ) + try: + await self._cython_call.send_serialized_message(serialized_request) + except cygrpc.InternalError as err: + self._cython_call.set_internal_error(str(err)) + await self._raise_for_status() + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + raise + + async def _done_writing(self) -> None: + if self.done(): + # If the RPC is finished, do nothing. + return + if not self._done_writing_flag: + # If the done writing is not sent before, try to send it. + self._done_writing_flag = True + try: + await self._cython_call.send_receive_close() + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + raise + + async def write(self, request: RequestType) -> None: + self._raise_for_different_style(_APIStyle.READER_WRITER) + await self._write(request) + + async def done_writing(self) -> None: + """Signal peer that client is done writing. + + This method is idempotent. + """ + self._raise_for_different_style(_APIStyle.READER_WRITER) + await self._done_writing() + + async def wait_for_connection(self) -> None: + await self._metadata_sent.wait() + if self.done(): + await self._raise_for_status() + + +class UnaryUnaryCall(_UnaryResponseMixin, Call, _base_call.UnaryUnaryCall): + """Object for managing unary-unary RPC calls. + + Returned when an instance of `UnaryUnaryMultiCallable` object is called. 
+ """ + + _request: RequestType + _invocation_task: asyncio.Task + + # pylint: disable=too-many-arguments + def __init__( + self, + request: RequestType, + deadline: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + super().__init__( + channel.call(method, deadline, credentials, wait_for_ready), + metadata, + request_serializer, + response_deserializer, + loop, + ) + self._request = request + self._context = cygrpc.build_census_context() + self._invocation_task = loop.create_task(self._invoke()) + self._init_unary_response_mixin(self._invocation_task) + + async def _invoke(self) -> ResponseType: + serialized_request = _common.serialize( + self._request, self._request_serializer + ) + + # NOTE(lidiz) asyncio.CancelledError is not a good transport for status, + # because the asyncio.Task class do not cache the exception object. + # https://github.com/python/cpython/blob/edad4d89e357c92f70c0324b937845d652b20afd/Lib/asyncio/tasks.py#L785 + try: + serialized_response = await self._cython_call.unary_unary( + serialized_request, self._metadata, self._context + ) + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + + if self._cython_call.is_ok(): + return _common.deserialize( + serialized_response, self._response_deserializer + ) + else: + return cygrpc.EOF + + async def wait_for_connection(self) -> None: + await self._invocation_task + if self.done(): + await self._raise_for_status() + + +class UnaryStreamCall(_StreamResponseMixin, Call, _base_call.UnaryStreamCall): + """Object for managing unary-stream RPC calls. + + Returned when an instance of `UnaryStreamMultiCallable` object is called. + """ + + _request: RequestType + _send_unary_request_task: asyncio.Task + + # pylint: disable=too-many-arguments + def __init__( + self, + request: RequestType, + deadline: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + super().__init__( + channel.call(method, deadline, credentials, wait_for_ready), + metadata, + request_serializer, + response_deserializer, + loop, + ) + self._request = request + self._context = cygrpc.build_census_context() + self._send_unary_request_task = loop.create_task( + self._send_unary_request() + ) + self._init_stream_response_mixin(self._send_unary_request_task) + + async def _send_unary_request(self) -> ResponseType: + serialized_request = _common.serialize( + self._request, self._request_serializer + ) + try: + await self._cython_call.initiate_unary_stream( + serialized_request, self._metadata, self._context + ) + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + raise + + async def wait_for_connection(self) -> None: + await self._send_unary_request_task + if self.done(): + await self._raise_for_status() + + +# pylint: disable=too-many-ancestors +class StreamUnaryCall( + _StreamRequestMixin, _UnaryResponseMixin, Call, _base_call.StreamUnaryCall +): + """Object for managing stream-unary RPC calls. + + Returned when an instance of `StreamUnaryMultiCallable` object is called. 
+ """ + + # pylint: disable=too-many-arguments + def __init__( + self, + request_iterator: Optional[RequestIterableType], + deadline: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + super().__init__( + channel.call(method, deadline, credentials, wait_for_ready), + metadata, + request_serializer, + response_deserializer, + loop, + ) + + self._context = cygrpc.build_census_context() + self._init_stream_request_mixin(request_iterator) + self._init_unary_response_mixin(loop.create_task(self._conduct_rpc())) + + async def _conduct_rpc(self) -> ResponseType: + try: + serialized_response = await self._cython_call.stream_unary( + self._metadata, self._metadata_sent_observer, self._context + ) + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + raise + + if self._cython_call.is_ok(): + return _common.deserialize( + serialized_response, self._response_deserializer + ) + else: + return cygrpc.EOF + + +class StreamStreamCall( + _StreamRequestMixin, _StreamResponseMixin, Call, _base_call.StreamStreamCall +): + """Object for managing stream-stream RPC calls. + + Returned when an instance of `StreamStreamMultiCallable` object is called. + """ + + _initializer: asyncio.Task + + # pylint: disable=too-many-arguments + def __init__( + self, + request_iterator: Optional[RequestIterableType], + deadline: Optional[float], + metadata: Metadata, + credentials: Optional[grpc.CallCredentials], + wait_for_ready: Optional[bool], + channel: cygrpc.AioChannel, + method: bytes, + request_serializer: SerializingFunction, + response_deserializer: DeserializingFunction, + loop: asyncio.AbstractEventLoop, + ) -> None: + super().__init__( + channel.call(method, deadline, credentials, wait_for_ready), + metadata, + request_serializer, + response_deserializer, + loop, + ) + self._context = cygrpc.build_census_context() + self._initializer = self._loop.create_task(self._prepare_rpc()) + self._init_stream_request_mixin(request_iterator) + self._init_stream_response_mixin(self._initializer) + + async def _prepare_rpc(self): + """This method prepares the RPC for receiving/sending messages. + + All other operations around the stream should only happen after the + completion of this method. + """ + try: + await self._cython_call.initiate_stream_stream( + self._metadata, self._metadata_sent_observer, self._context + ) + except asyncio.CancelledError: + if not self.cancelled(): + self.cancel() + # No need to raise RpcError here, because no one will `await` this task. diff --git a/lib/python3.10/site-packages/grpc/aio/_typing.py b/lib/python3.10/site-packages/grpc/aio/_typing.py new file mode 100644 index 0000000000000000000000000000000000000000..0bc32b22e6fe89a8183df9eeeb3f775a61ebadc4 --- /dev/null +++ b/lib/python3.10/site-packages/grpc/aio/_typing.py @@ -0,0 +1,43 @@ +# Copyright 2019 The gRPC Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Common types for gRPC Async API""" + +from typing import ( + Any, + AsyncIterable, + Callable, + Iterable, + Sequence, + Tuple, + TypeVar, + Union, +) + +from grpc._cython.cygrpc import EOF + +from ._metadata import Metadata +from ._metadata import MetadataKey +from ._metadata import MetadataValue + +RequestType = TypeVar("RequestType") +ResponseType = TypeVar("ResponseType") +SerializingFunction = Callable[[Any], bytes] +DeserializingFunction = Callable[[bytes], Any] +MetadatumType = Tuple[MetadataKey, MetadataValue] +MetadataType = Union[Metadata, Sequence[MetadatumType]] +ChannelArgumentType = Sequence[Tuple[str, Any]] +EOFType = type(EOF) +DoneCallbackType = Callable[[Any], None] +RequestIterableType = Union[Iterable[Any], AsyncIterable[Any]] +ResponseIterableType = AsyncIterable[Any] diff --git a/lib/python3.10/site-packages/grpc/beta/__init__.py b/lib/python3.10/site-packages/grpc/beta/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5fb4f3c3cfd5622f4067f3dd22eb49318855325a --- /dev/null +++ b/lib/python3.10/site-packages/grpc/beta/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/lib/python3.10/site-packages/grpc/beta/_client_adaptations.py b/lib/python3.10/site-packages/grpc/beta/_client_adaptations.py new file mode 100644 index 0000000000000000000000000000000000000000..012149212a23817e824df9453950a1721f41ed7b --- /dev/null +++ b/lib/python3.10/site-packages/grpc/beta/_client_adaptations.py @@ -0,0 +1,1015 @@ +# Copyright 2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Translates gRPC's client-side API into gRPC's client-side Beta API.""" + +import grpc +from grpc import _common +from grpc.beta import _metadata +from grpc.beta import interfaces +from grpc.framework.common import cardinality +from grpc.framework.foundation import future +from grpc.framework.interfaces.face import face + +# pylint: disable=too-many-arguments,too-many-locals,unused-argument + +_STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS = { + grpc.StatusCode.CANCELLED: ( + face.Abortion.Kind.CANCELLED, + face.CancellationError, + ), + grpc.StatusCode.UNKNOWN: ( + face.Abortion.Kind.REMOTE_FAILURE, + face.RemoteError, + ), + grpc.StatusCode.DEADLINE_EXCEEDED: ( + face.Abortion.Kind.EXPIRED, + face.ExpirationError, + ), + grpc.StatusCode.UNIMPLEMENTED: ( + face.Abortion.Kind.LOCAL_FAILURE, + face.LocalError, + ), +} + + +def _effective_metadata(metadata, metadata_transformer): + non_none_metadata = () if metadata is None else metadata + if metadata_transformer is None: + return non_none_metadata + else: + return metadata_transformer(non_none_metadata) + + +def _credentials(grpc_call_options): + return None if grpc_call_options is None else grpc_call_options.credentials + + +def _abortion(rpc_error_call): + code = rpc_error_call.code() + pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code) + error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0] + return face.Abortion( + error_kind, + rpc_error_call.initial_metadata(), + rpc_error_call.trailing_metadata(), + code, + rpc_error_call.details(), + ) + + +def _abortion_error(rpc_error_call): + code = rpc_error_call.code() + pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code) + exception_class = face.AbortionError if pair is None else pair[1] + return exception_class( + rpc_error_call.initial_metadata(), + rpc_error_call.trailing_metadata(), + code, + rpc_error_call.details(), + ) + + +class _InvocationProtocolContext(interfaces.GRPCInvocationContext): + def disable_next_request_compression(self): + pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement. 
+ + +class _Rendezvous(future.Future, face.Call): + def __init__(self, response_future, response_iterator, call): + self._future = response_future + self._iterator = response_iterator + self._call = call + + def cancel(self): + return self._call.cancel() + + def cancelled(self): + return self._future.cancelled() + + def running(self): + return self._future.running() + + def done(self): + return self._future.done() + + def result(self, timeout=None): + try: + return self._future.result(timeout=timeout) + except grpc.RpcError as rpc_error_call: + raise _abortion_error(rpc_error_call) + except grpc.FutureTimeoutError: + raise future.TimeoutError() + except grpc.FutureCancelledError: + raise future.CancelledError() + + def exception(self, timeout=None): + try: + rpc_error_call = self._future.exception(timeout=timeout) + if rpc_error_call is None: + return None + else: + return _abortion_error(rpc_error_call) + except grpc.FutureTimeoutError: + raise future.TimeoutError() + except grpc.FutureCancelledError: + raise future.CancelledError() + + def traceback(self, timeout=None): + try: + return self._future.traceback(timeout=timeout) + except grpc.FutureTimeoutError: + raise future.TimeoutError() + except grpc.FutureCancelledError: + raise future.CancelledError() + + def add_done_callback(self, fn): + self._future.add_done_callback(lambda ignored_callback: fn(self)) + + def __iter__(self): + return self + + def _next(self): + try: + return next(self._iterator) + except grpc.RpcError as rpc_error_call: + raise _abortion_error(rpc_error_call) + + def __next__(self): + return self._next() + + def next(self): + return self._next() + + def is_active(self): + return self._call.is_active() + + def time_remaining(self): + return self._call.time_remaining() + + def add_abortion_callback(self, abortion_callback): + def done_callback(): + if self.code() is not grpc.StatusCode.OK: + abortion_callback(_abortion(self._call)) + + registered = self._call.add_callback(done_callback) + return None if registered else done_callback() + + def protocol_context(self): + return _InvocationProtocolContext() + + def initial_metadata(self): + return _metadata.beta(self._call.initial_metadata()) + + def terminal_metadata(self): + return _metadata.beta(self._call.terminal_metadata()) + + def code(self): + return self._call.code() + + def details(self): + return self._call.details() + + +def _blocking_unary_unary( + channel, + group, + method, + timeout, + with_call, + protocol_options, + metadata, + metadata_transformer, + request, + request_serializer, + response_deserializer, +): + try: + multi_callable = channel.unary_unary( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + if with_call: + response, call = multi_callable.with_call( + request, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return response, _Rendezvous(None, None, call) + else: + return multi_callable( + request, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + except grpc.RpcError as rpc_error_call: + raise _abortion_error(rpc_error_call) + + +def _future_unary_unary( + channel, + group, + method, + timeout, + protocol_options, + metadata, + metadata_transformer, + request, + request_serializer, + response_deserializer, +): + multi_callable = 
channel.unary_unary( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + response_future = multi_callable.future( + request, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return _Rendezvous(response_future, None, response_future) + + +def _unary_stream( + channel, + group, + method, + timeout, + protocol_options, + metadata, + metadata_transformer, + request, + request_serializer, + response_deserializer, +): + multi_callable = channel.unary_stream( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + response_iterator = multi_callable( + request, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return _Rendezvous(None, response_iterator, response_iterator) + + +def _blocking_stream_unary( + channel, + group, + method, + timeout, + with_call, + protocol_options, + metadata, + metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, +): + try: + multi_callable = channel.stream_unary( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + if with_call: + response, call = multi_callable.with_call( + request_iterator, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return response, _Rendezvous(None, None, call) + else: + return multi_callable( + request_iterator, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + except grpc.RpcError as rpc_error_call: + raise _abortion_error(rpc_error_call) + + +def _future_stream_unary( + channel, + group, + method, + timeout, + protocol_options, + metadata, + metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, +): + multi_callable = channel.stream_unary( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + response_future = multi_callable.future( + request_iterator, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return _Rendezvous(response_future, None, response_future) + + +def _stream_stream( + channel, + group, + method, + timeout, + protocol_options, + metadata, + metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, +): + multi_callable = channel.stream_stream( + _common.fully_qualified_method(group, method), + request_serializer=request_serializer, + response_deserializer=response_deserializer, + ) + effective_metadata = _effective_metadata(metadata, metadata_transformer) + response_iterator = multi_callable( + request_iterator, + timeout=timeout, + metadata=_metadata.unbeta(effective_metadata), + credentials=_credentials(protocol_options), + ) + return _Rendezvous(None, response_iterator, response_iterator) + + +class 
_UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable): + def __init__( + self, + channel, + group, + method, + metadata_transformer, + request_serializer, + response_deserializer, + ): + self._channel = channel + self._group = group + self._method = method + self._metadata_transformer = metadata_transformer + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + + def __call__( + self, + request, + timeout, + metadata=None, + with_call=False, + protocol_options=None, + ): + return _blocking_unary_unary( + self._channel, + self._group, + self._method, + timeout, + with_call, + protocol_options, + metadata, + self._metadata_transformer, + request, + self._request_serializer, + self._response_deserializer, + ) + + def future(self, request, timeout, metadata=None, protocol_options=None): + return _future_unary_unary( + self._channel, + self._group, + self._method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request, + self._request_serializer, + self._response_deserializer, + ) + + def event( + self, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + +class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable): + def __init__( + self, + channel, + group, + method, + metadata_transformer, + request_serializer, + response_deserializer, + ): + self._channel = channel + self._group = group + self._method = method + self._metadata_transformer = metadata_transformer + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + + def __call__(self, request, timeout, metadata=None, protocol_options=None): + return _unary_stream( + self._channel, + self._group, + self._method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request, + self._request_serializer, + self._response_deserializer, + ) + + def event( + self, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + +class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable): + def __init__( + self, + channel, + group, + method, + metadata_transformer, + request_serializer, + response_deserializer, + ): + self._channel = channel + self._group = group + self._method = method + self._metadata_transformer = metadata_transformer + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + + def __call__( + self, + request_iterator, + timeout, + metadata=None, + with_call=False, + protocol_options=None, + ): + return _blocking_stream_unary( + self._channel, + self._group, + self._method, + timeout, + with_call, + protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + self._request_serializer, + self._response_deserializer, + ) + + def future( + self, request_iterator, timeout, metadata=None, protocol_options=None + ): + return _future_stream_unary( + self._channel, + self._group, + self._method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + self._request_serializer, + self._response_deserializer, + ) + + def event( + self, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + +class _StreamStreamMultiCallable(face.StreamStreamMultiCallable): + def __init__( + self, + channel, + group, + method, + metadata_transformer, + request_serializer, + 
response_deserializer, + ): + self._channel = channel + self._group = group + self._method = method + self._metadata_transformer = metadata_transformer + self._request_serializer = request_serializer + self._response_deserializer = response_deserializer + + def __call__( + self, request_iterator, timeout, metadata=None, protocol_options=None + ): + return _stream_stream( + self._channel, + self._group, + self._method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + self._request_serializer, + self._response_deserializer, + ) + + def event( + self, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + +class _GenericStub(face.GenericStub): + def __init__( + self, + channel, + metadata_transformer, + request_serializers, + response_deserializers, + ): + self._channel = channel + self._metadata_transformer = metadata_transformer + self._request_serializers = request_serializers or {} + self._response_deserializers = response_deserializers or {} + + def blocking_unary_unary( + self, + group, + method, + request, + timeout, + metadata=None, + with_call=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _blocking_unary_unary( + self._channel, + group, + method, + timeout, + with_call, + protocol_options, + metadata, + self._metadata_transformer, + request, + request_serializer, + response_deserializer, + ) + + def future_unary_unary( + self, + group, + method, + request, + timeout, + metadata=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _future_unary_unary( + self._channel, + group, + method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request, + request_serializer, + response_deserializer, + ) + + def inline_unary_stream( + self, + group, + method, + request, + timeout, + metadata=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _unary_stream( + self._channel, + group, + method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request, + request_serializer, + response_deserializer, + ) + + def blocking_stream_unary( + self, + group, + method, + request_iterator, + timeout, + metadata=None, + with_call=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _blocking_stream_unary( + self._channel, + group, + method, + timeout, + with_call, + protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, + ) + + def future_stream_unary( + self, + group, + method, + request_iterator, + timeout, + metadata=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _future_stream_unary( + self._channel, + group, + method, + timeout, + 
protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, + ) + + def inline_stream_stream( + self, + group, + method, + request_iterator, + timeout, + metadata=None, + protocol_options=None, + ): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _stream_stream( + self._channel, + group, + method, + timeout, + protocol_options, + metadata, + self._metadata_transformer, + request_iterator, + request_serializer, + response_deserializer, + ) + + def event_unary_unary( + self, + group, + method, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + def event_unary_stream( + self, + group, + method, + request, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + def event_stream_unary( + self, + group, + method, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + def event_stream_stream( + self, + group, + method, + receiver, + abortion_callback, + timeout, + metadata=None, + protocol_options=None, + ): + raise NotImplementedError() + + def unary_unary(self, group, method): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _UnaryUnaryMultiCallable( + self._channel, + group, + method, + self._metadata_transformer, + request_serializer, + response_deserializer, + ) + + def unary_stream(self, group, method): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _UnaryStreamMultiCallable( + self._channel, + group, + method, + self._metadata_transformer, + request_serializer, + response_deserializer, + ) + + def stream_unary(self, group, method): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _StreamUnaryMultiCallable( + self._channel, + group, + method, + self._metadata_transformer, + request_serializer, + response_deserializer, + ) + + def stream_stream(self, group, method): + request_serializer = self._request_serializers.get( + ( + group, + method, + ) + ) + response_deserializer = self._response_deserializers.get( + ( + group, + method, + ) + ) + return _StreamStreamMultiCallable( + self._channel, + group, + method, + self._metadata_transformer, + request_serializer, + response_deserializer, + ) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return False + + +class _DynamicStub(face.DynamicStub): + def __init__(self, backing_generic_stub, group, cardinalities): + self._generic_stub = backing_generic_stub + self._group = group + self._cardinalities = cardinalities + + def __getattr__(self, attr): + method_cardinality = self._cardinalities.get(attr) + if method_cardinality is cardinality.Cardinality.UNARY_UNARY: + return self._generic_stub.unary_unary(self._group, attr) + elif method_cardinality is cardinality.Cardinality.UNARY_STREAM: + return self._generic_stub.unary_stream(self._group, attr) + elif method_cardinality is 
cardinality.Cardinality.STREAM_UNARY: + return self._generic_stub.stream_unary(self._group, attr) + elif method_cardinality is cardinality.Cardinality.STREAM_STREAM: + return self._generic_stub.stream_stream(self._group, attr) + else: + raise AttributeError( + '_DynamicStub object has no attribute "%s"!' % attr + ) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return False + + +def generic_stub( + channel, + host, + metadata_transformer, + request_serializers, + response_deserializers, +): + return _GenericStub( + channel, + metadata_transformer, + request_serializers, + response_deserializers, + ) + + +def dynamic_stub( + channel, + service, + cardinalities, + host, + metadata_transformer, + request_serializers, + response_deserializers, +): + return _DynamicStub( + _GenericStub( + channel, + metadata_transformer, + request_serializers, + response_deserializers, + ), + service, + cardinalities, + ) diff --git a/lib/python3.10/site-packages/grpc/beta/_metadata.py b/lib/python3.10/site-packages/grpc/beta/_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..301010878d67f4a87cc09e051a70e058dd4f34e4 --- /dev/null +++ b/lib/python3.10/site-packages/grpc/beta/_metadata.py @@ -0,0 +1,56 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""API metadata conversion utilities.""" + +import collections + +_Metadatum = collections.namedtuple( + "_Metadatum", + ( + "key", + "value", + ), +) + + +def _beta_metadatum(key, value): + beta_key = key if isinstance(key, (bytes,)) else key.encode("ascii") + beta_value = value if isinstance(value, (bytes,)) else value.encode("ascii") + return _Metadatum(beta_key, beta_value) + + +def _metadatum(beta_key, beta_value): + key = beta_key if isinstance(beta_key, (str,)) else beta_key.decode("utf8") + if isinstance(beta_value, (str,)) or key[-4:] == "-bin": + value = beta_value + else: + value = beta_value.decode("utf8") + return _Metadatum(key, value) + + +def beta(metadata): + if metadata is None: + return () + else: + return tuple(_beta_metadatum(key, value) for key, value in metadata) + + +def unbeta(beta_metadata): + if beta_metadata is None: + return () + else: + return tuple( + _metadatum(beta_key, beta_value) + for beta_key, beta_value in beta_metadata + ) diff --git a/lib/python3.10/site-packages/grpc/beta/_server_adaptations.py b/lib/python3.10/site-packages/grpc/beta/_server_adaptations.py new file mode 100644 index 0000000000000000000000000000000000000000..a6f730bb29bf6d18efd446cb8e6a453015c00e67 --- /dev/null +++ b/lib/python3.10/site-packages/grpc/beta/_server_adaptations.py @@ -0,0 +1,465 @@ +# Copyright 2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Translates gRPC's server-side API into gRPC's server-side Beta API.""" + +import collections +import threading + +import grpc +from grpc import _common +from grpc.beta import _metadata +from grpc.beta import interfaces +from grpc.framework.common import cardinality +from grpc.framework.common import style +from grpc.framework.foundation import abandonment +from grpc.framework.foundation import logging_pool +from grpc.framework.foundation import stream +from grpc.framework.interfaces.face import face + +# pylint: disable=too-many-return-statements + +_DEFAULT_POOL_SIZE = 8 + + +class _ServerProtocolContext(interfaces.GRPCServicerContext): + def __init__(self, servicer_context): + self._servicer_context = servicer_context + + def peer(self): + return self._servicer_context.peer() + + def disable_next_response_compression(self): + pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement. + + +class _FaceServicerContext(face.ServicerContext): + def __init__(self, servicer_context): + self._servicer_context = servicer_context + + def is_active(self): + return self._servicer_context.is_active() + + def time_remaining(self): + return self._servicer_context.time_remaining() + + def add_abortion_callback(self, abortion_callback): + raise NotImplementedError( + "add_abortion_callback no longer supported server-side!" + ) + + def cancel(self): + self._servicer_context.cancel() + + def protocol_context(self): + return _ServerProtocolContext(self._servicer_context) + + def invocation_metadata(self): + return _metadata.beta(self._servicer_context.invocation_metadata()) + + def initial_metadata(self, initial_metadata): + self._servicer_context.send_initial_metadata( + _metadata.unbeta(initial_metadata) + ) + + def terminal_metadata(self, terminal_metadata): + self._servicer_context.set_terminal_metadata( + _metadata.unbeta(terminal_metadata) + ) + + def code(self, code): + self._servicer_context.set_code(code) + + def details(self, details): + self._servicer_context.set_details(details) + + +def _adapt_unary_request_inline(unary_request_inline): + def adaptation(request, servicer_context): + return unary_request_inline( + request, _FaceServicerContext(servicer_context) + ) + + return adaptation + + +def _adapt_stream_request_inline(stream_request_inline): + def adaptation(request_iterator, servicer_context): + return stream_request_inline( + request_iterator, _FaceServicerContext(servicer_context) + ) + + return adaptation + + +class _Callback(stream.Consumer): + def __init__(self): + self._condition = threading.Condition() + self._values = [] + self._terminated = False + self._cancelled = False + + def consume(self, value): + with self._condition: + self._values.append(value) + self._condition.notify_all() + + def terminate(self): + with self._condition: + self._terminated = True + self._condition.notify_all() + + def consume_and_terminate(self, value): + with self._condition: + self._values.append(value) + self._terminated = True + self._condition.notify_all() + + def cancel(self): + with self._condition: + self._cancelled = True + self._condition.notify_all() + + def 
draw_one_value(self): + with self._condition: + while True: + if self._cancelled: + raise abandonment.Abandoned() + elif self._values: + return self._values.pop(0) + elif self._terminated: + return None + else: + self._condition.wait() + + def draw_all_values(self): + with self._condition: + while True: + if self._cancelled: + raise abandonment.Abandoned() + elif self._terminated: + all_values = tuple(self._values) + self._values = None + return all_values + else: + self._condition.wait() + + +def _run_request_pipe_thread( + request_iterator, request_consumer, servicer_context +): + thread_joined = threading.Event() + + def pipe_requests(): + for request in request_iterator: + if not servicer_context.is_active() or thread_joined.is_set(): + return + request_consumer.consume(request) + if not servicer_context.is_active() or thread_joined.is_set(): + return + request_consumer.terminate() + + request_pipe_thread = threading.Thread(target=pipe_requests) + request_pipe_thread.daemon = True + request_pipe_thread.start() + + +def _adapt_unary_unary_event(unary_unary_event): + def adaptation(request, servicer_context): + callback = _Callback() + if not servicer_context.add_callback(callback.cancel): + raise abandonment.Abandoned() + unary_unary_event( + request, + callback.consume_and_terminate, + _FaceServicerContext(servicer_context), + ) + return callback.draw_all_values()[0] + + return adaptation + + +def _adapt_unary_stream_event(unary_stream_event): + def adaptation(request, servicer_context): + callback = _Callback() + if not servicer_context.add_callback(callback.cancel): + raise abandonment.Abandoned() + unary_stream_event( + request, callback, _FaceServicerContext(servicer_context) + ) + while True: + response = callback.draw_one_value() + if response is None: + return + else: + yield response + + return adaptation + + +def _adapt_stream_unary_event(stream_unary_event): + def adaptation(request_iterator, servicer_context): + callback = _Callback() + if not servicer_context.add_callback(callback.cancel): + raise abandonment.Abandoned() + request_consumer = stream_unary_event( + callback.consume_and_terminate, + _FaceServicerContext(servicer_context), + ) + _run_request_pipe_thread( + request_iterator, request_consumer, servicer_context + ) + return callback.draw_all_values()[0] + + return adaptation + + +def _adapt_stream_stream_event(stream_stream_event): + def adaptation(request_iterator, servicer_context): + callback = _Callback() + if not servicer_context.add_callback(callback.cancel): + raise abandonment.Abandoned() + request_consumer = stream_stream_event( + callback, _FaceServicerContext(servicer_context) + ) + _run_request_pipe_thread( + request_iterator, request_consumer, servicer_context + ) + while True: + response = callback.draw_one_value() + if response is None: + return + else: + yield response + + return adaptation + + +class _SimpleMethodHandler( + collections.namedtuple( + "_MethodHandler", + ( + "request_streaming", + "response_streaming", + "request_deserializer", + "response_serializer", + "unary_unary", + "unary_stream", + "stream_unary", + "stream_stream", + ), + ), + grpc.RpcMethodHandler, +): + pass + + +def _simple_method_handler( + implementation, request_deserializer, response_serializer +): + if implementation.style is style.Service.INLINE: + if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY: + return _SimpleMethodHandler( + False, + False, + request_deserializer, + response_serializer, + 
_adapt_unary_request_inline(implementation.unary_unary_inline), + None, + None, + None, + ) + elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM: + return _SimpleMethodHandler( + False, + True, + request_deserializer, + response_serializer, + None, + _adapt_unary_request_inline(implementation.unary_stream_inline), + None, + None, + ) + elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY: + return _SimpleMethodHandler( + True, + False, + request_deserializer, + response_serializer, + None, + None, + _adapt_stream_request_inline( + implementation.stream_unary_inline + ), + None, + ) + elif ( + implementation.cardinality is cardinality.Cardinality.STREAM_STREAM + ): + return _SimpleMethodHandler( + True, + True, + request_deserializer, + response_serializer, + None, + None, + None, + _adapt_stream_request_inline( + implementation.stream_stream_inline + ), + ) + elif implementation.style is style.Service.EVENT: + if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY: + return _SimpleMethodHandler( + False, + False, + request_deserializer, + response_serializer, + _adapt_unary_unary_event(implementation.unary_unary_event), + None, + None, + None, + ) + elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM: + return _SimpleMethodHandler( + False, + True, + request_deserializer, + response_serializer, + None, + _adapt_unary_stream_event(implementation.unary_stream_event), + None, + None, + ) + elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY: + return _SimpleMethodHandler( + True, + False, + request_deserializer, + response_serializer, + None, + None, + _adapt_stream_unary_event(implementation.stream_unary_event), + None, + ) + elif ( + implementation.cardinality is cardinality.Cardinality.STREAM_STREAM + ): + return _SimpleMethodHandler( + True, + True, + request_deserializer, + response_serializer, + None, + None, + None, + _adapt_stream_stream_event(implementation.stream_stream_event), + ) + raise ValueError() + + +def _flatten_method_pair_map(method_pair_map): + method_pair_map = method_pair_map or {} + flat_map = {} + for method_pair in method_pair_map: + method = _common.fully_qualified_method(method_pair[0], method_pair[1]) + flat_map[method] = method_pair_map[method_pair] + return flat_map + + +class _GenericRpcHandler(grpc.GenericRpcHandler): + def __init__( + self, + method_implementations, + multi_method_implementation, + request_deserializers, + response_serializers, + ): + self._method_implementations = _flatten_method_pair_map( + method_implementations + ) + self._request_deserializers = _flatten_method_pair_map( + request_deserializers + ) + self._response_serializers = _flatten_method_pair_map( + response_serializers + ) + self._multi_method_implementation = multi_method_implementation + + def service(self, handler_call_details): + method_implementation = self._method_implementations.get( + handler_call_details.method + ) + if method_implementation is not None: + return _simple_method_handler( + method_implementation, + self._request_deserializers.get(handler_call_details.method), + self._response_serializers.get(handler_call_details.method), + ) + elif self._multi_method_implementation is None: + return None + else: + try: + return None # TODO(nathaniel): call the multimethod. 
+ except face.NoSuchMethodError: + return None + + +class _Server(interfaces.Server): + def __init__(self, grpc_server): + self._grpc_server = grpc_server + + def add_insecure_port(self, address): + return self._grpc_server.add_insecure_port(address) + + def add_secure_port(self, address, server_credentials): + return self._grpc_server.add_secure_port(address, server_credentials) + + def start(self): + self._grpc_server.start() + + def stop(self, grace): + return self._grpc_server.stop(grace) + + def __enter__(self): + self._grpc_server.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self._grpc_server.stop(None) + return False + + +def server( + service_implementations, + multi_method_implementation, + request_deserializers, + response_serializers, + thread_pool, + thread_pool_size, +): + generic_rpc_handler = _GenericRpcHandler( + service_implementations, + multi_method_implementation, + request_deserializers, + response_serializers, + ) + if thread_pool is None: + effective_thread_pool = logging_pool.pool( + _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size + ) + else: + effective_thread_pool = thread_pool + return _Server( + grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,)) + ) diff --git a/lib/python3.10/site-packages/grpc/beta/implementations.py b/lib/python3.10/site-packages/grpc/beta/implementations.py new file mode 100644 index 0000000000000000000000000000000000000000..ffa4f0d4bfe00a8cd743afbaf99cea97077680ac --- /dev/null +++ b/lib/python3.10/site-packages/grpc/beta/implementations.py @@ -0,0 +1,345 @@ +# Copyright 2015-2016 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Entry points into the Beta API of gRPC Python.""" + +# threading is referenced from specification in this module. +import threading # pylint: disable=unused-import + +# interfaces, cardinality, and face are referenced from specification in this +# module. +import grpc +from grpc import _auth +from grpc.beta import _client_adaptations +from grpc.beta import _metadata +from grpc.beta import _server_adaptations +from grpc.beta import interfaces # pylint: disable=unused-import +from grpc.framework.common import cardinality # pylint: disable=unused-import +from grpc.framework.interfaces.face import face # pylint: disable=unused-import + +# pylint: disable=too-many-arguments + +ChannelCredentials = grpc.ChannelCredentials +ssl_channel_credentials = grpc.ssl_channel_credentials +CallCredentials = grpc.CallCredentials + + +def metadata_call_credentials(metadata_plugin, name=None): + def plugin(context, callback): + def wrapped_callback(beta_metadata, error): + callback(_metadata.unbeta(beta_metadata), error) + + metadata_plugin(context, wrapped_callback) + + return grpc.metadata_call_credentials(plugin, name=name) + + +def google_call_credentials(credentials): + """Construct CallCredentials from GoogleCredentials. + + Args: + credentials: A GoogleCredentials object from the oauth2client library. 
+ + Returns: + A CallCredentials object for use in a GRPCCallOptions object. + """ + return metadata_call_credentials(_auth.GoogleCallCredentials(credentials)) + + +access_token_call_credentials = grpc.access_token_call_credentials +composite_call_credentials = grpc.composite_call_credentials +composite_channel_credentials = grpc.composite_channel_credentials + + +class Channel(object): + """A channel to a remote host through which RPCs may be conducted. + + Only the "subscribe" and "unsubscribe" methods are supported for application + use. This class' instance constructor and all other attributes are + unsupported. + """ + + def __init__(self, channel): + self._channel = channel + + def subscribe(self, callback, try_to_connect=None): + """Subscribes to this Channel's connectivity. + + Args: + callback: A callable to be invoked and passed an + interfaces.ChannelConnectivity identifying this Channel's connectivity. + The callable will be invoked immediately upon subscription and again for + every change to this Channel's connectivity thereafter until it is + unsubscribed. + try_to_connect: A boolean indicating whether or not this Channel should + attempt to connect if it is not already connected and ready to conduct + RPCs. + """ + self._channel.subscribe(callback, try_to_connect=try_to_connect) + + def unsubscribe(self, callback): + """Unsubscribes a callback from this Channel's connectivity. + + Args: + callback: A callable previously registered with this Channel from having + been passed to its "subscribe" method. + """ + self._channel.unsubscribe(callback) + + +def insecure_channel(host, port): + """Creates an insecure Channel to a remote host. + + Args: + host: The name of the remote host to which to connect. + port: The port of the remote host to which to connect. + If None only the 'host' part will be used. + + Returns: + A Channel to the remote host through which RPCs may be conducted. + """ + channel = grpc.insecure_channel( + host if port is None else "%s:%d" % (host, port) + ) + return Channel(channel) + + +def secure_channel(host, port, channel_credentials): + """Creates a secure Channel to a remote host. + + Args: + host: The name of the remote host to which to connect. + port: The port of the remote host to which to connect. + If None only the 'host' part will be used. + channel_credentials: A ChannelCredentials. + + Returns: + A secure Channel to the remote host through which RPCs may be conducted. + """ + channel = grpc.secure_channel( + host if port is None else "%s:%d" % (host, port), channel_credentials + ) + return Channel(channel) + + +class StubOptions(object): + """A value encapsulating the various options for creation of a Stub. + + This class and its instances have no supported interface - it exists to define + the type of its instances and its instances exist to be passed to other + functions. 
+ """ + + def __init__( + self, + host, + request_serializers, + response_deserializers, + metadata_transformer, + thread_pool, + thread_pool_size, + ): + self.host = host + self.request_serializers = request_serializers + self.response_deserializers = response_deserializers + self.metadata_transformer = metadata_transformer + self.thread_pool = thread_pool + self.thread_pool_size = thread_pool_size + + +_EMPTY_STUB_OPTIONS = StubOptions(None, None, None, None, None, None) + + +def stub_options( + host=None, + request_serializers=None, + response_deserializers=None, + metadata_transformer=None, + thread_pool=None, + thread_pool_size=None, +): + """Creates a StubOptions value to be passed at stub creation. + + All parameters are optional and should always be passed by keyword. + + Args: + host: A host string to set on RPC calls. + request_serializers: A dictionary from service name-method name pair to + request serialization behavior. + response_deserializers: A dictionary from service name-method name pair to + response deserialization behavior. + metadata_transformer: A callable that given a metadata object produces + another metadata object to be used in the underlying communication on the + wire. + thread_pool: A thread pool to use in stubs. + thread_pool_size: The size of thread pool to create for use in stubs; + ignored if thread_pool has been passed. + + Returns: + A StubOptions value created from the passed parameters. + """ + return StubOptions( + host, + request_serializers, + response_deserializers, + metadata_transformer, + thread_pool, + thread_pool_size, + ) + + +def generic_stub(channel, options=None): + """Creates a face.GenericStub on which RPCs can be made. + + Args: + channel: A Channel for use by the created stub. + options: A StubOptions customizing the created stub. + + Returns: + A face.GenericStub on which RPCs can be made. + """ + effective_options = _EMPTY_STUB_OPTIONS if options is None else options + return _client_adaptations.generic_stub( + channel._channel, # pylint: disable=protected-access + effective_options.host, + effective_options.metadata_transformer, + effective_options.request_serializers, + effective_options.response_deserializers, + ) + + +def dynamic_stub(channel, service, cardinalities, options=None): + """Creates a face.DynamicStub with which RPCs can be invoked. + + Args: + channel: A Channel for the returned face.DynamicStub to use. + service: The package-qualified full name of the service. + cardinalities: A dictionary from RPC method name to cardinality.Cardinality + value identifying the cardinality of the RPC method. + options: An optional StubOptions value further customizing the functionality + of the returned face.DynamicStub. + + Returns: + A face.DynamicStub with which RPCs can be invoked. + """ + effective_options = _EMPTY_STUB_OPTIONS if options is None else options + return _client_adaptations.dynamic_stub( + channel._channel, # pylint: disable=protected-access + service, + cardinalities, + effective_options.host, + effective_options.metadata_transformer, + effective_options.request_serializers, + effective_options.response_deserializers, + ) + + +ServerCredentials = grpc.ServerCredentials +ssl_server_credentials = grpc.ssl_server_credentials + + +class ServerOptions(object): + """A value encapsulating the various options for creation of a Server. + + This class and its instances have no supported interface - it exists to define + the type of its instances and its instances exist to be passed to other + functions. 
+ """ + + def __init__( + self, + multi_method_implementation, + request_deserializers, + response_serializers, + thread_pool, + thread_pool_size, + default_timeout, + maximum_timeout, + ): + self.multi_method_implementation = multi_method_implementation + self.request_deserializers = request_deserializers + self.response_serializers = response_serializers + self.thread_pool = thread_pool + self.thread_pool_size = thread_pool_size + self.default_timeout = default_timeout + self.maximum_timeout = maximum_timeout + + +_EMPTY_SERVER_OPTIONS = ServerOptions(None, None, None, None, None, None, None) + + +def server_options( + multi_method_implementation=None, + request_deserializers=None, + response_serializers=None, + thread_pool=None, + thread_pool_size=None, + default_timeout=None, + maximum_timeout=None, +): + """Creates a ServerOptions value to be passed at server creation. + + All parameters are optional and should always be passed by keyword. + + Args: + multi_method_implementation: A face.MultiMethodImplementation to be called + to service an RPC if the server has no specific method implementation for + the name of the RPC for which service was requested. + request_deserializers: A dictionary from service name-method name pair to + request deserialization behavior. + response_serializers: A dictionary from service name-method name pair to + response serialization behavior. + thread_pool: A thread pool to use in stubs. + thread_pool_size: The size of thread pool to create for use in stubs; + ignored if thread_pool has been passed. + default_timeout: A duration in seconds to allow for RPC service when + servicing RPCs that did not include a timeout value when invoked. + maximum_timeout: A duration in seconds to allow for RPC service when + servicing RPCs no matter what timeout value was passed when the RPC was + invoked. + + Returns: + A StubOptions value created from the passed parameters. + """ + return ServerOptions( + multi_method_implementation, + request_deserializers, + response_serializers, + thread_pool, + thread_pool_size, + default_timeout, + maximum_timeout, + ) + + +def server(service_implementations, options=None): + """Creates an interfaces.Server with which RPCs can be serviced. + + Args: + service_implementations: A dictionary from service name-method name pair to + face.MethodImplementation. + options: An optional ServerOptions value further customizing the + functionality of the returned Server. + + Returns: + An interfaces.Server with which RPCs can be serviced. + """ + effective_options = _EMPTY_SERVER_OPTIONS if options is None else options + return _server_adaptations.server( + service_implementations, + effective_options.multi_method_implementation, + effective_options.request_deserializers, + effective_options.response_serializers, + effective_options.thread_pool, + effective_options.thread_pool_size, + ) diff --git a/lib/python3.10/site-packages/grpc/beta/interfaces.py b/lib/python3.10/site-packages/grpc/beta/interfaces.py new file mode 100644 index 0000000000000000000000000000000000000000..c29b2915854919b46691046f8135da53553e055b --- /dev/null +++ b/lib/python3.10/site-packages/grpc/beta/interfaces.py @@ -0,0 +1,163 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Constants and interfaces of the Beta API of gRPC Python.""" + +import abc + +import grpc + +ChannelConnectivity = grpc.ChannelConnectivity +# FATAL_FAILURE was a Beta-API name for SHUTDOWN +ChannelConnectivity.FATAL_FAILURE = ChannelConnectivity.SHUTDOWN + +StatusCode = grpc.StatusCode + + +class GRPCCallOptions(object): + """A value encapsulating gRPC-specific options passed on RPC invocation. + + This class and its instances have no supported interface - it exists to + define the type of its instances and its instances exist to be passed to + other functions. + """ + + def __init__(self, disable_compression, subcall_of, credentials): + self.disable_compression = disable_compression + self.subcall_of = subcall_of + self.credentials = credentials + + +def grpc_call_options(disable_compression=False, credentials=None): + """Creates a GRPCCallOptions value to be passed at RPC invocation. + + All parameters are optional and should always be passed by keyword. + + Args: + disable_compression: A boolean indicating whether or not compression should + be disabled for the request object of the RPC. Only valid for + request-unary RPCs. + credentials: A CallCredentials object to use for the invoked RPC. + """ + return GRPCCallOptions(disable_compression, None, credentials) + + +GRPCAuthMetadataContext = grpc.AuthMetadataContext +GRPCAuthMetadataPluginCallback = grpc.AuthMetadataPluginCallback +GRPCAuthMetadataPlugin = grpc.AuthMetadataPlugin + + +class GRPCServicerContext(abc.ABC): + """Exposes gRPC-specific options and behaviors to code servicing RPCs.""" + + @abc.abstractmethod + def peer(self): + """Identifies the peer that invoked the RPC being serviced. + + Returns: + A string identifying the peer that invoked the RPC being serviced. + """ + raise NotImplementedError() + + @abc.abstractmethod + def disable_next_response_compression(self): + """Disables compression of the next response passed by the application.""" + raise NotImplementedError() + + +class GRPCInvocationContext(abc.ABC): + """Exposes gRPC-specific options and behaviors to code invoking RPCs.""" + + @abc.abstractmethod + def disable_next_request_compression(self): + """Disables compression of the next request passed by the application.""" + raise NotImplementedError() + + +class Server(abc.ABC): + """Services RPCs.""" + + @abc.abstractmethod + def add_insecure_port(self, address): + """Reserves a port for insecure RPC service once this Server becomes active. + + This method may only be called before this Server's start method is + called. + + Args: + address: The address for which to open a port. + + Returns: + An integer port on which RPCs will be serviced after this link has been + started. This is typically the same number as the port number contained + in the passed address, but will likely be different if the port number + contained in the passed address was zero. + """ + raise NotImplementedError() + + @abc.abstractmethod + def add_secure_port(self, address, server_credentials): + """Reserves a port for secure RPC service after this Server becomes active.
+ + This method may only be called before this Server's start method is + called. + + Args: + address: The address for which to open a port. + server_credentials: A ServerCredentials. + + Returns: + An integer port on which RPCs will be serviced after this link has been + started. This is typically the same number as the port number contained + in the passed address, but will likely be different if the port number + contained in the passed address was zero. + """ + raise NotImplementedError() + + @abc.abstractmethod + def start(self): + """Starts this Server's service of RPCs. + + This method may only be called while the server is not serving RPCs (i.e. it + is not idempotent). + """ + raise NotImplementedError() + + @abc.abstractmethod + def stop(self, grace): + """Stops this Server's service of RPCs. + + All calls to this method immediately stop service of new RPCs. When existing + RPCs are aborted is controlled by the grace period parameter passed to this + method. + + This method may be called at any time and is idempotent. Passing a smaller + grace value than has been passed in a previous call will have the effect of + stopping the Server sooner. Passing a larger grace value than has been + passed in a previous call will not have the effect of stopping the server + later. + + Args: + grace: A duration of time in seconds to allow existing RPCs to complete + before being aborted by this Server's stopping. May be zero for + immediate abortion of all in-progress RPCs. + + Returns: + A threading.Event that will be set when this Server has completely + stopped. The returned event may not be set until after the full grace + period (if some ongoing RPC continues for the full length of the period) + or it may be set much sooner (such as if this Server had no RPCs underway + at the time it was stopped or if all RPCs that it had underway completed + very early in the grace period). + """ + raise NotImplementedError() diff --git a/lib/python3.10/site-packages/grpc/beta/utilities.py b/lib/python3.10/site-packages/grpc/beta/utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..90e54715cff72002ef4cfdce61867e0c3098e3d0 --- /dev/null +++ b/lib/python3.10/site-packages/grpc/beta/utilities.py @@ -0,0 +1,153 @@ +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities for the gRPC Python Beta API.""" + +import threading +import time + +# implementations is referenced from specification in this module. +from grpc.beta import implementations # pylint: disable=unused-import +from grpc.beta import interfaces +from grpc.framework.foundation import callable_util +from grpc.framework.foundation import future + +_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = ( + 'Exception calling connectivity future "done" callback!'
+) + + +class _ChannelReadyFuture(future.Future): + def __init__(self, channel): + self._condition = threading.Condition() + self._channel = channel + + self._matured = False + self._cancelled = False + self._done_callbacks = [] + + def _block(self, timeout): + until = None if timeout is None else time.time() + timeout + with self._condition: + while True: + if self._cancelled: + raise future.CancelledError() + elif self._matured: + return + else: + if until is None: + self._condition.wait() + else: + remaining = until - time.time() + if remaining < 0: + raise future.TimeoutError() + else: + self._condition.wait(timeout=remaining) + + def _update(self, connectivity): + with self._condition: + if ( + not self._cancelled + and connectivity is interfaces.ChannelConnectivity.READY + ): + self._matured = True + self._channel.unsubscribe(self._update) + self._condition.notify_all() + done_callbacks = tuple(self._done_callbacks) + self._done_callbacks = None + else: + return + + for done_callback in done_callbacks: + callable_util.call_logging_exceptions( + done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self + ) + + def cancel(self): + with self._condition: + if not self._matured: + self._cancelled = True + self._channel.unsubscribe(self._update) + self._condition.notify_all() + done_callbacks = tuple(self._done_callbacks) + self._done_callbacks = None + else: + return False + + for done_callback in done_callbacks: + callable_util.call_logging_exceptions( + done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self + ) + + return True + + def cancelled(self): + with self._condition: + return self._cancelled + + def running(self): + with self._condition: + return not self._cancelled and not self._matured + + def done(self): + with self._condition: + return self._cancelled or self._matured + + def result(self, timeout=None): + self._block(timeout) + return None + + def exception(self, timeout=None): + self._block(timeout) + return None + + def traceback(self, timeout=None): + self._block(timeout) + return None + + def add_done_callback(self, fn): + with self._condition: + if not self._cancelled and not self._matured: + self._done_callbacks.append(fn) + return + + fn(self) + + def start(self): + with self._condition: + self._channel.subscribe(self._update, try_to_connect=True) + + def __del__(self): + with self._condition: + if not self._cancelled and not self._matured: + self._channel.unsubscribe(self._update) + + +def channel_ready_future(channel): + """Creates a future.Future tracking when an implementations.Channel is ready. + + Cancelling the returned future.Future does not tell the given + implementations.Channel to abandon attempts it may have been making to + connect; cancelling merely deactivates the return future.Future's + subscription to the given implementations.Channel's connectivity. + + Args: + channel: An implementations.Channel. + + Returns: + A future.Future that matures when the given Channel has connectivity + interfaces.ChannelConnectivity.READY. 
+ """ + ready_future = _ChannelReadyFuture(channel) + ready_future.start() + return ready_future diff --git a/lib/python3.10/site-packages/nltk-3.8.1.dist-info/entry_points.txt b/lib/python3.10/site-packages/nltk-3.8.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..65a3a33374385d4199dda32bb7a3284f52653e66 --- /dev/null +++ b/lib/python3.10/site-packages/nltk-3.8.1.dist-info/entry_points.txt @@ -0,0 +1,3 @@ + +[console_scripts] +nltk=nltk.cli:cli diff --git a/lib/python3.10/site-packages/nltk/VERSION b/lib/python3.10/site-packages/nltk/VERSION new file mode 100644 index 0000000000000000000000000000000000000000..0603aab1e29c286f7779a50891582571b616825c --- /dev/null +++ b/lib/python3.10/site-packages/nltk/VERSION @@ -0,0 +1 @@ +3.8.1 diff --git a/lib/python3.10/site-packages/nltk/__init__.py b/lib/python3.10/site-packages/nltk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b87cf230510581745ced457e373a7ecc7c3c9006 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/__init__.py @@ -0,0 +1,209 @@ +# Natural Language Toolkit (NLTK) +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +The Natural Language Toolkit (NLTK) is an open source Python library +for Natural Language Processing. A free online book is available. +(If you use the library for academic research, please cite the book.) + +Steven Bird, Ewan Klein, and Edward Loper (2009). +Natural Language Processing with Python. O'Reilly Media Inc. +https://www.nltk.org/book/ + +isort:skip_file +""" + +import os + +# ////////////////////////////////////////////////////// +# Metadata +# ////////////////////////////////////////////////////// + +# Version. For each new release, the version number should be updated +# in the file VERSION. +try: + # If a VERSION file exists, use it! + version_file = os.path.join(os.path.dirname(__file__), "VERSION") + with open(version_file) as infile: + __version__ = infile.read().strip() +except NameError: + __version__ = "unknown (running code interactively?)" +except OSError as ex: + __version__ = "unknown (%s)" % ex + +if __doc__ is not None: # fix for the ``python -OO`` + __doc__ += "\n@version: " + __version__ + + +# Copyright notice +__copyright__ = """\ +Copyright (C) 2001-2023 NLTK Project. + +Distributed and Licensed under the Apache License, Version 2.0, +which is included by reference. +""" + +__license__ = "Apache License, Version 2.0" +# Description of the toolkit, keywords, and the project's primary URL. +__longdescr__ = """\ +The Natural Language Toolkit (NLTK) is a Python package for +natural language processing. NLTK requires Python 3.7, 3.8, 3.9, 3.10 or 3.11.""" +__keywords__ = [ + "NLP", + "CL", + "natural language processing", + "computational linguistics", + "parsing", + "tagging", + "tokenizing", + "syntax", + "linguistics", + "language", + "natural language", + "text analytics", +] +__url__ = "https://www.nltk.org/" + +# Maintainer, contributors, etc. +__maintainer__ = "NLTK Team" +__maintainer_email__ = "nltk.team@gmail.com" +__author__ = __maintainer__ +__author_email__ = __maintainer_email__ + +# "Trove" classifiers for Python Package Index. 
+__classifiers__ = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Information Technology", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Human Machine Interfaces", + "Topic :: Scientific/Engineering :: Information Analysis", + "Topic :: Text Processing", + "Topic :: Text Processing :: Filters", + "Topic :: Text Processing :: General", + "Topic :: Text Processing :: Indexing", + "Topic :: Text Processing :: Linguistic", +] + +from nltk.internals import config_java + +# support numpy from pypy +try: + import numpypy +except ImportError: + pass + +# Override missing methods on environments where it cannot be used like GAE. +import subprocess + +if not hasattr(subprocess, "PIPE"): + + def _fake_PIPE(*args, **kwargs): + raise NotImplementedError("subprocess.PIPE is not supported.") + + subprocess.PIPE = _fake_PIPE +if not hasattr(subprocess, "Popen"): + + def _fake_Popen(*args, **kwargs): + raise NotImplementedError("subprocess.Popen is not supported.") + + subprocess.Popen = _fake_Popen + +########################################################### +# TOP-LEVEL MODULES +########################################################### + +# Import top-level functionality into top-level namespace + +from nltk.collocations import * +from nltk.decorators import decorator, memoize +from nltk.featstruct import * +from nltk.grammar import * +from nltk.probability import * +from nltk.text import * +from nltk.util import * +from nltk.jsontags import * + +########################################################### +# PACKAGES +########################################################### + +from nltk.chunk import * +from nltk.classify import * +from nltk.inference import * +from nltk.metrics import * +from nltk.parse import * +from nltk.tag import * +from nltk.tokenize import * +from nltk.translate import * +from nltk.tree import * +from nltk.sem import * +from nltk.stem import * + +# Packages which can be lazily imported +# (a) we don't import * +# (b) they're slow to import or have run-time dependencies +# that can safely fail at run time + +from nltk import lazyimport + +app = lazyimport.LazyModule("app", locals(), globals()) +chat = lazyimport.LazyModule("chat", locals(), globals()) +corpus = lazyimport.LazyModule("corpus", locals(), globals()) +draw = lazyimport.LazyModule("draw", locals(), globals()) +toolbox = lazyimport.LazyModule("toolbox", locals(), globals()) + +# Optional loading + +try: + import numpy +except ImportError: + pass +else: + from nltk import cluster + +from nltk.downloader import download, download_shell + +try: + import tkinter +except ImportError: + pass +else: + try: + from nltk.downloader import download_gui + except RuntimeError as e: + import warnings + + warnings.warn( + "Corpus downloader GUI not loaded " + "(RuntimeError during import: %s)" % str(e) + ) + +# explicitly import all top-level modules (ensuring +# they override the same names inadvertently imported +# from a subpackage) + +from nltk import ccg, chunk, classify, collocations 
+from nltk import data, featstruct, grammar, help, inference, metrics +from nltk import misc, parse, probability, sem, stem, wsd +from nltk import tag, tbl, text, tokenize, translate, tree, util + + +# FIXME: override any accidentally imported demo, see https://github.com/nltk/nltk/issues/2116 +def demo(): + print("To run the demo code for a module, type nltk.module.demo()") diff --git a/lib/python3.10/site-packages/nltk/book.py b/lib/python3.10/site-packages/nltk/book.py new file mode 100644 index 0000000000000000000000000000000000000000..704f84d426fdf87b4233454c8ceb9915d7db3161 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/book.py @@ -0,0 +1,213 @@ +# Natural Language Toolkit: Some texts for exploration in chapter 1 of the book +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus import ( + genesis, + gutenberg, + inaugural, + nps_chat, + treebank, + webtext, + wordnet, +) +from nltk.probability import FreqDist +from nltk.text import Text +from nltk.util import bigrams + +print("*** Introductory Examples for the NLTK Book ***") +print("Loading text1, ..., text9 and sent1, ..., sent9") +print("Type the name of the text or sentence to view it.") +print("Type: 'texts()' or 'sents()' to list the materials.") + +text1 = Text(gutenberg.words("melville-moby_dick.txt")) +print("text1:", text1.name) + +text2 = Text(gutenberg.words("austen-sense.txt")) +print("text2:", text2.name) + +text3 = Text(genesis.words("english-kjv.txt"), name="The Book of Genesis") +print("text3:", text3.name) + +text4 = Text(inaugural.words(), name="Inaugural Address Corpus") +print("text4:", text4.name) + +text5 = Text(nps_chat.words(), name="Chat Corpus") +print("text5:", text5.name) + +text6 = Text(webtext.words("grail.txt"), name="Monty Python and the Holy Grail") +print("text6:", text6.name) + +text7 = Text(treebank.words(), name="Wall Street Journal") +print("text7:", text7.name) + +text8 = Text(webtext.words("singles.txt"), name="Personals Corpus") +print("text8:", text8.name) + +text9 = Text(gutenberg.words("chesterton-thursday.txt")) +print("text9:", text9.name) + + +def texts(): + print("text1:", text1.name) + print("text2:", text2.name) + print("text3:", text3.name) + print("text4:", text4.name) + print("text5:", text5.name) + print("text6:", text6.name) + print("text7:", text7.name) + print("text8:", text8.name) + print("text9:", text9.name) + + +sent1 = ["Call", "me", "Ishmael", "."] +sent2 = [ + "The", + "family", + "of", + "Dashwood", + "had", + "long", + "been", + "settled", + "in", + "Sussex", + ".", +] +sent3 = [ + "In", + "the", + "beginning", + "God", + "created", + "the", + "heaven", + "and", + "the", + "earth", + ".", +] +sent4 = [ + "Fellow", + "-", + "Citizens", + "of", + "the", + "Senate", + "and", + "of", + "the", + "House", + "of", + "Representatives", + ":", +] +sent5 = [ + "I", + "have", + "a", + "problem", + "with", + "people", + "PMing", + "me", + "to", + "lol", + "JOIN", +] +sent6 = [ + "SCENE", + "1", + ":", + "[", + "wind", + "]", + "[", + "clop", + "clop", + "clop", + "]", + "KING", + "ARTHUR", + ":", + "Whoa", + "there", + "!", +] +sent7 = [ + "Pierre", + "Vinken", + ",", + "61", + "years", + "old", + ",", + "will", + "join", + "the", + "board", + "as", + "a", + "nonexecutive", + "director", + "Nov.", + "29", + ".", +] +sent8 = [ + "25", + "SEXY", + "MALE", + ",", + "seeks", + "attrac", + "older", + "single", + "lady", + ",", + "for", + "discreet", + "encounters", + ".", +] +sent9 = [ + 
"THE", + "suburb", + "of", + "Saffron", + "Park", + "lay", + "on", + "the", + "sunset", + "side", + "of", + "London", + ",", + "as", + "red", + "and", + "ragged", + "as", + "a", + "cloud", + "of", + "sunset", + ".", +] + + +def sents(): + print("sent1:", " ".join(sent1)) + print("sent2:", " ".join(sent2)) + print("sent3:", " ".join(sent3)) + print("sent4:", " ".join(sent4)) + print("sent5:", " ".join(sent5)) + print("sent6:", " ".join(sent6)) + print("sent7:", " ".join(sent7)) + print("sent8:", " ".join(sent8)) + print("sent9:", " ".join(sent9)) diff --git a/lib/python3.10/site-packages/nltk/cli.py b/lib/python3.10/site-packages/nltk/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..1a36a14f49e6cce0a0655767eddc4d82894f36d6 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/cli.py @@ -0,0 +1,55 @@ +# Natural Language Toolkit: NLTK Command-Line Interface +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + + +import click +from tqdm import tqdm + +from nltk import word_tokenize +from nltk.util import parallelize_preprocess + +CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) + + +@click.group(context_settings=CONTEXT_SETTINGS) +@click.version_option() +def cli(): + pass + + +@cli.command("tokenize") +@click.option( + "--language", + "-l", + default="en", + help="The language for the Punkt sentence tokenization.", +) +@click.option( + "--preserve-line", + "-l", + default=True, + is_flag=True, + help="An option to keep the preserve the sentence and not sentence tokenize it.", +) +@click.option("--processes", "-j", default=1, help="No. of processes.") +@click.option("--encoding", "-e", default="utf8", help="Specify encoding of file.") +@click.option( + "--delimiter", "-d", default=" ", help="Specify delimiter to join the tokens." +) +def tokenize_file(language, preserve_line, processes, encoding, delimiter): + """This command tokenizes text stream using nltk.word_tokenize""" + with click.get_text_stream("stdin", encoding=encoding) as fin: + with click.get_text_stream("stdout", encoding=encoding) as fout: + # If it's single process, joblib parallelization is slower, + # so just process line by line normally. 
+ if processes == 1: + for line in tqdm(fin.readlines()): + print(delimiter.join(word_tokenize(line)), end="\n", file=fout) + else: + for outline in parallelize_preprocess( + word_tokenize, fin.readlines(), processes, progress_bar=True + ): + print(delimiter.join(outline), end="\n", file=fout) diff --git a/lib/python3.10/site-packages/nltk/collections.py b/lib/python3.10/site-packages/nltk/collections.py new file mode 100644 index 0000000000000000000000000000000000000000..89ade62b665a4b51e63d49e26ef4ce41001efcd1 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/collections.py @@ -0,0 +1,661 @@ +# Natural Language Toolkit: Collections +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import bisect + +# this unused import is for python 2.7 +from collections import Counter, defaultdict, deque +from functools import total_ordering +from itertools import chain, islice + +from nltk.internals import raise_unorderable_types, slice_bounds + +########################################################################## +# Ordered Dictionary +########################################################################## + + +class OrderedDict(dict): + def __init__(self, data=None, **kwargs): + self._keys = self.keys(data, kwargs.get("keys")) + self._default_factory = kwargs.get("default_factory") + if data is None: + dict.__init__(self) + else: + dict.__init__(self, data) + + def __delitem__(self, key): + dict.__delitem__(self, key) + self._keys.remove(key) + + def __getitem__(self, key): + try: + return dict.__getitem__(self, key) + except KeyError: + return self.__missing__(key) + + def __iter__(self): + return (key for key in self.keys()) + + def __missing__(self, key): + if not self._default_factory and key not in self._keys: + raise KeyError() + return self._default_factory() + + def __setitem__(self, key, item): + dict.__setitem__(self, key, item) + if key not in self._keys: + self._keys.append(key) + + def clear(self): + dict.clear(self) + self._keys.clear() + + def copy(self): + d = dict.copy(self) + d._keys = self._keys + return d + + def items(self): + # returns iterator under python 3 and list under python 2 + return zip(self.keys(), self.values()) + + def keys(self, data=None, keys=None): + if data: + if keys: + assert isinstance(keys, list) + assert len(data) == len(keys) + return keys + else: + assert ( + isinstance(data, dict) + or isinstance(data, OrderedDict) + or isinstance(data, list) + ) + if isinstance(data, dict) or isinstance(data, OrderedDict): + return data.keys() + elif isinstance(data, list): + return [key for (key, value) in data] + elif "_keys" in self.__dict__: + return self._keys + else: + return [] + + def popitem(self): + if not self._keys: + raise KeyError() + + key = self._keys.pop() + value = self[key] + del self[key] + return (key, value) + + def setdefault(self, key, failobj=None): + dict.setdefault(self, key, failobj) + if key not in self._keys: + self._keys.append(key) + + def update(self, data): + dict.update(self, data) + for key in self.keys(data): + if key not in self._keys: + self._keys.append(key) + + def values(self): + # returns iterator under python 3 + return map(self.get, self._keys) + + +###################################################################### +# Lazy Sequences +###################################################################### + + +@total_ordering +class AbstractLazySequence: + """ + An abstract base class for read-only sequences whose values are + computed as 
needed. Lazy sequences act like tuples -- they can be + indexed, sliced, and iterated over; but they may not be modified. + + The most common application of lazy sequences in NLTK is for + corpus view objects, which provide access to the contents of a + corpus without loading the entire corpus into memory, by loading + pieces of the corpus from disk as needed. + + The result of modifying a mutable element of a lazy sequence is + undefined. In particular, the modifications made to the element + may or may not persist, depending on whether and when the lazy + sequence caches that element's value or reconstructs it from + scratch. + + Subclasses are required to define two methods: ``__len__()`` + and ``iterate_from()``. + """ + + def __len__(self): + """ + Return the number of tokens in the corpus file underlying this + corpus view. + """ + raise NotImplementedError("should be implemented by subclass") + + def iterate_from(self, start): + """ + Return an iterator that generates the tokens in the corpus + file underlying this corpus view, starting at the token number + ``start``. If ``start>=len(self)``, then this iterator will + generate no tokens. + """ + raise NotImplementedError("should be implemented by subclass") + + def __getitem__(self, i): + """ + Return the *i* th token in the corpus file underlying this + corpus view. Negative indices and spans are both supported. + """ + if isinstance(i, slice): + start, stop = slice_bounds(self, i) + return LazySubsequence(self, start, stop) + else: + # Handle negative indices + if i < 0: + i += len(self) + if i < 0: + raise IndexError("index out of range") + # Use iterate_from to extract it. + try: + return next(self.iterate_from(i)) + except StopIteration as e: + raise IndexError("index out of range") from e + + def __iter__(self): + """Return an iterator that generates the tokens in the corpus + file underlying this corpus view.""" + return self.iterate_from(0) + + def count(self, value): + """Return the number of times this list contains ``value``.""" + return sum(1 for elt in self if elt == value) + + def index(self, value, start=None, stop=None): + """Return the index of the first occurrence of ``value`` in this + list that is greater than or equal to ``start`` and less than + ``stop``. Negative start and stop values are treated like negative + slice bounds -- i.e., they count from the end of the list.""" + start, stop = slice_bounds(self, slice(start, stop)) + for i, elt in enumerate(islice(self, start, stop)): + if elt == value: + return i + start + raise ValueError("index(x): x not in list") + + def __contains__(self, value): + """Return true if this list contains ``value``.""" + return bool(self.count(value)) + + def __add__(self, other): + """Return a list concatenating self with other.""" + return LazyConcatenation([self, other]) + + def __radd__(self, other): + """Return a list concatenating other with self.""" + return LazyConcatenation([other, self]) + + def __mul__(self, count): + """Return a list concatenating self with itself ``count`` times.""" + return LazyConcatenation([self] * count) + + def __rmul__(self, count): + """Return a list concatenating self with itself ``count`` times.""" + return LazyConcatenation([self] * count) + + _MAX_REPR_SIZE = 60 + + def __repr__(self): + """ + Return a string representation for this corpus view that is + similar to a list's representation; but if it would be more + than 60 characters long, it is truncated. 
+ """ + pieces = [] + length = 5 + for elt in self: + pieces.append(repr(elt)) + length += len(pieces[-1]) + 2 + if length > self._MAX_REPR_SIZE and len(pieces) > 2: + return "[%s, ...]" % ", ".join(pieces[:-1]) + return "[%s]" % ", ".join(pieces) + + def __eq__(self, other): + return type(self) == type(other) and list(self) == list(other) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if type(other) != type(self): + raise_unorderable_types("<", self, other) + return list(self) < list(other) + + def __hash__(self): + """ + :raise ValueError: Corpus view objects are unhashable. + """ + raise ValueError("%s objects are unhashable" % self.__class__.__name__) + + +class LazySubsequence(AbstractLazySequence): + """ + A subsequence produced by slicing a lazy sequence. This slice + keeps a reference to its source sequence, and generates its values + by looking them up in the source sequence. + """ + + MIN_SIZE = 100 + """ + The minimum size for which lazy slices should be created. If + ``LazySubsequence()`` is called with a subsequence that is + shorter than ``MIN_SIZE``, then a tuple will be returned instead. + """ + + def __new__(cls, source, start, stop): + """ + Construct a new slice from a given underlying sequence. The + ``start`` and ``stop`` indices should be absolute indices -- + i.e., they should not be negative (for indexing from the back + of a list) or greater than the length of ``source``. + """ + # If the slice is small enough, just use a tuple. + if stop - start < cls.MIN_SIZE: + return list(islice(source.iterate_from(start), stop - start)) + else: + return object.__new__(cls) + + def __init__(self, source, start, stop): + self._source = source + self._start = start + self._stop = stop + + def __len__(self): + return self._stop - self._start + + def iterate_from(self, start): + return islice( + self._source.iterate_from(start + self._start), max(0, len(self) - start) + ) + + +class LazyConcatenation(AbstractLazySequence): + """ + A lazy sequence formed by concatenating a list of lists. This + underlying list of lists may itself be lazy. ``LazyConcatenation`` + maintains an index that it uses to keep track of the relationship + between offsets in the concatenated lists and offsets in the + sublists. + """ + + def __init__(self, list_of_lists): + self._list = list_of_lists + self._offsets = [0] + + def __len__(self): + if len(self._offsets) <= len(self._list): + for _ in self.iterate_from(self._offsets[-1]): + pass + return self._offsets[-1] + + def iterate_from(self, start_index): + if start_index < self._offsets[-1]: + sublist_index = bisect.bisect_right(self._offsets, start_index) - 1 + else: + sublist_index = len(self._offsets) - 1 + + index = self._offsets[sublist_index] + + # Construct an iterator over the sublists. + if isinstance(self._list, AbstractLazySequence): + sublist_iter = self._list.iterate_from(sublist_index) + else: + sublist_iter = islice(self._list, sublist_index, None) + + for sublist in sublist_iter: + if sublist_index == (len(self._offsets) - 1): + assert ( + index + len(sublist) >= self._offsets[-1] + ), "offsets not monotonic increasing!" 
+ self._offsets.append(index + len(sublist)) + else: + assert self._offsets[sublist_index + 1] == index + len( + sublist + ), "inconsistent list value (num elts)" + + yield from sublist[max(0, start_index - index) :] + + index += len(sublist) + sublist_index += 1 + + +class LazyMap(AbstractLazySequence): + """ + A lazy sequence whose elements are formed by applying a given + function to each element in one or more underlying lists. The + function is applied lazily -- i.e., when you read a value from the + list, ``LazyMap`` will calculate that value by applying its + function to the underlying lists' value(s). ``LazyMap`` is + essentially a lazy version of the Python primitive function + ``map``. In particular, the following two expressions are + equivalent: + + >>> from nltk.collections import LazyMap + >>> function = str + >>> sequence = [1,2,3] + >>> map(function, sequence) # doctest: +SKIP + ['1', '2', '3'] + >>> list(LazyMap(function, sequence)) + ['1', '2', '3'] + + Like the Python ``map`` primitive, if the source lists do not have + equal size, then the value None will be supplied for the + 'missing' elements. + + Lazy maps can be useful for conserving memory, in cases where + individual values take up a lot of space. This is especially true + if the underlying list's values are constructed lazily, as is the + case with many corpus readers. + + A typical example of a use case for this class is performing + feature detection on the tokens in a corpus. Since featuresets + are encoded as dictionaries, which can take up a lot of memory, + using a ``LazyMap`` can significantly reduce memory usage when + training and running classifiers. + """ + + def __init__(self, function, *lists, **config): + """ + :param function: The function that should be applied to + elements of ``lists``. It should take as many arguments + as there are ``lists``. + :param lists: The underlying lists. + :param cache_size: Determines the size of the cache used + by this lazy map. (default=5) + """ + if not lists: + raise TypeError("LazyMap requires at least two args") + + self._lists = lists + self._func = function + self._cache_size = config.get("cache_size", 5) + self._cache = {} if self._cache_size > 0 else None + + # If you just take bool() of sum() here _all_lazy will be true just + # in case n >= 1 list is an AbstractLazySequence. Presumably this + # isn't what's intended. + self._all_lazy = sum( + isinstance(lst, AbstractLazySequence) for lst in lists + ) == len(lists) + + def iterate_from(self, index): + # Special case: one lazy sublist + if len(self._lists) == 1 and self._all_lazy: + for value in self._lists[0].iterate_from(index): + yield self._func(value) + return + + # Special case: one non-lazy sublist + elif len(self._lists) == 1: + while True: + try: + yield self._func(self._lists[0][index]) + except IndexError: + return + index += 1 + + # Special case: n lazy sublists + elif self._all_lazy: + iterators = [lst.iterate_from(index) for lst in self._lists] + while True: + elements = [] + for iterator in iterators: + try: + elements.append(next(iterator)) + except: # FIXME: What is this except really catching? StopIteration? 
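+ # Most likely StopIteration from an exhausted iterator; pad its slot with None.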
+ elements.append(None) + if elements == [None] * len(self._lists): + return + yield self._func(*elements) + index += 1 + + # general case + else: + while True: + try: + elements = [lst[index] for lst in self._lists] + except IndexError: + elements = [None] * len(self._lists) + for i, lst in enumerate(self._lists): + try: + elements[i] = lst[index] + except IndexError: + pass + if elements == [None] * len(self._lists): + return + yield self._func(*elements) + index += 1 + + def __getitem__(self, index): + if isinstance(index, slice): + sliced_lists = [lst[index] for lst in self._lists] + return LazyMap(self._func, *sliced_lists) + else: + # Handle negative indices + if index < 0: + index += len(self) + if index < 0: + raise IndexError("index out of range") + # Check the cache + if self._cache is not None and index in self._cache: + return self._cache[index] + # Calculate the value + try: + val = next(self.iterate_from(index)) + except StopIteration as e: + raise IndexError("index out of range") from e + # Update the cache + if self._cache is not None: + if len(self._cache) > self._cache_size: + self._cache.popitem() # discard random entry + self._cache[index] = val + # Return the value + return val + + def __len__(self): + return max(len(lst) for lst in self._lists) + + +class LazyZip(LazyMap): + """ + A lazy sequence whose elements are tuples, each containing the i-th + element from each of the argument sequences. The returned list is + truncated in length to the length of the shortest argument sequence. The + tuples are constructed lazily -- i.e., when you read a value from the + list, ``LazyZip`` will calculate that value by forming a tuple from + the i-th element of each of the argument sequences. + + ``LazyZip`` is essentially a lazy version of the Python primitive function + ``zip``. In particular, an evaluated LazyZip is equivalent to a zip: + + >>> from nltk.collections import LazyZip + >>> sequence1, sequence2 = [1, 2, 3], ['a', 'b', 'c'] + >>> zip(sequence1, sequence2) # doctest: +SKIP + [(1, 'a'), (2, 'b'), (3, 'c')] + >>> list(LazyZip(sequence1, sequence2)) + [(1, 'a'), (2, 'b'), (3, 'c')] + >>> sequences = [sequence1, sequence2, [6,7,8,9]] + >>> list(zip(*sequences)) == list(LazyZip(*sequences)) + True + + Lazy zips can be useful for conserving memory in cases where the argument + sequences are particularly long. + + A typical example of a use case for this class is combining long sequences + of gold standard and predicted values in a classification or tagging task + in order to calculate accuracy. By constructing tuples lazily and + avoiding the creation of an additional long sequence, memory usage can be + significantly reduced. + """ + + def __init__(self, *lists): + """ + :param lists: the underlying lists + :type lists: list(list) + """ + LazyMap.__init__(self, lambda *elts: elts, *lists) + + def iterate_from(self, index): + iterator = LazyMap.iterate_from(self, index) + while index < len(self): + yield next(iterator) + index += 1 + return + + def __len__(self): + return min(len(lst) for lst in self._lists) + + +class LazyEnumerate(LazyZip): + """ + A lazy sequence whose elements are tuples, each containing a count (from + zero) and a value yielded by underlying sequence. ``LazyEnumerate`` is + useful for obtaining an indexed list. The tuples are constructed lazily + -- i.e., when you read a value from the list, ``LazyEnumerate`` will + calculate that value by forming a tuple from the count of the i-th + element and the i-th element of the underlying sequence. 
+ + ``LazyEnumerate`` is essentially a lazy version of the Python primitive + function ``enumerate``. In particular, the following two expressions are + equivalent: + + >>> from nltk.collections import LazyEnumerate + >>> sequence = ['first', 'second', 'third'] + >>> list(enumerate(sequence)) + [(0, 'first'), (1, 'second'), (2, 'third')] + >>> list(LazyEnumerate(sequence)) + [(0, 'first'), (1, 'second'), (2, 'third')] + + Lazy enumerations can be useful for conserving memory in cases where the + argument sequences are particularly long. + + A typical example of a use case for this class is obtaining an indexed + list for a long sequence of values. By constructing tuples lazily and + avoiding the creation of an additional long sequence, memory usage can be + significantly reduced. + """ + + def __init__(self, lst): + """ + :param lst: the underlying list + :type lst: list + """ + LazyZip.__init__(self, range(len(lst)), lst) + + +class LazyIteratorList(AbstractLazySequence): + """ + Wraps an iterator, loading its elements on demand + and making them subscriptable. + __repr__ displays only the first few elements. + """ + + def __init__(self, it, known_len=None): + self._it = it + self._len = known_len + self._cache = [] + + def __len__(self): + if self._len: + return self._len + for _ in self.iterate_from(len(self._cache)): + pass + self._len = len(self._cache) + return self._len + + def iterate_from(self, start): + """Create a new iterator over this list starting at the given offset.""" + while len(self._cache) < start: + v = next(self._it) + self._cache.append(v) + i = start + while i < len(self._cache): + yield self._cache[i] + i += 1 + try: + while True: + v = next(self._it) + self._cache.append(v) + yield v + except StopIteration: + pass + + def __add__(self, other): + """Return a list concatenating self with other.""" + return type(self)(chain(self, other)) + + def __radd__(self, other): + """Return a list concatenating other with self.""" + return type(self)(chain(other, self)) + + +###################################################################### +# Trie Implementation +###################################################################### +class Trie(dict): + """A Trie implementation for strings""" + + LEAF = True + + def __init__(self, strings=None): + """Builds a Trie object, which is built around a ``dict`` + + If ``strings`` is provided, it will add the ``strings``, which + consist of a ``list`` of ``strings``, to the Trie. + Otherwise, it'll construct an empty Trie. 
+ + :param strings: List of strings to insert into the trie + (Default is ``None``) + :type strings: list(str) + + """ + super().__init__() + if strings: + for string in strings: + self.insert(string) + + def insert(self, string): + """Inserts ``string`` into the Trie + + :param string: String to insert into the trie + :type string: str + + :Example: + + >>> from nltk.collections import Trie + >>> trie = Trie(["abc", "def"]) + >>> expected = {'a': {'b': {'c': {True: None}}}, \ + 'd': {'e': {'f': {True: None}}}} + >>> trie == expected + True + + """ + if len(string): + self[string[0]].insert(string[1:]) + else: + # mark the string as complete + self[Trie.LEAF] = None + + def __missing__(self, key): + self[key] = Trie() + return self[key] diff --git a/lib/python3.10/site-packages/nltk/collocations.py b/lib/python3.10/site-packages/nltk/collocations.py new file mode 100644 index 0000000000000000000000000000000000000000..2a1fd83ad38e861f0e8db96c24871d40c4ee185e --- /dev/null +++ b/lib/python3.10/site-packages/nltk/collocations.py @@ -0,0 +1,412 @@ +# Natural Language Toolkit: Collocations and Association Measures +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Joel Nothman +# URL: +# For license information, see LICENSE.TXT +# +""" +Tools to identify collocations --- words that often appear consecutively +--- within corpora. They may also be used to find other associations between +word occurrences. +See Manning and Schutze ch. 5 at https://nlp.stanford.edu/fsnlp/promo/colloc.pdf +and the Text::NSP Perl package at http://ngram.sourceforge.net + +Finding collocations requires first calculating the frequencies of words and +their appearance in the context of other words. Often the collection of words +will then require filtering to only retain useful content terms. Each ngram +of words may then be scored according to some association measure, in order +to determine the relative likelihood of each ngram being a collocation. + +The ``BigramCollocationFinder`` and ``TrigramCollocationFinder`` classes provide +these functionalities, dependent on being provided a function which scores an +ngram given appropriate frequency counts. A number of standard association +measures are provided in bigram_measures and trigram_measures. +""" + +# Possible TODOs: +# - consider the distinction between f(x,_) and f(x) and whether our +# approximation is good enough for fragmented data, and mention it +# - add an n-gram collocation finder with measures which only utilise n-gram +# and unigram counts (raw_freq, pmi, student_t) + +import itertools as _itertools + +# these two unused imports are referenced in collocations.doctest +from nltk.metrics import ( + BigramAssocMeasures, + ContingencyMeasures, + QuadgramAssocMeasures, + TrigramAssocMeasures, +) +from nltk.metrics.spearman import ranks_from_scores, spearman_correlation +from nltk.probability import FreqDist +from nltk.util import ngrams + + +class AbstractCollocationFinder: + """ + An abstract base class for collocation finders whose purpose is to + collect collocation candidate frequencies, filter and rank them. + + As a minimum, collocation finders require the frequencies of each + word in a corpus, and the joint frequency of word tuples. This data + should be provided through nltk.probability.FreqDist objects or an + identical interface.
+ """ + + def __init__(self, word_fd, ngram_fd): + self.word_fd = word_fd + self.N = word_fd.N() + self.ngram_fd = ngram_fd + + @classmethod + def _build_new_documents( + cls, documents, window_size, pad_left=False, pad_right=False, pad_symbol=None + ): + """ + Pad the document with the place holder according to the window_size + """ + padding = (pad_symbol,) * (window_size - 1) + if pad_right: + return _itertools.chain.from_iterable( + _itertools.chain(doc, padding) for doc in documents + ) + if pad_left: + return _itertools.chain.from_iterable( + _itertools.chain(padding, doc) for doc in documents + ) + + @classmethod + def from_documents(cls, documents): + """Constructs a collocation finder given a collection of documents, + each of which is a list (or iterable) of tokens. + """ + # return cls.from_words(_itertools.chain(*documents)) + return cls.from_words( + cls._build_new_documents(documents, cls.default_ws, pad_right=True) + ) + + @staticmethod + def _ngram_freqdist(words, n): + return FreqDist(tuple(words[i : i + n]) for i in range(len(words) - 1)) + + def _apply_filter(self, fn=lambda ngram, freq: False): + """Generic filter removes ngrams from the frequency distribution + if the function returns True when passed an ngram tuple. + """ + tmp_ngram = FreqDist() + for ngram, freq in self.ngram_fd.items(): + if not fn(ngram, freq): + tmp_ngram[ngram] = freq + self.ngram_fd = tmp_ngram + + def apply_freq_filter(self, min_freq): + """Removes candidate ngrams which have frequency less than min_freq.""" + self._apply_filter(lambda ng, freq: freq < min_freq) + + def apply_ngram_filter(self, fn): + """Removes candidate ngrams (w1, w2, ...) where fn(w1, w2, ...) + evaluates to True. + """ + self._apply_filter(lambda ng, f: fn(*ng)) + + def apply_word_filter(self, fn): + """Removes candidate ngrams (w1, w2, ...) where any of (fn(w1), fn(w2), + ...) evaluates to True. + """ + self._apply_filter(lambda ng, f: any(fn(w) for w in ng)) + + def _score_ngrams(self, score_fn): + """Generates of (ngram, score) pairs as determined by the scoring + function provided. + """ + for tup in self.ngram_fd: + score = self.score_ngram(score_fn, *tup) + if score is not None: + yield tup, score + + def score_ngrams(self, score_fn): + """Returns a sequence of (ngram, score) pairs ordered from highest to + lowest score, as determined by the scoring function provided. + """ + return sorted(self._score_ngrams(score_fn), key=lambda t: (-t[1], t[0])) + + def nbest(self, score_fn, n): + """Returns the top n ngrams when scored by the given function.""" + return [p for p, s in self.score_ngrams(score_fn)[:n]] + + def above_score(self, score_fn, min_score): + """Returns a sequence of ngrams, ordered by decreasing score, whose + scores each exceed the given minimum score. + """ + for ngram, score in self.score_ngrams(score_fn): + if score > min_score: + yield ngram + else: + break + + +class BigramCollocationFinder(AbstractCollocationFinder): + """A tool for the finding and ranking of bigram collocations or other + association measures. It is often useful to use from_words() rather than + constructing an instance directly. + """ + + default_ws = 2 + + def __init__(self, word_fd, bigram_fd, window_size=2): + """Construct a BigramCollocationFinder, given FreqDists for + appearances of words and (possibly non-contiguous) bigrams. 
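+
+        A short, illustrative scoring example (assuming the standard
+        ``raw_freq`` measure from ``nltk.metrics``):
+
+        >>> from nltk.collocations import BigramCollocationFinder
+        >>> from nltk.metrics import BigramAssocMeasures
+        >>> finder = BigramCollocationFinder.from_words(['a', 'b', 'a', 'b'])
+        >>> finder.score_ngram(BigramAssocMeasures.raw_freq, 'a', 'b')
+        0.5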
+ """ + AbstractCollocationFinder.__init__(self, word_fd, bigram_fd) + self.window_size = window_size + + @classmethod + def from_words(cls, words, window_size=2): + """Construct a BigramCollocationFinder for all bigrams in the given + sequence. When window_size > 2, count non-contiguous bigrams, in the + style of Church and Hanks's (1990) association ratio. + """ + wfd = FreqDist() + bfd = FreqDist() + + if window_size < 2: + raise ValueError("Specify window_size at least 2") + + for window in ngrams(words, window_size, pad_right=True): + w1 = window[0] + if w1 is None: + continue + wfd[w1] += 1 + for w2 in window[1:]: + if w2 is not None: + bfd[(w1, w2)] += 1 + return cls(wfd, bfd, window_size=window_size) + + def score_ngram(self, score_fn, w1, w2): + """Returns the score for a given bigram using the given scoring + function. Following Church and Hanks (1990), counts are scaled by + a factor of 1/(window_size - 1). + """ + n_all = self.N + n_ii = self.ngram_fd[(w1, w2)] / (self.window_size - 1.0) + if not n_ii: + return + n_ix = self.word_fd[w1] + n_xi = self.word_fd[w2] + return score_fn(n_ii, (n_ix, n_xi), n_all) + + +class TrigramCollocationFinder(AbstractCollocationFinder): + """A tool for the finding and ranking of trigram collocations or other + association measures. It is often useful to use from_words() rather than + constructing an instance directly. + """ + + default_ws = 3 + + def __init__(self, word_fd, bigram_fd, wildcard_fd, trigram_fd): + """Construct a TrigramCollocationFinder, given FreqDists for + appearances of words, bigrams, two words with any word between them, + and trigrams. + """ + AbstractCollocationFinder.__init__(self, word_fd, trigram_fd) + self.wildcard_fd = wildcard_fd + self.bigram_fd = bigram_fd + + @classmethod + def from_words(cls, words, window_size=3): + """Construct a TrigramCollocationFinder for all trigrams in the given + sequence. + """ + if window_size < 3: + raise ValueError("Specify window_size at least 3") + + wfd = FreqDist() + wildfd = FreqDist() + bfd = FreqDist() + tfd = FreqDist() + for window in ngrams(words, window_size, pad_right=True): + w1 = window[0] + if w1 is None: + continue + for w2, w3 in _itertools.combinations(window[1:], 2): + wfd[w1] += 1 + if w2 is None: + continue + bfd[(w1, w2)] += 1 + if w3 is None: + continue + wildfd[(w1, w3)] += 1 + tfd[(w1, w2, w3)] += 1 + return cls(wfd, bfd, wildfd, tfd) + + def bigram_finder(self): + """Constructs a bigram collocation finder with the bigram and unigram + data from this finder. Note that this does not include any filtering + applied to this finder. + """ + return BigramCollocationFinder(self.word_fd, self.bigram_fd) + + def score_ngram(self, score_fn, w1, w2, w3): + """Returns the score for a given trigram using the given scoring + function. + """ + n_all = self.N + n_iii = self.ngram_fd[(w1, w2, w3)] + if not n_iii: + return + n_iix = self.bigram_fd[(w1, w2)] + n_ixi = self.wildcard_fd[(w1, w3)] + n_xii = self.bigram_fd[(w2, w3)] + n_ixx = self.word_fd[w1] + n_xix = self.word_fd[w2] + n_xxi = self.word_fd[w3] + return score_fn(n_iii, (n_iix, n_ixi, n_xii), (n_ixx, n_xix, n_xxi), n_all) + + +class QuadgramCollocationFinder(AbstractCollocationFinder): + """A tool for the finding and ranking of quadgram collocations or other association measures. + It is often useful to use from_words() rather than constructing an instance directly. 
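+
+    A brief illustration of the counts gathered by ``from_words`` (only the
+    unigram and joint quadgram frequencies are shown here):
+
+    >>> from nltk.collocations import QuadgramCollocationFinder
+    >>> finder = QuadgramCollocationFinder.from_words("a b c d a b c d".split())
+    >>> finder.word_fd['a']
+    2
+    >>> finder.ngram_fd[('a', 'b', 'c', 'd')]
+    2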
+ """ + + default_ws = 4 + + def __init__(self, word_fd, quadgram_fd, ii, iii, ixi, ixxi, iixi, ixii): + """Construct a QuadgramCollocationFinder, given FreqDists for appearances of words, + bigrams, trigrams, two words with one word and two words between them, three words + with a word between them in both variations. + """ + AbstractCollocationFinder.__init__(self, word_fd, quadgram_fd) + self.iii = iii + self.ii = ii + self.ixi = ixi + self.ixxi = ixxi + self.iixi = iixi + self.ixii = ixii + + @classmethod + def from_words(cls, words, window_size=4): + if window_size < 4: + raise ValueError("Specify window_size at least 4") + ixxx = FreqDist() + iiii = FreqDist() + ii = FreqDist() + iii = FreqDist() + ixi = FreqDist() + ixxi = FreqDist() + iixi = FreqDist() + ixii = FreqDist() + + for window in ngrams(words, window_size, pad_right=True): + w1 = window[0] + if w1 is None: + continue + for w2, w3, w4 in _itertools.combinations(window[1:], 3): + ixxx[w1] += 1 + if w2 is None: + continue + ii[(w1, w2)] += 1 + if w3 is None: + continue + iii[(w1, w2, w3)] += 1 + ixi[(w1, w3)] += 1 + if w4 is None: + continue + iiii[(w1, w2, w3, w4)] += 1 + ixxi[(w1, w4)] += 1 + ixii[(w1, w3, w4)] += 1 + iixi[(w1, w2, w4)] += 1 + + return cls(ixxx, iiii, ii, iii, ixi, ixxi, iixi, ixii) + + def score_ngram(self, score_fn, w1, w2, w3, w4): + n_all = self.N + n_iiii = self.ngram_fd[(w1, w2, w3, w4)] + if not n_iiii: + return + n_iiix = self.iii[(w1, w2, w3)] + n_xiii = self.iii[(w2, w3, w4)] + n_iixi = self.iixi[(w1, w2, w4)] + n_ixii = self.ixii[(w1, w3, w4)] + + n_iixx = self.ii[(w1, w2)] + n_xxii = self.ii[(w3, w4)] + n_xiix = self.ii[(w2, w3)] + n_ixix = self.ixi[(w1, w3)] + n_ixxi = self.ixxi[(w1, w4)] + n_xixi = self.ixi[(w2, w4)] + + n_ixxx = self.word_fd[w1] + n_xixx = self.word_fd[w2] + n_xxix = self.word_fd[w3] + n_xxxi = self.word_fd[w4] + return score_fn( + n_iiii, + (n_iiix, n_iixi, n_ixii, n_xiii), + (n_iixx, n_ixix, n_ixxi, n_xixi, n_xxii, n_xiix), + (n_ixxx, n_xixx, n_xxix, n_xxxi), + n_all, + ) + + +def demo(scorer=None, compare_scorer=None): + """Finds bigram collocations in the files of the WebText corpus.""" + from nltk.metrics import ( + BigramAssocMeasures, + ranks_from_scores, + spearman_correlation, + ) + + if scorer is None: + scorer = BigramAssocMeasures.likelihood_ratio + if compare_scorer is None: + compare_scorer = BigramAssocMeasures.raw_freq + + from nltk.corpus import stopwords, webtext + + ignored_words = stopwords.words("english") + word_filter = lambda w: len(w) < 3 or w.lower() in ignored_words + + for file in webtext.fileids(): + words = [word.lower() for word in webtext.words(file)] + + cf = BigramCollocationFinder.from_words(words) + cf.apply_freq_filter(3) + cf.apply_word_filter(word_filter) + + corr = spearman_correlation( + ranks_from_scores(cf.score_ngrams(scorer)), + ranks_from_scores(cf.score_ngrams(compare_scorer)), + ) + print(file) + print("\t", [" ".join(tup) for tup in cf.nbest(scorer, 15)]) + print(f"\t Correlation to {compare_scorer.__name__}: {corr:0.4f}") + + +# Slows down loading too much +# bigram_measures = BigramAssocMeasures() +# trigram_measures = TrigramAssocMeasures() + +if __name__ == "__main__": + import sys + + from nltk.metrics import BigramAssocMeasures + + try: + scorer = eval("BigramAssocMeasures." + sys.argv[1]) + except IndexError: + scorer = None + try: + compare_scorer = eval("BigramAssocMeasures." 
+ sys.argv[2])
+    except IndexError:
+        compare_scorer = None
+
+    demo(scorer, compare_scorer)
+
+__all__ = [
+    "BigramCollocationFinder",
+    "TrigramCollocationFinder",
+    "QuadgramCollocationFinder",
+]
diff --git a/lib/python3.10/site-packages/nltk/compat.py b/lib/python3.10/site-packages/nltk/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..ceedc3992530e4e523dc9d479c26fbb43c918280
--- /dev/null
+++ b/lib/python3.10/site-packages/nltk/compat.py
@@ -0,0 +1,43 @@
+# Natural Language Toolkit: Compatibility
+#
+# Copyright (C) 2001-2023 NLTK Project
+#
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+import os
+from functools import wraps
+
+# ======= Compatibility for datasets that care about Python versions ========
+
+# The following datasets have a /PY3 subdirectory containing
+# a full copy of the data which has been re-encoded or repickled.
+DATA_UPDATES = [
+    ("chunkers", "maxent_ne_chunker"),
+    ("help", "tagsets"),
+    ("taggers", "maxent_treebank_pos_tagger"),
+    ("tokenizers", "punkt"),
+]
+
+_PY3_DATA_UPDATES = [os.path.join(*path_list) for path_list in DATA_UPDATES]
+
+
+def add_py3_data(path):
+    for item in _PY3_DATA_UPDATES:
+        if item in str(path) and "/PY3" not in str(path):
+            pos = path.index(item) + len(item)
+            if path[pos : pos + 4] == ".zip":
+                pos += 4
+            path = path[:pos] + "/PY3" + path[pos:]
+            break
+    return path
+
+
+# for use in adding /PY3 to the second (filename) argument
+# of the file pointers in data.py
+def py3_data(init_func):
+    def _decorator(*args, **kwargs):
+        args = (args[0], add_py3_data(args[1])) + args[2:]
+        return init_func(*args, **kwargs)
+
+    return wraps(init_func)(_decorator)
diff --git a/lib/python3.10/site-packages/nltk/data.py b/lib/python3.10/site-packages/nltk/data.py
new file mode 100644
index 0000000000000000000000000000000000000000..fed75d2bfbf2953a2ecc61d1d5a24244f5749be6
--- /dev/null
+++ b/lib/python3.10/site-packages/nltk/data.py
@@ -0,0 +1,1441 @@
+# Natural Language Toolkit: Utility functions
+#
+# Copyright (C) 2001-2023 NLTK Project
+# Author: Edward Loper
+# URL: <https://www.nltk.org/>
+# For license information, see LICENSE.TXT
+
+"""
+Functions to find and load NLTK resource files, such as corpora,
+grammars, and saved processing objects. Resource files are identified
+using URLs, such as ``nltk:corpora/abc/rural.txt`` or
+``https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg``.
+The following URL protocols are supported:
+
+  - ``file:path``: Specifies the file whose path is *path*.
+    Both relative and absolute paths may be used.
+
+  - ``https://host/path``: Specifies the file stored on the web
+    server *host* at path *path*.
+
+  - ``nltk:path``: Specifies the file stored in the NLTK data
+    package at *path*. NLTK will search for these files in the
+    directories specified by ``nltk.data.path``.
+
+If no protocol is specified, then the default protocol ``nltk:`` will
+be used.
+
+This module provides two functions that can be used to access a
+resource file, given its URL: ``load()`` loads a given resource, and
+adds it to a resource cache; and ``retrieve()`` copies a given resource
+to a local file.
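+
+For example (illustrative only; this assumes the ``abc`` corpus has been
+installed in one of the directories on ``nltk.data.path``):
+
+>>> import nltk.data
+>>> text = nltk.data.load('nltk:corpora/abc/rural.txt', format='text')  # doctest: +SKIP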
+""" + +import codecs +import functools +import os +import pickle +import re +import sys +import textwrap +import zipfile +from abc import ABCMeta, abstractmethod +from gzip import WRITE as GZ_WRITE +from gzip import GzipFile +from io import BytesIO, TextIOWrapper +from urllib.request import url2pathname, urlopen + +try: + from zlib import Z_SYNC_FLUSH as FLUSH +except ImportError: + from zlib import Z_FINISH as FLUSH + +from nltk import grammar, sem +from nltk.compat import add_py3_data, py3_data +from nltk.internals import deprecated + +textwrap_indent = functools.partial(textwrap.indent, prefix=" ") + +###################################################################### +# Search Path +###################################################################### + +path = [] +"""A list of directories where the NLTK data package might reside. + These directories will be checked in order when looking for a + resource in the data package. Note that this allows users to + substitute in their own versions of resources, if they have them + (e.g., in their home directory under ~/nltk_data).""" + +# User-specified locations: +_paths_from_env = os.environ.get("NLTK_DATA", "").split(os.pathsep) +path += [d for d in _paths_from_env if d] +if "APPENGINE_RUNTIME" not in os.environ and os.path.expanduser("~/") != "~/": + path.append(os.path.expanduser("~/nltk_data")) + +if sys.platform.startswith("win"): + # Common locations on Windows: + path += [ + os.path.join(sys.prefix, "nltk_data"), + os.path.join(sys.prefix, "share", "nltk_data"), + os.path.join(sys.prefix, "lib", "nltk_data"), + os.path.join(os.environ.get("APPDATA", "C:\\"), "nltk_data"), + r"C:\nltk_data", + r"D:\nltk_data", + r"E:\nltk_data", + ] +else: + # Common locations on UNIX & OS X: + path += [ + os.path.join(sys.prefix, "nltk_data"), + os.path.join(sys.prefix, "share", "nltk_data"), + os.path.join(sys.prefix, "lib", "nltk_data"), + "/usr/share/nltk_data", + "/usr/local/share/nltk_data", + "/usr/lib/nltk_data", + "/usr/local/lib/nltk_data", + ] + + +###################################################################### +# Util Functions +###################################################################### + + +def gzip_open_unicode( + filename, + mode="rb", + compresslevel=9, + encoding="utf-8", + fileobj=None, + errors=None, + newline=None, +): + if fileobj is None: + fileobj = GzipFile(filename, mode, compresslevel, fileobj) + return TextIOWrapper(fileobj, encoding, errors, newline) + + +def split_resource_url(resource_url): + """ + Splits a resource url into ":". + + >>> windows = sys.platform.startswith('win') + >>> split_resource_url('nltk:home/nltk') + ('nltk', 'home/nltk') + >>> split_resource_url('nltk:/home/nltk') + ('nltk', '/home/nltk') + >>> split_resource_url('file:/home/nltk') + ('file', '/home/nltk') + >>> split_resource_url('file:///home/nltk') + ('file', '/home/nltk') + >>> split_resource_url('file:///C:/home/nltk') + ('file', '/C:/home/nltk') + """ + protocol, path_ = resource_url.split(":", 1) + if protocol == "nltk": + pass + elif protocol == "file": + if path_.startswith("/"): + path_ = "/" + path_.lstrip("/") + else: + path_ = re.sub(r"^/{0,2}", "", path_) + return protocol, path_ + + +def normalize_resource_url(resource_url): + r""" + Normalizes a resource url + + >>> windows = sys.platform.startswith('win') + >>> os.path.normpath(split_resource_url(normalize_resource_url('file:grammar.fcfg'))[1]) == \ + ... 
('\\' if windows else '') + os.path.abspath(os.path.join(os.curdir, 'grammar.fcfg')) + True + >>> not windows or normalize_resource_url('file:C:/dir/file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('file:C:\\dir\\file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('file:C:\\dir/file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('file://C:/dir/file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('file:////C:/dir/file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('nltk:C:/dir/file') == 'file:///C:/dir/file' + True + >>> not windows or normalize_resource_url('nltk:C:\\dir\\file') == 'file:///C:/dir/file' + True + >>> windows or normalize_resource_url('file:/dir/file/toy.cfg') == 'file:///dir/file/toy.cfg' + True + >>> normalize_resource_url('nltk:home/nltk') + 'nltk:home/nltk' + >>> windows or normalize_resource_url('nltk:/home/nltk') == 'file:///home/nltk' + True + >>> normalize_resource_url('https://example.com/dir/file') + 'https://example.com/dir/file' + >>> normalize_resource_url('dir/file') + 'nltk:dir/file' + """ + try: + protocol, name = split_resource_url(resource_url) + except ValueError: + # the resource url has no protocol, use the nltk protocol by default + protocol = "nltk" + name = resource_url + # use file protocol if the path is an absolute path + if protocol == "nltk" and os.path.isabs(name): + protocol = "file://" + name = normalize_resource_name(name, False, None) + elif protocol == "file": + protocol = "file://" + # name is absolute + name = normalize_resource_name(name, False, None) + elif protocol == "nltk": + protocol = "nltk:" + name = normalize_resource_name(name, True) + else: + # handled by urllib + protocol += "://" + return "".join([protocol, name]) + + +def normalize_resource_name(resource_name, allow_relative=True, relative_path=None): + """ + :type resource_name: str or unicode + :param resource_name: The name of the resource to search for. + Resource names are posix-style relative path names, such as + ``corpora/brown``. Directory names will automatically + be converted to a platform-appropriate path separator. 
+ Directory trailing slashes are preserved + + >>> windows = sys.platform.startswith('win') + >>> normalize_resource_name('.', True) + './' + >>> normalize_resource_name('./', True) + './' + >>> windows or normalize_resource_name('dir/file', False, '/') == '/dir/file' + True + >>> not windows or normalize_resource_name('C:/file', False, '/') == '/C:/file' + True + >>> windows or normalize_resource_name('/dir/file', False, '/') == '/dir/file' + True + >>> windows or normalize_resource_name('../dir/file', False, '/') == '/dir/file' + True + >>> not windows or normalize_resource_name('/dir/file', True, '/') == 'dir/file' + True + >>> windows or normalize_resource_name('/dir/file', True, '/') == '/dir/file' + True + """ + is_dir = bool(re.search(r"[\\/.]$", resource_name)) or resource_name.endswith( + os.path.sep + ) + if sys.platform.startswith("win"): + resource_name = resource_name.lstrip("/") + else: + resource_name = re.sub(r"^/+", "/", resource_name) + if allow_relative: + resource_name = os.path.normpath(resource_name) + else: + if relative_path is None: + relative_path = os.curdir + resource_name = os.path.abspath(os.path.join(relative_path, resource_name)) + resource_name = resource_name.replace("\\", "/").replace(os.path.sep, "/") + if sys.platform.startswith("win") and os.path.isabs(resource_name): + resource_name = "/" + resource_name + if is_dir and not resource_name.endswith("/"): + resource_name += "/" + return resource_name + + +###################################################################### +# Path Pointers +###################################################################### + + +class PathPointer(metaclass=ABCMeta): + """ + An abstract base class for 'path pointers,' used by NLTK's data + package to identify specific paths. Two subclasses exist: + ``FileSystemPathPointer`` identifies a file that can be accessed + directly via a given absolute path. ``ZipFilePathPointer`` + identifies a file contained within a zipfile, that can be accessed + by reading that zipfile. + """ + + @abstractmethod + def open(self, encoding=None): + """ + Return a seekable read-only stream that can be used to read + the contents of the file identified by this path pointer. + + :raise IOError: If the path specified by this pointer does + not contain a readable file. + """ + + @abstractmethod + def file_size(self): + """ + Return the size of the file pointed to by this path pointer, + in bytes. + + :raise IOError: If the path specified by this pointer does + not contain a readable file. + """ + + @abstractmethod + def join(self, fileid): + """ + Return a new path pointer formed by starting at the path + identified by this pointer, and then following the relative + path given by ``fileid``. The path components of ``fileid`` + should be separated by forward slashes, regardless of + the underlying file system's path separator character. + """ + + +class FileSystemPathPointer(PathPointer, str): + """ + A path pointer that identifies a file which can be accessed + directly via a given absolute path. + """ + + @py3_data + def __init__(self, _path): + """ + Create a new path pointer for the given absolute path. + + :raise IOError: If the given path does not exist. + """ + + _path = os.path.abspath(_path) + if not os.path.exists(_path): + raise OSError("No such file or directory: %r" % _path) + self._path = _path + + # There's no need to call str.__init__(), since it's a no-op; + # str does all of its setup work in __new__. 
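+        # Note: FileSystemPathPointer also inherits from str, so instances
+        # can be passed to code that expects a plain path string.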
+ + @property + def path(self): + """The absolute path identified by this path pointer.""" + return self._path + + def open(self, encoding=None): + stream = open(self._path, "rb") + if encoding is not None: + stream = SeekableUnicodeStreamReader(stream, encoding) + return stream + + def file_size(self): + return os.stat(self._path).st_size + + def join(self, fileid): + _path = os.path.join(self._path, fileid) + return FileSystemPathPointer(_path) + + def __repr__(self): + return "FileSystemPathPointer(%r)" % self._path + + def __str__(self): + return self._path + + +@deprecated("Use gzip.GzipFile instead as it also uses a buffer.") +class BufferedGzipFile(GzipFile): + """A ``GzipFile`` subclass for compatibility with older nltk releases. + + Use ``GzipFile`` directly as it also buffers in all supported + Python versions. + """ + + @py3_data + def __init__( + self, filename=None, mode=None, compresslevel=9, fileobj=None, **kwargs + ): + """Return a buffered gzip file object.""" + GzipFile.__init__(self, filename, mode, compresslevel, fileobj) + + def write(self, data): + # This is identical to GzipFile.write but does not return + # the bytes written to retain compatibility. + super().write(data) + + +class GzipFileSystemPathPointer(FileSystemPathPointer): + """ + A subclass of ``FileSystemPathPointer`` that identifies a gzip-compressed + file located at a given absolute path. ``GzipFileSystemPathPointer`` is + appropriate for loading large gzip-compressed pickle objects efficiently. + """ + + def open(self, encoding=None): + stream = GzipFile(self._path, "rb") + if encoding: + stream = SeekableUnicodeStreamReader(stream, encoding) + return stream + + +class ZipFilePathPointer(PathPointer): + """ + A path pointer that identifies a file contained within a zipfile, + which can be accessed by reading that zipfile. + """ + + @py3_data + def __init__(self, zipfile, entry=""): + """ + Create a new path pointer pointing at the specified entry + in the given zipfile. + + :raise IOError: If the given zipfile does not exist, or if it + does not contain the specified entry. + """ + if isinstance(zipfile, str): + zipfile = OpenOnDemandZipFile(os.path.abspath(zipfile)) + + # Check that the entry exists: + if entry: + + # Normalize the entry string, it should be relative: + entry = normalize_resource_name(entry, True, "/").lstrip("/") + + try: + zipfile.getinfo(entry) + except Exception as e: + # Sometimes directories aren't explicitly listed in + # the zip file. So if `entry` is a directory name, + # then check if the zipfile contains any files that + # are under the given directory. + if entry.endswith("/") and [ + n for n in zipfile.namelist() if n.startswith(entry) + ]: + pass # zipfile contains a file in that directory. + else: + # Otherwise, complain. + raise OSError( + f"Zipfile {zipfile.filename!r} does not contain {entry!r}" + ) from e + self._zipfile = zipfile + self._entry = entry + + @property + def zipfile(self): + """ + The zipfile.ZipFile object used to access the zip file + containing the entry identified by this path pointer. + """ + return self._zipfile + + @property + def entry(self): + """ + The name of the file within zipfile that this path + pointer points to. 
+ """ + return self._entry + + def open(self, encoding=None): + data = self._zipfile.read(self._entry) + stream = BytesIO(data) + if self._entry.endswith(".gz"): + stream = GzipFile(self._entry, fileobj=stream) + elif encoding is not None: + stream = SeekableUnicodeStreamReader(stream, encoding) + return stream + + def file_size(self): + return self._zipfile.getinfo(self._entry).file_size + + def join(self, fileid): + entry = f"{self._entry}/{fileid}" + return ZipFilePathPointer(self._zipfile, entry) + + def __repr__(self): + return f"ZipFilePathPointer({self._zipfile.filename!r}, {self._entry!r})" + + def __str__(self): + return os.path.normpath(os.path.join(self._zipfile.filename, self._entry)) + + +###################################################################### +# Access Functions +###################################################################### + +# Don't use a weak dictionary, because in the common case this +# causes a lot more reloading that necessary. +_resource_cache = {} +"""A dictionary used to cache resources so that they won't + need to be loaded more than once.""" + + +def find(resource_name, paths=None): + """ + Find the given resource by searching through the directories and + zip files in paths, where a None or empty string specifies an absolute path. + Returns a corresponding path name. If the given resource is not + found, raise a ``LookupError``, whose message gives a pointer to + the installation instructions for the NLTK downloader. + + Zip File Handling: + + - If ``resource_name`` contains a component with a ``.zip`` + extension, then it is assumed to be a zipfile; and the + remaining path components are used to look inside the zipfile. + + - If any element of ``nltk.data.path`` has a ``.zip`` extension, + then it is assumed to be a zipfile. + + - If a given resource name that does not contain any zipfile + component is not found initially, then ``find()`` will make a + second attempt to find that resource, by replacing each + component *p* in the path with *p.zip/p*. For example, this + allows ``find()`` to map the resource name + ``corpora/chat80/cities.pl`` to a zip file path pointer to + ``corpora/chat80.zip/chat80/cities.pl``. + + - When using ``find()`` to locate a directory contained in a + zipfile, the resource name must end with the forward slash + character. Otherwise, ``find()`` will not locate the + directory. + + :type resource_name: str or unicode + :param resource_name: The name of the resource to search for. + Resource names are posix-style relative path names, such as + ``corpora/brown``. Directory names will be + automatically converted to a platform-appropriate path separator. + :rtype: str + """ + resource_name = normalize_resource_name(resource_name, True) + + # Resolve default paths at runtime in-case the user overrides + # nltk.data.path + if paths is None: + paths = path + + # Check if the resource name includes a zipfile name + m = re.match(r"(.*\.zip)/?(.*)$|", resource_name) + zipfile, zipentry = m.groups() + + # Check each item in our path + for path_ in paths: + # Is the path item a zipfile? + if path_ and (os.path.isfile(path_) and path_.endswith(".zip")): + try: + return ZipFilePathPointer(path_, resource_name) + except OSError: + # resource not in zipfile + continue + + # Is the path item a directory or is resource_name an absolute path? 
+ elif not path_ or os.path.isdir(path_): + if zipfile is None: + p = os.path.join(path_, url2pathname(resource_name)) + if os.path.exists(p): + if p.endswith(".gz"): + return GzipFileSystemPathPointer(p) + else: + return FileSystemPathPointer(p) + else: + p = os.path.join(path_, url2pathname(zipfile)) + if os.path.exists(p): + try: + return ZipFilePathPointer(p, zipentry) + except OSError: + # resource not in zipfile + continue + + # Fallback: if the path doesn't include a zip file, then try + # again, assuming that one of the path components is inside a + # zipfile of the same name. + if zipfile is None: + pieces = resource_name.split("/") + for i in range(len(pieces)): + modified_name = "/".join(pieces[:i] + [pieces[i] + ".zip"] + pieces[i:]) + try: + return find(modified_name, paths) + except LookupError: + pass + + # Identify the package (i.e. the .zip file) to download. + resource_zipname = resource_name.split("/")[1] + if resource_zipname.endswith(".zip"): + resource_zipname = resource_zipname.rpartition(".")[0] + # Display a friendly error message if the resource wasn't found: + msg = str( + "Resource \33[93m{resource}\033[0m not found.\n" + "Please use the NLTK Downloader to obtain the resource:\n\n" + "\33[31m" # To display red text in terminal. + ">>> import nltk\n" + ">>> nltk.download('{resource}')\n" + "\033[0m" + ).format(resource=resource_zipname) + msg = textwrap_indent(msg) + + msg += "\n For more information see: https://www.nltk.org/data.html\n" + + msg += "\n Attempted to load \33[93m{resource_name}\033[0m\n".format( + resource_name=resource_name + ) + + msg += "\n Searched in:" + "".join("\n - %r" % d for d in paths) + sep = "*" * 70 + resource_not_found = f"\n{sep}\n{msg}\n{sep}\n" + raise LookupError(resource_not_found) + + +def retrieve(resource_url, filename=None, verbose=True): + """ + Copy the given resource to a local file. If no filename is + specified, then use the URL's filename. If there is already a + file named ``filename``, then raise a ``ValueError``. + + :type resource_url: str + :param resource_url: A URL specifying where the resource should be + loaded from. The default protocol is "nltk:", which searches + for the file in the the NLTK data package. + """ + resource_url = normalize_resource_url(resource_url) + if filename is None: + if resource_url.startswith("file:"): + filename = os.path.split(resource_url)[-1] + else: + filename = re.sub(r"(^\w+:)?.*/", "", resource_url) + if os.path.exists(filename): + filename = os.path.abspath(filename) + raise ValueError("File %r already exists!" % filename) + + if verbose: + print(f"Retrieving {resource_url!r}, saving to {filename!r}") + + # Open the input & output streams. + infile = _open(resource_url) + + # Copy infile -> outfile, using 64k blocks. + with open(filename, "wb") as outfile: + while True: + s = infile.read(1024 * 64) # 64k blocks. + outfile.write(s) + if not s: + break + + infile.close() + + +#: A dictionary describing the formats that are supported by NLTK's +#: load() method. Keys are format names, and values are format +#: descriptions. 
+FORMATS = { + "pickle": "A serialized python object, stored using the pickle module.", + "json": "A serialized python object, stored using the json module.", + "yaml": "A serialized python object, stored using the yaml module.", + "cfg": "A context free grammar.", + "pcfg": "A probabilistic CFG.", + "fcfg": "A feature CFG.", + "fol": "A list of first order logic expressions, parsed with " + "nltk.sem.logic.Expression.fromstring.", + "logic": "A list of first order logic expressions, parsed with " + "nltk.sem.logic.LogicParser. Requires an additional logic_parser " + "parameter", + "val": "A semantic valuation, parsed by nltk.sem.Valuation.fromstring.", + "raw": "The raw (byte string) contents of a file.", + "text": "The raw (unicode string) contents of a file. ", +} + +#: A dictionary mapping from file extensions to format names, used +#: by load() when format="auto" to decide the format for a +#: given resource url. +AUTO_FORMATS = { + "pickle": "pickle", + "json": "json", + "yaml": "yaml", + "cfg": "cfg", + "pcfg": "pcfg", + "fcfg": "fcfg", + "fol": "fol", + "logic": "logic", + "val": "val", + "txt": "text", + "text": "text", +} + + +def load( + resource_url, + format="auto", + cache=True, + verbose=False, + logic_parser=None, + fstruct_reader=None, + encoding=None, +): + """ + Load a given resource from the NLTK data package. The following + resource formats are currently supported: + + - ``pickle`` + - ``json`` + - ``yaml`` + - ``cfg`` (context free grammars) + - ``pcfg`` (probabilistic CFGs) + - ``fcfg`` (feature-based CFGs) + - ``fol`` (formulas of First Order Logic) + - ``logic`` (Logical formulas to be parsed by the given logic_parser) + - ``val`` (valuation of First Order Logic model) + - ``text`` (the file contents as a unicode string) + - ``raw`` (the raw file contents as a byte string) + + If no format is specified, ``load()`` will attempt to determine a + format based on the resource name's file extension. If that + fails, ``load()`` will raise a ``ValueError`` exception. + + For all text formats (everything except ``pickle``, ``json``, ``yaml`` and ``raw``), + it tries to decode the raw contents using UTF-8, and if that doesn't + work, it tries with ISO-8859-1 (Latin-1), unless the ``encoding`` + is specified. + + :type resource_url: str + :param resource_url: A URL specifying where the resource should be + loaded from. The default protocol is "nltk:", which searches + for the file in the the NLTK data package. + :type cache: bool + :param cache: If true, add this resource to a cache. If load() + finds a resource in its cache, then it will return it from the + cache rather than loading it. + :type verbose: bool + :param verbose: If true, print a message when loading a resource. + Messages are not displayed when a resource is retrieved from + the cache. + :type logic_parser: LogicParser + :param logic_parser: The parser that will be used to parse logical + expressions. + :type fstruct_reader: FeatStructReader + :param fstruct_reader: The parser that will be used to parse the + feature structure of an fcfg. + :type encoding: str + :param encoding: the encoding of the input; only used for text formats. + """ + resource_url = normalize_resource_url(resource_url) + resource_url = add_py3_data(resource_url) + + # Determine the format of the resource. 
+ if format == "auto": + resource_url_parts = resource_url.split(".") + ext = resource_url_parts[-1] + if ext == "gz": + ext = resource_url_parts[-2] + format = AUTO_FORMATS.get(ext) + if format is None: + raise ValueError( + "Could not determine format for %s based " + 'on its file\nextension; use the "format" ' + "argument to specify the format explicitly." % resource_url + ) + + if format not in FORMATS: + raise ValueError(f"Unknown format type: {format}!") + + # If we've cached the resource, then just return it. + if cache: + resource_val = _resource_cache.get((resource_url, format)) + if resource_val is not None: + if verbose: + print(f"<>") + return resource_val + + # Let the user know what's going on. + if verbose: + print(f"<>") + + # Load the resource. + opened_resource = _open(resource_url) + + if format == "raw": + resource_val = opened_resource.read() + elif format == "pickle": + resource_val = pickle.load(opened_resource) + elif format == "json": + import json + + from nltk.jsontags import json_tags + + resource_val = json.load(opened_resource) + tag = None + if len(resource_val) != 1: + tag = next(resource_val.keys()) + if tag not in json_tags: + raise ValueError("Unknown json tag.") + elif format == "yaml": + import yaml + + resource_val = yaml.safe_load(opened_resource) + else: + # The resource is a text format. + binary_data = opened_resource.read() + if encoding is not None: + string_data = binary_data.decode(encoding) + else: + try: + string_data = binary_data.decode("utf-8") + except UnicodeDecodeError: + string_data = binary_data.decode("latin-1") + if format == "text": + resource_val = string_data + elif format == "cfg": + resource_val = grammar.CFG.fromstring(string_data, encoding=encoding) + elif format == "pcfg": + resource_val = grammar.PCFG.fromstring(string_data, encoding=encoding) + elif format == "fcfg": + resource_val = grammar.FeatureGrammar.fromstring( + string_data, + logic_parser=logic_parser, + fstruct_reader=fstruct_reader, + encoding=encoding, + ) + elif format == "fol": + resource_val = sem.read_logic( + string_data, + logic_parser=sem.logic.LogicParser(), + encoding=encoding, + ) + elif format == "logic": + resource_val = sem.read_logic( + string_data, logic_parser=logic_parser, encoding=encoding + ) + elif format == "val": + resource_val = sem.read_valuation(string_data, encoding=encoding) + else: + raise AssertionError( + "Internal NLTK error: Format %s isn't " + "handled by nltk.data.load()" % (format,) + ) + + opened_resource.close() + + # If requested, add it to the cache. + if cache: + try: + _resource_cache[(resource_url, format)] = resource_val + # TODO: add this line + # print('<>' % (resource_url,)) + except TypeError: + # We can't create weak references to some object types, like + # strings and tuples. For now, just don't cache them. + pass + + return resource_val + + +def show_cfg(resource_url, escape="##"): + """ + Write out a grammar file, ignoring escaped and empty lines. + + :type resource_url: str + :param resource_url: A URL specifying where the resource should be + loaded from. The default protocol is "nltk:", which searches + for the file in the the NLTK data package. 
+ :type escape: str + :param escape: Prepended string that signals lines to be ignored + """ + resource_url = normalize_resource_url(resource_url) + resource_val = load(resource_url, format="text", cache=False) + lines = resource_val.splitlines() + for l in lines: + if l.startswith(escape): + continue + if re.match("^$", l): + continue + print(l) + + +def clear_cache(): + """ + Remove all objects from the resource cache. + :see: load() + """ + _resource_cache.clear() + + +def _open(resource_url): + """ + Helper function that returns an open file object for a resource, + given its resource URL. If the given resource URL uses the "nltk:" + protocol, or uses no protocol, then use ``nltk.data.find`` to find + its path, and open it with the given mode; if the resource URL + uses the 'file' protocol, then open the file with the given mode; + otherwise, delegate to ``urllib2.urlopen``. + + :type resource_url: str + :param resource_url: A URL specifying where the resource should be + loaded from. The default protocol is "nltk:", which searches + for the file in the the NLTK data package. + """ + resource_url = normalize_resource_url(resource_url) + protocol, path_ = split_resource_url(resource_url) + + if protocol is None or protocol.lower() == "nltk": + return find(path_, path + [""]).open() + elif protocol.lower() == "file": + # urllib might not use mode='rb', so handle this one ourselves: + return find(path_, [""]).open() + else: + return urlopen(resource_url) + + +###################################################################### +# Lazy Resource Loader +###################################################################### + + +class LazyLoader: + @py3_data + def __init__(self, _path): + self._path = _path + + def __load(self): + resource = load(self._path) + # This is where the magic happens! Transform ourselves into + # the object by modifying our own __dict__ and __class__ to + # match that of `resource`. + self.__dict__ = resource.__dict__ + self.__class__ = resource.__class__ + + def __getattr__(self, attr): + self.__load() + # This looks circular, but its not, since __load() changes our + # __class__ to something new: + return getattr(self, attr) + + def __repr__(self): + self.__load() + # This looks circular, but its not, since __load() changes our + # __class__ to something new: + return repr(self) + + +###################################################################### +# Open-On-Demand ZipFile +###################################################################### + + +class OpenOnDemandZipFile(zipfile.ZipFile): + """ + A subclass of ``zipfile.ZipFile`` that closes its file pointer + whenever it is not using it; and re-opens it when it needs to read + data from the zipfile. This is useful for reducing the number of + open file handles when many zip files are being accessed at once. + ``OpenOnDemandZipFile`` must be constructed from a filename, not a + file-like object (to allow re-opening). ``OpenOnDemandZipFile`` is + read-only (i.e. ``write()`` and ``writestr()`` are disabled. + """ + + @py3_data + def __init__(self, filename): + if not isinstance(filename, str): + raise TypeError("ReopenableZipFile filename must be a string") + zipfile.ZipFile.__init__(self, filename) + assert self.filename == filename + self.close() + # After closing a ZipFile object, the _fileRefCnt needs to be cleared + # for Python2and3 compatible code. 
+ self._fileRefCnt = 0 + + def read(self, name): + assert self.fp is None + self.fp = open(self.filename, "rb") + value = zipfile.ZipFile.read(self, name) + # Ensure that _fileRefCnt needs to be set for Python2and3 compatible code. + # Since we only opened one file here, we add 1. + self._fileRefCnt += 1 + self.close() + return value + + def write(self, *args, **kwargs): + """:raise NotImplementedError: OpenOnDemandZipfile is read-only""" + raise NotImplementedError("OpenOnDemandZipfile is read-only") + + def writestr(self, *args, **kwargs): + """:raise NotImplementedError: OpenOnDemandZipfile is read-only""" + raise NotImplementedError("OpenOnDemandZipfile is read-only") + + def __repr__(self): + return repr("OpenOnDemandZipFile(%r)" % self.filename) + + +###################################################################### +# Seekable Unicode Stream Reader +###################################################################### + + +class SeekableUnicodeStreamReader: + """ + A stream reader that automatically encodes the source byte stream + into unicode (like ``codecs.StreamReader``); but still supports the + ``seek()`` and ``tell()`` operations correctly. This is in contrast + to ``codecs.StreamReader``, which provide *broken* ``seek()`` and + ``tell()`` methods. + + This class was motivated by ``StreamBackedCorpusView``, which + makes extensive use of ``seek()`` and ``tell()``, and needs to be + able to handle unicode-encoded files. + + Note: this class requires stateless decoders. To my knowledge, + this shouldn't cause a problem with any of python's builtin + unicode encodings. + """ + + DEBUG = True # : If true, then perform extra sanity checks. + + @py3_data + def __init__(self, stream, encoding, errors="strict"): + # Rewind the stream to its beginning. + stream.seek(0) + + self.stream = stream + """The underlying stream.""" + + self.encoding = encoding + """The name of the encoding that should be used to encode the + underlying stream.""" + + self.errors = errors + """The error mode that should be used when decoding data from + the underlying stream. Can be 'strict', 'ignore', or + 'replace'.""" + + self.decode = codecs.getdecoder(encoding) + """The function that is used to decode byte strings into + unicode strings.""" + + self.bytebuffer = b"" + """A buffer to use bytes that have been read but have not yet + been decoded. This is only used when the final bytes from + a read do not form a complete encoding for a character.""" + + self.linebuffer = None + """A buffer used by ``readline()`` to hold characters that have + been read, but have not yet been returned by ``read()`` or + ``readline()``. This buffer consists of a list of unicode + strings, where each string corresponds to a single line. + The final element of the list may or may not be a complete + line. Note that the existence of a linebuffer makes the + ``tell()`` operation more complex, because it must backtrack + to the beginning of the buffer to determine the correct + file position in the underlying byte stream.""" + + self._rewind_checkpoint = 0 + """The file position at which the most recent read on the + underlying stream began. This is used, together with + ``_rewind_numchars``, to backtrack to the beginning of + ``linebuffer`` (which is required by ``tell()``).""" + + self._rewind_numchars = None + """The number of characters that have been returned since the + read that started at ``_rewind_checkpoint``. 
This is used, + together with ``_rewind_checkpoint``, to backtrack to the + beginning of ``linebuffer`` (which is required by ``tell()``).""" + + self._bom = self._check_bom() + """The length of the byte order marker at the beginning of + the stream (or None for no byte order marker).""" + + # ///////////////////////////////////////////////////////////////// + # Read methods + # ///////////////////////////////////////////////////////////////// + + def read(self, size=None): + """ + Read up to ``size`` bytes, decode them using this reader's + encoding, and return the resulting unicode string. + + :param size: The maximum number of bytes to read. If not + specified, then read as many bytes as possible. + :type size: int + :rtype: unicode + """ + chars = self._read(size) + + # If linebuffer is not empty, then include it in the result + if self.linebuffer: + chars = "".join(self.linebuffer) + chars + self.linebuffer = None + self._rewind_numchars = None + + return chars + + def discard_line(self): + if self.linebuffer and len(self.linebuffer) > 1: + line = self.linebuffer.pop(0) + self._rewind_numchars += len(line) + else: + self.stream.readline() + + def readline(self, size=None): + """ + Read a line of text, decode it using this reader's encoding, + and return the resulting unicode string. + + :param size: The maximum number of bytes to read. If no + newline is encountered before ``size`` bytes have been read, + then the returned value may not be a complete line of text. + :type size: int + """ + # If we have a non-empty linebuffer, then return the first + # line from it. (Note that the last element of linebuffer may + # not be a complete line; so let _read() deal with it.) + if self.linebuffer and len(self.linebuffer) > 1: + line = self.linebuffer.pop(0) + self._rewind_numchars += len(line) + return line + + readsize = size or 72 + chars = "" + + # If there's a remaining incomplete line in the buffer, add it. + if self.linebuffer: + chars += self.linebuffer.pop() + self.linebuffer = None + + while True: + startpos = self.stream.tell() - len(self.bytebuffer) + new_chars = self._read(readsize) + + # If we're at a '\r', then read one extra character, since + # it might be a '\n', to get the proper line ending. + if new_chars and new_chars.endswith("\r"): + new_chars += self._read(1) + + chars += new_chars + lines = chars.splitlines(True) + if len(lines) > 1: + line = lines[0] + self.linebuffer = lines[1:] + self._rewind_numchars = len(new_chars) - (len(chars) - len(line)) + self._rewind_checkpoint = startpos + break + elif len(lines) == 1: + line0withend = lines[0] + line0withoutend = lines[0].splitlines(False)[0] + if line0withend != line0withoutend: # complete line + line = line0withend + break + + if not new_chars or size is not None: + line = chars + break + + # Read successively larger blocks of text. + if readsize < 8000: + readsize *= 2 + + return line + + def readlines(self, sizehint=None, keepends=True): + """ + Read this file's contents, decode them using this reader's + encoding, and return it as a list of unicode lines. + + :rtype: list(unicode) + :param sizehint: Ignored. + :param keepends: If false, then strip newlines. 
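+
+        A minimal illustration, reading from an in-memory buffer:
+
+        >>> from io import BytesIO
+        >>> from nltk.data import SeekableUnicodeStreamReader
+        >>> SeekableUnicodeStreamReader(BytesIO(b'spam eggs'), 'utf-8').readlines()
+        ['spam eggs']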
+ """ + return self.read().splitlines(keepends) + + def next(self): + """Return the next decoded line from the underlying stream.""" + line = self.readline() + if line: + return line + else: + raise StopIteration + + def __next__(self): + return self.next() + + def __iter__(self): + """Return self""" + return self + + def __del__(self): + # let garbage collector deal with still opened streams + if not self.closed: + self.close() + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def xreadlines(self): + """Return self""" + return self + + # ///////////////////////////////////////////////////////////////// + # Pass-through methods & properties + # ///////////////////////////////////////////////////////////////// + + @property + def closed(self): + """True if the underlying stream is closed.""" + return self.stream.closed + + @property + def name(self): + """The name of the underlying stream.""" + return self.stream.name + + @property + def mode(self): + """The mode of the underlying stream.""" + return self.stream.mode + + def close(self): + """ + Close the underlying stream. + """ + self.stream.close() + + # ///////////////////////////////////////////////////////////////// + # Seek and tell + # ///////////////////////////////////////////////////////////////// + + def seek(self, offset, whence=0): + """ + Move the stream to a new file position. If the reader is + maintaining any buffers, then they will be cleared. + + :param offset: A byte count offset. + :param whence: If 0, then the offset is from the start of the file + (offset should be positive), if 1, then the offset is from the + current position (offset may be positive or negative); and if 2, + then the offset is from the end of the file (offset should + typically be negative). + """ + if whence == 1: + raise ValueError( + "Relative seek is not supported for " + "SeekableUnicodeStreamReader -- consider " + "using char_seek_forward() instead." + ) + self.stream.seek(offset, whence) + self.linebuffer = None + self.bytebuffer = b"" + self._rewind_numchars = None + self._rewind_checkpoint = self.stream.tell() + + def char_seek_forward(self, offset): + """ + Move the read pointer forward by ``offset`` characters. + """ + if offset < 0: + raise ValueError("Negative offsets are not supported") + # Clear all buffers. + self.seek(self.tell()) + # Perform the seek operation. + self._char_seek_forward(offset) + + def _char_seek_forward(self, offset, est_bytes=None): + """ + Move the file position forward by ``offset`` characters, + ignoring all buffers. + + :param est_bytes: A hint, giving an estimate of the number of + bytes that will be needed to move forward by ``offset`` chars. + Defaults to ``offset``. + """ + if est_bytes is None: + est_bytes = offset + bytes = b"" + + while True: + # Read in a block of bytes. + newbytes = self.stream.read(est_bytes - len(bytes)) + bytes += newbytes + + # Decode the bytes to characters. + chars, bytes_decoded = self._incr_decode(bytes) + + # If we got the right number of characters, then seek + # backwards over any truncated characters, and return. + if len(chars) == offset: + self.stream.seek(-len(bytes) + bytes_decoded, 1) + return + + # If we went too far, then we can back-up until we get it + # right, using the bytes we've already read. + if len(chars) > offset: + while len(chars) > offset: + # Assume at least one byte/char. 
+ est_bytes += offset - len(chars) + chars, bytes_decoded = self._incr_decode(bytes[:est_bytes]) + self.stream.seek(-len(bytes) + bytes_decoded, 1) + return + + # Otherwise, we haven't read enough bytes yet; loop again. + est_bytes += offset - len(chars) + + def tell(self): + """ + Return the current file position on the underlying byte + stream. If this reader is maintaining any buffers, then the + returned file position will be the position of the beginning + of those buffers. + """ + # If nothing's buffered, then just return our current filepos: + if self.linebuffer is None: + return self.stream.tell() - len(self.bytebuffer) + + # Otherwise, we'll need to backtrack the filepos until we + # reach the beginning of the buffer. + + # Store our original file position, so we can return here. + orig_filepos = self.stream.tell() + + # Calculate an estimate of where we think the newline is. + bytes_read = (orig_filepos - len(self.bytebuffer)) - self._rewind_checkpoint + buf_size = sum(len(line) for line in self.linebuffer) + est_bytes = int( + bytes_read * self._rewind_numchars / (self._rewind_numchars + buf_size) + ) + + self.stream.seek(self._rewind_checkpoint) + self._char_seek_forward(self._rewind_numchars, est_bytes) + filepos = self.stream.tell() + + # Sanity check + if self.DEBUG: + self.stream.seek(filepos) + check1 = self._incr_decode(self.stream.read(50))[0] + check2 = "".join(self.linebuffer) + assert check1.startswith(check2) or check2.startswith(check1) + + # Return to our original filepos (so we don't have to throw + # out our buffer.) + self.stream.seek(orig_filepos) + + # Return the calculated filepos + return filepos + + # ///////////////////////////////////////////////////////////////// + # Helper methods + # ///////////////////////////////////////////////////////////////// + + def _read(self, size=None): + """ + Read up to ``size`` bytes from the underlying stream, decode + them using this reader's encoding, and return the resulting + unicode string. ``linebuffer`` is not included in the result. + """ + if size == 0: + return "" + + # Skip past the byte order marker, if present. + if self._bom and self.stream.tell() == 0: + self.stream.read(self._bom) + + # Read the requested number of bytes. + if size is None: + new_bytes = self.stream.read() + else: + new_bytes = self.stream.read(size) + bytes = self.bytebuffer + new_bytes + + # Decode the bytes into unicode characters + chars, bytes_decoded = self._incr_decode(bytes) + + # If we got bytes but couldn't decode any, then read further. + if (size is not None) and (not chars) and (len(new_bytes) > 0): + while not chars: + new_bytes = self.stream.read(1) + if not new_bytes: + break # end of file. + bytes += new_bytes + chars, bytes_decoded = self._incr_decode(bytes) + + # Record any bytes we didn't consume. + self.bytebuffer = bytes[bytes_decoded:] + + # Return the result + return chars + + def _incr_decode(self, bytes): + """ + Decode the given byte string into a unicode string, using this + reader's encoding. If an exception is encountered that + appears to be caused by a truncation error, then just decode + the byte string without the bytes that cause the trunctaion + error. + + Return a tuple ``(chars, num_consumed)``, where ``chars`` is + the decoded unicode string, and ``num_consumed`` is the + number of bytes that were consumed. 
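+
+        A small sketch of the return value for input that decodes cleanly:
+
+        >>> from io import BytesIO
+        >>> from nltk.data import SeekableUnicodeStreamReader
+        >>> reader = SeekableUnicodeStreamReader(BytesIO(b''), 'utf-8')
+        >>> reader._incr_decode(b'abc')
+        ('abc', 3)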
+ """ + while True: + try: + return self.decode(bytes, "strict") + except UnicodeDecodeError as exc: + # If the exception occurs at the end of the string, + # then assume that it's a truncation error. + if exc.end == len(bytes): + return self.decode(bytes[: exc.start], self.errors) + + # Otherwise, if we're being strict, then raise it. + elif self.errors == "strict": + raise + + # If we're not strict, then re-process it with our + # errors setting. This *may* raise an exception. + else: + return self.decode(bytes, self.errors) + + _BOM_TABLE = { + "utf8": [(codecs.BOM_UTF8, None)], + "utf16": [(codecs.BOM_UTF16_LE, "utf16-le"), (codecs.BOM_UTF16_BE, "utf16-be")], + "utf16le": [(codecs.BOM_UTF16_LE, None)], + "utf16be": [(codecs.BOM_UTF16_BE, None)], + "utf32": [(codecs.BOM_UTF32_LE, "utf32-le"), (codecs.BOM_UTF32_BE, "utf32-be")], + "utf32le": [(codecs.BOM_UTF32_LE, None)], + "utf32be": [(codecs.BOM_UTF32_BE, None)], + } + + def _check_bom(self): + # Normalize our encoding name + enc = re.sub("[ -]", "", self.encoding.lower()) + + # Look up our encoding in the BOM table. + bom_info = self._BOM_TABLE.get(enc) + + if bom_info: + # Read a prefix, to check against the BOM(s) + bytes = self.stream.read(16) + self.stream.seek(0) + + # Check for each possible BOM. + for (bom, new_encoding) in bom_info: + if bytes.startswith(bom): + if new_encoding: + self.encoding = new_encoding + return len(bom) + + return None + + +__all__ = [ + "path", + "PathPointer", + "FileSystemPathPointer", + "BufferedGzipFile", + "GzipFileSystemPathPointer", + "GzipFileSystemPathPointer", + "find", + "retrieve", + "FORMATS", + "AUTO_FORMATS", + "load", + "show_cfg", + "clear_cache", + "LazyLoader", + "OpenOnDemandZipFile", + "GzipFileSystemPathPointer", + "SeekableUnicodeStreamReader", +] diff --git a/lib/python3.10/site-packages/nltk/decorators.py b/lib/python3.10/site-packages/nltk/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..3a0fae1852afd47a2290b41ce94843aca36aa05f --- /dev/null +++ b/lib/python3.10/site-packages/nltk/decorators.py @@ -0,0 +1,251 @@ +""" +Decorator module by Michele Simionato +Copyright Michele Simionato, distributed under the terms of the BSD License (see below). +http://www.phyast.pitt.edu/~micheles/python/documentation.html + +Included in NLTK for its support of a nice memoization decorator. +""" + +__docformat__ = "restructuredtext en" + +## The basic trick is to generate the source code for the decorated function +## with the right signature and to evaluate it. +## Uncomment the statement 'print >> sys.stderr, func_src' in _decorator +## to understand what is going on. + +__all__ = ["decorator", "new_wrapper", "getinfo"] + +import sys + +# Hack to keep NLTK's "tokenize" module from colliding with the "tokenize" in +# the Python standard library. +OLD_SYS_PATH = sys.path[:] +sys.path = [p for p in sys.path if p and "nltk" not in str(p)] +import inspect + +sys.path = OLD_SYS_PATH + + +def __legacysignature(signature): + """ + For retrocompatibility reasons, we don't use a standard Signature. + Instead, we use the string generated by this method. + Basically, from a Signature we create a string and remove the default values. 
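+
+    For example:
+
+    >>> import inspect
+    >>> def f(x, y=2): pass
+    >>> __legacysignature(inspect.signature(f))
+    'x, y'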
+ """ + listsignature = str(signature)[1:-1].split(",") + for counter, param in enumerate(listsignature): + if param.count("=") > 0: + listsignature[counter] = param[0 : param.index("=")].strip() + else: + listsignature[counter] = param.strip() + return ", ".join(listsignature) + + +def getinfo(func): + """ + Returns an info dictionary containing: + - name (the name of the function : str) + - argnames (the names of the arguments : list) + - defaults (the values of the default arguments : tuple) + - signature (the signature : str) + - fullsignature (the full signature : Signature) + - doc (the docstring : str) + - module (the module name : str) + - dict (the function __dict__ : str) + + >>> def f(self, x=1, y=2, *args, **kw): pass + + >>> info = getinfo(f) + + >>> info["name"] + 'f' + >>> info["argnames"] + ['self', 'x', 'y', 'args', 'kw'] + + >>> info["defaults"] + (1, 2) + + >>> info["signature"] + 'self, x, y, *args, **kw' + + >>> info["fullsignature"] + + """ + assert inspect.ismethod(func) or inspect.isfunction(func) + argspec = inspect.getfullargspec(func) + regargs, varargs, varkwargs = argspec[:3] + argnames = list(regargs) + if varargs: + argnames.append(varargs) + if varkwargs: + argnames.append(varkwargs) + fullsignature = inspect.signature(func) + # Convert Signature to str + signature = __legacysignature(fullsignature) + + # pypy compatibility + if hasattr(func, "__closure__"): + _closure = func.__closure__ + _globals = func.__globals__ + else: + _closure = func.func_closure + _globals = func.func_globals + + return dict( + name=func.__name__, + argnames=argnames, + signature=signature, + fullsignature=fullsignature, + defaults=func.__defaults__, + doc=func.__doc__, + module=func.__module__, + dict=func.__dict__, + globals=_globals, + closure=_closure, + ) + + +def update_wrapper(wrapper, model, infodict=None): + "akin to functools.update_wrapper" + infodict = infodict or getinfo(model) + wrapper.__name__ = infodict["name"] + wrapper.__doc__ = infodict["doc"] + wrapper.__module__ = infodict["module"] + wrapper.__dict__.update(infodict["dict"]) + wrapper.__defaults__ = infodict["defaults"] + wrapper.undecorated = model + return wrapper + + +def new_wrapper(wrapper, model): + """ + An improvement over functools.update_wrapper. The wrapper is a generic + callable object. It works by generating a copy of the wrapper with the + right signature and by updating the copy, not the original. + Moreovoer, 'model' can be a dictionary with keys 'name', 'doc', 'module', + 'dict', 'defaults'. + """ + if isinstance(model, dict): + infodict = model + else: # assume model is a function + infodict = getinfo(model) + assert ( + not "_wrapper_" in infodict["argnames"] + ), '"_wrapper_" is a reserved argument name!' + src = "lambda %(signature)s: _wrapper_(%(signature)s)" % infodict + funcopy = eval(src, dict(_wrapper_=wrapper)) + return update_wrapper(funcopy, model, infodict) + + +# helper used in decorator_factory +def __call__(self, func): + return new_wrapper(lambda *a, **k: self.call(func, *a, **k), func) + + +def decorator_factory(cls): + """ + Take a class with a ``.caller`` method and return a callable decorator + object. It works by adding a suitable __call__ method to the class; + it raises a TypeError if the class already has a nontrivial __call__ + method. 
+ """ + attrs = set(dir(cls)) + if "__call__" in attrs: + raise TypeError( + "You cannot decorate a class with a nontrivial " "__call__ method" + ) + if "call" not in attrs: + raise TypeError("You cannot decorate a class without a " ".call method") + cls.__call__ = __call__ + return cls + + +def decorator(caller): + """ + General purpose decorator factory: takes a caller function as + input and returns a decorator with the same attributes. + A caller function is any function like this:: + + def caller(func, *args, **kw): + # do something + return func(*args, **kw) + + Here is an example of usage: + + >>> @decorator + ... def chatty(f, *args, **kw): + ... print("Calling %r" % f.__name__) + ... return f(*args, **kw) + + >>> chatty.__name__ + 'chatty' + + >>> @chatty + ... def f(): pass + ... + >>> f() + Calling 'f' + + decorator can also take in input a class with a .caller method; in this + case it converts the class into a factory of callable decorator objects. + See the documentation for an example. + """ + if inspect.isclass(caller): + return decorator_factory(caller) + + def _decorator(func): # the real meat is here + infodict = getinfo(func) + argnames = infodict["argnames"] + assert not ( + "_call_" in argnames or "_func_" in argnames + ), "You cannot use _call_ or _func_ as argument names!" + src = "lambda %(signature)s: _call_(_func_, %(signature)s)" % infodict + # import sys; print >> sys.stderr, src # for debugging purposes + dec_func = eval(src, dict(_func_=func, _call_=caller)) + return update_wrapper(dec_func, func, infodict) + + return update_wrapper(_decorator, caller) + + +def getattr_(obj, name, default_thunk): + "Similar to .setdefault in dictionaries." + try: + return getattr(obj, name) + except AttributeError: + default = default_thunk() + setattr(obj, name, default) + return default + + +@decorator +def memoize(func, *args): + dic = getattr_(func, "memoize_dic", dict) + # memoize_dic is created at the first call + if args in dic: + return dic[args] + result = func(*args) + dic[args] = result + return result + + +########################## LEGALESE ############################### + +## Redistributions of source code must retain the above copyright +## notice, this list of conditions and the following disclaimer. +## Redistributions in bytecode form must reproduce the above copyright +## notice, this list of conditions and the following disclaimer in +## the documentation and/or other materials provided with the +## distribution. + +## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +## DAMAGE. 
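+
+# A minimal usage sketch (illustrative, not part of the original module):
+# ``memoize`` caches results per argument tuple, so repeated calls with the
+# same arguments reuse the stored value.  The function name ``fib`` below is
+# hypothetical.
+#
+#     @memoize
+#     def fib(n):
+#         return n if n < 2 else fib(n - 1) + fib(n - 2)
+#
+#     fib(30)   # computed recursively on the first call
+#     fib(30)   # answered from the cache on subsequent calls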
diff --git a/lib/python3.10/site-packages/nltk/downloader.py b/lib/python3.10/site-packages/nltk/downloader.py new file mode 100644 index 0000000000000000000000000000000000000000..71519238755062c698a1d82ffa0984b3ccb5ba92 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/downloader.py @@ -0,0 +1,2559 @@ +# Natural Language Toolkit: Corpus & Model Downloader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +The NLTK corpus and module downloader. This module defines several +interfaces which can be used to download corpora, models, and other +data packages that can be used with NLTK. + +Downloading Packages +==================== +If called with no arguments, ``download()`` will display an interactive +interface which can be used to download and install new packages. +If Tkinter is available, then a graphical interface will be shown, +otherwise a simple text interface will be provided. + +Individual packages can be downloaded by calling the ``download()`` +function with a single argument, giving the package identifier for the +package that should be downloaded: + + >>> download('treebank') # doctest: +SKIP + [nltk_data] Downloading package 'treebank'... + [nltk_data] Unzipping corpora/treebank.zip. + +NLTK also provides a number of \"package collections\", consisting of +a group of related packages. To download all packages in a +colleciton, simply call ``download()`` with the collection's +identifier: + + >>> download('all-corpora') # doctest: +SKIP + [nltk_data] Downloading package 'abc'... + [nltk_data] Unzipping corpora/abc.zip. + [nltk_data] Downloading package 'alpino'... + [nltk_data] Unzipping corpora/alpino.zip. + ... + [nltk_data] Downloading package 'words'... + [nltk_data] Unzipping corpora/words.zip. + +Download Directory +================== +By default, packages are installed in either a system-wide directory +(if Python has sufficient access to write to it); or in the current +user's home directory. However, the ``download_dir`` argument may be +used to specify a different installation target, if desired. + +See ``Downloader.default_download_dir()`` for more a detailed +description of how the default download directory is chosen. + +NLTK Download Server +==================== +Before downloading any packages, the corpus and module downloader +contacts the NLTK download server, to retrieve an index file +describing the available packages. By default, this index file is +loaded from ``https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml``. +If necessary, it is possible to create a new ``Downloader`` object, +specifying a different URL for the package index file. + +Usage:: + + python nltk/downloader.py [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS + +or:: + + python -m nltk.downloader [-d DATADIR] [-q] [-f] [-k] PACKAGE_IDS +""" +# ---------------------------------------------------------------------- + +""" + + 0 1 2 3 +[label][----][label][----] +[column ][column ] + +Notes +===== +Handling data files.. Some questions: + +* Should the data files be kept zipped or unzipped? I say zipped. + +* Should the data files be kept in svn at all? Advantages: history; + automatic version numbers; 'svn up' could be used rather than the + downloader to update the corpora. Disadvantages: they're big, + which makes working from svn a bit of a pain. And we're planning + to potentially make them much bigger. I don't think we want + people to have to download 400MB corpora just to use nltk from svn. 
+ +* Compromise: keep the data files in trunk/data rather than in + trunk/nltk. That way you can check them out in svn if you want + to; but you don't need to, and you can use the downloader instead. + +* Also: keep models in mind. When we change the code, we'd + potentially like the models to get updated. This could require a + little thought. + +* So.. let's assume we have a trunk/data directory, containing a bunch + of packages. The packages should be kept as zip files, because we + really shouldn't be editing them much (well -- we may edit models + more, but they tend to be binary-ish files anyway, where diffs + aren't that helpful). So we'll have trunk/data, with a bunch of + files like abc.zip and treebank.zip and propbank.zip. For each + package we could also have eg treebank.xml and propbank.xml, + describing the contents of the package (name, copyright, license, + etc). Collections would also have .xml files. Finally, we would + pull all these together to form a single index.xml file. Some + directory structure wouldn't hurt. So how about:: + + /trunk/data/ ....................... root of data svn + index.xml ........................ main index file + src/ ............................. python scripts + packages/ ........................ dir for packages + corpora/ ....................... zip & xml files for corpora + grammars/ ...................... zip & xml files for grammars + taggers/ ....................... zip & xml files for taggers + tokenizers/ .................... zip & xml files for tokenizers + etc. + collections/ ..................... xml files for collections + + Where the root (/trunk/data) would contain a makefile; and src/ + would contain a script to update the info.xml file. It could also + contain scripts to rebuild some of the various model files. The + script that builds index.xml should probably check that each zip + file expands entirely into a single subdir, whose name matches the + package's uid. + +Changes I need to make: + - in index: change "size" to "filesize" or "compressed-size" + - in index: add "unzipped-size" + - when checking status: check both compressed & uncompressed size. + uncompressed size is important to make sure we detect a problem + if something got partially unzipped. define new status values + to differentiate stale vs corrupt vs corruptly-uncompressed?? + (we shouldn't need to re-download the file if the zip file is ok + but it didn't get uncompressed fully.) + - add other fields to the index: author, license, copyright, contact, + etc. + +the current grammars/ package would become a single new package (eg +toy-grammars or book-grammars). + +xml file should have: + - authorship info + - license info + - copyright info + - contact info + - info about what type of data/annotation it contains? + - recommended corpus reader? + +collections can contain other collections. they can also contain +multiple package types (corpora & models). Have a single 'basics' +package that includes everything we talk about in the book? + +n.b.: there will have to be a fallback to the punkt tokenizer, in case +they didn't download that model. + +default: unzip or not? 
+ +""" +import functools +import itertools +import os +import shutil +import subprocess +import sys +import textwrap +import threading +import time +import warnings +import zipfile +from hashlib import md5 +from xml.etree import ElementTree + +try: + TKINTER = True + from tkinter import Button, Canvas, Entry, Frame, IntVar, Label, Menu, TclError, Tk + from tkinter.messagebox import showerror + + from nltk.draw.table import Table + from nltk.draw.util import ShowText +except ImportError: + TKINTER = False + TclError = ValueError + +from urllib.error import HTTPError, URLError +from urllib.request import urlopen + +import nltk + +# urllib2 = nltk.internals.import_from_stdlib('urllib2') + + +###################################################################### +# Directory entry objects (from the data server's index file) +###################################################################### + + +class Package: + """ + A directory entry for a downloadable package. These entries are + extracted from the XML index file that is downloaded by + ``Downloader``. Each package consists of a single file; but if + that file is a zip file, then it can be automatically decompressed + when the package is installed. + """ + + def __init__( + self, + id, + url, + name=None, + subdir="", + size=None, + unzipped_size=None, + checksum=None, + svn_revision=None, + copyright="Unknown", + contact="Unknown", + license="Unknown", + author="Unknown", + unzip=True, + **kw, + ): + self.id = id + """A unique identifier for this package.""" + + self.name = name or id + """A string name for this package.""" + + self.subdir = subdir + """The subdirectory where this package should be installed. + E.g., ``'corpora'`` or ``'taggers'``.""" + + self.url = url + """A URL that can be used to download this package's file.""" + + self.size = int(size) + """The filesize (in bytes) of the package file.""" + + self.unzipped_size = int(unzipped_size) + """The total filesize of the files contained in the package's + zipfile.""" + + self.checksum = checksum + """The MD-5 checksum of the package file.""" + + self.svn_revision = svn_revision + """A subversion revision number for this package.""" + + self.copyright = copyright + """Copyright holder for this package.""" + + self.contact = contact + """Name & email of the person who should be contacted with + questions about this package.""" + + self.license = license + """License information for this package.""" + + self.author = author + """Author of this package.""" + + ext = os.path.splitext(url.split("/")[-1])[1] + self.filename = os.path.join(subdir, id + ext) + """The filename that should be used for this package's file. It + is formed by joining ``self.subdir`` with ``self.id``, and + using the same extension as ``url``.""" + + self.unzip = bool(int(unzip)) # '0' or '1' + """A flag indicating whether this corpus should be unzipped by + default.""" + + # Include any other attributes provided by the XML file. + self.__dict__.update(kw) + + @staticmethod + def fromxml(xml): + if isinstance(xml, str): + xml = ElementTree.parse(xml) + for key in xml.attrib: + xml.attrib[key] = str(xml.attrib[key]) + return Package(**xml.attrib) + + def __lt__(self, other): + return self.id < other.id + + def __repr__(self): + return "" % self.id + + +class Collection: + """ + A directory entry for a collection of downloadable packages. + These entries are extracted from the XML index file that is + downloaded by ``Downloader``. 
+ """ + + def __init__(self, id, children, name=None, **kw): + self.id = id + """A unique identifier for this collection.""" + + self.name = name or id + """A string name for this collection.""" + + self.children = children + """A list of the ``Collections`` or ``Packages`` directly + contained by this collection.""" + + self.packages = None + """A list of ``Packages`` contained by this collection or any + collections it recursively contains.""" + + # Include any other attributes provided by the XML file. + self.__dict__.update(kw) + + @staticmethod + def fromxml(xml): + if isinstance(xml, str): + xml = ElementTree.parse(xml) + for key in xml.attrib: + xml.attrib[key] = str(xml.attrib[key]) + children = [child.get("ref") for child in xml.findall("item")] + return Collection(children=children, **xml.attrib) + + def __lt__(self, other): + return self.id < other.id + + def __repr__(self): + return "" % self.id + + +###################################################################### +# Message Passing Objects +###################################################################### + + +class DownloaderMessage: + """A status message object, used by ``incr_download`` to + communicate its progress.""" + + +class StartCollectionMessage(DownloaderMessage): + """Data server has started working on a collection of packages.""" + + def __init__(self, collection): + self.collection = collection + + +class FinishCollectionMessage(DownloaderMessage): + """Data server has finished working on a collection of packages.""" + + def __init__(self, collection): + self.collection = collection + + +class StartPackageMessage(DownloaderMessage): + """Data server has started working on a package.""" + + def __init__(self, package): + self.package = package + + +class FinishPackageMessage(DownloaderMessage): + """Data server has finished working on a package.""" + + def __init__(self, package): + self.package = package + + +class StartDownloadMessage(DownloaderMessage): + """Data server has started downloading a package.""" + + def __init__(self, package): + self.package = package + + +class FinishDownloadMessage(DownloaderMessage): + """Data server has finished downloading a package.""" + + def __init__(self, package): + self.package = package + + +class StartUnzipMessage(DownloaderMessage): + """Data server has started unzipping a package.""" + + def __init__(self, package): + self.package = package + + +class FinishUnzipMessage(DownloaderMessage): + """Data server has finished unzipping a package.""" + + def __init__(self, package): + self.package = package + + +class UpToDateMessage(DownloaderMessage): + """The package download file is already up-to-date""" + + def __init__(self, package): + self.package = package + + +class StaleMessage(DownloaderMessage): + """The package download file is out-of-date or corrupt""" + + def __init__(self, package): + self.package = package + + +class ErrorMessage(DownloaderMessage): + """Data server encountered an error""" + + def __init__(self, package, message): + self.package = package + if isinstance(message, Exception): + self.message = str(message) + else: + self.message = message + + +class ProgressMessage(DownloaderMessage): + """Indicates how much progress the data server has made""" + + def __init__(self, progress): + self.progress = progress + + +class SelectDownloadDirMessage(DownloaderMessage): + """Indicates what download directory the data server is using""" + + def __init__(self, download_dir): + self.download_dir = download_dir + + 
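+# A minimal consumption sketch (illustrative, not part of the original
+# module): the message objects above are yielded by
+# ``Downloader.incr_download()`` and can be inspected incrementally, e.g.::
+#
+#     downloader = Downloader()
+#     for msg in downloader.incr_download('treebank'):
+#         if isinstance(msg, ProgressMessage):
+#             print('%d%% done' % msg.progress)
+#         elif isinstance(msg, ErrorMessage):
+#             print('error: %s' % msg.message)
+
+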
+###################################################################### +# NLTK Data Server +###################################################################### + + +class Downloader: + """ + A class used to access the NLTK data server, which can be used to + download corpora and other data packages. + """ + + # ///////////////////////////////////////////////////////////////// + # Configuration + # ///////////////////////////////////////////////////////////////// + + INDEX_TIMEOUT = 60 * 60 # 1 hour + """The amount of time after which the cached copy of the data + server index will be considered 'stale,' and will be + re-downloaded.""" + + DEFAULT_URL = "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml" + """The default URL for the NLTK data server's index. An + alternative URL can be specified when creating a new + ``Downloader`` object.""" + + # ///////////////////////////////////////////////////////////////// + # Status Constants + # ///////////////////////////////////////////////////////////////// + + INSTALLED = "installed" + """A status string indicating that a package or collection is + installed and up-to-date.""" + NOT_INSTALLED = "not installed" + """A status string indicating that a package or collection is + not installed.""" + STALE = "out of date" + """A status string indicating that a package or collection is + corrupt or out-of-date.""" + PARTIAL = "partial" + """A status string indicating that a collection is partially + installed (i.e., only some of its packages are installed.)""" + + # ///////////////////////////////////////////////////////////////// + # Constructor + # ///////////////////////////////////////////////////////////////// + + def __init__(self, server_index_url=None, download_dir=None): + self._url = server_index_url or self.DEFAULT_URL + """The URL for the data server's index file.""" + + self._collections = {} + """Dictionary from collection identifier to ``Collection``""" + + self._packages = {} + """Dictionary from package identifier to ``Package``""" + + self._download_dir = download_dir + """The default directory to which packages will be downloaded.""" + + self._index = None + """The XML index file downloaded from the data server""" + + self._index_timestamp = None + """Time at which ``self._index`` was downloaded. If it is more + than ``INDEX_TIMEOUT`` seconds old, it will be re-downloaded.""" + + self._status_cache = {} + """Dictionary from package/collection identifier to status + string (``INSTALLED``, ``NOT_INSTALLED``, ``STALE``, or + ``PARTIAL``). Cache is used for packages only, not + collections.""" + + self._errors = None + """Flag for telling if all packages got successfully downloaded or not.""" + + # decide where we're going to save things to. 
+ if self._download_dir is None: + self._download_dir = self.default_download_dir() + + # ///////////////////////////////////////////////////////////////// + # Information + # ///////////////////////////////////////////////////////////////// + + def list( + self, + download_dir=None, + show_packages=True, + show_collections=True, + header=True, + more_prompt=False, + skip_installed=False, + ): + lines = 0 # for more_prompt + if download_dir is None: + download_dir = self._download_dir + print("Using default data directory (%s)" % download_dir) + if header: + print("=" * (26 + len(self._url))) + print(" Data server index for <%s>" % self._url) + print("=" * (26 + len(self._url))) + lines += 3 # for more_prompt + stale = partial = False + + categories = [] + if show_packages: + categories.append("packages") + if show_collections: + categories.append("collections") + for category in categories: + print("%s:" % category.capitalize()) + lines += 1 # for more_prompt + for info in sorted(getattr(self, category)(), key=str): + status = self.status(info, download_dir) + if status == self.INSTALLED and skip_installed: + continue + if status == self.STALE: + stale = True + if status == self.PARTIAL: + partial = True + prefix = { + self.INSTALLED: "*", + self.STALE: "-", + self.PARTIAL: "P", + self.NOT_INSTALLED: " ", + }[status] + name = textwrap.fill( + "-" * 27 + (info.name or info.id), 75, subsequent_indent=27 * " " + )[27:] + print(" [{}] {} {}".format(prefix, info.id.ljust(20, "."), name)) + lines += len(name.split("\n")) # for more_prompt + if more_prompt and lines > 20: + user_input = input("Hit Enter to continue: ") + if user_input.lower() in ("x", "q"): + return + lines = 0 + print() + msg = "([*] marks installed packages" + if stale: + msg += "; [-] marks out-of-date or corrupt packages" + if partial: + msg += "; [P] marks partially installed collections" + print(textwrap.fill(msg + ")", subsequent_indent=" ", width=76)) + + def packages(self): + self._update_index() + return self._packages.values() + + def corpora(self): + self._update_index() + return [pkg for (id, pkg) in self._packages.items() if pkg.subdir == "corpora"] + + def models(self): + self._update_index() + return [pkg for (id, pkg) in self._packages.items() if pkg.subdir != "corpora"] + + def collections(self): + self._update_index() + return self._collections.values() + + # ///////////////////////////////////////////////////////////////// + # Downloading + # ///////////////////////////////////////////////////////////////// + + def _info_or_id(self, info_or_id): + if isinstance(info_or_id, str): + return self.info(info_or_id) + else: + return info_or_id + + # [xx] When during downloading is it 'safe' to abort? Only unsafe + # time is *during* an unzip -- we don't want to leave a + # partially-unzipped corpus in place because we wouldn't notice + # it. But if we had the exact total size of the unzipped corpus, + # then that would be fine. Then we could abort anytime we want! + # So this is really what we should do. That way the threaded + # downloader in the gui can just kill the download thread anytime + # it wants. + + def incr_download(self, info_or_id, download_dir=None, force=False): + # If they didn't specify a download_dir, then use the default one. + if download_dir is None: + download_dir = self._download_dir + yield SelectDownloadDirMessage(download_dir) + + # If they gave us a list of ids, then download each one. 
+ if isinstance(info_or_id, (list, tuple)): + yield from self._download_list(info_or_id, download_dir, force) + return + + # Look up the requested collection or package. + try: + info = self._info_or_id(info_or_id) + except (OSError, ValueError) as e: + yield ErrorMessage(None, f"Error loading {info_or_id}: {e}") + return + + # Handle collections. + if isinstance(info, Collection): + yield StartCollectionMessage(info) + yield from self.incr_download(info.children, download_dir, force) + yield FinishCollectionMessage(info) + + # Handle Packages (delegate to a helper function). + else: + yield from self._download_package(info, download_dir, force) + + def _num_packages(self, item): + if isinstance(item, Package): + return 1 + else: + return len(item.packages) + + def _download_list(self, items, download_dir, force): + # Look up the requested items. + for i in range(len(items)): + try: + items[i] = self._info_or_id(items[i]) + except (OSError, ValueError) as e: + yield ErrorMessage(items[i], e) + return + + # Download each item, re-scaling their progress. + num_packages = sum(self._num_packages(item) for item in items) + progress = 0 + for i, item in enumerate(items): + if isinstance(item, Package): + delta = 1.0 / num_packages + else: + delta = len(item.packages) / num_packages + for msg in self.incr_download(item, download_dir, force): + if isinstance(msg, ProgressMessage): + yield ProgressMessage(progress + msg.progress * delta) + else: + yield msg + + progress += 100 * delta + + def _download_package(self, info, download_dir, force): + yield StartPackageMessage(info) + yield ProgressMessage(0) + + # Do we already have the current version? + status = self.status(info, download_dir) + if not force and status == self.INSTALLED: + yield UpToDateMessage(info) + yield ProgressMessage(100) + yield FinishPackageMessage(info) + return + + # Remove the package from our status cache + self._status_cache.pop(info.id, None) + + # Check for (and remove) any old/stale version. + filepath = os.path.join(download_dir, info.filename) + if os.path.exists(filepath): + if status == self.STALE: + yield StaleMessage(info) + os.remove(filepath) + + # Ensure the download_dir exists + if not os.path.exists(download_dir): + os.makedirs(download_dir) + if not os.path.exists(os.path.join(download_dir, info.subdir)): + os.makedirs(os.path.join(download_dir, info.subdir)) + + # Download the file. This will raise an IOError if the url + # is not found. + yield StartDownloadMessage(info) + yield ProgressMessage(5) + try: + infile = urlopen(info.url) + with open(filepath, "wb") as outfile: + num_blocks = max(1, info.size / (1024 * 16)) + for block in itertools.count(): + s = infile.read(1024 * 16) # 16k blocks. + outfile.write(s) + if not s: + break + if block % 2 == 0: # how often? + yield ProgressMessage(min(80, 5 + 75 * (block / num_blocks))) + infile.close() + except OSError as e: + yield ErrorMessage( + info, + "Error downloading %r from <%s>:" "\n %s" % (info.id, info.url, e), + ) + return + yield FinishDownloadMessage(info) + yield ProgressMessage(80) + + # If it's a zipfile, uncompress it. + if info.filename.endswith(".zip"): + zipdir = os.path.join(download_dir, info.subdir) + # Unzip if we're unzipping by default; *or* if it's already + # been unzipped (presumably a previous version). 
+ if info.unzip or os.path.exists(os.path.join(zipdir, info.id)): + yield StartUnzipMessage(info) + for msg in _unzip_iter(filepath, zipdir, verbose=False): + # Somewhat of a hack, but we need a proper package reference + msg.package = info + yield msg + yield FinishUnzipMessage(info) + + yield FinishPackageMessage(info) + + def download( + self, + info_or_id=None, + download_dir=None, + quiet=False, + force=False, + prefix="[nltk_data] ", + halt_on_error=True, + raise_on_error=False, + print_error_to=sys.stderr, + ): + + print_to = functools.partial(print, file=print_error_to) + # If no info or id is given, then use the interactive shell. + if info_or_id is None: + # [xx] hmm -- changing self._download_dir here seems like + # the wrong thing to do. Maybe the _interactive_download + # function should make a new copy of self to use? + if download_dir is not None: + self._download_dir = download_dir + self._interactive_download() + return True + + else: + # Define a helper function for displaying output: + def show(s, prefix2=""): + print_to( + textwrap.fill( + s, + initial_indent=prefix + prefix2, + subsequent_indent=prefix + prefix2 + " " * 4, + ) + ) + + for msg in self.incr_download(info_or_id, download_dir, force): + # Error messages + if isinstance(msg, ErrorMessage): + show(msg.message) + if raise_on_error: + raise ValueError(msg.message) + if halt_on_error: + return False + self._errors = True + if not quiet: + print_to("Error installing package. Retry? [n/y/e]") + choice = input().strip() + if choice in ["y", "Y"]: + if not self.download( + msg.package.id, + download_dir, + quiet, + force, + prefix, + halt_on_error, + raise_on_error, + ): + return False + elif choice in ["e", "E"]: + return False + + # All other messages + if not quiet: + # Collection downloading messages: + if isinstance(msg, StartCollectionMessage): + show("Downloading collection %r" % msg.collection.id) + prefix += " | " + print_to(prefix) + elif isinstance(msg, FinishCollectionMessage): + print_to(prefix) + prefix = prefix[:-4] + if self._errors: + show( + "Downloaded collection %r with errors" + % msg.collection.id + ) + else: + show("Done downloading collection %s" % msg.collection.id) + + # Package downloading messages: + elif isinstance(msg, StartPackageMessage): + show( + "Downloading package %s to %s..." + % (msg.package.id, download_dir) + ) + elif isinstance(msg, UpToDateMessage): + show("Package %s is already up-to-date!" % msg.package.id, " ") + # elif isinstance(msg, StaleMessage): + # show('Package %s is out-of-date or corrupt' % + # msg.package.id, ' ') + elif isinstance(msg, StartUnzipMessage): + show("Unzipping %s." % msg.package.filename, " ") + + # Data directory message: + elif isinstance(msg, SelectDownloadDirMessage): + download_dir = msg.download_dir + return True + + def is_stale(self, info_or_id, download_dir=None): + return self.status(info_or_id, download_dir) == self.STALE + + def is_installed(self, info_or_id, download_dir=None): + return self.status(info_or_id, download_dir) == self.INSTALLED + + def clear_status_cache(self, id=None): + if id is None: + self._status_cache.clear() + else: + self._status_cache.pop(id, None) + + def status(self, info_or_id, download_dir=None): + """ + Return a constant describing the status of the given package + or collection. Status can be one of ``INSTALLED``, + ``NOT_INSTALLED``, ``STALE``, or ``PARTIAL``. 
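+
+        A minimal usage sketch (illustrative only; contacting the data
+        server requires network access)::
+
+            >>> d = Downloader()  # doctest: +SKIP
+            >>> d.status('treebank')  # doctest: +SKIP
+            'installed'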
+ """ + if download_dir is None: + download_dir = self._download_dir + info = self._info_or_id(info_or_id) + + # Handle collections: + if isinstance(info, Collection): + pkg_status = [self.status(pkg.id) for pkg in info.packages] + if self.STALE in pkg_status: + return self.STALE + elif self.PARTIAL in pkg_status: + return self.PARTIAL + elif self.INSTALLED in pkg_status and self.NOT_INSTALLED in pkg_status: + return self.PARTIAL + elif self.NOT_INSTALLED in pkg_status: + return self.NOT_INSTALLED + else: + return self.INSTALLED + + # Handle packages: + else: + filepath = os.path.join(download_dir, info.filename) + if download_dir != self._download_dir: + return self._pkg_status(info, filepath) + else: + if info.id not in self._status_cache: + self._status_cache[info.id] = self._pkg_status(info, filepath) + return self._status_cache[info.id] + + def _pkg_status(self, info, filepath): + if not os.path.exists(filepath): + return self.NOT_INSTALLED + + # Check if the file has the correct size. + try: + filestat = os.stat(filepath) + except OSError: + return self.NOT_INSTALLED + if filestat.st_size != int(info.size): + return self.STALE + + # Check if the file's checksum matches + if md5_hexdigest(filepath) != info.checksum: + return self.STALE + + # If it's a zipfile, and it's been at least partially + # unzipped, then check if it's been fully unzipped. + if filepath.endswith(".zip"): + unzipdir = filepath[:-4] + if not os.path.exists(unzipdir): + return self.INSTALLED # but not unzipped -- ok! + if not os.path.isdir(unzipdir): + return self.STALE + + unzipped_size = sum( + os.stat(os.path.join(d, f)).st_size + for d, _, files in os.walk(unzipdir) + for f in files + ) + if unzipped_size != info.unzipped_size: + return self.STALE + + # Otherwise, everything looks good. + return self.INSTALLED + + def update(self, quiet=False, prefix="[nltk_data] "): + """ + Re-download any packages whose status is STALE. + """ + self.clear_status_cache() + for pkg in self.packages(): + if self.status(pkg) == self.STALE: + self.download(pkg, quiet=quiet, prefix=prefix) + + # ///////////////////////////////////////////////////////////////// + # Index + # ///////////////////////////////////////////////////////////////// + + def _update_index(self, url=None): + """A helper function that ensures that self._index is + up-to-date. If the index is older than self.INDEX_TIMEOUT, + then download it again.""" + # Check if the index is already up-to-date. If so, do nothing. + if not ( + self._index is None + or url is not None + or time.time() - self._index_timestamp > self.INDEX_TIMEOUT + ): + return + + # If a URL was specified, then update our URL. + self._url = url or self._url + + # Download the index file. + self._index = nltk.internals.ElementWrapper( + ElementTree.parse(urlopen(self._url)).getroot() + ) + self._index_timestamp = time.time() + + # Build a dictionary of packages. + packages = [Package.fromxml(p) for p in self._index.findall("packages/package")] + self._packages = {p.id: p for p in packages} + + # Build a dictionary of collections. + collections = [ + Collection.fromxml(c) for c in self._index.findall("collections/collection") + ] + self._collections = {c.id: c for c in collections} + + # Replace identifiers with actual children in collection.children. 
+ for collection in self._collections.values(): + for i, child_id in enumerate(collection.children): + if child_id in self._packages: + collection.children[i] = self._packages[child_id] + elif child_id in self._collections: + collection.children[i] = self._collections[child_id] + else: + print( + "removing collection member with no package: {}".format( + child_id + ) + ) + del collection.children[i] + + # Fill in collection.packages for each collection. + for collection in self._collections.values(): + packages = {} + queue = [collection] + for child in queue: + if isinstance(child, Collection): + queue.extend(child.children) + elif isinstance(child, Package): + packages[child.id] = child + else: + pass + collection.packages = packages.values() + + # Flush the status cache + self._status_cache.clear() + + def index(self): + """ + Return the XML index describing the packages available from + the data server. If necessary, this index will be downloaded + from the data server. + """ + self._update_index() + return self._index + + def info(self, id): + """Return the ``Package`` or ``Collection`` record for the + given item.""" + self._update_index() + if id in self._packages: + return self._packages[id] + if id in self._collections: + return self._collections[id] + raise ValueError("Package %r not found in index" % id) + + def xmlinfo(self, id): + """Return the XML info record for the given item""" + self._update_index() + for package in self._index.findall("packages/package"): + if package.get("id") == id: + return package + for collection in self._index.findall("collections/collection"): + if collection.get("id") == id: + return collection + raise ValueError("Package %r not found in index" % id) + + # ///////////////////////////////////////////////////////////////// + # URL & Data Directory + # ///////////////////////////////////////////////////////////////// + + def _get_url(self): + """The URL for the data server's index file.""" + return self._url + + def _set_url(self, url): + """ + Set a new URL for the data server. If we're unable to contact + the given url, then the original url is kept. + """ + original_url = self._url + try: + self._update_index(url) + except: + self._url = original_url + raise + + url = property(_get_url, _set_url) + + def default_download_dir(self): + """ + Return the directory to which packages will be downloaded by + default. This value can be overridden using the constructor, + or on a case-by-case basis using the ``download_dir`` argument when + calling ``download()``. + + On Windows, the default download directory is + ``PYTHONHOME/lib/nltk``, where *PYTHONHOME* is the + directory containing Python, e.g. ``C:\\Python25``. + + On all other platforms, the default directory is the first of + the following which exists or which can be created with write + permission: ``/usr/share/nltk_data``, ``/usr/local/share/nltk_data``, + ``/usr/lib/nltk_data``, ``/usr/local/lib/nltk_data``, ``~/nltk_data``. + """ + # Check if we are on GAE where we cannot write into filesystem. + if "APPENGINE_RUNTIME" in os.environ: + return + + # Check if we have sufficient permissions to install in a + # variety of system-wide locations. + for nltkdir in nltk.data.path: + if os.path.exists(nltkdir) and nltk.internals.is_writable(nltkdir): + return nltkdir + + # On Windows, use %APPDATA% + if sys.platform == "win32" and "APPDATA" in os.environ: + homedir = os.environ["APPDATA"] + + # Otherwise, install in the user's home directory. 
+ else: + homedir = os.path.expanduser("~/") + if homedir == "~/": + raise ValueError("Could not find a default download directory") + + # append "nltk_data" to the home directory + return os.path.join(homedir, "nltk_data") + + def _get_download_dir(self): + """ + The default directory to which packages will be downloaded. + This defaults to the value returned by ``default_download_dir()``. + To override this default on a case-by-case basis, use the + ``download_dir`` argument when calling ``download()``. + """ + return self._download_dir + + def _set_download_dir(self, download_dir): + self._download_dir = download_dir + # Clear the status cache. + self._status_cache.clear() + + download_dir = property(_get_download_dir, _set_download_dir) + + # ///////////////////////////////////////////////////////////////// + # Interactive Shell + # ///////////////////////////////////////////////////////////////// + + def _interactive_download(self): + # Try the GUI first; if that doesn't work, try the simple + # interactive shell. + if TKINTER: + try: + DownloaderGUI(self).mainloop() + except TclError: + DownloaderShell(self).run() + else: + DownloaderShell(self).run() + + +class DownloaderShell: + def __init__(self, dataserver): + self._ds = dataserver + + def _simple_interactive_menu(self, *options): + print("-" * 75) + spc = (68 - sum(len(o) for o in options)) // (len(options) - 1) * " " + print(" " + spc.join(options)) + print("-" * 75) + + def run(self): + print("NLTK Downloader") + while True: + self._simple_interactive_menu( + "d) Download", + "l) List", + " u) Update", + "c) Config", + "h) Help", + "q) Quit", + ) + user_input = input("Downloader> ").strip() + if not user_input: + print() + continue + command = user_input.lower().split()[0] + args = user_input.split()[1:] + try: + if command == "l": + print() + self._ds.list(self._ds.download_dir, header=False, more_prompt=True) + elif command == "h": + self._simple_interactive_help() + elif command == "c": + self._simple_interactive_config() + elif command in ("q", "x"): + return + elif command == "d": + self._simple_interactive_download(args) + elif command == "u": + self._simple_interactive_update() + else: + print("Command %r unrecognized" % user_input) + except HTTPError as e: + print("Error reading from server: %s" % e) + except URLError as e: + print("Error connecting to server: %s" % e.reason) + # try checking if user_input is a package name, & + # downloading it? 
+ print() + + def _simple_interactive_download(self, args): + if args: + for arg in args: + try: + self._ds.download(arg, prefix=" ") + except (OSError, ValueError) as e: + print(e) + else: + while True: + print() + print("Download which package (l=list; x=cancel)?") + user_input = input(" Identifier> ") + if user_input.lower() == "l": + self._ds.list( + self._ds.download_dir, + header=False, + more_prompt=True, + skip_installed=True, + ) + continue + elif user_input.lower() in ("x", "q", ""): + return + elif user_input: + for id in user_input.split(): + try: + self._ds.download(id, prefix=" ") + except (OSError, ValueError) as e: + print(e) + break + + def _simple_interactive_update(self): + while True: + stale_packages = [] + stale = partial = False + for info in sorted(getattr(self._ds, "packages")(), key=str): + if self._ds.status(info) == self._ds.STALE: + stale_packages.append((info.id, info.name)) + + print() + if stale_packages: + print("Will update following packages (o=ok; x=cancel)") + for pid, pname in stale_packages: + name = textwrap.fill( + "-" * 27 + (pname), 75, subsequent_indent=27 * " " + )[27:] + print(" [ ] {} {}".format(pid.ljust(20, "."), name)) + print() + + user_input = input(" Identifier> ") + if user_input.lower() == "o": + for pid, pname in stale_packages: + try: + self._ds.download(pid, prefix=" ") + except (OSError, ValueError) as e: + print(e) + break + elif user_input.lower() in ("x", "q", ""): + return + else: + print("Nothing to update.") + return + + def _simple_interactive_help(self): + print() + print("Commands:") + print( + " d) Download a package or collection u) Update out of date packages" + ) + print(" l) List packages & collections h) Help") + print(" c) View & Modify Configuration q) Quit") + + def _show_config(self): + print() + print("Data Server:") + print(" - URL: <%s>" % self._ds.url) + print(" - %d Package Collections Available" % len(self._ds.collections())) + print(" - %d Individual Packages Available" % len(self._ds.packages())) + print() + print("Local Machine:") + print(" - Data directory: %s" % self._ds.download_dir) + + def _simple_interactive_config(self): + self._show_config() + while True: + print() + self._simple_interactive_menu( + "s) Show Config", "u) Set Server URL", "d) Set Data Dir", "m) Main Menu" + ) + user_input = input("Config> ").strip().lower() + if user_input == "s": + self._show_config() + elif user_input == "d": + new_dl_dir = input(" New Directory> ").strip() + if new_dl_dir in ("", "x", "q", "X", "Q"): + print(" Cancelled!") + elif os.path.isdir(new_dl_dir): + self._ds.download_dir = new_dl_dir + else: + print("Directory %r not found! Create it first." % new_dl_dir) + elif user_input == "u": + new_url = input(" New URL> ").strip() + if new_url in ("", "x", "q", "X", "Q"): + print(" Cancelled!") + else: + if not new_url.startswith(("http://", "https://")): + new_url = "http://" + new_url + try: + self._ds.url = new_url + except Exception as e: + print(f"Error reading <{new_url!r}>:\n {e}") + elif user_input == "m": + break + + +class DownloaderGUI: + """ + Graphical interface for downloading packages from the NLTK data + server. + """ + + # ///////////////////////////////////////////////////////////////// + # Column Configuration + # ///////////////////////////////////////////////////////////////// + + COLUMNS = [ + "", + "Identifier", + "Name", + "Size", + "Status", + "Unzipped Size", + "Copyright", + "Contact", + "License", + "Author", + "Subdir", + "Checksum", + ] + """A list of the names of columns. 
This controls the order in + which the columns will appear. If this is edited, then + ``_package_to_columns()`` may need to be edited to match.""" + + COLUMN_WEIGHTS = {"": 0, "Name": 5, "Size": 0, "Status": 0} + """A dictionary specifying how columns should be resized when the + table is resized. Columns with weight 0 will not be resized at + all; and columns with high weight will be resized more. + Default weight (for columns not explicitly listed) is 1.""" + + COLUMN_WIDTHS = { + "": 1, + "Identifier": 20, + "Name": 45, + "Size": 10, + "Unzipped Size": 10, + "Status": 12, + } + """A dictionary specifying how wide each column should be, in + characters. The default width (for columns not explicitly + listed) is specified by ``DEFAULT_COLUMN_WIDTH``.""" + + DEFAULT_COLUMN_WIDTH = 30 + """The default width for columns that are not explicitly listed + in ``COLUMN_WIDTHS``.""" + + INITIAL_COLUMNS = ["", "Identifier", "Name", "Size", "Status"] + """The set of columns that should be displayed by default.""" + + # Perform a few import-time sanity checks to make sure that the + # column configuration variables are defined consistently: + for c in COLUMN_WEIGHTS: + assert c in COLUMNS + for c in COLUMN_WIDTHS: + assert c in COLUMNS + for c in INITIAL_COLUMNS: + assert c in COLUMNS + + # ///////////////////////////////////////////////////////////////// + # Color Configuration + # ///////////////////////////////////////////////////////////////// + + _BACKDROP_COLOR = ("#000", "#ccc") + + _ROW_COLOR = { + Downloader.INSTALLED: ("#afa", "#080"), + Downloader.PARTIAL: ("#ffa", "#880"), + Downloader.STALE: ("#faa", "#800"), + Downloader.NOT_INSTALLED: ("#fff", "#888"), + } + + _MARK_COLOR = ("#000", "#ccc") + + # _FRONT_TAB_COLOR = ('#ccf', '#008') + # _BACK_TAB_COLOR = ('#88a', '#448') + _FRONT_TAB_COLOR = ("#fff", "#45c") + _BACK_TAB_COLOR = ("#aaa", "#67a") + + _PROGRESS_COLOR = ("#f00", "#aaa") + + _TAB_FONT = "helvetica -16 bold" + + # ///////////////////////////////////////////////////////////////// + # Constructor + # ///////////////////////////////////////////////////////////////// + + def __init__(self, dataserver, use_threads=True): + self._ds = dataserver + self._use_threads = use_threads + + # For the threaded downloader: + self._download_lock = threading.Lock() + self._download_msg_queue = [] + self._download_abort_queue = [] + self._downloading = False + + # For tkinter after callbacks: + self._afterid = {} + + # A message log. + self._log_messages = [] + self._log_indent = 0 + self._log("NLTK Downloader Started!") + + # Create the main window. + top = self.top = Tk() + top.geometry("+50+50") + top.title("NLTK Downloader") + top.configure(background=self._BACKDROP_COLOR[1]) + + # Set up some bindings now, in case anything goes wrong. + top.bind("", self.destroy) + top.bind("", self.destroy) + self._destroyed = False + + self._column_vars = {} + + # Initialize the GUI. + self._init_widgets() + self._init_menu() + try: + self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + + self._show_info() + self._select_columns() + self._table.select(0) + + # Make sure we get notified when we're destroyed, so we can + # cancel any download in progress. 
+ self._table.bind("", self._destroy) + + def _log(self, msg): + self._log_messages.append( + "{} {}{}".format(time.ctime(), " | " * self._log_indent, msg) + ) + + # ///////////////////////////////////////////////////////////////// + # Internals + # ///////////////////////////////////////////////////////////////// + + def _init_widgets(self): + # Create the top-level frame structures + f1 = Frame(self.top, relief="raised", border=2, padx=8, pady=0) + f1.pack(sid="top", expand=True, fill="both") + f1.grid_rowconfigure(2, weight=1) + f1.grid_columnconfigure(0, weight=1) + Frame(f1, height=8).grid(column=0, row=0) # spacer + tabframe = Frame(f1) + tabframe.grid(column=0, row=1, sticky="news") + tableframe = Frame(f1) + tableframe.grid(column=0, row=2, sticky="news") + buttonframe = Frame(f1) + buttonframe.grid(column=0, row=3, sticky="news") + Frame(f1, height=8).grid(column=0, row=4) # spacer + infoframe = Frame(f1) + infoframe.grid(column=0, row=5, sticky="news") + Frame(f1, height=8).grid(column=0, row=6) # spacer + progressframe = Frame( + self.top, padx=3, pady=3, background=self._BACKDROP_COLOR[1] + ) + progressframe.pack(side="bottom", fill="x") + self.top["border"] = 0 + self.top["highlightthickness"] = 0 + + # Create the tabs + self._tab_names = ["Collections", "Corpora", "Models", "All Packages"] + self._tabs = {} + for i, tab in enumerate(self._tab_names): + label = Label(tabframe, text=tab, font=self._TAB_FONT) + label.pack(side="left", padx=((i + 1) % 2) * 10) + label.bind("", self._select_tab) + self._tabs[tab.lower()] = label + + # Create the table. + column_weights = [self.COLUMN_WEIGHTS.get(column, 1) for column in self.COLUMNS] + self._table = Table( + tableframe, + self.COLUMNS, + column_weights=column_weights, + highlightthickness=0, + listbox_height=16, + reprfunc=self._table_reprfunc, + ) + self._table.columnconfig(0, foreground=self._MARK_COLOR[0]) # marked + for i, column in enumerate(self.COLUMNS): + width = self.COLUMN_WIDTHS.get(column, self.DEFAULT_COLUMN_WIDTH) + self._table.columnconfig(i, width=width) + self._table.pack(expand=True, fill="both") + self._table.focus() + self._table.bind_to_listboxes("", self._download) + self._table.bind("", self._table_mark) + self._table.bind("", self._download) + self._table.bind("", self._prev_tab) + self._table.bind("", self._next_tab) + self._table.bind("", self._mark_all) + + # Create entry boxes for URL & download_dir + infoframe.grid_columnconfigure(1, weight=1) + + info = [ + ("url", "Server Index:", self._set_url), + ("download_dir", "Download Directory:", self._set_download_dir), + ] + self._info = {} + for (i, (key, label, callback)) in enumerate(info): + Label(infoframe, text=label).grid(column=0, row=i, sticky="e") + entry = Entry( + infoframe, + font="courier", + relief="groove", + disabledforeground="#007aff", + foreground="#007aff", + ) + self._info[key] = (entry, callback) + entry.bind("", self._info_save) + entry.bind("", lambda e, key=key: self._info_edit(key)) + entry.grid(column=1, row=i, sticky="ew") + + # If the user edits url or download_dir, and then clicks outside + # the entry box, then save their results. + self.top.bind("", self._info_save) + + # Create Download & Refresh buttons. 
+ self._download_button = Button( + buttonframe, text="Download", command=self._download, width=8 + ) + self._download_button.pack(side="left") + self._refresh_button = Button( + buttonframe, text="Refresh", command=self._refresh, width=8 + ) + self._refresh_button.pack(side="right") + + # Create Progress bar + self._progresslabel = Label( + progressframe, + text="", + foreground=self._BACKDROP_COLOR[0], + background=self._BACKDROP_COLOR[1], + ) + self._progressbar = Canvas( + progressframe, + width=200, + height=16, + background=self._PROGRESS_COLOR[1], + relief="sunken", + border=1, + ) + self._init_progressbar() + self._progressbar.pack(side="right") + self._progresslabel.pack(side="left") + + def _init_menu(self): + menubar = Menu(self.top) + + filemenu = Menu(menubar, tearoff=0) + filemenu.add_command( + label="Download", underline=0, command=self._download, accelerator="Return" + ) + filemenu.add_separator() + filemenu.add_command( + label="Change Server Index", + underline=7, + command=lambda: self._info_edit("url"), + ) + filemenu.add_command( + label="Change Download Directory", + underline=0, + command=lambda: self._info_edit("download_dir"), + ) + filemenu.add_separator() + filemenu.add_command(label="Show Log", underline=5, command=self._show_log) + filemenu.add_separator() + filemenu.add_command( + label="Exit", underline=1, command=self.destroy, accelerator="Ctrl-x" + ) + menubar.add_cascade(label="File", underline=0, menu=filemenu) + + # Create a menu to control which columns of the table are + # shown. n.b.: we never hide the first two columns (mark and + # identifier). + viewmenu = Menu(menubar, tearoff=0) + for column in self._table.column_names[2:]: + var = IntVar(self.top) + assert column not in self._column_vars + self._column_vars[column] = var + if column in self.INITIAL_COLUMNS: + var.set(1) + viewmenu.add_checkbutton( + label=column, underline=0, variable=var, command=self._select_columns + ) + menubar.add_cascade(label="View", underline=0, menu=viewmenu) + + # Create a sort menu + # [xx] this should be selectbuttons; and it should include + # reversed sorts as options. + sortmenu = Menu(menubar, tearoff=0) + for column in self._table.column_names[1:]: + sortmenu.add_command( + label="Sort by %s" % column, + command=(lambda c=column: self._table.sort_by(c, "ascending")), + ) + sortmenu.add_separator() + # sortmenu.add_command(label='Descending Sort:') + for column in self._table.column_names[1:]: + sortmenu.add_command( + label="Reverse sort by %s" % column, + command=(lambda c=column: self._table.sort_by(c, "descending")), + ) + menubar.add_cascade(label="Sort", underline=0, menu=sortmenu) + + helpmenu = Menu(menubar, tearoff=0) + helpmenu.add_command(label="About", underline=0, command=self.about) + helpmenu.add_command( + label="Instructions", underline=0, command=self.help, accelerator="F1" + ) + menubar.add_cascade(label="Help", underline=0, menu=helpmenu) + self.top.bind("", self.help) + + self.top.config(menu=menubar) + + def _select_columns(self): + for (column, var) in self._column_vars.items(): + if var.get(): + self._table.show_column(column) + else: + self._table.hide_column(column) + + def _refresh(self): + self._ds.clear_status_cache() + try: + self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + self._table.select(0) + + def _info_edit(self, info_key): + self._info_save() # just in case. 
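+        # Re-enable the (normally disabled) entry widget so the user can
+        # type a new value; _info_save() disables it again and applies the
+        # associated callback.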
+ (entry, callback) = self._info[info_key] + entry["state"] = "normal" + entry["relief"] = "sunken" + entry.focus() + + def _info_save(self, e=None): + focus = self._table + for entry, callback in self._info.values(): + if entry["state"] == "disabled": + continue + if e is not None and e.widget is entry and e.keysym != "Return": + focus = entry + else: + entry["state"] = "disabled" + entry["relief"] = "groove" + callback(entry.get()) + focus.focus() + + def _table_reprfunc(self, row, col, val): + if self._table.column_names[col].endswith("Size"): + if isinstance(val, str): + return " %s" % val + elif val < 1024**2: + return " %.1f KB" % (val / 1024.0**1) + elif val < 1024**3: + return " %.1f MB" % (val / 1024.0**2) + else: + return " %.1f GB" % (val / 1024.0**3) + + if col in (0, ""): + return str(val) + else: + return " %s" % val + + def _set_url(self, url): + if url == self._ds.url: + return + try: + self._ds.url = url + self._fill_table() + except OSError as e: + showerror("Error Setting Server Index", str(e)) + self._show_info() + + def _set_download_dir(self, download_dir): + if self._ds.download_dir == download_dir: + return + # check if the dir exists, and if not, ask if we should create it? + + # Clear our status cache, & re-check what's installed + self._ds.download_dir = download_dir + try: + self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + self._show_info() + + def _show_info(self): + print("showing info", self._ds.url) + for entry, cb in self._info.values(): + entry["state"] = "normal" + entry.delete(0, "end") + self._info["url"][0].insert(0, self._ds.url) + self._info["download_dir"][0].insert(0, self._ds.download_dir) + for entry, cb in self._info.values(): + entry["state"] = "disabled" + + def _prev_tab(self, *e): + for i, tab in enumerate(self._tab_names): + if tab.lower() == self._tab and i > 0: + self._tab = self._tab_names[i - 1].lower() + try: + return self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + + def _next_tab(self, *e): + for i, tab in enumerate(self._tab_names): + if tab.lower() == self._tab and i < (len(self._tabs) - 1): + self._tab = self._tab_names[i + 1].lower() + try: + return self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + + def _select_tab(self, event): + self._tab = event.widget["text"].lower() + try: + self._fill_table() + except HTTPError as e: + showerror("Error reading from server", e) + except URLError as e: + showerror("Error connecting to server", e.reason) + + _tab = "collections" + # _tab = 'corpora' + _rows = None + + def _fill_table(self): + selected_row = self._table.selected_row() + self._table.clear() + if self._tab == "all packages": + items = self._ds.packages() + elif self._tab == "corpora": + items = self._ds.corpora() + elif self._tab == "models": + items = self._ds.models() + elif self._tab == "collections": + items = self._ds.collections() + else: + assert 0, "bad tab value %r" % self._tab + rows = [self._package_to_columns(item) for item in items] + self._table.extend(rows) + + # Highlight the active tab. 
+ for tab, label in self._tabs.items(): + if tab == self._tab: + label.configure( + foreground=self._FRONT_TAB_COLOR[0], + background=self._FRONT_TAB_COLOR[1], + ) + else: + label.configure( + foreground=self._BACK_TAB_COLOR[0], + background=self._BACK_TAB_COLOR[1], + ) + + self._table.sort_by("Identifier", order="ascending") + self._color_table() + self._table.select(selected_row) + + # This is a hack, because the scrollbar isn't updating its + # position right -- I'm not sure what the underlying cause is + # though. (This is on OS X w/ python 2.5) The length of + # delay that's necessary seems to depend on how fast the + # comptuer is. :-/ + self.top.after(150, self._table._scrollbar.set, *self._table._mlb.yview()) + self.top.after(300, self._table._scrollbar.set, *self._table._mlb.yview()) + + def _update_table_status(self): + for row_num in range(len(self._table)): + status = self._ds.status(self._table[row_num, "Identifier"]) + self._table[row_num, "Status"] = status + self._color_table() + + def _download(self, *e): + # If we're using threads, then delegate to the threaded + # downloader instead. + if self._use_threads: + return self._download_threaded(*e) + + marked = [ + self._table[row, "Identifier"] + for row in range(len(self._table)) + if self._table[row, 0] != "" + ] + selection = self._table.selected_row() + if not marked and selection is not None: + marked = [self._table[selection, "Identifier"]] + + download_iter = self._ds.incr_download(marked, self._ds.download_dir) + self._log_indent = 0 + self._download_cb(download_iter, marked) + + _DL_DELAY = 10 + + def _download_cb(self, download_iter, ids): + try: + msg = next(download_iter) + except StopIteration: + # self._fill_table(sort=False) + self._update_table_status() + afterid = self.top.after(10, self._show_progress, 0) + self._afterid["_download_cb"] = afterid + return + + def show(s): + self._progresslabel["text"] = s + self._log(s) + + if isinstance(msg, ProgressMessage): + self._show_progress(msg.progress) + elif isinstance(msg, ErrorMessage): + show(msg.message) + if msg.package is not None: + self._select(msg.package.id) + self._show_progress(None) + return # halt progress. + elif isinstance(msg, StartCollectionMessage): + show("Downloading collection %s" % msg.collection.id) + self._log_indent += 1 + elif isinstance(msg, StartPackageMessage): + show("Downloading package %s" % msg.package.id) + elif isinstance(msg, UpToDateMessage): + show("Package %s is up-to-date!" % msg.package.id) + # elif isinstance(msg, StaleMessage): + # show('Package %s is out-of-date or corrupt' % msg.package.id) + elif isinstance(msg, FinishDownloadMessage): + show("Finished downloading %r." % msg.package.id) + elif isinstance(msg, StartUnzipMessage): + show("Unzipping %s" % msg.package.filename) + elif isinstance(msg, FinishCollectionMessage): + self._log_indent -= 1 + show("Finished downloading collection %r." % msg.collection.id) + self._clear_mark(msg.collection.id) + elif isinstance(msg, FinishPackageMessage): + self._clear_mark(msg.package.id) + afterid = self.top.after(self._DL_DELAY, self._download_cb, download_iter, ids) + self._afterid["_download_cb"] = afterid + + def _select(self, id): + for row in range(len(self._table)): + if self._table[row, "Identifier"] == id: + self._table.select(row) + return + + def _color_table(self): + # Color rows according to status. 
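+        # self._ROW_COLOR maps each status string to a (background,
+        # selected-background) pair; the mark column keeps its own colors.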
+        for row in range(len(self._table)):
+            bg, sbg = self._ROW_COLOR[self._table[row, "Status"]]
+            fg, sfg = ("black", "white")
+            self._table.rowconfig(
+                row,
+                foreground=fg,
+                selectforeground=sfg,
+                background=bg,
+                selectbackground=sbg,
+            )
+            # Color the marked column
+            self._table.itemconfigure(
+                row, 0, foreground=self._MARK_COLOR[0], background=self._MARK_COLOR[1]
+            )
+
+    def _clear_mark(self, id):
+        for row in range(len(self._table)):
+            if self._table[row, "Identifier"] == id:
+                self._table[row, 0] = ""
+
+    def _mark_all(self, *e):
+        for row in range(len(self._table)):
+            self._table[row, 0] = "X"
+
+    def _table_mark(self, *e):
+        selection = self._table.selected_row()
+        if selection >= 0:
+            if self._table[selection][0] != "":
+                self._table[selection, 0] = ""
+            else:
+                self._table[selection, 0] = "X"
+        self._table.select(delta=1)
+
+    def _show_log(self):
+        text = "\n".join(self._log_messages)
+        ShowText(self.top, "NLTK Downloader Log", text)
+
+    def _package_to_columns(self, pkg):
+        """
+        Given a package, return a list of values describing that
+        package, one for each column in ``self.COLUMNS``.
+        """
+        row = []
+        for column_index, column_name in enumerate(self.COLUMNS):
+            if column_index == 0:  # Mark:
+                row.append("")
+            elif column_name == "Identifier":
+                row.append(pkg.id)
+            elif column_name == "Status":
+                row.append(self._ds.status(pkg))
+            else:
+                attr = column_name.lower().replace(" ", "_")
+                row.append(getattr(pkg, attr, "n/a"))
+        return row
+
+    # /////////////////////////////////////////////////////////////////
+    # External Interface
+    # /////////////////////////////////////////////////////////////////
+
+    def destroy(self, *e):
+        if self._destroyed:
+            return
+        self.top.destroy()
+        self._destroyed = True
+
+    def _destroy(self, *e):
+        if self.top is not None:
+            for afterid in self._afterid.values():
+                self.top.after_cancel(afterid)
+
+            # Abort any download in progress.
+            if self._downloading and self._use_threads:
+                self._abort_download()
+
+            # Make sure the garbage collector destroys these now;
+            # otherwise, they may get destroyed when we're not in the main
+            # thread, which would make Tkinter unhappy.
+            self._column_vars.clear()
+
+    def mainloop(self, *args, **kwargs):
+        self.top.mainloop(*args, **kwargs)
+
+    # /////////////////////////////////////////////////////////////////
+    # HELP
+    # /////////////////////////////////////////////////////////////////
+
+    HELP = textwrap.dedent(
+        """\
+    This tool can be used to download a variety of corpora and models
+    that can be used with NLTK. Each corpus or model is distributed
+    in a single zip file, known as a \"package file.\" You can
+    download packages individually, or you can download pre-defined
+    collections of packages.
+
+    When you download a package, it will be saved to the \"download
+    directory.\" A default download directory is chosen when you run
+    the downloader; but you may also select a different download
+    directory. On Windows, the default download directory is
+    \"package.\"
+
+    The NLTK downloader can be used to download a variety of corpora,
+    models, and other data packages.
+
+    Keyboard shortcuts::
+      [return]\t Download
+      [up]\t Select previous package
+      [down]\t Select next package
+      [left]\t Select previous tab
+      [right]\t Select next tab
+    """
+    )
+
+    def help(self, *e):
+        # The default font's not very legible; try using 'fixed' instead.
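+        # If the 'fixed' font is unavailable, fall back to the default font.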
+ try: + ShowText( + self.top, + "Help: NLTK Downloader", + self.HELP.strip(), + width=75, + font="fixed", + ) + except: + ShowText(self.top, "Help: NLTK Downloader", self.HELP.strip(), width=75) + + def about(self, *e): + ABOUT = "NLTK Downloader\n" + "Written by Edward Loper" + TITLE = "About: NLTK Downloader" + try: + from tkinter.messagebox import Message + + Message(message=ABOUT, title=TITLE).show() + except ImportError: + ShowText(self.top, TITLE, ABOUT) + + # ///////////////////////////////////////////////////////////////// + # Progress Bar + # ///////////////////////////////////////////////////////////////// + + _gradient_width = 5 + + def _init_progressbar(self): + c = self._progressbar + width, height = int(c["width"]), int(c["height"]) + for i in range(0, (int(c["width"]) * 2) // self._gradient_width): + c.create_line( + i * self._gradient_width + 20, + -20, + i * self._gradient_width - height - 20, + height + 20, + width=self._gradient_width, + fill="#%02x0000" % (80 + abs(i % 6 - 3) * 12), + ) + c.addtag_all("gradient") + c.itemconfig("gradient", state="hidden") + + # This is used to display progress + c.addtag_withtag( + "redbox", c.create_rectangle(0, 0, 0, 0, fill=self._PROGRESS_COLOR[0]) + ) + + def _show_progress(self, percent): + c = self._progressbar + if percent is None: + c.coords("redbox", 0, 0, 0, 0) + c.itemconfig("gradient", state="hidden") + else: + width, height = int(c["width"]), int(c["height"]) + x = percent * int(width) // 100 + 1 + c.coords("redbox", 0, 0, x, height + 1) + + def _progress_alive(self): + c = self._progressbar + if not self._downloading: + c.itemconfig("gradient", state="hidden") + else: + c.itemconfig("gradient", state="normal") + x1, y1, x2, y2 = c.bbox("gradient") + if x1 <= -100: + c.move("gradient", (self._gradient_width * 6) - 4, 0) + else: + c.move("gradient", -4, 0) + afterid = self.top.after(200, self._progress_alive) + self._afterid["_progress_alive"] = afterid + + # ///////////////////////////////////////////////////////////////// + # Threaded downloader + # ///////////////////////////////////////////////////////////////// + + def _download_threaded(self, *e): + # If the user tries to start a new download while we're already + # downloading something, then abort the current download instead. + if self._downloading: + self._abort_download() + return + + # Change the 'download' button to an 'abort' button. + self._download_button["text"] = "Cancel" + + marked = [ + self._table[row, "Identifier"] + for row in range(len(self._table)) + if self._table[row, 0] != "" + ] + selection = self._table.selected_row() + if not marked and selection is not None: + marked = [self._table[selection, "Identifier"]] + + # Create a new data server object for the download operation, + # just in case the user modifies our data server during the + # download (e.g., clicking 'refresh' or editing the index url). + ds = Downloader(self._ds.url, self._ds.download_dir) + + # Start downloading in a separate thread. + assert self._download_msg_queue == [] + assert self._download_abort_queue == [] + self._DownloadThread( + ds, + marked, + self._download_lock, + self._download_msg_queue, + self._download_abort_queue, + ).start() + + # Monitor the download message queue & display its progress. + self._log_indent = 0 + self._downloading = True + self._monitor_message_queue() + + # Display an indication that we're still alive and well by + # cycling the progress bar. 
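+        # The worker thread only ever touches the shared queues (under
+        # self._download_lock); all Tkinter updates happen here, in the main
+        # thread, via _monitor_message_queue().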
+ self._progress_alive() + + def _abort_download(self): + if self._downloading: + self._download_lock.acquire() + self._download_abort_queue.append("abort") + self._download_lock.release() + + class _DownloadThread(threading.Thread): + def __init__(self, data_server, items, lock, message_queue, abort): + self.data_server = data_server + self.items = items + self.lock = lock + self.message_queue = message_queue + self.abort = abort + threading.Thread.__init__(self) + + def run(self): + for msg in self.data_server.incr_download(self.items): + self.lock.acquire() + self.message_queue.append(msg) + # Check if we've been told to kill ourselves: + if self.abort: + self.message_queue.append("aborted") + self.lock.release() + return + self.lock.release() + self.lock.acquire() + self.message_queue.append("finished") + self.lock.release() + + _MONITOR_QUEUE_DELAY = 100 + + def _monitor_message_queue(self): + def show(s): + self._progresslabel["text"] = s + self._log(s) + + # Try to acquire the lock; if it's busy, then just try again later. + if not self._download_lock.acquire(): + return + for msg in self._download_msg_queue: + + # Done downloading? + if msg == "finished" or msg == "aborted": + # self._fill_table(sort=False) + self._update_table_status() + self._downloading = False + self._download_button["text"] = "Download" + del self._download_msg_queue[:] + del self._download_abort_queue[:] + self._download_lock.release() + if msg == "aborted": + show("Download aborted!") + self._show_progress(None) + else: + afterid = self.top.after(100, self._show_progress, None) + self._afterid["_monitor_message_queue"] = afterid + return + + # All other messages + elif isinstance(msg, ProgressMessage): + self._show_progress(msg.progress) + elif isinstance(msg, ErrorMessage): + show(msg.message) + if msg.package is not None: + self._select(msg.package.id) + self._show_progress(None) + self._downloading = False + return # halt progress. + elif isinstance(msg, StartCollectionMessage): + show("Downloading collection %r" % msg.collection.id) + self._log_indent += 1 + elif isinstance(msg, StartPackageMessage): + self._ds.clear_status_cache(msg.package.id) + show("Downloading package %r" % msg.package.id) + elif isinstance(msg, UpToDateMessage): + show("Package %s is up-to-date!" % msg.package.id) + # elif isinstance(msg, StaleMessage): + # show('Package %s is out-of-date or corrupt; updating it' % + # msg.package.id) + elif isinstance(msg, FinishDownloadMessage): + show("Finished downloading %r." % msg.package.id) + elif isinstance(msg, StartUnzipMessage): + show("Unzipping %s" % msg.package.filename) + elif isinstance(msg, FinishUnzipMessage): + show("Finished installing %s" % msg.package.id) + elif isinstance(msg, FinishCollectionMessage): + self._log_indent -= 1 + show("Finished downloading collection %r." % msg.collection.id) + self._clear_mark(msg.collection.id) + elif isinstance(msg, FinishPackageMessage): + self._update_table_status() + self._clear_mark(msg.package.id) + + # Let the user know when we're aborting a download (but + # waiting for a good point to abort it, so we don't end up + # with a partially unzipped package or anything like that). + if self._download_abort_queue: + self._progresslabel["text"] = "Aborting download..." + + # Clear the message queue and then release the lock + del self._download_msg_queue[:] + self._download_lock.release() + + # Check the queue again after MONITOR_QUEUE_DELAY msec. 
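+        # (Re-scheduling ourselves with after() keeps the Tkinter event loop
+        # responsive while the download thread is still running.)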
+ afterid = self.top.after(self._MONITOR_QUEUE_DELAY, self._monitor_message_queue) + self._afterid["_monitor_message_queue"] = afterid + + +###################################################################### +# Helper Functions +###################################################################### +# [xx] It may make sense to move these to nltk.internals. + + +def md5_hexdigest(file): + """ + Calculate and return the MD5 checksum for a given file. + ``file`` may either be a filename or an open stream. + """ + if isinstance(file, str): + with open(file, "rb") as infile: + return _md5_hexdigest(infile) + return _md5_hexdigest(file) + + +def _md5_hexdigest(fp): + md5_digest = md5() + while True: + block = fp.read(1024 * 16) # 16k blocks + if not block: + break + md5_digest.update(block) + return md5_digest.hexdigest() + + +# change this to periodically yield progress messages? +# [xx] get rid of topdir parameter -- we should be checking +# this when we build the index, anyway. +def unzip(filename, root, verbose=True): + """ + Extract the contents of the zip file ``filename`` into the + directory ``root``. + """ + for message in _unzip_iter(filename, root, verbose): + if isinstance(message, ErrorMessage): + raise Exception(message) + + +def _unzip_iter(filename, root, verbose=True): + if verbose: + sys.stdout.write("Unzipping %s" % os.path.split(filename)[1]) + sys.stdout.flush() + + try: + zf = zipfile.ZipFile(filename) + except zipfile.error as e: + yield ErrorMessage(filename, "Error with downloaded zip file") + return + except Exception as e: + yield ErrorMessage(filename, e) + return + + zf.extractall(root) + + if verbose: + print() + + +###################################################################### +# Index Builder +###################################################################### +# This may move to a different file sometime. + + +def build_index(root, base_url): + """ + Create a new data.xml index file, by combining the xml description + files for various packages and collections. ``root`` should be the + path to a directory containing the package xml and zip files; and + the collection xml files. The ``root`` directory is expected to + have the following subdirectories:: + + root/ + packages/ .................. subdirectory for packages + corpora/ ................. zip & xml files for corpora + grammars/ ................ zip & xml files for grammars + taggers/ ................. zip & xml files for taggers + tokenizers/ .............. zip & xml files for tokenizers + etc. + collections/ ............... xml files for collections + + For each package, there should be two files: ``package.zip`` + (where *package* is the package name) + which contains the package itself as a compressed zip file; and + ``package.xml``, which is an xml description of the package. The + zipfile ``package.zip`` should expand to a single subdirectory + named ``package/``. The base filename ``package`` must match + the identifier given in the package's xml file. + + For each collection, there should be a single file ``collection.zip`` + describing the collection, where *collection* is the name of the collection. + + All identifiers (for both packages and collections) must be unique. + """ + # Find all packages. 
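+    # For each package, _find_packages() yields its XML description and its
+    # zipfile; the loop below fills in size, checksum, and URL attributes.
+    #
+    # A minimal usage sketch (the path and URL here are hypothetical):
+    #
+    #     index = build_index("/path/to/nltk_data", "https://example.org/nltk_data")
+    #     ElementTree.ElementTree(index).write("index.xml", encoding="utf-8")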
+ packages = [] + for pkg_xml, zf, subdir in _find_packages(os.path.join(root, "packages")): + zipstat = os.stat(zf.filename) + url = f"{base_url}/{subdir}/{os.path.split(zf.filename)[1]}" + unzipped_size = sum(zf_info.file_size for zf_info in zf.infolist()) + + # Fill in several fields of the package xml with calculated values. + pkg_xml.set("unzipped_size", "%s" % unzipped_size) + pkg_xml.set("size", "%s" % zipstat.st_size) + pkg_xml.set("checksum", "%s" % md5_hexdigest(zf.filename)) + pkg_xml.set("subdir", subdir) + # pkg_xml.set('svn_revision', _svn_revision(zf.filename)) + if not pkg_xml.get("url"): + pkg_xml.set("url", url) + + # Record the package. + packages.append(pkg_xml) + + # Find all collections + collections = list(_find_collections(os.path.join(root, "collections"))) + + # Check that all UIDs are unique + uids = set() + for item in packages + collections: + if item.get("id") in uids: + raise ValueError("Duplicate UID: %s" % item.get("id")) + uids.add(item.get("id")) + + # Put it all together + top_elt = ElementTree.Element("nltk_data") + top_elt.append(ElementTree.Element("packages")) + top_elt[0].extend(sorted(packages, key=lambda package: package.get("id"))) + top_elt.append(ElementTree.Element("collections")) + top_elt[1].extend(sorted(collections, key=lambda collection: collection.get("id"))) + + _indent_xml(top_elt) + return top_elt + + +def _indent_xml(xml, prefix=""): + """ + Helper for ``build_index()``: Given an XML ``ElementTree``, modify it + (and its descendents) ``text`` and ``tail`` attributes to generate + an indented tree, where each nested element is indented by 2 + spaces with respect to its parent. + """ + if len(xml) > 0: + xml.text = (xml.text or "").strip() + "\n" + prefix + " " + for child in xml: + _indent_xml(child, prefix + " ") + for child in xml[:-1]: + child.tail = (child.tail or "").strip() + "\n" + prefix + " " + xml[-1].tail = (xml[-1].tail or "").strip() + "\n" + prefix + + +def _check_package(pkg_xml, zipfilename, zf): + """ + Helper for ``build_index()``: Perform some checks to make sure that + the given package is consistent. + """ + # The filename must patch the id given in the XML file. + uid = os.path.splitext(os.path.split(zipfilename)[1])[0] + if pkg_xml.get("id") != uid: + raise ValueError( + "package identifier mismatch ({} vs {})".format(pkg_xml.get("id"), uid) + ) + + # Zip file must expand to a subdir whose name matches uid. + if sum((name != uid and not name.startswith(uid + "/")) for name in zf.namelist()): + raise ValueError( + "Zipfile %s.zip does not expand to a single " + "subdirectory %s/" % (uid, uid) + ) + + +# update for git? +def _svn_revision(filename): + """ + Helper for ``build_index()``: Calculate the subversion revision + number for a given file (by using ``subprocess`` to run ``svn``). + """ + p = subprocess.Popen( + ["svn", "status", "-v", filename], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + (stdout, stderr) = p.communicate() + if p.returncode != 0 or stderr or not stdout: + raise ValueError( + "Error determining svn_revision for %s: %s" + % (os.path.split(filename)[1], textwrap.fill(stderr)) + ) + return stdout.split()[2] + + +def _find_collections(root): + """ + Helper for ``build_index()``: Yield a list of ElementTree.Element + objects, each holding the xml for a single package collection. 
+ """ + for dirname, _subdirs, files in os.walk(root): + for filename in files: + if filename.endswith(".xml"): + xmlfile = os.path.join(dirname, filename) + yield ElementTree.parse(xmlfile).getroot() + + +def _find_packages(root): + """ + Helper for ``build_index()``: Yield a list of tuples + ``(pkg_xml, zf, subdir)``, where: + - ``pkg_xml`` is an ``ElementTree.Element`` holding the xml for a + package + - ``zf`` is a ``zipfile.ZipFile`` for the package's contents. + - ``subdir`` is the subdirectory (relative to ``root``) where + the package was found (e.g. 'corpora' or 'grammars'). + """ + from nltk.corpus.reader.util import _path_from + + # Find all packages. + packages = [] + for dirname, subdirs, files in os.walk(root): + relpath = "/".join(_path_from(root, dirname)) + for filename in files: + if filename.endswith(".xml"): + xmlfilename = os.path.join(dirname, filename) + zipfilename = xmlfilename[:-4] + ".zip" + try: + zf = zipfile.ZipFile(zipfilename) + except Exception as e: + raise ValueError(f"Error reading file {zipfilename!r}!\n{e}") from e + try: + pkg_xml = ElementTree.parse(xmlfilename).getroot() + except Exception as e: + raise ValueError(f"Error reading file {xmlfilename!r}!\n{e}") from e + + # Check that the UID matches the filename + uid = os.path.split(xmlfilename[:-4])[1] + if pkg_xml.get("id") != uid: + raise ValueError( + "package identifier mismatch (%s " + "vs %s)" % (pkg_xml.get("id"), uid) + ) + + # Check that the zipfile expands to a subdir whose + # name matches the uid. + if sum( + (name != uid and not name.startswith(uid + "/")) + for name in zf.namelist() + ): + raise ValueError( + "Zipfile %s.zip does not expand to a " + "single subdirectory %s/" % (uid, uid) + ) + + yield pkg_xml, zf, relpath + + elif filename.endswith(".zip"): + # Warn user in case a .xml does not exist for a .zip + resourcename = os.path.splitext(filename)[0] + xmlfilename = os.path.join(dirname, resourcename + ".xml") + if not os.path.exists(xmlfilename): + warnings.warn( + f"{filename} exists, but {resourcename + '.xml'} cannot be found! 
" + f"This could mean that {resourcename} can not be downloaded.", + stacklevel=2, + ) + + # Don't recurse into svn subdirectories: + try: + subdirs.remove(".svn") + except ValueError: + pass + + +###################################################################### +# Main: +###################################################################### + +# There should be a command-line interface + +# Aliases +_downloader = Downloader() +download = _downloader.download + + +def download_shell(): + DownloaderShell(_downloader).run() + + +def download_gui(): + DownloaderGUI(_downloader).mainloop() + + +def update(): + _downloader.update() + + +if __name__ == "__main__": + from optparse import OptionParser + + parser = OptionParser() + parser.add_option( + "-d", + "--dir", + dest="dir", + help="download package to directory DIR", + metavar="DIR", + ) + parser.add_option( + "-q", + "--quiet", + dest="quiet", + action="store_true", + default=False, + help="work quietly", + ) + parser.add_option( + "-f", + "--force", + dest="force", + action="store_true", + default=False, + help="download even if already installed", + ) + parser.add_option( + "-e", + "--exit-on-error", + dest="halt_on_error", + action="store_true", + default=False, + help="exit if an error occurs", + ) + parser.add_option( + "-u", + "--url", + dest="server_index_url", + default=os.environ.get("NLTK_DOWNLOAD_URL"), + help="download server index url", + ) + + (options, args) = parser.parse_args() + + downloader = Downloader(server_index_url=options.server_index_url) + + if args: + for pkg_id in args: + rv = downloader.download( + info_or_id=pkg_id, + download_dir=options.dir, + quiet=options.quiet, + force=options.force, + halt_on_error=options.halt_on_error, + ) + if rv == False and options.halt_on_error: + break + else: + downloader.download( + download_dir=options.dir, + quiet=options.quiet, + force=options.force, + halt_on_error=options.halt_on_error, + ) diff --git a/lib/python3.10/site-packages/nltk/featstruct.py b/lib/python3.10/site-packages/nltk/featstruct.py new file mode 100644 index 0000000000000000000000000000000000000000..5684f06f51e76070ca6e606722aa1583332429e3 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/featstruct.py @@ -0,0 +1,2779 @@ +# Natural Language Toolkit: Feature Structures +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper , +# Rob Speer, +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Basic data classes for representing feature structures, and for +performing basic operations on those feature structures. A feature +structure is a mapping from feature identifiers to feature values, +where each feature value is either a basic value (such as a string or +an integer), or a nested feature structure. There are two types of +feature structure, implemented by two subclasses of ``FeatStruct``: + + - feature dictionaries, implemented by ``FeatDict``, act like + Python dictionaries. Feature identifiers may be strings or + instances of the ``Feature`` class. + - feature lists, implemented by ``FeatList``, act like Python + lists. Feature identifiers are integers. + +Feature structures are typically used to represent partial information +about objects. A feature identifier that is not mapped to a value +stands for a feature whose value is unknown (*not* a feature without +a value). Two feature structures that represent (potentially +overlapping) information about the same object can be combined by +unification. 
When two inconsistent feature structures are unified, +the unification fails and returns None. + +Features can be specified using "feature paths", or tuples of feature +identifiers that specify path through the nested feature structures to +a value. Feature structures may contain reentrant feature values. A +"reentrant feature value" is a single feature value that can be +accessed via multiple feature paths. Unification preserves the +reentrance relations imposed by both of the unified feature +structures. In the feature structure resulting from unification, any +modifications to a reentrant feature value will be visible using any +of its feature paths. + +Feature structure variables are encoded using the ``nltk.sem.Variable`` +class. The variables' values are tracked using a bindings +dictionary, which maps variables to their values. When two feature +structures are unified, a fresh bindings dictionary is created to +track their values; and before unification completes, all bound +variables are replaced by their values. Thus, the bindings +dictionaries are usually strictly internal to the unification process. +However, it is possible to track the bindings of variables if you +choose to, by supplying your own initial bindings dictionary to the +``unify()`` function. + +When unbound variables are unified with one another, they become +aliased. This is encoded by binding one variable to the other. + +Lightweight Feature Structures +============================== +Many of the functions defined by ``nltk.featstruct`` can be applied +directly to simple Python dictionaries and lists, rather than to +full-fledged ``FeatDict`` and ``FeatList`` objects. In other words, +Python ``dicts`` and ``lists`` can be used as "light-weight" feature +structures. + + >>> from nltk.featstruct import unify + >>> unify(dict(x=1, y=dict()), dict(a='a', y=dict(b='b'))) # doctest: +SKIP + {'y': {'b': 'b'}, 'x': 1, 'a': 'a'} + +However, you should keep in mind the following caveats: + + - Python dictionaries & lists ignore reentrance when checking for + equality between values. But two FeatStructs with different + reentrances are considered nonequal, even if all their base + values are equal. + + - FeatStructs can be easily frozen, allowing them to be used as + keys in hash tables. Python dictionaries and lists can not. + + - FeatStructs display reentrance in their string representations; + Python dictionaries and lists do not. + + - FeatStructs may *not* be mixed with Python dictionaries and lists + (e.g., when performing unification). + + - FeatStructs provide a number of useful methods, such as ``walk()`` + and ``cyclic()``, which are not available for Python dicts and lists. + +In general, if your feature structures will contain any reentrances, +or if you plan to use them as dictionary keys, it is strongly +recommended that you use full-fledged ``FeatStruct`` objects. 
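+
+For example, a full-fledged ``FeatStruct`` can be frozen, after which it is
+immutable and hashable (a small illustrative example):
+
+    >>> from nltk.featstruct import FeatStruct
+    >>> fs = FeatStruct(number='singular', person=3)
+    >>> fs.freeze()
+    >>> fs.frozen()
+    True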
+""" + +import copy +import re +from functools import total_ordering + +from nltk.internals import raise_unorderable_types, read_str +from nltk.sem.logic import ( + Expression, + LogicalExpressionException, + LogicParser, + SubstituteBindingsI, + Variable, +) + +###################################################################### +# Feature Structure +###################################################################### + + +@total_ordering +class FeatStruct(SubstituteBindingsI): + """ + A mapping from feature identifiers to feature values, where each + feature value is either a basic value (such as a string or an + integer), or a nested feature structure. There are two types of + feature structure: + + - feature dictionaries, implemented by ``FeatDict``, act like + Python dictionaries. Feature identifiers may be strings or + instances of the ``Feature`` class. + - feature lists, implemented by ``FeatList``, act like Python + lists. Feature identifiers are integers. + + Feature structures may be indexed using either simple feature + identifiers or 'feature paths.' A feature path is a sequence + of feature identifiers that stand for a corresponding sequence of + indexing operations. In particular, ``fstruct[(f1,f2,...,fn)]`` is + equivalent to ``fstruct[f1][f2]...[fn]``. + + Feature structures may contain reentrant feature structures. A + "reentrant feature structure" is a single feature structure + object that can be accessed via multiple feature paths. Feature + structures may also be cyclic. A feature structure is "cyclic" + if there is any feature path from the feature structure to itself. + + Two feature structures are considered equal if they assign the + same values to all features, and have the same reentrancies. + + By default, feature structures are mutable. They may be made + immutable with the ``freeze()`` method. Once they have been + frozen, they may be hashed, and thus used as dictionary keys. + """ + + _frozen = False + """:ivar: A flag indicating whether this feature structure is + frozen or not. Once this flag is set, it should never be + un-set; and no further modification should be made to this + feature structure.""" + + ##//////////////////////////////////////////////////////////// + # { Constructor + ##//////////////////////////////////////////////////////////// + + def __new__(cls, features=None, **morefeatures): + """ + Construct and return a new feature structure. If this + constructor is called directly, then the returned feature + structure will be an instance of either the ``FeatDict`` class + or the ``FeatList`` class. + + :param features: The initial feature values for this feature + structure: + + - FeatStruct(string) -> FeatStructReader().read(string) + - FeatStruct(mapping) -> FeatDict(mapping) + - FeatStruct(sequence) -> FeatList(sequence) + - FeatStruct() -> FeatDict() + :param morefeatures: If ``features`` is a mapping or None, + then ``morefeatures`` provides additional features for the + ``FeatDict`` constructor. + """ + # If the FeatStruct constructor is called directly, then decide + # whether to create a FeatDict or a FeatList, based on the + # contents of the `features` argument. + if cls is FeatStruct: + if features is None: + return FeatDict.__new__(FeatDict, **morefeatures) + elif _is_mapping(features): + return FeatDict.__new__(FeatDict, features, **morefeatures) + elif morefeatures: + raise TypeError( + "Keyword arguments may only be specified " + "if features is None or is a mapping." 
+ ) + if isinstance(features, str): + if FeatStructReader._START_FDICT_RE.match(features): + return FeatDict.__new__(FeatDict, features, **morefeatures) + else: + return FeatList.__new__(FeatList, features, **morefeatures) + elif _is_sequence(features): + return FeatList.__new__(FeatList, features) + else: + raise TypeError("Expected string or mapping or sequence") + + # Otherwise, construct the object as normal. + else: + return super().__new__(cls, features, **morefeatures) + + ##//////////////////////////////////////////////////////////// + # { Uniform Accessor Methods + ##//////////////////////////////////////////////////////////// + # These helper functions allow the methods defined by FeatStruct + # to treat all feature structures as mappings, even if they're + # really lists. (Lists are treated as mappings from ints to vals) + + def _keys(self): + """Return an iterable of the feature identifiers used by this + FeatStruct.""" + raise NotImplementedError() # Implemented by subclasses. + + def _values(self): + """Return an iterable of the feature values directly defined + by this FeatStruct.""" + raise NotImplementedError() # Implemented by subclasses. + + def _items(self): + """Return an iterable of (fid,fval) pairs, where fid is a + feature identifier and fval is the corresponding feature + value, for all features defined by this FeatStruct.""" + raise NotImplementedError() # Implemented by subclasses. + + ##//////////////////////////////////////////////////////////// + # { Equality & Hashing + ##//////////////////////////////////////////////////////////// + + def equal_values(self, other, check_reentrance=False): + """ + Return True if ``self`` and ``other`` assign the same value to + to every feature. In particular, return true if + ``self[p]==other[p]`` for every feature path *p* such + that ``self[p]`` or ``other[p]`` is a base value (i.e., + not a nested feature structure). + + :param check_reentrance: If True, then also return False if + there is any difference between the reentrances of ``self`` + and ``other``. + :note: the ``==`` is equivalent to ``equal_values()`` with + ``check_reentrance=True``. + """ + return self._equal(other, check_reentrance, set(), set(), set()) + + def __eq__(self, other): + """ + Return true if ``self`` and ``other`` are both feature structures, + assign the same values to all features, and contain the same + reentrances. I.e., return + ``self.equal_values(other, check_reentrance=True)``. + + :see: ``equal_values()`` + """ + return self._equal(other, True, set(), set(), set()) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, FeatStruct): + # raise_unorderable_types("<", self, other) + # Sometimes feature values can be pure strings, + # so we need to be able to compare with non-featstructs: + return self.__class__.__name__ < other.__class__.__name__ + else: + return len(self) < len(other) + + def __hash__(self): + """ + If this feature structure is frozen, return its hash value; + otherwise, raise ``TypeError``. + """ + if not self._frozen: + raise TypeError("FeatStructs must be frozen before they " "can be hashed.") + try: + return self._hash + except AttributeError: + self._hash = self._calculate_hashvalue(set()) + return self._hash + + def _equal( + self, other, check_reentrance, visited_self, visited_other, visited_pairs + ): + """ + Return True iff self and other have equal values. 
+ + :param visited_self: A set containing the ids of all ``self`` + feature structures we've already visited. + :param visited_other: A set containing the ids of all ``other`` + feature structures we've already visited. + :param visited_pairs: A set containing ``(selfid, otherid)`` pairs + for all pairs of feature structures we've already visited. + """ + # If we're the same object, then we're equal. + if self is other: + return True + + # If we have different classes, we're definitely not equal. + if self.__class__ != other.__class__: + return False + + # If we define different features, we're definitely not equal. + # (Perform len test first because it's faster -- we should + # do profiling to see if this actually helps) + if len(self) != len(other): + return False + if set(self._keys()) != set(other._keys()): + return False + + # If we're checking reentrance, then any time we revisit a + # structure, make sure that it was paired with the same + # feature structure that it is now. Note: if check_reentrance, + # then visited_pairs will never contain two pairs whose first + # values are equal, or two pairs whose second values are equal. + if check_reentrance: + if id(self) in visited_self or id(other) in visited_other: + return (id(self), id(other)) in visited_pairs + + # If we're not checking reentrance, then we still need to deal + # with cycles. If we encounter the same (self, other) pair a + # second time, then we won't learn anything more by examining + # their children a second time, so just return true. + else: + if (id(self), id(other)) in visited_pairs: + return True + + # Keep track of which nodes we've visited. + visited_self.add(id(self)) + visited_other.add(id(other)) + visited_pairs.add((id(self), id(other))) + + # Now we have to check all values. If any of them don't match, + # then return false. + for (fname, self_fval) in self._items(): + other_fval = other[fname] + if isinstance(self_fval, FeatStruct): + if not self_fval._equal( + other_fval, + check_reentrance, + visited_self, + visited_other, + visited_pairs, + ): + return False + else: + if self_fval != other_fval: + return False + + # Everything matched up; return true. + return True + + def _calculate_hashvalue(self, visited): + """ + Return a hash value for this feature structure. + + :require: ``self`` must be frozen. + :param visited: A set containing the ids of all feature + structures we've already visited while hashing. + """ + if id(self) in visited: + return 1 + visited.add(id(self)) + + hashval = 5831 + for (fname, fval) in sorted(self._items()): + hashval *= 37 + hashval += hash(fname) + hashval *= 37 + if isinstance(fval, FeatStruct): + hashval += fval._calculate_hashvalue(visited) + else: + hashval += hash(fval) + # Convert to a 32 bit int. + hashval = int(hashval & 0x7FFFFFFF) + return hashval + + ##//////////////////////////////////////////////////////////// + # { Freezing + ##//////////////////////////////////////////////////////////// + + #: Error message used by mutating methods when called on a frozen + #: feature structure. + _FROZEN_ERROR = "Frozen FeatStructs may not be modified." + + def freeze(self): + """ + Make this feature structure, and any feature structures it + contains, immutable. Note: this method does not attempt to + 'freeze' any feature value that is not a ``FeatStruct``; it + is recommended that you use only immutable feature values. + """ + if self._frozen: + return + self._freeze(set()) + + def frozen(self): + """ + Return True if this feature structure is immutable. 
Feature + structures can be made immutable with the ``freeze()`` method. + Immutable feature structures may not be made mutable again, + but new mutable copies can be produced with the ``copy()`` method. + """ + return self._frozen + + def _freeze(self, visited): + """ + Make this feature structure, and any feature structure it + contains, immutable. + + :param visited: A set containing the ids of all feature + structures we've already visited while freezing. + """ + if id(self) in visited: + return + visited.add(id(self)) + self._frozen = True + for (fname, fval) in sorted(self._items()): + if isinstance(fval, FeatStruct): + fval._freeze(visited) + + ##//////////////////////////////////////////////////////////// + # { Copying + ##//////////////////////////////////////////////////////////// + + def copy(self, deep=True): + """ + Return a new copy of ``self``. The new copy will not be frozen. + + :param deep: If true, create a deep copy; if false, create + a shallow copy. + """ + if deep: + return copy.deepcopy(self) + else: + return self.__class__(self) + + # Subclasses should define __deepcopy__ to ensure that the new + # copy will not be frozen. + def __deepcopy__(self, memo): + raise NotImplementedError() # Implemented by subclasses. + + ##//////////////////////////////////////////////////////////// + # { Structural Information + ##//////////////////////////////////////////////////////////// + + def cyclic(self): + """ + Return True if this feature structure contains itself. + """ + return self._find_reentrances({})[id(self)] + + def walk(self): + """ + Return an iterator that generates this feature structure, and + each feature structure it contains. Each feature structure will + be generated exactly once. + """ + return self._walk(set()) + + def _walk(self, visited): + """ + Return an iterator that generates this feature structure, and + each feature structure it contains. + + :param visited: A set containing the ids of all feature + structures we've already visited while freezing. + """ + raise NotImplementedError() # Implemented by subclasses. + + def _walk(self, visited): + if id(self) in visited: + return + visited.add(id(self)) + yield self + for fval in self._values(): + if isinstance(fval, FeatStruct): + yield from fval._walk(visited) + + # Walk through the feature tree. The first time we see a feature + # value, map it to False (not reentrant). If we see a feature + # value more than once, then map it to True (reentrant). + def _find_reentrances(self, reentrances): + """ + Return a dictionary that maps from the ``id`` of each feature + structure contained in ``self`` (including ``self``) to a + boolean value, indicating whether it is reentrant or not. + """ + if id(self) in reentrances: + # We've seen it more than once. + reentrances[id(self)] = True + else: + # This is the first time we've seen it. + reentrances[id(self)] = False + + # Recurse to contained feature structures. 
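+            # Because we only recurse the first time a structure is seen,
+            # this walk also terminates for cyclic feature structures.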
+ for fval in self._values(): + if isinstance(fval, FeatStruct): + fval._find_reentrances(reentrances) + + return reentrances + + ##//////////////////////////////////////////////////////////// + # { Variables & Bindings + ##//////////////////////////////////////////////////////////// + + def substitute_bindings(self, bindings): + """:see: ``nltk.featstruct.substitute_bindings()``""" + return substitute_bindings(self, bindings) + + def retract_bindings(self, bindings): + """:see: ``nltk.featstruct.retract_bindings()``""" + return retract_bindings(self, bindings) + + def variables(self): + """:see: ``nltk.featstruct.find_variables()``""" + return find_variables(self) + + def rename_variables(self, vars=None, used_vars=(), new_vars=None): + """:see: ``nltk.featstruct.rename_variables()``""" + return rename_variables(self, vars, used_vars, new_vars) + + def remove_variables(self): + """ + Return the feature structure that is obtained by deleting + any feature whose value is a ``Variable``. + + :rtype: FeatStruct + """ + return remove_variables(self) + + ##//////////////////////////////////////////////////////////// + # { Unification + ##//////////////////////////////////////////////////////////// + + def unify(self, other, bindings=None, trace=False, fail=None, rename_vars=True): + return unify(self, other, bindings, trace, fail, rename_vars) + + def subsumes(self, other): + """ + Return True if ``self`` subsumes ``other``. I.e., return true + If unifying ``self`` with ``other`` would result in a feature + structure equal to ``other``. + """ + return subsumes(self, other) + + ##//////////////////////////////////////////////////////////// + # { String Representations + ##//////////////////////////////////////////////////////////// + + def __repr__(self): + """ + Display a single-line representation of this feature structure, + suitable for embedding in other representations. + """ + return self._repr(self._find_reentrances({}), {}) + + def _repr(self, reentrances, reentrance_ids): + """ + Return a string representation of this feature structure. + + :param reentrances: A dictionary that maps from the ``id`` of + each feature value in self, indicating whether that value + is reentrant or not. + :param reentrance_ids: A dictionary mapping from each ``id`` + of a feature value to a unique identifier. This is modified + by ``repr``: the first time a reentrant feature value is + displayed, an identifier is added to ``reentrance_ids`` for it. + """ + raise NotImplementedError() + + +# Mutation: disable if frozen. +_FROZEN_ERROR = "Frozen FeatStructs may not be modified." +_FROZEN_NOTICE = "\n%sIf self is frozen, raise ValueError." + + +def _check_frozen(method, indent=""): + """ + Given a method function, return a new method function that first + checks if ``self._frozen`` is true; and if so, raises ``ValueError`` + with an appropriate message. Otherwise, call the method and return + its result. + """ + + def wrapped(self, *args, **kwargs): + if self._frozen: + raise ValueError(_FROZEN_ERROR) + else: + return method(self, *args, **kwargs) + + wrapped.__name__ = method.__name__ + wrapped.__doc__ = (method.__doc__ or "") + (_FROZEN_NOTICE % indent) + return wrapped + + +###################################################################### +# Feature Dictionary +###################################################################### + + +class FeatDict(FeatStruct, dict): + """ + A feature structure that acts like a Python dictionary. 
I.e., a + mapping from feature identifiers to feature values, where a feature + identifier can be a string or a ``Feature``; and where a feature value + can be either a basic value (such as a string or an integer), or a nested + feature structure. A feature identifiers for a ``FeatDict`` is + sometimes called a "feature name". + + Two feature dicts are considered equal if they assign the same + values to all features, and have the same reentrances. + + :see: ``FeatStruct`` for information about feature paths, reentrance, + cyclic feature structures, mutability, freezing, and hashing. + """ + + def __init__(self, features=None, **morefeatures): + """ + Create a new feature dictionary, with the specified features. + + :param features: The initial value for this feature + dictionary. If ``features`` is a ``FeatStruct``, then its + features are copied (shallow copy). If ``features`` is a + dict, then a feature is created for each item, mapping its + key to its value. If ``features`` is a string, then it is + processed using ``FeatStructReader``. If ``features`` is a list of + tuples ``(name, val)``, then a feature is created for each tuple. + :param morefeatures: Additional features for the new feature + dictionary. If a feature is listed under both ``features`` and + ``morefeatures``, then the value from ``morefeatures`` will be + used. + """ + if isinstance(features, str): + FeatStructReader().fromstring(features, self) + self.update(**morefeatures) + else: + # update() checks the types of features. + self.update(features, **morefeatures) + + # //////////////////////////////////////////////////////////// + # { Dict methods + # //////////////////////////////////////////////////////////// + _INDEX_ERROR = "Expected feature name or path. Got %r." + + def __getitem__(self, name_or_path): + """If the feature with the given name or path exists, return + its value; otherwise, raise ``KeyError``.""" + if isinstance(name_or_path, (str, Feature)): + return dict.__getitem__(self, name_or_path) + elif isinstance(name_or_path, tuple): + try: + val = self + for fid in name_or_path: + if not isinstance(val, FeatStruct): + raise KeyError # path contains base value + val = val[fid] + return val + except (KeyError, IndexError) as e: + raise KeyError(name_or_path) from e + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + def get(self, name_or_path, default=None): + """If the feature with the given name or path exists, return its + value; otherwise, return ``default``.""" + try: + return self[name_or_path] + except KeyError: + return default + + def __contains__(self, name_or_path): + """Return true if a feature with the given name or path exists.""" + try: + self[name_or_path] + return True + except KeyError: + return False + + def has_key(self, name_or_path): + """Return true if a feature with the given name or path exists.""" + return name_or_path in self + + def __delitem__(self, name_or_path): + """If the feature with the given name or path exists, delete + its value; otherwise, raise ``KeyError``.""" + if self._frozen: + raise ValueError(_FROZEN_ERROR) + if isinstance(name_or_path, (str, Feature)): + return dict.__delitem__(self, name_or_path) + elif isinstance(name_or_path, tuple): + if len(name_or_path) == 0: + raise ValueError("The path () can not be set") + else: + parent = self[name_or_path[:-1]] + if not isinstance(parent, FeatStruct): + raise KeyError(name_or_path) # path contains base value + del parent[name_or_path[-1]] + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + 
def __setitem__(self, name_or_path, value): + """Set the value for the feature with the given name or path + to ``value``. If ``name_or_path`` is an invalid path, raise + ``KeyError``.""" + if self._frozen: + raise ValueError(_FROZEN_ERROR) + if isinstance(name_or_path, (str, Feature)): + return dict.__setitem__(self, name_or_path, value) + elif isinstance(name_or_path, tuple): + if len(name_or_path) == 0: + raise ValueError("The path () can not be set") + else: + parent = self[name_or_path[:-1]] + if not isinstance(parent, FeatStruct): + raise KeyError(name_or_path) # path contains base value + parent[name_or_path[-1]] = value + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + clear = _check_frozen(dict.clear) + pop = _check_frozen(dict.pop) + popitem = _check_frozen(dict.popitem) + setdefault = _check_frozen(dict.setdefault) + + def update(self, features=None, **morefeatures): + if self._frozen: + raise ValueError(_FROZEN_ERROR) + if features is None: + items = () + elif hasattr(features, "items") and callable(features.items): + items = features.items() + elif hasattr(features, "__iter__"): + items = features + else: + raise ValueError("Expected mapping or list of tuples") + + for key, val in items: + if not isinstance(key, (str, Feature)): + raise TypeError("Feature names must be strings") + self[key] = val + for key, val in morefeatures.items(): + if not isinstance(key, (str, Feature)): + raise TypeError("Feature names must be strings") + self[key] = val + + ##//////////////////////////////////////////////////////////// + # { Copying + ##//////////////////////////////////////////////////////////// + + def __deepcopy__(self, memo): + memo[id(self)] = selfcopy = self.__class__() + for (key, val) in self._items(): + selfcopy[copy.deepcopy(key, memo)] = copy.deepcopy(val, memo) + return selfcopy + + ##//////////////////////////////////////////////////////////// + # { Uniform Accessor Methods + ##//////////////////////////////////////////////////////////// + + def _keys(self): + return self.keys() + + def _values(self): + return self.values() + + def _items(self): + return self.items() + + ##//////////////////////////////////////////////////////////// + # { String Representations + ##//////////////////////////////////////////////////////////// + + def __str__(self): + """ + Display a multi-line representation of this feature dictionary + as an FVM (feature value matrix). + """ + return "\n".join(self._str(self._find_reentrances({}), {})) + + def _repr(self, reentrances, reentrance_ids): + segments = [] + prefix = "" + suffix = "" + + # If this is the first time we've seen a reentrant structure, + # then assign it a unique identifier. + if reentrances[id(self)]: + assert id(self) not in reentrance_ids + reentrance_ids[id(self)] = repr(len(reentrance_ids) + 1) + + # sorting note: keys are unique strings, so we'll never fall + # through to comparing values. 
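+        # Features whose ``display`` attribute is "prefix" or "slash" are
+        # pulled out of the bracketed list and rendered before or after it.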
+ for (fname, fval) in sorted(self.items()): + display = getattr(fname, "display", None) + if id(fval) in reentrance_ids: + segments.append(f"{fname}->({reentrance_ids[id(fval)]})") + elif ( + display == "prefix" and not prefix and isinstance(fval, (Variable, str)) + ): + prefix = "%s" % fval + elif display == "slash" and not suffix: + if isinstance(fval, Variable): + suffix = "/%s" % fval.name + else: + suffix = "/%s" % repr(fval) + elif isinstance(fval, Variable): + segments.append(f"{fname}={fval.name}") + elif fval is True: + segments.append("+%s" % fname) + elif fval is False: + segments.append("-%s" % fname) + elif isinstance(fval, Expression): + segments.append(f"{fname}=<{fval}>") + elif not isinstance(fval, FeatStruct): + segments.append(f"{fname}={repr(fval)}") + else: + fval_repr = fval._repr(reentrances, reentrance_ids) + segments.append(f"{fname}={fval_repr}") + # If it's reentrant, then add on an identifier tag. + if reentrances[id(self)]: + prefix = f"({reentrance_ids[id(self)]}){prefix}" + return "{}[{}]{}".format(prefix, ", ".join(segments), suffix) + + def _str(self, reentrances, reentrance_ids): + """ + :return: A list of lines composing a string representation of + this feature dictionary. + :param reentrances: A dictionary that maps from the ``id`` of + each feature value in self, indicating whether that value + is reentrant or not. + :param reentrance_ids: A dictionary mapping from each ``id`` + of a feature value to a unique identifier. This is modified + by ``repr``: the first time a reentrant feature value is + displayed, an identifier is added to ``reentrance_ids`` for + it. + """ + # If this is the first time we've seen a reentrant structure, + # then tack on an id string. + if reentrances[id(self)]: + assert id(self) not in reentrance_ids + reentrance_ids[id(self)] = repr(len(reentrance_ids) + 1) + + # Special case: empty feature dict. + if len(self) == 0: + if reentrances[id(self)]: + return ["(%s) []" % reentrance_ids[id(self)]] + else: + return ["[]"] + + # What's the longest feature name? Use this to align names. + maxfnamelen = max(len("%s" % k) for k in self.keys()) + + lines = [] + # sorting note: keys are unique strings, so we'll never fall + # through to comparing values. + for (fname, fval) in sorted(self.items()): + fname = ("%s" % fname).ljust(maxfnamelen) + if isinstance(fval, Variable): + lines.append(f"{fname} = {fval.name}") + + elif isinstance(fval, Expression): + lines.append(f"{fname} = <{fval}>") + + elif isinstance(fval, FeatList): + fval_repr = fval._repr(reentrances, reentrance_ids) + lines.append(f"{fname} = {repr(fval_repr)}") + + elif not isinstance(fval, FeatDict): + # It's not a nested feature structure -- just print it. + lines.append(f"{fname} = {repr(fval)}") + + elif id(fval) in reentrance_ids: + # It's a feature structure we've seen before -- print + # the reentrance id. + lines.append(f"{fname} -> ({reentrance_ids[id(fval)]})") + + else: + # It's a new feature structure. Separate it from + # other values by a blank line. + if lines and lines[-1] != "": + lines.append("") + + # Recursively print the feature's value (fval). + fval_lines = fval._str(reentrances, reentrance_ids) + + # Indent each line to make room for fname. + fval_lines = [(" " * (maxfnamelen + 3)) + l for l in fval_lines] + + # Pick which line we'll display fname on, & splice it in. + nameline = (len(fval_lines) - 1) // 2 + fval_lines[nameline] = ( + fname + " =" + fval_lines[nameline][maxfnamelen + 2 :] + ) + + # Add the feature structure to the output. 
+ lines += fval_lines + + # Separate FeatStructs by a blank line. + lines.append("") + + # Get rid of any excess blank lines. + if lines[-1] == "": + lines.pop() + + # Add brackets around everything. + maxlen = max(len(line) for line in lines) + lines = ["[ {}{} ]".format(line, " " * (maxlen - len(line))) for line in lines] + + # If it's reentrant, then add on an identifier tag. + if reentrances[id(self)]: + idstr = "(%s) " % reentrance_ids[id(self)] + lines = [(" " * len(idstr)) + l for l in lines] + idline = (len(lines) - 1) // 2 + lines[idline] = idstr + lines[idline][len(idstr) :] + + return lines + + +###################################################################### +# Feature List +###################################################################### + + +class FeatList(FeatStruct, list): + """ + A list of feature values, where each feature value is either a + basic value (such as a string or an integer), or a nested feature + structure. + + Feature lists may contain reentrant feature values. A "reentrant + feature value" is a single feature value that can be accessed via + multiple feature paths. Feature lists may also be cyclic. + + Two feature lists are considered equal if they assign the same + values to all features, and have the same reentrances. + + :see: ``FeatStruct`` for information about feature paths, reentrance, + cyclic feature structures, mutability, freezing, and hashing. + """ + + def __init__(self, features=()): + """ + Create a new feature list, with the specified features. + + :param features: The initial list of features for this feature + list. If ``features`` is a string, then it is paresd using + ``FeatStructReader``. Otherwise, it should be a sequence + of basic values and nested feature structures. + """ + if isinstance(features, str): + FeatStructReader().fromstring(features, self) + else: + list.__init__(self, features) + + # //////////////////////////////////////////////////////////// + # { List methods + # //////////////////////////////////////////////////////////// + _INDEX_ERROR = "Expected int or feature path. Got %r." + + def __getitem__(self, name_or_path): + if isinstance(name_or_path, int): + return list.__getitem__(self, name_or_path) + elif isinstance(name_or_path, tuple): + try: + val = self + for fid in name_or_path: + if not isinstance(val, FeatStruct): + raise KeyError # path contains base value + val = val[fid] + return val + except (KeyError, IndexError) as e: + raise KeyError(name_or_path) from e + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + def __delitem__(self, name_or_path): + """If the feature with the given name or path exists, delete + its value; otherwise, raise ``KeyError``.""" + if self._frozen: + raise ValueError(_FROZEN_ERROR) + if isinstance(name_or_path, (int, slice)): + return list.__delitem__(self, name_or_path) + elif isinstance(name_or_path, tuple): + if len(name_or_path) == 0: + raise ValueError("The path () can not be set") + else: + parent = self[name_or_path[:-1]] + if not isinstance(parent, FeatStruct): + raise KeyError(name_or_path) # path contains base value + del parent[name_or_path[-1]] + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + def __setitem__(self, name_or_path, value): + """Set the value for the feature with the given name or path + to ``value``. 
If ``name_or_path`` is an invalid path, raise + ``KeyError``.""" + if self._frozen: + raise ValueError(_FROZEN_ERROR) + if isinstance(name_or_path, (int, slice)): + return list.__setitem__(self, name_or_path, value) + elif isinstance(name_or_path, tuple): + if len(name_or_path) == 0: + raise ValueError("The path () can not be set") + else: + parent = self[name_or_path[:-1]] + if not isinstance(parent, FeatStruct): + raise KeyError(name_or_path) # path contains base value + parent[name_or_path[-1]] = value + else: + raise TypeError(self._INDEX_ERROR % name_or_path) + + # __delslice__ = _check_frozen(list.__delslice__, ' ') + # __setslice__ = _check_frozen(list.__setslice__, ' ') + __iadd__ = _check_frozen(list.__iadd__) + __imul__ = _check_frozen(list.__imul__) + append = _check_frozen(list.append) + extend = _check_frozen(list.extend) + insert = _check_frozen(list.insert) + pop = _check_frozen(list.pop) + remove = _check_frozen(list.remove) + reverse = _check_frozen(list.reverse) + sort = _check_frozen(list.sort) + + ##//////////////////////////////////////////////////////////// + # { Copying + ##//////////////////////////////////////////////////////////// + + def __deepcopy__(self, memo): + memo[id(self)] = selfcopy = self.__class__() + selfcopy.extend(copy.deepcopy(fval, memo) for fval in self) + return selfcopy + + ##//////////////////////////////////////////////////////////// + # { Uniform Accessor Methods + ##//////////////////////////////////////////////////////////// + + def _keys(self): + return list(range(len(self))) + + def _values(self): + return self + + def _items(self): + return enumerate(self) + + ##//////////////////////////////////////////////////////////// + # { String Representations + ##//////////////////////////////////////////////////////////// + + # Special handling for: reentrances, variables, expressions. + def _repr(self, reentrances, reentrance_ids): + # If this is the first time we've seen a reentrant structure, + # then assign it a unique identifier. + if reentrances[id(self)]: + assert id(self) not in reentrance_ids + reentrance_ids[id(self)] = repr(len(reentrance_ids) + 1) + prefix = "(%s)" % reentrance_ids[id(self)] + else: + prefix = "" + + segments = [] + for fval in self: + if id(fval) in reentrance_ids: + segments.append("->(%s)" % reentrance_ids[id(fval)]) + elif isinstance(fval, Variable): + segments.append(fval.name) + elif isinstance(fval, Expression): + segments.append("%s" % fval) + elif isinstance(fval, FeatStruct): + segments.append(fval._repr(reentrances, reentrance_ids)) + else: + segments.append("%s" % repr(fval)) + + return "{}[{}]".format(prefix, ", ".join(segments)) + + +###################################################################### +# Variables & Bindings +###################################################################### + + +def substitute_bindings(fstruct, bindings, fs_class="default"): + """ + Return the feature structure that is obtained by replacing each + variable bound by ``bindings`` with its binding. If a variable is + aliased to a bound variable, then it will be replaced by that + variable's value. If a variable is aliased to an unbound + variable, then it will be replaced by that variable. + + :type bindings: dict(Variable -> any) + :param bindings: A dictionary mapping from variables to values. 
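+
+ For example (a minimal sketch; the feature and variable names are
+ illustrative):
+
+ >>> from nltk.featstruct import FeatStruct, substitute_bindings
+ >>> from nltk.sem.logic import Variable
+ >>> substitute_bindings(FeatStruct('[agr=[num=?n]]'), {Variable('?n'): 'sg'})
+ [agr=[num='sg']]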
+ """ + if fs_class == "default": + fs_class = _default_fs_class(fstruct) + fstruct = copy.deepcopy(fstruct) + _substitute_bindings(fstruct, bindings, fs_class, set()) + return fstruct + + +def _substitute_bindings(fstruct, bindings, fs_class, visited): + # Visit each node only once: + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + + if _is_mapping(fstruct): + items = fstruct.items() + elif _is_sequence(fstruct): + items = enumerate(fstruct) + else: + raise ValueError("Expected mapping or sequence") + for (fname, fval) in items: + while isinstance(fval, Variable) and fval in bindings: + fval = fstruct[fname] = bindings[fval] + if isinstance(fval, fs_class): + _substitute_bindings(fval, bindings, fs_class, visited) + elif isinstance(fval, SubstituteBindingsI): + fstruct[fname] = fval.substitute_bindings(bindings) + + +def retract_bindings(fstruct, bindings, fs_class="default"): + """ + Return the feature structure that is obtained by replacing each + feature structure value that is bound by ``bindings`` with the + variable that binds it. A feature structure value must be + identical to a bound value (i.e., have equal id) to be replaced. + + ``bindings`` is modified to point to this new feature structure, + rather than the original feature structure. Feature structure + values in ``bindings`` may be modified if they are contained in + ``fstruct``. + """ + if fs_class == "default": + fs_class = _default_fs_class(fstruct) + (fstruct, new_bindings) = copy.deepcopy((fstruct, bindings)) + bindings.update(new_bindings) + inv_bindings = {id(val): var for (var, val) in bindings.items()} + _retract_bindings(fstruct, inv_bindings, fs_class, set()) + return fstruct + + +def _retract_bindings(fstruct, inv_bindings, fs_class, visited): + # Visit each node only once: + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + + if _is_mapping(fstruct): + items = fstruct.items() + elif _is_sequence(fstruct): + items = enumerate(fstruct) + else: + raise ValueError("Expected mapping or sequence") + for (fname, fval) in items: + if isinstance(fval, fs_class): + if id(fval) in inv_bindings: + fstruct[fname] = inv_bindings[id(fval)] + _retract_bindings(fval, inv_bindings, fs_class, visited) + + +def find_variables(fstruct, fs_class="default"): + """ + :return: The set of variables used by this feature structure. + :rtype: set(Variable) + """ + if fs_class == "default": + fs_class = _default_fs_class(fstruct) + return _variables(fstruct, set(), fs_class, set()) + + +def _variables(fstruct, vars, fs_class, visited): + # Visit each node only once: + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + if _is_mapping(fstruct): + items = fstruct.items() + elif _is_sequence(fstruct): + items = enumerate(fstruct) + else: + raise ValueError("Expected mapping or sequence") + for (fname, fval) in items: + if isinstance(fval, Variable): + vars.add(fval) + elif isinstance(fval, fs_class): + _variables(fval, vars, fs_class, visited) + elif isinstance(fval, SubstituteBindingsI): + vars.update(fval.variables()) + return vars + + +def rename_variables( + fstruct, vars=None, used_vars=(), new_vars=None, fs_class="default" +): + """ + Return the feature structure that is obtained by replacing + any of this feature structure's variables that are in ``vars`` + with new variables. The names for these new variables will be + names that are not used by any variable in ``vars``, or in + ``used_vars``, or in this feature structure. 
+ + :type vars: set + :param vars: The set of variables that should be renamed. + If not specified, ``find_variables(fstruct)`` is used; i.e., all + variables will be given new names. + :type used_vars: set + :param used_vars: A set of variables whose names should not be + used by the new variables. + :type new_vars: dict(Variable -> Variable) + :param new_vars: A dictionary that is used to hold the mapping + from old variables to new variables. For each variable *v* + in this feature structure: + + - If ``new_vars`` maps *v* to *v'*, then *v* will be + replaced by *v'*. + - If ``new_vars`` does not contain *v*, but ``vars`` + does contain *v*, then a new entry will be added to + ``new_vars``, mapping *v* to the new variable that is used + to replace it. + + To consistently rename the variables in a set of feature + structures, simply apply rename_variables to each one, using + the same dictionary: + + >>> from nltk.featstruct import FeatStruct + >>> fstruct1 = FeatStruct('[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]') + >>> fstruct2 = FeatStruct('[subj=[agr=[number=?z,gender=?y]], obj=[agr=[number=?z,gender=?y]]]') + >>> new_vars = {} # Maps old vars to alpha-renamed vars + >>> fstruct1.rename_variables(new_vars=new_vars) + [obj=[agr=[gender=?y2]], subj=[agr=[gender=?y2]]] + >>> fstruct2.rename_variables(new_vars=new_vars) + [obj=[agr=[gender=?y2, number=?z2]], subj=[agr=[gender=?y2, number=?z2]]] + + If new_vars is not specified, then an empty dictionary is used. + """ + if fs_class == "default": + fs_class = _default_fs_class(fstruct) + + # Default values: + if new_vars is None: + new_vars = {} + if vars is None: + vars = find_variables(fstruct, fs_class) + else: + vars = set(vars) + + # Add our own variables to used_vars. + used_vars = find_variables(fstruct, fs_class).union(used_vars) + + # Copy ourselves, and rename variables in the copy. + return _rename_variables( + copy.deepcopy(fstruct), vars, used_vars, new_vars, fs_class, set() + ) + + +def _rename_variables(fstruct, vars, used_vars, new_vars, fs_class, visited): + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + if _is_mapping(fstruct): + items = fstruct.items() + elif _is_sequence(fstruct): + items = enumerate(fstruct) + else: + raise ValueError("Expected mapping or sequence") + for (fname, fval) in items: + if isinstance(fval, Variable): + # If it's in new_vars, then rebind it. + if fval in new_vars: + fstruct[fname] = new_vars[fval] + # If it's in vars, pick a new name for it. + elif fval in vars: + new_vars[fval] = _rename_variable(fval, used_vars) + fstruct[fname] = new_vars[fval] + used_vars.add(new_vars[fval]) + elif isinstance(fval, fs_class): + _rename_variables(fval, vars, used_vars, new_vars, fs_class, visited) + elif isinstance(fval, SubstituteBindingsI): + # Pick new names for any variables in `vars` + for var in fval.variables(): + if var in vars and var not in new_vars: + new_vars[var] = _rename_variable(var, used_vars) + used_vars.add(new_vars[var]) + # Replace all variables in `new_vars`. + fstruct[fname] = fval.substitute_bindings(new_vars) + return fstruct + + +def _rename_variable(var, used_vars): + name, n = re.sub(r"\d+$", "", var.name), 2 + if not name: + name = "?" + while Variable(f"{name}{n}") in used_vars: + n += 1 + return Variable(f"{name}{n}") + + +def remove_variables(fstruct, fs_class="default"): + """ + :rtype: FeatStruct + :return: The feature structure that is obtained by deleting + all features whose values are ``Variables``. 
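+
+ For example (a minimal sketch):
+
+ >>> from nltk.featstruct import FeatStruct, remove_variables
+ >>> remove_variables(FeatStruct('[cat=NP, agr=?a]'))
+ [cat='NP']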
+ """ + if fs_class == "default": + fs_class = _default_fs_class(fstruct) + return _remove_variables(copy.deepcopy(fstruct), fs_class, set()) + + +def _remove_variables(fstruct, fs_class, visited): + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + + if _is_mapping(fstruct): + items = list(fstruct.items()) + elif _is_sequence(fstruct): + items = list(enumerate(fstruct)) + else: + raise ValueError("Expected mapping or sequence") + + for (fname, fval) in items: + if isinstance(fval, Variable): + del fstruct[fname] + elif isinstance(fval, fs_class): + _remove_variables(fval, fs_class, visited) + return fstruct + + +###################################################################### +# Unification +###################################################################### + + +class _UnificationFailure: + def __repr__(self): + return "nltk.featstruct.UnificationFailure" + + +UnificationFailure = _UnificationFailure() +"""A unique value used to indicate unification failure. It can be + returned by ``Feature.unify_base_values()`` or by custom ``fail()`` + functions to indicate that unificaiton should fail.""" + + +# The basic unification algorithm: +# 1. Make copies of self and other (preserving reentrance) +# 2. Destructively unify self and other +# 3. Apply forward pointers, to preserve reentrance. +# 4. Replace bound variables with their values. +def unify( + fstruct1, + fstruct2, + bindings=None, + trace=False, + fail=None, + rename_vars=True, + fs_class="default", +): + """ + Unify ``fstruct1`` with ``fstruct2``, and return the resulting feature + structure. This unified feature structure is the minimal + feature structure that contains all feature value assignments from both + ``fstruct1`` and ``fstruct2``, and that preserves all reentrancies. + + If no such feature structure exists (because ``fstruct1`` and + ``fstruct2`` specify incompatible values for some feature), then + unification fails, and ``unify`` returns None. + + Bound variables are replaced by their values. Aliased + variables are replaced by their representative variable + (if unbound) or the value of their representative variable + (if bound). I.e., if variable *v* is in ``bindings``, + then *v* is replaced by ``bindings[v]``. This will + be repeated until the variable is replaced by an unbound + variable or a non-variable value. + + Unbound variables are bound when they are unified with + values; and aliased when they are unified with variables. + I.e., if variable *v* is not in ``bindings``, and is + unified with a variable or value *x*, then + ``bindings[v]`` is set to *x*. + + If ``bindings`` is unspecified, then all variables are + assumed to be unbound. I.e., ``bindings`` defaults to an + empty dict. + + >>> from nltk.featstruct import FeatStruct + >>> FeatStruct('[a=?x]').unify(FeatStruct('[b=?x]')) + [a=?x, b=?x2] + + :type bindings: dict(Variable -> any) + :param bindings: A set of variable bindings to be used and + updated during unification. + :type trace: bool + :param trace: If true, generate trace output. + :type rename_vars: bool + :param rename_vars: If True, then rename any variables in + ``fstruct2`` that are also used in ``fstruct1``, in order to + avoid collisions on variable names. + """ + # Decide which class(es) will be treated as feature structures, + # for the purposes of unification. 
+ if fs_class == "default": + fs_class = _default_fs_class(fstruct1) + if _default_fs_class(fstruct2) != fs_class: + raise ValueError( + "Mixing FeatStruct objects with Python " + "dicts and lists is not supported." + ) + assert isinstance(fstruct1, fs_class) + assert isinstance(fstruct2, fs_class) + + # If bindings are unspecified, use an empty set of bindings. + user_bindings = bindings is not None + if bindings is None: + bindings = {} + + # Make copies of fstruct1 and fstruct2 (since the unification + # algorithm is destructive). Do it all at once, to preserve + # reentrance links between fstruct1 and fstruct2. Copy bindings + # as well, in case there are any bound vars that contain parts + # of fstruct1 or fstruct2. + (fstruct1copy, fstruct2copy, bindings_copy) = copy.deepcopy( + (fstruct1, fstruct2, bindings) + ) + + # Copy the bindings back to the original bindings dict. + bindings.update(bindings_copy) + + if rename_vars: + vars1 = find_variables(fstruct1copy, fs_class) + vars2 = find_variables(fstruct2copy, fs_class) + _rename_variables(fstruct2copy, vars1, vars2, {}, fs_class, set()) + + # Do the actual unification. If it fails, return None. + forward = {} + if trace: + _trace_unify_start((), fstruct1copy, fstruct2copy) + try: + result = _destructively_unify( + fstruct1copy, fstruct2copy, bindings, forward, trace, fail, fs_class, () + ) + except _UnificationFailureError: + return None + + # _destructively_unify might return UnificationFailure, e.g. if we + # tried to unify a mapping with a sequence. + if result is UnificationFailure: + if fail is None: + return None + else: + return fail(fstruct1copy, fstruct2copy, ()) + + # Replace any feature structure that has a forward pointer + # with the target of its forward pointer. + result = _apply_forwards(result, forward, fs_class, set()) + if user_bindings: + _apply_forwards_to_bindings(forward, bindings) + + # Replace bound vars with values. + _resolve_aliases(bindings) + _substitute_bindings(result, bindings, fs_class, set()) + + # Return the result. + if trace: + _trace_unify_succeed((), result) + if trace: + _trace_bindings((), bindings) + return result + + +class _UnificationFailureError(Exception): + """An exception that is used by ``_destructively_unify`` to abort + unification when a failure is encountered.""" + + +def _destructively_unify( + fstruct1, fstruct2, bindings, forward, trace, fail, fs_class, path +): + """ + Attempt to unify ``fstruct1`` and ``fstruct2`` by modifying them + in-place. If the unification succeeds, then ``fstruct1`` will + contain the unified value, the value of ``fstruct2`` is undefined, + and forward[id(fstruct2)] is set to fstruct1. If the unification + fails, then a _UnificationFailureError is raised, and the + values of ``fstruct1`` and ``fstruct2`` are undefined. + + :param bindings: A dictionary mapping variables to values. + :param forward: A dictionary mapping feature structures ids + to replacement structures. When two feature structures + are merged, a mapping from one to the other will be added + to the forward dictionary; and changes will be made only + to the target of the forward dictionary. + ``_destructively_unify`` will always 'follow' any links + in the forward dictionary for fstruct1 and fstruct2 before + actually unifying them. + :param trace: If true, generate trace output + :param path: The feature path that led us to this unification + step. Used for trace output. + """ + # If fstruct1 is already identical to fstruct2, we're done. 
+ # Note: this, together with the forward pointers, ensures + # that unification will terminate even for cyclic structures. + if fstruct1 is fstruct2: + if trace: + _trace_unify_identity(path, fstruct1) + return fstruct1 + + # Set fstruct2's forward pointer to point to fstruct1; this makes + # fstruct1 the canonical copy for fstruct2. Note that we need to + # do this before we recurse into any child structures, in case + # they're cyclic. + forward[id(fstruct2)] = fstruct1 + + # Unifying two mappings: + if _is_mapping(fstruct1) and _is_mapping(fstruct2): + for fname in fstruct1: + if getattr(fname, "default", None) is not None: + fstruct2.setdefault(fname, fname.default) + for fname in fstruct2: + if getattr(fname, "default", None) is not None: + fstruct1.setdefault(fname, fname.default) + + # Unify any values that are defined in both fstruct1 and + # fstruct2. Copy any values that are defined in fstruct2 but + # not in fstruct1 to fstruct1. Note: sorting fstruct2's + # features isn't actually necessary; but we do it to give + # deterministic behavior, e.g. for tracing. + for fname, fval2 in sorted(fstruct2.items()): + if fname in fstruct1: + fstruct1[fname] = _unify_feature_values( + fname, + fstruct1[fname], + fval2, + bindings, + forward, + trace, + fail, + fs_class, + path + (fname,), + ) + else: + fstruct1[fname] = fval2 + + return fstruct1 # Contains the unified value. + + # Unifying two sequences: + elif _is_sequence(fstruct1) and _is_sequence(fstruct2): + # If the lengths don't match, fail. + if len(fstruct1) != len(fstruct2): + return UnificationFailure + + # Unify corresponding values in fstruct1 and fstruct2. + for findex in range(len(fstruct1)): + fstruct1[findex] = _unify_feature_values( + findex, + fstruct1[findex], + fstruct2[findex], + bindings, + forward, + trace, + fail, + fs_class, + path + (findex,), + ) + + return fstruct1 # Contains the unified value. + + # Unifying sequence & mapping: fail. The failure function + # doesn't get a chance to recover in this case. + elif (_is_sequence(fstruct1) or _is_mapping(fstruct1)) and ( + _is_sequence(fstruct2) or _is_mapping(fstruct2) + ): + return UnificationFailure + + # Unifying anything else: not allowed! + raise TypeError("Expected mappings or sequences") + + +def _unify_feature_values( + fname, fval1, fval2, bindings, forward, trace, fail, fs_class, fpath +): + """ + Attempt to unify ``fval1`` and and ``fval2``, and return the + resulting unified value. The method of unification will depend on + the types of ``fval1`` and ``fval2``: + + 1. If they're both feature structures, then destructively + unify them (see ``_destructively_unify()``. + 2. If they're both unbound variables, then alias one variable + to the other (by setting bindings[v2]=v1). + 3. If one is an unbound variable, and the other is a value, + then bind the unbound variable to the value. + 4. If one is a feature structure, and the other is a base value, + then fail. + 5. If they're both base values, then unify them. By default, + this will succeed if they are equal, and fail otherwise. + """ + if trace: + _trace_unify_start(fpath, fval1, fval2) + + # Look up the "canonical" copy of fval1 and fval2 + while id(fval1) in forward: + fval1 = forward[id(fval1)] + while id(fval2) in forward: + fval2 = forward[id(fval2)] + + # If fval1 or fval2 is a bound variable, then + # replace it by the variable's bound value. This + # includes aliased variables, which are encoded as + # variables bound to other variables. 
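+ # For example, unifying [a=?x] with [a=?y] aliases ?y to ?x (Case 2
+ # below), so only ?x appears in the result; a minimal sketch:
+ #
+ #     >>> FeatStruct('[a=?x]').unify(FeatStruct('[a=?y]'))
+ #     [a=?x]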
+ fvar1 = fvar2 = None + while isinstance(fval1, Variable) and fval1 in bindings: + fvar1 = fval1 + fval1 = bindings[fval1] + while isinstance(fval2, Variable) and fval2 in bindings: + fvar2 = fval2 + fval2 = bindings[fval2] + + # Case 1: Two feature structures (recursive case) + if isinstance(fval1, fs_class) and isinstance(fval2, fs_class): + result = _destructively_unify( + fval1, fval2, bindings, forward, trace, fail, fs_class, fpath + ) + + # Case 2: Two unbound variables (create alias) + elif isinstance(fval1, Variable) and isinstance(fval2, Variable): + if fval1 != fval2: + bindings[fval2] = fval1 + result = fval1 + + # Case 3: An unbound variable and a value (bind) + elif isinstance(fval1, Variable): + bindings[fval1] = fval2 + result = fval1 + elif isinstance(fval2, Variable): + bindings[fval2] = fval1 + result = fval2 + + # Case 4: A feature structure & a base value (fail) + elif isinstance(fval1, fs_class) or isinstance(fval2, fs_class): + result = UnificationFailure + + # Case 5: Two base values + else: + # Case 5a: Feature defines a custom unification method for base values + if isinstance(fname, Feature): + result = fname.unify_base_values(fval1, fval2, bindings) + # Case 5b: Feature value defines custom unification method + elif isinstance(fval1, CustomFeatureValue): + result = fval1.unify(fval2) + # Sanity check: unify value should be symmetric + if isinstance(fval2, CustomFeatureValue) and result != fval2.unify(fval1): + raise AssertionError( + "CustomFeatureValue objects %r and %r disagree " + "about unification value: %r vs. %r" + % (fval1, fval2, result, fval2.unify(fval1)) + ) + elif isinstance(fval2, CustomFeatureValue): + result = fval2.unify(fval1) + # Case 5c: Simple values -- check if they're equal. + else: + if fval1 == fval2: + result = fval1 + else: + result = UnificationFailure + + # If either value was a bound variable, then update the + # bindings. (This is really only necessary if fname is a + # Feature or if either value is a CustomFeatureValue.) + if result is not UnificationFailure: + if fvar1 is not None: + bindings[fvar1] = result + result = fvar1 + if fvar2 is not None and fvar2 != fvar1: + bindings[fvar2] = result + result = fvar2 + + # If we unification failed, call the failure function; it + # might decide to continue anyway. + if result is UnificationFailure: + if fail is not None: + result = fail(fval1, fval2, fpath) + if trace: + _trace_unify_fail(fpath[:-1], result) + if result is UnificationFailure: + raise _UnificationFailureError + + # Normalize the result. + if isinstance(result, fs_class): + result = _apply_forwards(result, forward, fs_class, set()) + + if trace: + _trace_unify_succeed(fpath, result) + if trace and isinstance(result, fs_class): + _trace_bindings(fpath, bindings) + + return result + + +def _apply_forwards_to_bindings(forward, bindings): + """ + Replace any feature structure that has a forward pointer with + the target of its forward pointer (to preserve reentrancy). + """ + for (var, value) in bindings.items(): + while id(value) in forward: + value = forward[id(value)] + bindings[var] = value + + +def _apply_forwards(fstruct, forward, fs_class, visited): + """ + Replace any feature structure that has a forward pointer with + the target of its forward pointer (to preserve reentrancy). 
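+
+ Seen through the public API, this is what keeps reentrance intact after
+ unification (an illustrative sketch):
+
+ >>> from nltk.featstruct import FeatStruct
+ >>> fs = FeatStruct('[a=(1)[x=1], b->(1)]').unify(FeatStruct('[a=[y=2]]'))
+ >>> fs['a'] is fs['b']
+ True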
+ """ + # Follow our own forwards pointers (if any) + while id(fstruct) in forward: + fstruct = forward[id(fstruct)] + + # Visit each node only once: + if id(fstruct) in visited: + return + visited.add(id(fstruct)) + + if _is_mapping(fstruct): + items = fstruct.items() + elif _is_sequence(fstruct): + items = enumerate(fstruct) + else: + raise ValueError("Expected mapping or sequence") + for fname, fval in items: + if isinstance(fval, fs_class): + # Replace w/ forwarded value. + while id(fval) in forward: + fval = forward[id(fval)] + fstruct[fname] = fval + # Recurse to child. + _apply_forwards(fval, forward, fs_class, visited) + + return fstruct + + +def _resolve_aliases(bindings): + """ + Replace any bound aliased vars with their binding; and replace + any unbound aliased vars with their representative var. + """ + for (var, value) in bindings.items(): + while isinstance(value, Variable) and value in bindings: + value = bindings[var] = bindings[value] + + +def _trace_unify_start(path, fval1, fval2): + if path == (): + print("\nUnification trace:") + else: + fullname = ".".join("%s" % n for n in path) + print(" " + "| " * (len(path) - 1) + "|") + print(" " + "| " * (len(path) - 1) + "| Unify feature: %s" % fullname) + print(" " + "| " * len(path) + " / " + _trace_valrepr(fval1)) + print(" " + "| " * len(path) + "|\\ " + _trace_valrepr(fval2)) + + +def _trace_unify_identity(path, fval1): + print(" " + "| " * len(path) + "|") + print(" " + "| " * len(path) + "| (identical objects)") + print(" " + "| " * len(path) + "|") + print(" " + "| " * len(path) + "+-->" + repr(fval1)) + + +def _trace_unify_fail(path, result): + if result is UnificationFailure: + resume = "" + else: + resume = " (nonfatal)" + print(" " + "| " * len(path) + "| |") + print(" " + "X " * len(path) + "X X <-- FAIL" + resume) + + +def _trace_unify_succeed(path, fval1): + # Print the result. + print(" " + "| " * len(path) + "|") + print(" " + "| " * len(path) + "+-->" + repr(fval1)) + + +def _trace_bindings(path, bindings): + # Print the bindings (if any). + if len(bindings) > 0: + binditems = sorted(bindings.items(), key=lambda v: v[0].name) + bindstr = "{%s}" % ", ".join( + f"{var}: {_trace_valrepr(val)}" for (var, val) in binditems + ) + print(" " + "| " * len(path) + " Bindings: " + bindstr) + + +def _trace_valrepr(val): + if isinstance(val, Variable): + return "%s" % val + else: + return "%s" % repr(val) + + +def subsumes(fstruct1, fstruct2): + """ + Return True if ``fstruct1`` subsumes ``fstruct2``. I.e., return + true if unifying ``fstruct1`` with ``fstruct2`` would result in a + feature structure equal to ``fstruct2.`` + + :rtype: bool + """ + return fstruct2 == unify(fstruct1, fstruct2) + + +def conflicts(fstruct1, fstruct2, trace=0): + """ + Return a list of the feature paths of all features which are + assigned incompatible values by ``fstruct1`` and ``fstruct2``. 
+ + :rtype: list(tuple) + """ + conflict_list = [] + + def add_conflict(fval1, fval2, path): + conflict_list.append(path) + return fval1 + + unify(fstruct1, fstruct2, fail=add_conflict, trace=trace) + return conflict_list + + +###################################################################### +# Helper Functions +###################################################################### + + +def _is_mapping(v): + return hasattr(v, "__contains__") and hasattr(v, "keys") + + +def _is_sequence(v): + return hasattr(v, "__iter__") and hasattr(v, "__len__") and not isinstance(v, str) + + +def _default_fs_class(obj): + if isinstance(obj, FeatStruct): + return FeatStruct + if isinstance(obj, (dict, list)): + return (dict, list) + else: + raise ValueError( + "To unify objects of type %s, you must specify " + "fs_class explicitly." % obj.__class__.__name__ + ) + + +###################################################################### +# FeatureValueSet & FeatureValueTuple +###################################################################### + + +class SubstituteBindingsSequence(SubstituteBindingsI): + """ + A mixin class for sequence classes that distributes variables() and + substitute_bindings() over the object's elements. + """ + + def variables(self): + return [elt for elt in self if isinstance(elt, Variable)] + sum( + ( + list(elt.variables()) + for elt in self + if isinstance(elt, SubstituteBindingsI) + ), + [], + ) + + def substitute_bindings(self, bindings): + return self.__class__([self.subst(v, bindings) for v in self]) + + def subst(self, v, bindings): + if isinstance(v, SubstituteBindingsI): + return v.substitute_bindings(bindings) + else: + return bindings.get(v, v) + + +class FeatureValueTuple(SubstituteBindingsSequence, tuple): + """ + A base feature value that is a tuple of other base feature values. + FeatureValueTuple implements ``SubstituteBindingsI``, so it any + variable substitutions will be propagated to the elements + contained by the set. A ``FeatureValueTuple`` is immutable. + """ + + def __repr__(self): # [xx] really use %s here? + if len(self) == 0: + return "()" + return "(%s)" % ", ".join(f"{b}" for b in self) + + +class FeatureValueSet(SubstituteBindingsSequence, frozenset): + """ + A base feature value that is a set of other base feature values. + FeatureValueSet implements ``SubstituteBindingsI``, so it any + variable substitutions will be propagated to the elements + contained by the set. A ``FeatureValueSet`` is immutable. + """ + + def __repr__(self): # [xx] really use %s here? + if len(self) == 0: + return "{/}" # distinguish from dict. + # n.b., we sort the string reprs of our elements, to ensure + # that our own repr is deterministic. + return "{%s}" % ", ".join(sorted(f"{b}" for b in self)) + + __str__ = __repr__ + + +class FeatureValueUnion(SubstituteBindingsSequence, frozenset): + """ + A base feature value that represents the union of two or more + ``FeatureValueSet`` or ``Variable``. + """ + + def __new__(cls, values): + # If values contains FeatureValueUnions, then collapse them. + values = _flatten(values, FeatureValueUnion) + + # If the resulting list contains no variables, then + # use a simple FeatureValueSet instead. + if sum(isinstance(v, Variable) for v in values) == 0: + values = _flatten(values, FeatureValueSet) + return FeatureValueSet(values) + + # If we contain a single variable, return that variable. + if len(values) == 1: + return list(values)[0] + + # Otherwise, build the FeatureValueUnion. 
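+ # For example, brace-delimited values in the reader's syntax become
+ # FeatureValueSets, while a '+'-joined value that still contains an
+ # unbound variable is kept as a FeatureValueUnion; a minimal sketch:
+ #
+ #     >>> FeatStruct('[x={a, b}]')
+ #     [x={a, b}]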
+ return frozenset.__new__(cls, values) + + def __repr__(self): + # n.b., we sort the string reprs of our elements, to ensure + # that our own repr is deterministic. also, note that len(self) + # is guaranteed to be 2 or more. + return "{%s}" % "+".join(sorted(f"{b}" for b in self)) + + +class FeatureValueConcat(SubstituteBindingsSequence, tuple): + """ + A base feature value that represents the concatenation of two or + more ``FeatureValueTuple`` or ``Variable``. + """ + + def __new__(cls, values): + # If values contains FeatureValueConcats, then collapse them. + values = _flatten(values, FeatureValueConcat) + + # If the resulting list contains no variables, then + # use a simple FeatureValueTuple instead. + if sum(isinstance(v, Variable) for v in values) == 0: + values = _flatten(values, FeatureValueTuple) + return FeatureValueTuple(values) + + # If we contain a single variable, return that variable. + if len(values) == 1: + return list(values)[0] + + # Otherwise, build the FeatureValueConcat. + return tuple.__new__(cls, values) + + def __repr__(self): + # n.b.: len(self) is guaranteed to be 2 or more. + return "(%s)" % "+".join(f"{b}" for b in self) + + +def _flatten(lst, cls): + """ + Helper function -- return a copy of list, with all elements of + type ``cls`` spliced in rather than appended in. + """ + result = [] + for elt in lst: + if isinstance(elt, cls): + result.extend(elt) + else: + result.append(elt) + return result + + +###################################################################### +# Specialized Features +###################################################################### + + +@total_ordering +class Feature: + """ + A feature identifier that's specialized to put additional + constraints, default values, etc. + """ + + def __init__(self, name, default=None, display=None): + assert display in (None, "prefix", "slash") + + self._name = name # [xx] rename to .identifier? + self._default = default # [xx] not implemented yet. + self._display = display + + if self._display == "prefix": + self._sortkey = (-1, self._name) + elif self._display == "slash": + self._sortkey = (1, self._name) + else: + self._sortkey = (0, self._name) + + @property + def name(self): + """The name of this feature.""" + return self._name + + @property + def default(self): + """Default value for this feature.""" + return self._default + + @property + def display(self): + """Custom display location: can be prefix, or slash.""" + return self._display + + def __repr__(self): + return "*%s*" % self.name + + def __lt__(self, other): + if isinstance(other, str): + return True + if not isinstance(other, Feature): + raise_unorderable_types("<", self, other) + return self._sortkey < other._sortkey + + def __eq__(self, other): + return type(self) == type(other) and self._name == other._name + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash(self._name) + + # //////////////////////////////////////////////////////////// + # These can be overridden by subclasses: + # //////////////////////////////////////////////////////////// + + def read_value(self, s, position, reentrances, parser): + return parser.read_value(s, position, reentrances) + + def unify_base_values(self, fval1, fval2, bindings): + """ + If possible, return a single value.. If not, return + the value ``UnificationFailure``. 
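+
+ The default implementation succeeds only when the two values are equal
+ (a minimal sketch, using the module-level ``TYPE`` feature):
+
+ >>> from nltk.featstruct import TYPE
+ >>> TYPE.unify_base_values('NP', 'NP', {})
+ 'NP'
+ >>> TYPE.unify_base_values('NP', 'VP', {})
+ nltk.featstruct.UnificationFailure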
+ """ + if fval1 == fval2: + return fval1 + else: + return UnificationFailure + + +class SlashFeature(Feature): + def read_value(self, s, position, reentrances, parser): + return parser.read_partial(s, position, reentrances) + + +class RangeFeature(Feature): + RANGE_RE = re.compile(r"(-?\d+):(-?\d+)") + + def read_value(self, s, position, reentrances, parser): + m = self.RANGE_RE.match(s, position) + if not m: + raise ValueError("range", position) + return (int(m.group(1)), int(m.group(2))), m.end() + + def unify_base_values(self, fval1, fval2, bindings): + if fval1 is None: + return fval2 + if fval2 is None: + return fval1 + rng = max(fval1[0], fval2[0]), min(fval1[1], fval2[1]) + if rng[1] < rng[0]: + return UnificationFailure + return rng + + +SLASH = SlashFeature("slash", default=False, display="slash") +TYPE = Feature("type", display="prefix") + + +###################################################################### +# Specialized Feature Values +###################################################################### + + +@total_ordering +class CustomFeatureValue: + """ + An abstract base class for base values that define a custom + unification method. The custom unification method of + ``CustomFeatureValue`` will be used during unification if: + + - The ``CustomFeatureValue`` is unified with another base value. + - The ``CustomFeatureValue`` is not the value of a customized + ``Feature`` (which defines its own unification method). + + If two ``CustomFeatureValue`` objects are unified with one another + during feature structure unification, then the unified base values + they return *must* be equal; otherwise, an ``AssertionError`` will + be raised. + + Subclasses must define ``unify()``, ``__eq__()`` and ``__lt__()``. + Subclasses may also wish to define ``__hash__()``. + """ + + def unify(self, other): + """ + If this base value unifies with ``other``, then return the + unified value. Otherwise, return ``UnificationFailure``. + """ + raise NotImplementedError("abstract base class") + + def __eq__(self, other): + return NotImplemented + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + return NotImplemented + + def __hash__(self): + raise TypeError("%s objects or unhashable" % self.__class__.__name__) + + +###################################################################### +# Feature Structure Reader +###################################################################### + + +class FeatStructReader: + def __init__( + self, + features=(SLASH, TYPE), + fdict_class=FeatStruct, + flist_class=FeatList, + logic_parser=None, + ): + self._features = {f.name: f for f in features} + self._fdict_class = fdict_class + self._flist_class = flist_class + self._prefix_feature = None + self._slash_feature = None + for feature in features: + if feature.display == "slash": + if self._slash_feature: + raise ValueError("Multiple features w/ display=slash") + self._slash_feature = feature + if feature.display == "prefix": + if self._prefix_feature: + raise ValueError("Multiple features w/ display=prefix") + self._prefix_feature = feature + self._features_with_defaults = [ + feature for feature in features if feature.default is not None + ] + if logic_parser is None: + logic_parser = LogicParser() + self._logic_parser = logic_parser + + def fromstring(self, s, fstruct=None): + """ + Convert a string representation of a feature structure (as + displayed by repr) into a ``FeatStruct``. 
This process + imposes the following restrictions on the string + representation: + + - Feature names cannot contain any of the following: + whitespace, parentheses, quote marks, equals signs, + dashes, commas, and square brackets. Feature names may + not begin with plus signs or minus signs. + - Only the following basic feature value are supported: + strings, integers, variables, None, and unquoted + alphanumeric strings. + - For reentrant values, the first mention must specify + a reentrance identifier and a value; and any subsequent + mentions must use arrows (``'->'``) to reference the + reentrance identifier. + """ + s = s.strip() + value, position = self.read_partial(s, 0, {}, fstruct) + if position != len(s): + self._error(s, "end of string", position) + return value + + _START_FSTRUCT_RE = re.compile(r"\s*(?:\((\d+)\)\s*)?(\??[\w-]+)?(\[)") + _END_FSTRUCT_RE = re.compile(r"\s*]\s*") + _SLASH_RE = re.compile(r"/") + _FEATURE_NAME_RE = re.compile(r'\s*([+-]?)([^\s\(\)<>"\'\-=\[\],]+)\s*') + _REENTRANCE_RE = re.compile(r"\s*->\s*") + _TARGET_RE = re.compile(r"\s*\((\d+)\)\s*") + _ASSIGN_RE = re.compile(r"\s*=\s*") + _COMMA_RE = re.compile(r"\s*,\s*") + _BARE_PREFIX_RE = re.compile(r"\s*(?:\((\d+)\)\s*)?(\??[\w-]+\s*)()") + # This one is used to distinguish fdicts from flists: + _START_FDICT_RE = re.compile( + r"(%s)|(%s\s*(%s\s*(=|->)|[+-]%s|\]))" + % ( + _BARE_PREFIX_RE.pattern, + _START_FSTRUCT_RE.pattern, + _FEATURE_NAME_RE.pattern, + _FEATURE_NAME_RE.pattern, + ) + ) + + def read_partial(self, s, position=0, reentrances=None, fstruct=None): + """ + Helper function that reads in a feature structure. + + :param s: The string to read. + :param position: The position in the string to start parsing. + :param reentrances: A dictionary from reentrance ids to values. + Defaults to an empty dictionary. + :return: A tuple (val, pos) of the feature structure created by + parsing and the position where the parsed feature structure ends. + :rtype: bool + """ + if reentrances is None: + reentrances = {} + try: + return self._read_partial(s, position, reentrances, fstruct) + except ValueError as e: + if len(e.args) != 2: + raise + self._error(s, *e.args) + + def _read_partial(self, s, position, reentrances, fstruct=None): + # Create the new feature structure + if fstruct is None: + if self._START_FDICT_RE.match(s, position): + fstruct = self._fdict_class() + else: + fstruct = self._flist_class() + + # Read up to the open bracket. + match = self._START_FSTRUCT_RE.match(s, position) + if not match: + match = self._BARE_PREFIX_RE.match(s, position) + if not match: + raise ValueError("open bracket or identifier", position) + position = match.end() + + # If there as an identifier, record it. + if match.group(1): + identifier = match.group(1) + if identifier in reentrances: + raise ValueError("new identifier", match.start(1)) + reentrances[identifier] = fstruct + + if isinstance(fstruct, FeatDict): + fstruct.clear() + return self._read_partial_featdict(s, position, match, reentrances, fstruct) + else: + del fstruct[:] + return self._read_partial_featlist(s, position, match, reentrances, fstruct) + + def _read_partial_featlist(self, s, position, match, reentrances, fstruct): + # Prefix features are not allowed: + if match.group(2): + raise ValueError("open bracket") + # Bare prefixes are not allowed: + if not match.group(3): + raise ValueError("open bracket") + + # Build a list of the features defined by the structure. + while position < len(s): + # Check for the close bracket. 
+ match = self._END_FSTRUCT_RE.match(s, position) + if match is not None: + return fstruct, match.end() + + # Reentances have the form "-> (target)" + match = self._REENTRANCE_RE.match(s, position) + if match: + position = match.end() + match = self._TARGET_RE.match(s, position) + if not match: + raise ValueError("identifier", position) + target = match.group(1) + if target not in reentrances: + raise ValueError("bound identifier", position) + position = match.end() + fstruct.append(reentrances[target]) + + # Anything else is a value. + else: + value, position = self._read_value(0, s, position, reentrances) + fstruct.append(value) + + # If there's a close bracket, handle it at the top of the loop. + if self._END_FSTRUCT_RE.match(s, position): + continue + + # Otherwise, there should be a comma + match = self._COMMA_RE.match(s, position) + if match is None: + raise ValueError("comma", position) + position = match.end() + + # We never saw a close bracket. + raise ValueError("close bracket", position) + + def _read_partial_featdict(self, s, position, match, reentrances, fstruct): + # If there was a prefix feature, record it. + if match.group(2): + if self._prefix_feature is None: + raise ValueError("open bracket or identifier", match.start(2)) + prefixval = match.group(2).strip() + if prefixval.startswith("?"): + prefixval = Variable(prefixval) + fstruct[self._prefix_feature] = prefixval + + # If group 3 is empty, then we just have a bare prefix, so + # we're done. + if not match.group(3): + return self._finalize(s, match.end(), reentrances, fstruct) + + # Build a list of the features defined by the structure. + # Each feature has one of the three following forms: + # name = value + # name -> (target) + # +name + # -name + while position < len(s): + # Use these variables to hold info about each feature: + name = value = None + + # Check for the close bracket. + match = self._END_FSTRUCT_RE.match(s, position) + if match is not None: + return self._finalize(s, match.end(), reentrances, fstruct) + + # Get the feature name's name + match = self._FEATURE_NAME_RE.match(s, position) + if match is None: + raise ValueError("feature name", position) + name = match.group(2) + position = match.end() + + # Check if it's a special feature. + if name[0] == "*" and name[-1] == "*": + name = self._features.get(name[1:-1]) + if name is None: + raise ValueError("known special feature", match.start(2)) + + # Check if this feature has a value already. + if name in fstruct: + raise ValueError("new name", match.start(2)) + + # Boolean value ("+name" or "-name") + if match.group(1) == "+": + value = True + if match.group(1) == "-": + value = False + + # Reentrance link ("-> (target)") + if value is None: + match = self._REENTRANCE_RE.match(s, position) + if match is not None: + position = match.end() + match = self._TARGET_RE.match(s, position) + if not match: + raise ValueError("identifier", position) + target = match.group(1) + if target not in reentrances: + raise ValueError("bound identifier", position) + position = match.end() + value = reentrances[target] + + # Assignment ("= value"). + if value is None: + match = self._ASSIGN_RE.match(s, position) + if match: + position = match.end() + value, position = self._read_value(name, s, position, reentrances) + # None of the above: error. + else: + raise ValueError("equals sign", position) + + # Store the value. + fstruct[name] = value + + # If there's a close bracket, handle it at the top of the loop. 
+ if self._END_FSTRUCT_RE.match(s, position): + continue + + # Otherwise, there should be a comma + match = self._COMMA_RE.match(s, position) + if match is None: + raise ValueError("comma", position) + position = match.end() + + # We never saw a close bracket. + raise ValueError("close bracket", position) + + def _finalize(self, s, pos, reentrances, fstruct): + """ + Called when we see the close brace -- checks for a slash feature, + and adds in default values. + """ + # Add the slash feature (if any) + match = self._SLASH_RE.match(s, pos) + if match: + name = self._slash_feature + v, pos = self._read_value(name, s, match.end(), reentrances) + fstruct[name] = v + ## Add any default features. -- handle in unficiation instead? + # for feature in self._features_with_defaults: + # fstruct.setdefault(feature, feature.default) + # Return the value. + return fstruct, pos + + def _read_value(self, name, s, position, reentrances): + if isinstance(name, Feature): + return name.read_value(s, position, reentrances, self) + else: + return self.read_value(s, position, reentrances) + + def read_value(self, s, position, reentrances): + for (handler, regexp) in self.VALUE_HANDLERS: + match = regexp.match(s, position) + if match: + handler_func = getattr(self, handler) + return handler_func(s, position, reentrances, match) + raise ValueError("value", position) + + def _error(self, s, expected, position): + lines = s.split("\n") + while position > len(lines[0]): + position -= len(lines.pop(0)) + 1 # +1 for the newline. + estr = ( + "Error parsing feature structure\n " + + lines[0] + + "\n " + + " " * position + + "^ " + + "Expected %s" % expected + ) + raise ValueError(estr) + + # //////////////////////////////////////////////////////////// + # { Value Readers + # //////////////////////////////////////////////////////////// + + #: A table indicating how feature values should be processed. Each + #: entry in the table is a pair (handler, regexp). The first entry + #: with a matching regexp will have its handler called. Handlers + #: should have the following signature:: + #: + #: def handler(s, position, reentrances, match): ... + #: + #: and should return a tuple (value, position), where position is + #: the string position where the value ended. (n.b.: order is + #: important here!) + VALUE_HANDLERS = [ + ("read_fstruct_value", _START_FSTRUCT_RE), + ("read_var_value", re.compile(r"\?[a-zA-Z_][a-zA-Z0-9_]*")), + ("read_str_value", re.compile("[uU]?[rR]?(['\"])")), + ("read_int_value", re.compile(r"-?\d+")), + ("read_sym_value", re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")), + ( + "read_app_value", + re.compile(r"<(app)\((\?[a-z][a-z]*)\s*," r"\s*(\?[a-z][a-z]*)\)>"), + ), + # ('read_logic_value', re.compile(r'<([^>]*)>')), + # lazily match any character after '<' until we hit a '>' not preceded by '-' + ("read_logic_value", re.compile(r"<(.*?)(?")), + ("read_set_value", re.compile(r"{")), + ("read_tuple_value", re.compile(r"\(")), + ] + + def read_fstruct_value(self, s, position, reentrances, match): + return self.read_partial(s, position, reentrances) + + def read_str_value(self, s, position, reentrances, match): + return read_str(s, position) + + def read_int_value(self, s, position, reentrances, match): + return int(match.group()), match.end() + + # Note: the '?' is included in the variable name. 
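+ # For example (a sketch of how parsed variables reach the caller):
+ #
+ #     >>> FeatStruct('[num=?n]')['num'].name
+ #     '?n'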
+ def read_var_value(self, s, position, reentrances, match): + return Variable(match.group()), match.end() + + _SYM_CONSTS = {"None": None, "True": True, "False": False} + + def read_sym_value(self, s, position, reentrances, match): + val, end = match.group(), match.end() + return self._SYM_CONSTS.get(val, val), end + + def read_app_value(self, s, position, reentrances, match): + """Mainly included for backwards compat.""" + return self._logic_parser.parse("%s(%s)" % match.group(2, 3)), match.end() + + def read_logic_value(self, s, position, reentrances, match): + try: + try: + expr = self._logic_parser.parse(match.group(1)) + except LogicalExpressionException as e: + raise ValueError from e + return expr, match.end() + except ValueError as e: + raise ValueError("logic expression", match.start(1)) from e + + def read_tuple_value(self, s, position, reentrances, match): + return self._read_seq_value( + s, position, reentrances, match, ")", FeatureValueTuple, FeatureValueConcat + ) + + def read_set_value(self, s, position, reentrances, match): + return self._read_seq_value( + s, position, reentrances, match, "}", FeatureValueSet, FeatureValueUnion + ) + + def _read_seq_value( + self, s, position, reentrances, match, close_paren, seq_class, plus_class + ): + """ + Helper function used by read_tuple_value and read_set_value. + """ + cp = re.escape(close_paren) + position = match.end() + # Special syntax of empty tuples: + m = re.compile(r"\s*/?\s*%s" % cp).match(s, position) + if m: + return seq_class(), m.end() + # Read values: + values = [] + seen_plus = False + while True: + # Close paren: return value. + m = re.compile(r"\s*%s" % cp).match(s, position) + if m: + if seen_plus: + return plus_class(values), m.end() + else: + return seq_class(values), m.end() + + # Read the next value. + val, position = self.read_value(s, position, reentrances) + values.append(val) + + # Comma or looking at close paren + m = re.compile(r"\s*(,|\+|(?=%s))\s*" % cp).match(s, position) + if not m: + raise ValueError("',' or '+' or '%s'" % cp, position) + if m.group(1) == "+": + seen_plus = True + position = m.end() + + +###################################################################### +# { Demo +###################################################################### + + +def display_unification(fs1, fs2, indent=" "): + # Print the two input feature structures, side by side. 
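+ # (Typical usage, as a sketch: display_unification(fs1, fs2) prints the two
+ # structures side by side, an "UNIFY" arrow, and then either the unified
+ # result or "(FAILED)", and returns the result.)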
+ fs1_lines = ("%s" % fs1).split("\n") + fs2_lines = ("%s" % fs2).split("\n") + if len(fs1_lines) > len(fs2_lines): + blankline = "[" + " " * (len(fs2_lines[0]) - 2) + "]" + fs2_lines += [blankline] * len(fs1_lines) + else: + blankline = "[" + " " * (len(fs1_lines[0]) - 2) + "]" + fs1_lines += [blankline] * len(fs2_lines) + for (fs1_line, fs2_line) in zip(fs1_lines, fs2_lines): + print(indent + fs1_line + " " + fs2_line) + print(indent + "-" * len(fs1_lines[0]) + " " + "-" * len(fs2_lines[0])) + + linelen = len(fs1_lines[0]) * 2 + 3 + print(indent + "| |".center(linelen)) + print(indent + "+-----UNIFY-----+".center(linelen)) + print(indent + "|".center(linelen)) + print(indent + "V".center(linelen)) + + bindings = {} + + result = fs1.unify(fs2, bindings) + if result is None: + print(indent + "(FAILED)".center(linelen)) + else: + print( + "\n".join(indent + l.center(linelen) for l in ("%s" % result).split("\n")) + ) + if bindings and len(bindings.bound_variables()) > 0: + print(repr(bindings).center(linelen)) + return result + + +def interactive_demo(trace=False): + import random + import sys + + HELP = """ + 1-%d: Select the corresponding feature structure + q: Quit + t: Turn tracing on or off + l: List all feature structures + ?: Help + """ + + print( + """ + This demo will repeatedly present you with a list of feature + structures, and ask you to choose two for unification. Whenever a + new feature structure is generated, it is added to the list of + choices that you can pick from. However, since this can be a + large number of feature structures, the demo will only print out a + random subset for you to choose between at a given time. If you + want to see the complete lists, type "l". For a list of valid + commands, type "?". + """ + ) + print('Press "Enter" to continue...') + sys.stdin.readline() + + fstruct_strings = [ + "[agr=[number=sing, gender=masc]]", + "[agr=[gender=masc, person=3]]", + "[agr=[gender=fem, person=3]]", + "[subj=[agr=(1)[]], agr->(1)]", + "[obj=?x]", + "[subj=?x]", + "[/=None]", + "[/=NP]", + "[cat=NP]", + "[cat=VP]", + "[cat=PP]", + "[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]", + "[gender=masc, agr=?C]", + "[gender=?S, agr=[gender=?S,person=3]]", + ] + + all_fstructs = [ + (i, FeatStruct(fstruct_strings[i])) for i in range(len(fstruct_strings)) + ] + + def list_fstructs(fstructs): + for i, fstruct in fstructs: + print() + lines = ("%s" % fstruct).split("\n") + print("%3d: %s" % (i + 1, lines[0])) + for line in lines[1:]: + print(" " + line) + print() + + while True: + # Pick 5 feature structures at random from the master list. 
+ MAX_CHOICES = 5 + if len(all_fstructs) > MAX_CHOICES: + fstructs = sorted(random.sample(all_fstructs, MAX_CHOICES)) + else: + fstructs = all_fstructs + + print("_" * 75) + + print("Choose two feature structures to unify:") + list_fstructs(fstructs) + + selected = [None, None] + for (nth, i) in (("First", 0), ("Second", 1)): + while selected[i] is None: + print( + ( + "%s feature structure (1-%d,q,t,l,?): " + % (nth, len(all_fstructs)) + ), + end=" ", + ) + try: + input = sys.stdin.readline().strip() + if input in ("q", "Q", "x", "X"): + return + if input in ("t", "T"): + trace = not trace + print(" Trace = %s" % trace) + continue + if input in ("h", "H", "?"): + print(HELP % len(fstructs)) + continue + if input in ("l", "L"): + list_fstructs(all_fstructs) + continue + num = int(input) - 1 + selected[i] = all_fstructs[num][1] + print() + except: + print("Bad sentence number") + continue + + if trace: + result = selected[0].unify(selected[1], trace=1) + else: + result = display_unification(selected[0], selected[1]) + if result is not None: + for i, fstruct in all_fstructs: + if repr(result) == repr(fstruct): + break + else: + all_fstructs.append((len(all_fstructs), result)) + + print('\nType "Enter" to continue unifying; or "q" to quit.') + input = sys.stdin.readline().strip() + if input in ("q", "Q", "x", "X"): + return + + +def demo(trace=False): + """ + Just for testing + """ + # import random + + # processor breaks with values like '3rd' + fstruct_strings = [ + "[agr=[number=sing, gender=masc]]", + "[agr=[gender=masc, person=3]]", + "[agr=[gender=fem, person=3]]", + "[subj=[agr=(1)[]], agr->(1)]", + "[obj=?x]", + "[subj=?x]", + "[/=None]", + "[/=NP]", + "[cat=NP]", + "[cat=VP]", + "[cat=PP]", + "[subj=[agr=[gender=?y]], obj=[agr=[gender=?y]]]", + "[gender=masc, agr=?C]", + "[gender=?S, agr=[gender=?S,person=3]]", + ] + all_fstructs = [FeatStruct(fss) for fss in fstruct_strings] + # MAX_CHOICES = 5 + # if len(all_fstructs) > MAX_CHOICES: + # fstructs = random.sample(all_fstructs, MAX_CHOICES) + # fstructs.sort() + # else: + # fstructs = all_fstructs + + for fs1 in all_fstructs: + for fs2 in all_fstructs: + print( + "\n*******************\nfs1 is:\n%s\n\nfs2 is:\n%s\n\nresult is:\n%s" + % (fs1, fs2, unify(fs1, fs2)) + ) + + +if __name__ == "__main__": + demo() + +__all__ = [ + "FeatStruct", + "FeatDict", + "FeatList", + "unify", + "subsumes", + "conflicts", + "Feature", + "SlashFeature", + "RangeFeature", + "SLASH", + "TYPE", + "FeatStructReader", +] diff --git a/lib/python3.10/site-packages/nltk/grammar.py b/lib/python3.10/site-packages/nltk/grammar.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f1fe736a4a84e0982780e514108a6812f6876b --- /dev/null +++ b/lib/python3.10/site-packages/nltk/grammar.py @@ -0,0 +1,1708 @@ +# Natural Language Toolkit: Context Free Grammars +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# Jason Narad +# Peter Ljunglöf +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT +# + +""" +Basic data classes for representing context free grammars. A +"grammar" specifies which trees can represent the structure of a +given text. Each of these trees is called a "parse tree" for the +text (or simply a "parse"). In a "context free" grammar, the set of +parse trees for any piece of a text can depend only on that piece, and +not on the rest of the text (i.e., the piece's context). Context free +grammars are often used to find possible syntactic structures for +sentences. 
In this context, the leaves of a parse tree are word +tokens; and the node values are phrasal categories, such as ``NP`` +and ``VP``. + +The ``CFG`` class is used to encode context free grammars. Each +``CFG`` consists of a start symbol and a set of productions. +The "start symbol" specifies the root node value for parse trees. For example, +the start symbol for syntactic parsing is usually ``S``. Start +symbols are encoded using the ``Nonterminal`` class, which is discussed +below. + +A Grammar's "productions" specify what parent-child relationships a parse +tree can contain. Each production specifies that a particular +node can be the parent of a particular set of children. For example, +the production `` -> `` specifies that an ``S`` node can +be the parent of an ``NP`` node and a ``VP`` node. + +Grammar productions are implemented by the ``Production`` class. +Each ``Production`` consists of a left hand side and a right hand +side. The "left hand side" is a ``Nonterminal`` that specifies the +node type for a potential parent; and the "right hand side" is a list +that specifies allowable children for that parent. This lists +consists of ``Nonterminals`` and text types: each ``Nonterminal`` +indicates that the corresponding child may be a ``TreeToken`` with the +specified node type; and each text type indicates that the +corresponding child may be a ``Token`` with the with that type. + +The ``Nonterminal`` class is used to distinguish node values from leaf +values. This prevents the grammar from accidentally using a leaf +value (such as the English word "A") as the node of a subtree. Within +a ``CFG``, all node values are wrapped in the ``Nonterminal`` +class. Note, however, that the trees that are specified by the grammar do +*not* include these ``Nonterminal`` wrappers. + +Grammars can also be given a more procedural interpretation. According to +this interpretation, a Grammar specifies any tree structure *tree* that +can be produced by the following procedure: + +| Set tree to the start symbol +| Repeat until tree contains no more nonterminal leaves: +| Choose a production prod with whose left hand side +| lhs is a nonterminal leaf of tree. +| Replace the nonterminal leaf with a subtree, whose node +| value is the value wrapped by the nonterminal lhs, and +| whose children are the right hand side of prod. + +The operation of replacing the left hand side (*lhs*) of a production +with the right hand side (*rhs*) in a tree (*tree*) is known as +"expanding" *lhs* to *rhs* in *tree*. +""" +import re +from functools import total_ordering + +from nltk.featstruct import SLASH, TYPE, FeatDict, FeatStruct, FeatStructReader +from nltk.internals import raise_unorderable_types +from nltk.probability import ImmutableProbabilisticMixIn +from nltk.util import invert_graph, transitive_closure + +################################################################# +# Nonterminal +################################################################# + + +@total_ordering +class Nonterminal: + """ + A non-terminal symbol for a context free grammar. ``Nonterminal`` + is a wrapper class for node values; it is used by ``Production`` + objects to distinguish node values from leaf values. + The node value that is wrapped by a ``Nonterminal`` is known as its + "symbol". Symbols are typically strings representing phrasal + categories (such as ``"NP"`` or ``"VP"``). However, more complex + symbol types are sometimes used (e.g., for lexicalized grammars). 
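+
+ For example (a minimal sketch):
+
+ >>> from nltk.grammar import Nonterminal, Production, nonterminals
+ >>> S, NP, VP = nonterminals('S, NP, VP')
+ >>> S == Nonterminal('S')
+ True
+ >>> S / NP
+ S/NP
+ >>> Production(S, [NP, VP])
+ S -> NP VP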
+ Since symbols are node values, they must be immutable and + hashable. Two ``Nonterminals`` are considered equal if their + symbols are equal. + + :see: ``CFG``, ``Production`` + :type _symbol: any + :ivar _symbol: The node value corresponding to this + ``Nonterminal``. This value must be immutable and hashable. + """ + + def __init__(self, symbol): + """ + Construct a new non-terminal from the given symbol. + + :type symbol: any + :param symbol: The node value corresponding to this + ``Nonterminal``. This value must be immutable and + hashable. + """ + self._symbol = symbol + + def symbol(self): + """ + Return the node value corresponding to this ``Nonterminal``. + + :rtype: (any) + """ + return self._symbol + + def __eq__(self, other): + """ + Return True if this non-terminal is equal to ``other``. In + particular, return True if ``other`` is a ``Nonterminal`` + and this non-terminal's symbol is equal to ``other`` 's symbol. + + :rtype: bool + """ + return type(self) == type(other) and self._symbol == other._symbol + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, Nonterminal): + raise_unorderable_types("<", self, other) + return self._symbol < other._symbol + + def __hash__(self): + return hash(self._symbol) + + def __repr__(self): + """ + Return a string representation for this ``Nonterminal``. + + :rtype: str + """ + if isinstance(self._symbol, str): + return "%s" % self._symbol + else: + return "%s" % repr(self._symbol) + + def __str__(self): + """ + Return a string representation for this ``Nonterminal``. + + :rtype: str + """ + if isinstance(self._symbol, str): + return "%s" % self._symbol + else: + return "%s" % repr(self._symbol) + + def __div__(self, rhs): + """ + Return a new nonterminal whose symbol is ``A/B``, where ``A`` is + the symbol for this nonterminal, and ``B`` is the symbol for rhs. + + :param rhs: The nonterminal used to form the right hand side + of the new nonterminal. + :type rhs: Nonterminal + :rtype: Nonterminal + """ + return Nonterminal(f"{self._symbol}/{rhs._symbol}") + + def __truediv__(self, rhs): + """ + Return a new nonterminal whose symbol is ``A/B``, where ``A`` is + the symbol for this nonterminal, and ``B`` is the symbol for rhs. + This function allows use of the slash ``/`` operator with + the future import of division. + + :param rhs: The nonterminal used to form the right hand side + of the new nonterminal. + :type rhs: Nonterminal + :rtype: Nonterminal + """ + return self.__div__(rhs) + + +def nonterminals(symbols): + """ + Given a string containing a list of symbol names, return a list of + ``Nonterminals`` constructed from those symbols. + + :param symbols: The symbol name string. This string can be + delimited by either spaces or commas. + :type symbols: str + :return: A list of ``Nonterminals`` constructed from the symbol + names given in ``symbols``. The ``Nonterminals`` are sorted + in the same order as the symbols names. + :rtype: list(Nonterminal) + """ + if "," in symbols: + symbol_list = symbols.split(",") + else: + symbol_list = symbols.split() + return [Nonterminal(s.strip()) for s in symbol_list] + + +class FeatStructNonterminal(FeatDict, Nonterminal): + """A feature structure that's also a nonterminal. 
It acts as its + own symbol, and automatically freezes itself when hashed.""" + + def __hash__(self): + self.freeze() + return FeatStruct.__hash__(self) + + def symbol(self): + return self + + +def is_nonterminal(item): + """ + :return: True if the item is a ``Nonterminal``. + :rtype: bool + """ + return isinstance(item, Nonterminal) + + +################################################################# +# Terminals +################################################################# + + +def is_terminal(item): + """ + Return True if the item is a terminal, which currently is + if it is hashable and not a ``Nonterminal``. + + :rtype: bool + """ + return hasattr(item, "__hash__") and not isinstance(item, Nonterminal) + + +################################################################# +# Productions +################################################################# + + +@total_ordering +class Production: + """ + A grammar production. Each production maps a single symbol + on the "left-hand side" to a sequence of symbols on the + "right-hand side". (In the case of context-free productions, + the left-hand side must be a ``Nonterminal``, and the right-hand + side is a sequence of terminals and ``Nonterminals``.) + "terminals" can be any immutable hashable object that is + not a ``Nonterminal``. Typically, terminals are strings + representing words, such as ``"dog"`` or ``"under"``. + + :see: ``CFG`` + :see: ``DependencyGrammar`` + :see: ``Nonterminal`` + :type _lhs: Nonterminal + :ivar _lhs: The left-hand side of the production. + :type _rhs: tuple(Nonterminal, terminal) + :ivar _rhs: The right-hand side of the production. + """ + + def __init__(self, lhs, rhs): + """ + Construct a new ``Production``. + + :param lhs: The left-hand side of the new ``Production``. + :type lhs: Nonterminal + :param rhs: The right-hand side of the new ``Production``. + :type rhs: sequence(Nonterminal and terminal) + """ + if isinstance(rhs, str): + raise TypeError( + "production right hand side should be a list, " "not a string" + ) + self._lhs = lhs + self._rhs = tuple(rhs) + + def lhs(self): + """ + Return the left-hand side of this ``Production``. + + :rtype: Nonterminal + """ + return self._lhs + + def rhs(self): + """ + Return the right-hand side of this ``Production``. + + :rtype: sequence(Nonterminal and terminal) + """ + return self._rhs + + def __len__(self): + """ + Return the length of the right-hand side. + + :rtype: int + """ + return len(self._rhs) + + def is_nonlexical(self): + """ + Return True if the right-hand side only contains ``Nonterminals`` + + :rtype: bool + """ + return all(is_nonterminal(n) for n in self._rhs) + + def is_lexical(self): + """ + Return True if the right-hand contain at least one terminal token. + + :rtype: bool + """ + return not self.is_nonlexical() + + def __str__(self): + """ + Return a verbose string representation of the ``Production``. + + :rtype: str + """ + result = "%s -> " % repr(self._lhs) + result += " ".join(repr(el) for el in self._rhs) + return result + + def __repr__(self): + """ + Return a concise string representation of the ``Production``. + + :rtype: str + """ + return "%s" % self + + def __eq__(self, other): + """ + Return True if this ``Production`` is equal to ``other``. 
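+
+        For illustration (the symbols here are arbitrary):
+
+        >>> from nltk.grammar import Production, Nonterminal
+        >>> p1 = Production(Nonterminal('NP'), [Nonterminal('Det'), Nonterminal('N')])
+        >>> p2 = Production(Nonterminal('NP'), [Nonterminal('Det'), Nonterminal('N')])
+        >>> p1 == p2
+        True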
+ + :rtype: bool + """ + return ( + type(self) == type(other) + and self._lhs == other._lhs + and self._rhs == other._rhs + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, Production): + raise_unorderable_types("<", self, other) + return (self._lhs, self._rhs) < (other._lhs, other._rhs) + + def __hash__(self): + """ + Return a hash value for the ``Production``. + + :rtype: int + """ + return hash((self._lhs, self._rhs)) + + +class DependencyProduction(Production): + """ + A dependency grammar production. Each production maps a single + head word to an unordered list of one or more modifier words. + """ + + def __str__(self): + """ + Return a verbose string representation of the ``DependencyProduction``. + + :rtype: str + """ + result = f"'{self._lhs}' ->" + for elt in self._rhs: + result += f" '{elt}'" + return result + + +class ProbabilisticProduction(Production, ImmutableProbabilisticMixIn): + """ + A probabilistic context free grammar production. + A PCFG ``ProbabilisticProduction`` is essentially just a ``Production`` that + has an associated probability, which represents how likely it is that + this production will be used. In particular, the probability of a + ``ProbabilisticProduction`` records the likelihood that its right-hand side is + the correct instantiation for any given occurrence of its left-hand side. + + :see: ``Production`` + """ + + def __init__(self, lhs, rhs, **prob): + """ + Construct a new ``ProbabilisticProduction``. + + :param lhs: The left-hand side of the new ``ProbabilisticProduction``. + :type lhs: Nonterminal + :param rhs: The right-hand side of the new ``ProbabilisticProduction``. + :type rhs: sequence(Nonterminal and terminal) + :param prob: Probability parameters of the new ``ProbabilisticProduction``. + """ + ImmutableProbabilisticMixIn.__init__(self, **prob) + Production.__init__(self, lhs, rhs) + + def __str__(self): + return super().__str__() + ( + " [1.0]" if (self.prob() == 1.0) else " [%g]" % self.prob() + ) + + def __eq__(self, other): + return ( + type(self) == type(other) + and self._lhs == other._lhs + and self._rhs == other._rhs + and self.prob() == other.prob() + ) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self._lhs, self._rhs, self.prob())) + + +################################################################# +# Grammars +################################################################# + + +class CFG: + """ + A context-free grammar. A grammar consists of a start state and + a set of productions. The set of terminals and nonterminals is + implicitly specified by the productions. + + If you need efficient key-based access to productions, you + can use a subclass to implement it. + """ + + def __init__(self, start, productions, calculate_leftcorners=True): + """ + Create a new context-free grammar, from the given start state + and set of ``Production`` instances. + + :param start: The start symbol + :type start: Nonterminal + :param productions: The list of productions that defines the grammar + :type productions: list(Production) + :param calculate_leftcorners: False if we don't want to calculate the + leftcorner relation. In that case, some optimized chart parsers won't work. 
+ :type calculate_leftcorners: bool + """ + if not is_nonterminal(start): + raise TypeError( + "start should be a Nonterminal object," + " not a %s" % type(start).__name__ + ) + + self._start = start + self._productions = productions + self._categories = {prod.lhs() for prod in productions} + self._calculate_indexes() + self._calculate_grammar_forms() + if calculate_leftcorners: + self._calculate_leftcorners() + + def _calculate_indexes(self): + self._lhs_index = {} + self._rhs_index = {} + self._empty_index = {} + self._lexical_index = {} + for prod in self._productions: + # Left hand side. + lhs = prod._lhs + if lhs not in self._lhs_index: + self._lhs_index[lhs] = [] + self._lhs_index[lhs].append(prod) + if prod._rhs: + # First item in right hand side. + rhs0 = prod._rhs[0] + if rhs0 not in self._rhs_index: + self._rhs_index[rhs0] = [] + self._rhs_index[rhs0].append(prod) + else: + # The right hand side is empty. + self._empty_index[prod.lhs()] = prod + # Lexical tokens in the right hand side. + for token in prod._rhs: + if is_terminal(token): + self._lexical_index.setdefault(token, set()).add(prod) + + def _calculate_leftcorners(self): + # Calculate leftcorner relations, for use in optimized parsing. + self._immediate_leftcorner_categories = {cat: {cat} for cat in self._categories} + self._immediate_leftcorner_words = {cat: set() for cat in self._categories} + for prod in self.productions(): + if len(prod) > 0: + cat, left = prod.lhs(), prod.rhs()[0] + if is_nonterminal(left): + self._immediate_leftcorner_categories[cat].add(left) + else: + self._immediate_leftcorner_words[cat].add(left) + + lc = transitive_closure(self._immediate_leftcorner_categories, reflexive=True) + self._leftcorners = lc + self._leftcorner_parents = invert_graph(lc) + + nr_leftcorner_categories = sum( + map(len, self._immediate_leftcorner_categories.values()) + ) + nr_leftcorner_words = sum(map(len, self._immediate_leftcorner_words.values())) + if nr_leftcorner_words > nr_leftcorner_categories > 10000: + # If the grammar is big, the leftcorner-word dictionary will be too large. + # In that case it is better to calculate the relation on demand. + self._leftcorner_words = None + return + + self._leftcorner_words = {} + for cat in self._leftcorners: + lefts = self._leftcorners[cat] + lc = self._leftcorner_words[cat] = set() + for left in lefts: + lc.update(self._immediate_leftcorner_words.get(left, set())) + + @classmethod + def fromstring(cls, input, encoding=None): + """ + Return the grammar instance corresponding to the input string(s). + + :param input: a grammar, either in the form of a string or as a list of strings. + """ + start, productions = read_grammar( + input, standard_nonterm_parser, encoding=encoding + ) + return cls(start, productions) + + def start(self): + """ + Return the start symbol of the grammar + + :rtype: Nonterminal + """ + return self._start + + # tricky to balance readability and efficiency here! + # can't use set operations as they don't preserve ordering + def productions(self, lhs=None, rhs=None, empty=False): + """ + Return the grammar productions, filtered by the left-hand side + or the first item in the right-hand side. + + :param lhs: Only return productions with the given left-hand side. + :param rhs: Only return productions with the given first item + in the right-hand side. + :param empty: Only return productions with an empty right-hand side. + :return: A list of productions matching the given constraints. 
+ :rtype: list(Production) + """ + if rhs and empty: + raise ValueError( + "You cannot select empty and non-empty " "productions at the same time." + ) + + # no constraints so return everything + if not lhs and not rhs: + if not empty: + return self._productions + else: + return self._empty_index.values() + + # only lhs specified so look up its index + elif lhs and not rhs: + if not empty: + return self._lhs_index.get(lhs, []) + elif lhs in self._empty_index: + return [self._empty_index[lhs]] + else: + return [] + + # only rhs specified so look up its index + elif rhs and not lhs: + return self._rhs_index.get(rhs, []) + + # intersect + else: + return [ + prod + for prod in self._lhs_index.get(lhs, []) + if prod in self._rhs_index.get(rhs, []) + ] + + def leftcorners(self, cat): + """ + Return the set of all nonterminals that the given nonterminal + can start with, including itself. + + This is the reflexive, transitive closure of the immediate + leftcorner relation: (A > B) iff (A -> B beta) + + :param cat: the parent of the leftcorners + :type cat: Nonterminal + :return: the set of all leftcorners + :rtype: set(Nonterminal) + """ + return self._leftcorners.get(cat, {cat}) + + def is_leftcorner(self, cat, left): + """ + True if left is a leftcorner of cat, where left can be a + terminal or a nonterminal. + + :param cat: the parent of the leftcorner + :type cat: Nonterminal + :param left: the suggested leftcorner + :type left: Terminal or Nonterminal + :rtype: bool + """ + if is_nonterminal(left): + return left in self.leftcorners(cat) + elif self._leftcorner_words: + return left in self._leftcorner_words.get(cat, set()) + else: + return any( + left in self._immediate_leftcorner_words.get(parent, set()) + for parent in self.leftcorners(cat) + ) + + def leftcorner_parents(self, cat): + """ + Return the set of all nonterminals for which the given category + is a left corner. This is the inverse of the leftcorner relation. + + :param cat: the suggested leftcorner + :type cat: Nonterminal + :return: the set of all parents to the leftcorner + :rtype: set(Nonterminal) + """ + return self._leftcorner_parents.get(cat, {cat}) + + def check_coverage(self, tokens): + """ + Check whether the grammar rules cover the given list of tokens. + If not, then raise an exception. + + :type tokens: list(str) + """ + missing = [tok for tok in tokens if not self._lexical_index.get(tok)] + if missing: + missing = ", ".join(f"{w!r}" for w in missing) + raise ValueError( + "Grammar does not cover some of the " "input words: %r." % missing + ) + + def _calculate_grammar_forms(self): + """ + Pre-calculate of which form(s) the grammar is. + """ + prods = self._productions + self._is_lexical = all(p.is_lexical() for p in prods) + self._is_nonlexical = all(p.is_nonlexical() for p in prods if len(p) != 1) + self._min_len = min(len(p) for p in prods) + self._max_len = max(len(p) for p in prods) + self._all_unary_are_lexical = all(p.is_lexical() for p in prods if len(p) == 1) + + def is_lexical(self): + """ + Return True if all productions are lexicalised. + """ + return self._is_lexical + + def is_nonlexical(self): + """ + Return True if all lexical rules are "preterminals", that is, + unary rules which can be separated in a preprocessing step. + + This means that all productions are of the forms + A -> B1 ... Bn (n>=0), or A -> "s". + + Note: is_lexical() and is_nonlexical() are not opposites. + There are grammars which are neither, and grammars which are both. 
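+
+        A small doctest with a toy grammar, assumed only for this example:
+
+        >>> from nltk.grammar import CFG
+        >>> g = CFG.fromstring(["S -> NP VP", "NP -> 'John'", "VP -> 'runs'"])
+        >>> g.is_nonlexical()
+        True
+        >>> g.is_lexical()
+        False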
+ """ + return self._is_nonlexical + + def min_len(self): + """ + Return the right-hand side length of the shortest grammar production. + """ + return self._min_len + + def max_len(self): + """ + Return the right-hand side length of the longest grammar production. + """ + return self._max_len + + def is_nonempty(self): + """ + Return True if there are no empty productions. + """ + return self._min_len > 0 + + def is_binarised(self): + """ + Return True if all productions are at most binary. + Note that there can still be empty and unary productions. + """ + return self._max_len <= 2 + + def is_flexible_chomsky_normal_form(self): + """ + Return True if all productions are of the forms + A -> B C, A -> B, or A -> "s". + """ + return self.is_nonempty() and self.is_nonlexical() and self.is_binarised() + + def is_chomsky_normal_form(self): + """ + Return True if the grammar is of Chomsky Normal Form, i.e. all productions + are of the form A -> B C, or A -> "s". + """ + return self.is_flexible_chomsky_normal_form() and self._all_unary_are_lexical + + def chomsky_normal_form(self, new_token_padding="@$@", flexible=False): + """ + Returns a new Grammar that is in chomsky normal + + :param: new_token_padding + Customise new rule formation during binarisation + """ + if self.is_chomsky_normal_form(): + return self + if self.productions(empty=True): + raise ValueError( + "Grammar has Empty rules. " "Cannot deal with them at the moment" + ) + + # check for mixed rules + for rule in self.productions(): + if rule.is_lexical() and len(rule.rhs()) > 1: + raise ValueError( + f"Cannot handled mixed rule {rule.lhs()} => {rule.rhs()}" + ) + + step1 = CFG.eliminate_start(self) + step2 = CFG.binarize(step1, new_token_padding) + if flexible: + return step2 + step3 = CFG.remove_unitary_rules(step2) + step4 = CFG(step3.start(), list(set(step3.productions()))) + return step4 + + @classmethod + def remove_unitary_rules(cls, grammar): + """ + Remove nonlexical unitary rules and convert them to + lexical + """ + result = [] + unitary = [] + for rule in grammar.productions(): + if len(rule) == 1 and rule.is_nonlexical(): + unitary.append(rule) + else: + result.append(rule) + + while unitary: + rule = unitary.pop(0) + for item in grammar.productions(lhs=rule.rhs()[0]): + new_rule = Production(rule.lhs(), item.rhs()) + if len(new_rule) != 1 or new_rule.is_lexical(): + result.append(new_rule) + else: + unitary.append(new_rule) + + n_grammar = CFG(grammar.start(), result) + return n_grammar + + @classmethod + def binarize(cls, grammar, padding="@$@"): + """ + Convert all non-binary rules into binary by introducing + new tokens. 
+ Example:: + + Original: + A => B C D + After Conversion: + A => B A@$@B + A@$@B => C D + """ + result = [] + + for rule in grammar.productions(): + if len(rule.rhs()) > 2: + # this rule needs to be broken down + left_side = rule.lhs() + for k in range(0, len(rule.rhs()) - 2): + tsym = rule.rhs()[k] + new_sym = Nonterminal(left_side.symbol() + padding + tsym.symbol()) + new_production = Production(left_side, (tsym, new_sym)) + left_side = new_sym + result.append(new_production) + last_prd = Production(left_side, rule.rhs()[-2:]) + result.append(last_prd) + else: + result.append(rule) + + n_grammar = CFG(grammar.start(), result) + return n_grammar + + @classmethod + def eliminate_start(cls, grammar): + """ + Eliminate start rule in case it appears on RHS + Example: S -> S0 S1 and S0 -> S1 S + Then another rule S0_Sigma -> S is added + """ + start = grammar.start() + result = [] + need_to_add = None + for rule in grammar.productions(): + if start in rule.rhs(): + need_to_add = True + result.append(rule) + if need_to_add: + start = Nonterminal("S0_SIGMA") + result.append(Production(start, [grammar.start()])) + n_grammar = CFG(start, result) + return n_grammar + return grammar + + def __repr__(self): + return "" % len(self._productions) + + def __str__(self): + result = "Grammar with %d productions" % len(self._productions) + result += " (start state = %r)" % self._start + for production in self._productions: + result += "\n %s" % production + return result + + +class FeatureGrammar(CFG): + """ + A feature-based grammar. This is equivalent to a + ``CFG`` whose nonterminals are all + ``FeatStructNonterminal``. + + A grammar consists of a start state and a set of + productions. The set of terminals and nonterminals + is implicitly specified by the productions. + """ + + def __init__(self, start, productions): + """ + Create a new feature-based grammar, from the given start + state and set of ``Productions``. + + :param start: The start symbol + :type start: FeatStructNonterminal + :param productions: The list of productions that defines the grammar + :type productions: list(Production) + """ + CFG.__init__(self, start, productions) + + # The difference with CFG is that the productions are + # indexed on the TYPE feature of the nonterminals. + # This is calculated by the method _get_type_if_possible(). + + def _calculate_indexes(self): + self._lhs_index = {} + self._rhs_index = {} + self._empty_index = {} + self._empty_productions = [] + self._lexical_index = {} + for prod in self._productions: + # Left hand side. + lhs = self._get_type_if_possible(prod._lhs) + if lhs not in self._lhs_index: + self._lhs_index[lhs] = [] + self._lhs_index[lhs].append(prod) + if prod._rhs: + # First item in right hand side. + rhs0 = self._get_type_if_possible(prod._rhs[0]) + if rhs0 not in self._rhs_index: + self._rhs_index[rhs0] = [] + self._rhs_index[rhs0].append(prod) + else: + # The right hand side is empty. + if lhs not in self._empty_index: + self._empty_index[lhs] = [] + self._empty_index[lhs].append(prod) + self._empty_productions.append(prod) + # Lexical tokens in the right hand side. + for token in prod._rhs: + if is_terminal(token): + self._lexical_index.setdefault(token, set()).add(prod) + + @classmethod + def fromstring( + cls, input, features=None, logic_parser=None, fstruct_reader=None, encoding=None + ): + """ + Return a feature structure based grammar. + + :param input: a grammar, either in the form of a string or else + as a list of strings. 
+ :param features: a tuple of features (default: SLASH, TYPE) + :param logic_parser: a parser for lambda-expressions, + by default, ``LogicParser()`` + :param fstruct_reader: a feature structure parser + (only if features and logic_parser is None) + """ + if features is None: + features = (SLASH, TYPE) + + if fstruct_reader is None: + fstruct_reader = FeatStructReader( + features, FeatStructNonterminal, logic_parser=logic_parser + ) + elif logic_parser is not None: + raise Exception( + "'logic_parser' and 'fstruct_reader' must " "not both be set" + ) + + start, productions = read_grammar( + input, fstruct_reader.read_partial, encoding=encoding + ) + return cls(start, productions) + + def productions(self, lhs=None, rhs=None, empty=False): + """ + Return the grammar productions, filtered by the left-hand side + or the first item in the right-hand side. + + :param lhs: Only return productions with the given left-hand side. + :param rhs: Only return productions with the given first item + in the right-hand side. + :param empty: Only return productions with an empty right-hand side. + :rtype: list(Production) + """ + if rhs and empty: + raise ValueError( + "You cannot select empty and non-empty " "productions at the same time." + ) + + # no constraints so return everything + if not lhs and not rhs: + if empty: + return self._empty_productions + else: + return self._productions + + # only lhs specified so look up its index + elif lhs and not rhs: + if empty: + return self._empty_index.get(self._get_type_if_possible(lhs), []) + else: + return self._lhs_index.get(self._get_type_if_possible(lhs), []) + + # only rhs specified so look up its index + elif rhs and not lhs: + return self._rhs_index.get(self._get_type_if_possible(rhs), []) + + # intersect + else: + return [ + prod + for prod in self._lhs_index.get(self._get_type_if_possible(lhs), []) + if prod in self._rhs_index.get(self._get_type_if_possible(rhs), []) + ] + + def leftcorners(self, cat): + """ + Return the set of all words that the given category can start with. + Also called the "first set" in compiler construction. + """ + raise NotImplementedError("Not implemented yet") + + def leftcorner_parents(self, cat): + """ + Return the set of all categories for which the given category + is a left corner. + """ + raise NotImplementedError("Not implemented yet") + + def _get_type_if_possible(self, item): + """ + Helper function which returns the ``TYPE`` feature of the ``item``, + if it exists, otherwise it returns the ``item`` itself + """ + if isinstance(item, dict) and TYPE in item: + return FeatureValueType(item[TYPE]) + else: + return item + + +@total_ordering +class FeatureValueType: + """ + A helper class for ``FeatureGrammars``, designed to be different + from ordinary strings. This is to stop the ``FeatStruct`` + ``FOO[]`` from being compare equal to the terminal "FOO". + """ + + def __init__(self, value): + self._value = value + + def __repr__(self): + return "<%s>" % self._value + + def __eq__(self, other): + return type(self) == type(other) and self._value == other._value + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, FeatureValueType): + raise_unorderable_types("<", self, other) + return self._value < other._value + + def __hash__(self): + return hash(self._value) + + +class DependencyGrammar: + """ + A dependency grammar. A DependencyGrammar consists of a set of + productions. Each production specifies a head/modifier relationship + between a pair of words. 
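+
+    For example (the words are arbitrary, chosen only for illustration):
+
+    >>> from nltk.grammar import DependencyGrammar
+    >>> dg = DependencyGrammar.fromstring("'taught' -> 'play' | 'man'")
+    >>> dg.contains('taught', 'man')
+    True
+    >>> ('taught', 'play') in dg
+    True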
+ """ + + def __init__(self, productions): + """ + Create a new dependency grammar, from the set of ``Productions``. + + :param productions: The list of productions that defines the grammar + :type productions: list(Production) + """ + self._productions = productions + + @classmethod + def fromstring(cls, input): + productions = [] + for linenum, line in enumerate(input.split("\n")): + line = line.strip() + if line.startswith("#") or line == "": + continue + try: + productions += _read_dependency_production(line) + except ValueError as e: + raise ValueError(f"Unable to parse line {linenum}: {line}") from e + if len(productions) == 0: + raise ValueError("No productions found!") + return cls(productions) + + def contains(self, head, mod): + """ + :param head: A head word. + :type head: str + :param mod: A mod word, to test as a modifier of 'head'. + :type mod: str + + :return: true if this ``DependencyGrammar`` contains a + ``DependencyProduction`` mapping 'head' to 'mod'. + :rtype: bool + """ + for production in self._productions: + for possibleMod in production._rhs: + if production._lhs == head and possibleMod == mod: + return True + return False + + def __contains__(self, head_mod): + """ + Return True if this ``DependencyGrammar`` contains a + ``DependencyProduction`` mapping 'head' to 'mod'. + + :param head_mod: A tuple of a head word and a mod word, + to test as a modifier of 'head'. + :type head: Tuple[str, str] + :rtype: bool + """ + try: + head, mod = head_mod + except ValueError as e: + raise ValueError( + "Must use a tuple of strings, e.g. `('price', 'of') in grammar`" + ) from e + return self.contains(head, mod) + + # # should be rewritten, the set comp won't work in all comparisons + # def contains_exactly(self, head, modlist): + # for production in self._productions: + # if(len(production._rhs) == len(modlist)): + # if(production._lhs == head): + # set1 = Set(production._rhs) + # set2 = Set(modlist) + # if(set1 == set2): + # return True + # return False + + def __str__(self): + """ + Return a verbose string representation of the ``DependencyGrammar`` + + :rtype: str + """ + str = "Dependency grammar with %d productions" % len(self._productions) + for production in self._productions: + str += "\n %s" % production + return str + + def __repr__(self): + """ + Return a concise string representation of the ``DependencyGrammar`` + """ + return "Dependency grammar with %d productions" % len(self._productions) + + +class ProbabilisticDependencyGrammar: + """ """ + + def __init__(self, productions, events, tags): + self._productions = productions + self._events = events + self._tags = tags + + def contains(self, head, mod): + """ + Return True if this ``DependencyGrammar`` contains a + ``DependencyProduction`` mapping 'head' to 'mod'. + + :param head: A head word. + :type head: str + :param mod: A mod word, to test as a modifier of 'head'. 
+ :type mod: str + :rtype: bool + """ + for production in self._productions: + for possibleMod in production._rhs: + if production._lhs == head and possibleMod == mod: + return True + return False + + def __str__(self): + """ + Return a verbose string representation of the ``ProbabilisticDependencyGrammar`` + + :rtype: str + """ + str = "Statistical dependency grammar with %d productions" % len( + self._productions + ) + for production in self._productions: + str += "\n %s" % production + str += "\nEvents:" + for event in self._events: + str += "\n %d:%s" % (self._events[event], event) + str += "\nTags:" + for tag_word in self._tags: + str += f"\n {tag_word}:\t({self._tags[tag_word]})" + return str + + def __repr__(self): + """ + Return a concise string representation of the ``ProbabilisticDependencyGrammar`` + """ + return "Statistical Dependency grammar with %d productions" % len( + self._productions + ) + + +class PCFG(CFG): + """ + A probabilistic context-free grammar. A PCFG consists of a + start state and a set of productions with probabilities. The set of + terminals and nonterminals is implicitly specified by the productions. + + PCFG productions use the ``ProbabilisticProduction`` class. + ``PCFGs`` impose the constraint that the set of productions with + any given left-hand-side must have probabilities that sum to 1 + (allowing for a small margin of error). + + If you need efficient key-based access to productions, you can use + a subclass to implement it. + + :type EPSILON: float + :cvar EPSILON: The acceptable margin of error for checking that + productions with a given left-hand side have probabilities + that sum to 1. + """ + + EPSILON = 0.01 + + def __init__(self, start, productions, calculate_leftcorners=True): + """ + Create a new context-free grammar, from the given start state + and set of ``ProbabilisticProductions``. + + :param start: The start symbol + :type start: Nonterminal + :param productions: The list of productions that defines the grammar + :type productions: list(Production) + :raise ValueError: if the set of productions with any left-hand-side + do not have probabilities that sum to a value within + EPSILON of 1. + :param calculate_leftcorners: False if we don't want to calculate the + leftcorner relation. In that case, some optimized chart parsers won't work. + :type calculate_leftcorners: bool + """ + CFG.__init__(self, start, productions, calculate_leftcorners) + + # Make sure that the probabilities sum to one. + probs = {} + for production in productions: + probs[production.lhs()] = probs.get(production.lhs(), 0) + production.prob() + for (lhs, p) in probs.items(): + if not ((1 - PCFG.EPSILON) < p < (1 + PCFG.EPSILON)): + raise ValueError("Productions for %r do not sum to 1" % lhs) + + @classmethod + def fromstring(cls, input, encoding=None): + """ + Return a probabilistic context-free grammar corresponding to the + input string(s). + + :param input: a grammar, either in the form of a string or else + as a list of strings. + """ + start, productions = read_grammar( + input, standard_nonterm_parser, probabilistic=True, encoding=encoding + ) + return cls(start, productions) + + +################################################################# +# Inducing Grammars +################################################################# + +# Contributed by Nathan Bodenstab + + +def induce_pcfg(start, productions): + r""" + Induce a PCFG grammar from a list of productions. 
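+
+    A small worked example of the formula below (toy productions, assumed
+    only for illustration):
+
+    >>> from nltk import Production, nonterminals, induce_pcfg
+    >>> S, NP, VP = nonterminals('S, NP, VP')
+    >>> prods = [Production(S, [NP, VP]), Production(S, [NP, VP]), Production(S, [NP])]
+    >>> g = induce_pcfg(S, prods)
+    >>> sorted(round(p.prob(), 3) for p in g.productions())
+    [0.333, 0.667]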
+ + The probability of a production A -> B C in a PCFG is: + + | count(A -> B C) + | P(B, C | A) = --------------- where \* is any right hand side + | count(A -> \*) + + :param start: The start symbol + :type start: Nonterminal + :param productions: The list of productions that defines the grammar + :type productions: list(Production) + """ + # Production count: the number of times a given production occurs + pcount = {} + + # LHS-count: counts the number of times a given lhs occurs + lcount = {} + + for prod in productions: + lcount[prod.lhs()] = lcount.get(prod.lhs(), 0) + 1 + pcount[prod] = pcount.get(prod, 0) + 1 + + prods = [ + ProbabilisticProduction(p.lhs(), p.rhs(), prob=pcount[p] / lcount[p.lhs()]) + for p in pcount + ] + return PCFG(start, prods) + + +################################################################# +# Helper functions for reading productions +################################################################# + + +def _read_cfg_production(input): + """ + Return a list of context-free ``Productions``. + """ + return _read_production(input, standard_nonterm_parser) + + +def _read_pcfg_production(input): + """ + Return a list of PCFG ``ProbabilisticProductions``. + """ + return _read_production(input, standard_nonterm_parser, probabilistic=True) + + +def _read_fcfg_production(input, fstruct_reader): + """ + Return a list of feature-based ``Productions``. + """ + return _read_production(input, fstruct_reader) + + +# Parsing generic grammars + +_ARROW_RE = re.compile(r"\s* -> \s*", re.VERBOSE) +_PROBABILITY_RE = re.compile(r"( \[ [\d\.]+ \] ) \s*", re.VERBOSE) +_TERMINAL_RE = re.compile(r'( "[^"]*" | \'[^\']*\' ) \s*', re.VERBOSE) +_DISJUNCTION_RE = re.compile(r"\| \s*", re.VERBOSE) + + +def _read_production(line, nonterm_parser, probabilistic=False): + """ + Parse a grammar rule, given as a string, and return + a list of productions. + """ + pos = 0 + + # Parse the left-hand side. + lhs, pos = nonterm_parser(line, pos) + + # Skip over the arrow. + m = _ARROW_RE.match(line, pos) + if not m: + raise ValueError("Expected an arrow") + pos = m.end() + + # Parse the right hand side. + probabilities = [0.0] + rhsides = [[]] + while pos < len(line): + # Probability. + m = _PROBABILITY_RE.match(line, pos) + if probabilistic and m: + pos = m.end() + probabilities[-1] = float(m.group(1)[1:-1]) + if probabilities[-1] > 1.0: + raise ValueError( + "Production probability %f, " + "should not be greater than 1.0" % (probabilities[-1],) + ) + + # String -- add terminal. + elif line[pos] in "'\"": + m = _TERMINAL_RE.match(line, pos) + if not m: + raise ValueError("Unterminated string") + rhsides[-1].append(m.group(1)[1:-1]) + pos = m.end() + + # Vertical bar -- start new rhside. + elif line[pos] == "|": + m = _DISJUNCTION_RE.match(line, pos) + probabilities.append(0.0) + rhsides.append([]) + pos = m.end() + + # Anything else -- nonterminal. + else: + nonterm, pos = nonterm_parser(line, pos) + rhsides[-1].append(nonterm) + + if probabilistic: + return [ + ProbabilisticProduction(lhs, rhs, prob=probability) + for (rhs, probability) in zip(rhsides, probabilities) + ] + else: + return [Production(lhs, rhs) for rhs in rhsides] + + +################################################################# +# Reading Phrase Structure Grammars +################################################################# + + +def read_grammar(input, nonterm_parser, probabilistic=False, encoding=None): + """ + Return a pair consisting of a starting category and a list of + ``Productions``. 
+ + :param input: a grammar, either in the form of a string or else + as a list of strings. + :param nonterm_parser: a function for parsing nonterminals. + It should take a ``(string, position)`` as argument and + return a ``(nonterminal, position)`` as result. + :param probabilistic: are the grammar rules probabilistic? + :type probabilistic: bool + :param encoding: the encoding of the grammar, if it is a binary string + :type encoding: str + """ + if encoding is not None: + input = input.decode(encoding) + if isinstance(input, str): + lines = input.split("\n") + else: + lines = input + + start = None + productions = [] + continue_line = "" + for linenum, line in enumerate(lines): + line = continue_line + line.strip() + if line.startswith("#") or line == "": + continue + if line.endswith("\\"): + continue_line = line[:-1].rstrip() + " " + continue + continue_line = "" + try: + if line[0] == "%": + directive, args = line[1:].split(None, 1) + if directive == "start": + start, pos = nonterm_parser(args, 0) + if pos != len(args): + raise ValueError("Bad argument to start directive") + else: + raise ValueError("Bad directive") + else: + # expand out the disjunctions on the RHS + productions += _read_production(line, nonterm_parser, probabilistic) + except ValueError as e: + raise ValueError(f"Unable to parse line {linenum + 1}: {line}\n{e}") from e + + if not productions: + raise ValueError("No productions found!") + if not start: + start = productions[0].lhs() + return (start, productions) + + +_STANDARD_NONTERM_RE = re.compile(r"( [\w/][\w/^<>-]* ) \s*", re.VERBOSE) + + +def standard_nonterm_parser(string, pos): + m = _STANDARD_NONTERM_RE.match(string, pos) + if not m: + raise ValueError("Expected a nonterminal, found: " + string[pos:]) + return (Nonterminal(m.group(1)), m.end()) + + +################################################################# +# Reading Dependency Grammars +################################################################# + +_READ_DG_RE = re.compile( + r"""^\s* # leading whitespace + ('[^']+')\s* # single-quoted lhs + (?:[-=]+>)\s* # arrow + (?:( # rhs: + "[^"]+" # doubled-quoted terminal + | '[^']+' # single-quoted terminal + | \| # disjunction + ) + \s*) # trailing space + *$""", # zero or more copies + re.VERBOSE, +) +_SPLIT_DG_RE = re.compile(r"""('[^']'|[-=]+>|"[^"]+"|'[^']+'|\|)""") + + +def _read_dependency_production(s): + if not _READ_DG_RE.match(s): + raise ValueError("Bad production string") + pieces = _SPLIT_DG_RE.split(s) + pieces = [p for i, p in enumerate(pieces) if i % 2 == 1] + lhside = pieces[0].strip("'\"") + rhsides = [[]] + for piece in pieces[2:]: + if piece == "|": + rhsides.append([]) + else: + rhsides[-1].append(piece.strip("'\"")) + return [DependencyProduction(lhside, rhside) for rhside in rhsides] + + +################################################################# +# Demonstration +################################################################# + + +def cfg_demo(): + """ + A demonstration showing how ``CFGs`` can be created and used. 
+ """ + + from nltk import CFG, Production, nonterminals + + # Create some nonterminals + S, NP, VP, PP = nonterminals("S, NP, VP, PP") + N, V, P, Det = nonterminals("N, V, P, Det") + VP_slash_NP = VP / NP + + print("Some nonterminals:", [S, NP, VP, PP, N, V, P, Det, VP / NP]) + print(" S.symbol() =>", repr(S.symbol())) + print() + + print(Production(S, [NP])) + + # Create some Grammar Productions + grammar = CFG.fromstring( + """ + S -> NP VP + PP -> P NP + NP -> Det N | NP PP + VP -> V NP | VP PP + Det -> 'a' | 'the' + N -> 'dog' | 'cat' + V -> 'chased' | 'sat' + P -> 'on' | 'in' + """ + ) + + print("A Grammar:", repr(grammar)) + print(" grammar.start() =>", repr(grammar.start())) + print(" grammar.productions() =>", end=" ") + # Use string.replace(...) is to line-wrap the output. + print(repr(grammar.productions()).replace(",", ",\n" + " " * 25)) + print() + + +def pcfg_demo(): + """ + A demonstration showing how a ``PCFG`` can be created and used. + """ + + from nltk import induce_pcfg, treetransforms + from nltk.corpus import treebank + from nltk.parse import pchart + + toy_pcfg1 = PCFG.fromstring( + """ + S -> NP VP [1.0] + NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] + Det -> 'the' [0.8] | 'my' [0.2] + N -> 'man' [0.5] | 'telescope' [0.5] + VP -> VP PP [0.1] | V NP [0.7] | V [0.2] + V -> 'ate' [0.35] | 'saw' [0.65] + PP -> P NP [1.0] + P -> 'with' [0.61] | 'under' [0.39] + """ + ) + + toy_pcfg2 = PCFG.fromstring( + """ + S -> NP VP [1.0] + VP -> V NP [.59] + VP -> V [.40] + VP -> VP PP [.01] + NP -> Det N [.41] + NP -> Name [.28] + NP -> NP PP [.31] + PP -> P NP [1.0] + V -> 'saw' [.21] + V -> 'ate' [.51] + V -> 'ran' [.28] + N -> 'boy' [.11] + N -> 'cookie' [.12] + N -> 'table' [.13] + N -> 'telescope' [.14] + N -> 'hill' [.5] + Name -> 'Jack' [.52] + Name -> 'Bob' [.48] + P -> 'with' [.61] + P -> 'under' [.39] + Det -> 'the' [.41] + Det -> 'a' [.31] + Det -> 'my' [.28] + """ + ) + + pcfg_prods = toy_pcfg1.productions() + + pcfg_prod = pcfg_prods[2] + print("A PCFG production:", repr(pcfg_prod)) + print(" pcfg_prod.lhs() =>", repr(pcfg_prod.lhs())) + print(" pcfg_prod.rhs() =>", repr(pcfg_prod.rhs())) + print(" pcfg_prod.prob() =>", repr(pcfg_prod.prob())) + print() + + grammar = toy_pcfg2 + print("A PCFG grammar:", repr(grammar)) + print(" grammar.start() =>", repr(grammar.start())) + print(" grammar.productions() =>", end=" ") + # Use .replace(...) is to line-wrap the output. + print(repr(grammar.productions()).replace(",", ",\n" + " " * 26)) + print() + + # extract productions from three trees and induce the PCFG + print("Induce PCFG grammar from treebank data:") + + productions = [] + item = treebank._fileids[0] + for tree in treebank.parsed_sents(item)[:3]: + # perform optional tree transformations, e.g.: + tree.collapse_unary(collapsePOS=False) + tree.chomsky_normal_form(horzMarkov=2) + + productions += tree.productions() + + S = Nonterminal("S") + grammar = induce_pcfg(S, productions) + print(grammar) + print() + + print("Parse sentence using induced grammar:") + + parser = pchart.InsideChartParser(grammar) + parser.trace(3) + + # doesn't work as tokens are different: + # sent = treebank.tokenized('wsj_0001.mrg')[0] + + sent = treebank.parsed_sents(item)[0].leaves() + print(sent) + for parse in parser.parse(sent): + print(parse) + + +def fcfg_demo(): + import nltk.data + + g = nltk.data.load("grammars/book_grammars/feat0.fcfg") + print(g) + print() + + +def dg_demo(): + """ + A demonstration showing the creation and inspection of a + ``DependencyGrammar``. 
+ """ + grammar = DependencyGrammar.fromstring( + """ + 'scratch' -> 'cats' | 'walls' + 'walls' -> 'the' + 'cats' -> 'the' + """ + ) + print(grammar) + + +def sdg_demo(): + """ + A demonstration of how to read a string representation of + a CoNLL format dependency tree. + """ + from nltk.parse import DependencyGraph + + dg = DependencyGraph( + """ + 1 Ze ze Pron Pron per|3|evofmv|nom 2 su _ _ + 2 had heb V V trans|ovt|1of2of3|ev 0 ROOT _ _ + 3 met met Prep Prep voor 8 mod _ _ + 4 haar haar Pron Pron bez|3|ev|neut|attr 5 det _ _ + 5 moeder moeder N N soort|ev|neut 3 obj1 _ _ + 6 kunnen kan V V hulp|ott|1of2of3|mv 2 vc _ _ + 7 gaan ga V V hulp|inf 6 vc _ _ + 8 winkelen winkel V V intrans|inf 11 cnj _ _ + 9 , , Punc Punc komma 8 punct _ _ + 10 zwemmen zwem V V intrans|inf 11 cnj _ _ + 11 of of Conj Conj neven 7 vc _ _ + 12 terrassen terras N N soort|mv|neut 11 cnj _ _ + 13 . . Punc Punc punt 12 punct _ _ + """ + ) + tree = dg.tree() + print(tree.pprint()) + + +def demo(): + cfg_demo() + pcfg_demo() + fcfg_demo() + dg_demo() + sdg_demo() + + +if __name__ == "__main__": + demo() + +__all__ = [ + "Nonterminal", + "nonterminals", + "CFG", + "Production", + "PCFG", + "ProbabilisticProduction", + "DependencyGrammar", + "DependencyProduction", + "ProbabilisticDependencyGrammar", + "induce_pcfg", + "read_grammar", +] diff --git a/lib/python3.10/site-packages/nltk/help.py b/lib/python3.10/site-packages/nltk/help.py new file mode 100644 index 0000000000000000000000000000000000000000..e0b5f7b876cda304a7fdaeecfbab9278113058df --- /dev/null +++ b/lib/python3.10/site-packages/nltk/help.py @@ -0,0 +1,64 @@ +# Natural Language Toolkit (NLTK) Help +# +# Copyright (C) 2001-2023 NLTK Project +# Authors: Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Provide structured access to documentation. 
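+
+For example, to look up a tag in the Penn Treebank tagset (requires the
+"tagsets" data package to be downloaded; shown with ``+SKIP`` since the
+output depends on the installed data):
+
+>>> from nltk.help import upenn_tagset
+>>> upenn_tagset('NN')    # doctest: +SKIP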
+""" + +import re +from textwrap import wrap + +from nltk.data import load + + +def brown_tagset(tagpattern=None): + _format_tagset("brown_tagset", tagpattern) + + +def claws5_tagset(tagpattern=None): + _format_tagset("claws5_tagset", tagpattern) + + +def upenn_tagset(tagpattern=None): + _format_tagset("upenn_tagset", tagpattern) + + +##################################################################### +# UTILITIES +##################################################################### + + +def _print_entries(tags, tagdict): + for tag in tags: + entry = tagdict[tag] + defn = [tag + ": " + entry[0]] + examples = wrap( + entry[1], width=75, initial_indent=" ", subsequent_indent=" " + ) + print("\n".join(defn + examples)) + + +def _format_tagset(tagset, tagpattern=None): + tagdict = load("help/tagsets/" + tagset + ".pickle") + if not tagpattern: + _print_entries(sorted(tagdict), tagdict) + elif tagpattern in tagdict: + _print_entries([tagpattern], tagdict) + else: + tagpattern = re.compile(tagpattern) + tags = [tag for tag in sorted(tagdict) if tagpattern.match(tag)] + if tags: + _print_entries(tags, tagdict) + else: + print("No matching tags found.") + + +if __name__ == "__main__": + brown_tagset(r"NN.*") + upenn_tagset(r".*\$") + claws5_tagset("UNDEFINED") + brown_tagset(r"NN") diff --git a/lib/python3.10/site-packages/nltk/internals.py b/lib/python3.10/site-packages/nltk/internals.py new file mode 100644 index 0000000000000000000000000000000000000000..b53d77da5e976c08f2cb002759e9da1044dc9bf0 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/internals.py @@ -0,0 +1,1123 @@ +# Natural Language Toolkit: Internal utility functions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# Nitin Madnani +# URL: +# For license information, see LICENSE.TXT + +import fnmatch +import locale +import os +import re +import stat +import subprocess +import sys +import textwrap +import types +import warnings +from xml.etree import ElementTree + +########################################################################## +# Java Via Command-Line +########################################################################## + +_java_bin = None +_java_options = [] +# [xx] add classpath option to config_java? +def config_java(bin=None, options=None, verbose=False): + """ + Configure nltk's java interface, by letting nltk know where it can + find the Java binary, and what extra options (if any) should be + passed to Java when it is run. + + :param bin: The full path to the Java binary. If not specified, + then nltk will search the system for a Java binary; and if + one is not found, it will raise a ``LookupError`` exception. + :type bin: str + :param options: A list of options that should be passed to the + Java binary when it is called. A common value is + ``'-Xmx512m'``, which tells Java binary to increase + the maximum heap size to 512 megabytes. If no options are + specified, then do not modify the options list. + :type options: list(str) + """ + global _java_bin, _java_options + _java_bin = find_binary( + "java", + bin, + env_vars=["JAVAHOME", "JAVA_HOME"], + verbose=verbose, + binary_names=["java.exe"], + ) + + if options is not None: + if isinstance(options, str): + options = options.split() + _java_options = list(options) + + +def java(cmd, classpath=None, stdin=None, stdout=None, stderr=None, blocking=True): + """ + Execute the given java command, by opening a subprocess that calls + Java. 
If java has not yet been configured, it will be configured + by calling ``config_java()`` with no arguments. + + :param cmd: The java command that should be called, formatted as + a list of strings. Typically, the first string will be the name + of the java class; and the remaining strings will be arguments + for that java class. + :type cmd: list(str) + + :param classpath: A ``':'`` separated list of directories, JAR + archives, and ZIP archives to search for class files. + :type classpath: str + + :param stdin: Specify the executed program's + standard input file handles, respectively. Valid values are ``subprocess.PIPE``, + an existing file descriptor (a positive integer), an existing + file object, 'pipe', 'stdout', 'devnull' and None. ``subprocess.PIPE`` indicates that a + new pipe to the child should be created. With None, no + redirection will occur; the child's file handles will be + inherited from the parent. Additionally, stderr can be + ``subprocess.STDOUT``, which indicates that the stderr data + from the applications should be captured into the same file + handle as for stdout. + + :param stdout: Specify the executed program's standard output file + handle. See ``stdin`` for valid values. + + :param stderr: Specify the executed program's standard error file + handle. See ``stdin`` for valid values. + + + :param blocking: If ``false``, then return immediately after + spawning the subprocess. In this case, the return value is + the ``Popen`` object, and not a ``(stdout, stderr)`` tuple. + + :return: If ``blocking=True``, then return a tuple ``(stdout, + stderr)``, containing the stdout and stderr outputs generated + by the java command if the ``stdout`` and ``stderr`` parameters + were set to ``subprocess.PIPE``; or None otherwise. If + ``blocking=False``, then return a ``subprocess.Popen`` object. + + :raise OSError: If the java command returns a nonzero return code. + """ + + subprocess_output_dict = { + "pipe": subprocess.PIPE, + "stdout": subprocess.STDOUT, + "devnull": subprocess.DEVNULL, + } + + stdin = subprocess_output_dict.get(stdin, stdin) + stdout = subprocess_output_dict.get(stdout, stdout) + stderr = subprocess_output_dict.get(stderr, stderr) + + if isinstance(cmd, str): + raise TypeError("cmd should be a list of strings") + + # Make sure we know where a java binary is. + if _java_bin is None: + config_java() + + # Set up the classpath. + if isinstance(classpath, str): + classpaths = [classpath] + else: + classpaths = list(classpath) + classpath = os.path.pathsep.join(classpaths) + + # Construct the full command string. + cmd = list(cmd) + cmd = ["-cp", classpath] + cmd + cmd = [_java_bin] + _java_options + cmd + + # Call java via a subprocess + p = subprocess.Popen(cmd, stdin=stdin, stdout=stdout, stderr=stderr) + if not blocking: + return p + (stdout, stderr) = p.communicate() + + # Check the return code. + if p.returncode != 0: + print(_decode_stdoutdata(stderr)) + raise OSError("Java command failed : " + str(cmd)) + + return (stdout, stderr) + + +###################################################################### +# Parsing +###################################################################### + + +class ReadError(ValueError): + """ + Exception raised by read_* functions when they fail. + :param position: The index in the input string where an error occurred. + :param expected: What was expected when an error occurred. 
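+
+    For example (``read_int`` is one of the read_* functions that raises
+    this exception):
+
+    >>> from nltk.internals import read_int, ReadError
+    >>> try:
+    ...     read_int("not a number", 0)
+    ... except ReadError as e:
+    ...     print(e)
+    Expected integer at 0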
+ """ + + def __init__(self, expected, position): + ValueError.__init__(self, expected, position) + self.expected = expected + self.position = position + + def __str__(self): + return f"Expected {self.expected} at {self.position}" + + +_STRING_START_RE = re.compile(r"[uU]?[rR]?(\"\"\"|\'\'\'|\"|\')") + + +def read_str(s, start_position): + """ + If a Python string literal begins at the specified position in the + given string, then return a tuple ``(val, end_position)`` + containing the value of the string literal and the position where + it ends. Otherwise, raise a ``ReadError``. + + :param s: A string that will be checked to see if within which a + Python string literal exists. + :type s: str + + :param start_position: The specified beginning position of the string ``s`` + to begin regex matching. + :type start_position: int + + :return: A tuple containing the matched string literal evaluated as a + string and the end position of the string literal. + :rtype: tuple(str, int) + + :raise ReadError: If the ``_STRING_START_RE`` regex doesn't return a + match in ``s`` at ``start_position``, i.e., open quote. If the + ``_STRING_END_RE`` regex doesn't return a match in ``s`` at the + end of the first match, i.e., close quote. + :raise ValueError: If an invalid string (i.e., contains an invalid + escape sequence) is passed into the ``eval``. + + :Example: + + >>> from nltk.internals import read_str + >>> read_str('"Hello", World!', 0) + ('Hello', 7) + + """ + # Read the open quote, and any modifiers. + m = _STRING_START_RE.match(s, start_position) + if not m: + raise ReadError("open quote", start_position) + quotemark = m.group(1) + + # Find the close quote. + _STRING_END_RE = re.compile(r"\\|%s" % quotemark) + position = m.end() + while True: + match = _STRING_END_RE.search(s, position) + if not match: + raise ReadError("close quote", position) + if match.group(0) == "\\": + position = match.end() + 1 + else: + break + + # Process it, using eval. Strings with invalid escape sequences + # might raise ValueError. + try: + return eval(s[start_position : match.end()]), match.end() + except ValueError as e: + raise ReadError("valid escape sequence", start_position) from e + + +_READ_INT_RE = re.compile(r"-?\d+") + + +def read_int(s, start_position): + """ + If an integer begins at the specified position in the given + string, then return a tuple ``(val, end_position)`` containing the + value of the integer and the position where it ends. Otherwise, + raise a ``ReadError``. + + :param s: A string that will be checked to see if within which a + Python integer exists. + :type s: str + + :param start_position: The specified beginning position of the string ``s`` + to begin regex matching. + :type start_position: int + + :return: A tuple containing the matched integer casted to an int, + and the end position of the int in ``s``. + :rtype: tuple(int, int) + + :raise ReadError: If the ``_READ_INT_RE`` regex doesn't return a + match in ``s`` at ``start_position``. + + :Example: + + >>> from nltk.internals import read_int + >>> read_int('42 is the answer', 0) + (42, 2) + + """ + m = _READ_INT_RE.match(s, start_position) + if not m: + raise ReadError("integer", start_position) + return int(m.group()), m.end() + + +_READ_NUMBER_VALUE = re.compile(r"-?(\d*)([.]?\d*)?") + + +def read_number(s, start_position): + """ + If an integer or float begins at the specified position in the + given string, then return a tuple ``(val, end_position)`` + containing the value of the number and the position where it ends. 
+ Otherwise, raise a ``ReadError``. + + :param s: A string that will be checked to see if within which a + Python number exists. + :type s: str + + :param start_position: The specified beginning position of the string ``s`` + to begin regex matching. + :type start_position: int + + :return: A tuple containing the matched number casted to a ``float``, + and the end position of the number in ``s``. + :rtype: tuple(float, int) + + :raise ReadError: If the ``_READ_NUMBER_VALUE`` regex doesn't return a + match in ``s`` at ``start_position``. + + :Example: + + >>> from nltk.internals import read_number + >>> read_number('Pi is 3.14159', 6) + (3.14159, 13) + + """ + m = _READ_NUMBER_VALUE.match(s, start_position) + if not m or not (m.group(1) or m.group(2)): + raise ReadError("number", start_position) + if m.group(2): + return float(m.group()), m.end() + else: + return int(m.group()), m.end() + + +###################################################################### +# Check if a method has been overridden +###################################################################### + + +def overridden(method): + """ + :return: True if ``method`` overrides some method with the same + name in a base class. This is typically used when defining + abstract base classes or interfaces, to allow subclasses to define + either of two related methods: + + >>> class EaterI: + ... '''Subclass must define eat() or batch_eat().''' + ... def eat(self, food): + ... if overridden(self.batch_eat): + ... return self.batch_eat([food])[0] + ... else: + ... raise NotImplementedError() + ... def batch_eat(self, foods): + ... return [self.eat(food) for food in foods] + + :type method: instance method + """ + if isinstance(method, types.MethodType) and method.__self__.__class__ is not None: + name = method.__name__ + funcs = [ + cls.__dict__[name] + for cls in _mro(method.__self__.__class__) + if name in cls.__dict__ + ] + return len(funcs) > 1 + else: + raise TypeError("Expected an instance method.") + + +def _mro(cls): + """ + Return the method resolution order for ``cls`` -- i.e., a list + containing ``cls`` and all its base classes, in the order in which + they would be checked by ``getattr``. For new-style classes, this + is just cls.__mro__. For classic classes, this can be obtained by + a depth-first left-to-right traversal of ``__bases__``. + """ + if isinstance(cls, type): + return cls.__mro__ + else: + mro = [cls] + for base in cls.__bases__: + mro.extend(_mro(base)) + return mro + + +###################################################################### +# Deprecation decorator & base class +###################################################################### +# [xx] dedent msg first if it comes from a docstring. + + +def _add_epytext_field(obj, field, message): + """Add an epytext @field to a given object's docstring.""" + indent = "" + # If we already have a docstring, then add a blank line to separate + # it from the new field, and check its indentation. + if obj.__doc__: + obj.__doc__ = obj.__doc__.rstrip() + "\n\n" + indents = re.findall(r"(?<=\n)[ ]+(?!\s)", obj.__doc__.expandtabs()) + if indents: + indent = min(indents) + # If we don't have a docstring, add an empty one. + else: + obj.__doc__ = "" + + obj.__doc__ += textwrap.fill( + f"@{field}: {message}", + initial_indent=indent, + subsequent_indent=indent + " ", + ) + + +def deprecated(message): + """ + A decorator used to mark functions as deprecated. This will cause + a warning to be printed the when the function is used. 
Usage: + + >>> from nltk.internals import deprecated + >>> @deprecated('Use foo() instead') + ... def bar(x): + ... print(x/10) + + """ + + def decorator(func): + msg = f"Function {func.__name__}() has been deprecated. {message}" + msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ") + + def newFunc(*args, **kwargs): + warnings.warn(msg, category=DeprecationWarning, stacklevel=2) + return func(*args, **kwargs) + + # Copy the old function's name, docstring, & dict + newFunc.__dict__.update(func.__dict__) + newFunc.__name__ = func.__name__ + newFunc.__doc__ = func.__doc__ + newFunc.__deprecated__ = True + # Add a @deprecated field to the docstring. + _add_epytext_field(newFunc, "deprecated", message) + return newFunc + + return decorator + + +class Deprecated: + """ + A base class used to mark deprecated classes. A typical usage is to + alert users that the name of a class has changed: + + >>> from nltk.internals import Deprecated + >>> class NewClassName: + ... pass # All logic goes here. + ... + >>> class OldClassName(Deprecated, NewClassName): + ... "Use NewClassName instead." + + The docstring of the deprecated class will be used in the + deprecation warning message. + """ + + def __new__(cls, *args, **kwargs): + # Figure out which class is the deprecated one. + dep_cls = None + for base in _mro(cls): + if Deprecated in base.__bases__: + dep_cls = base + break + assert dep_cls, "Unable to determine which base is deprecated." + + # Construct an appropriate warning. + doc = dep_cls.__doc__ or "".strip() + # If there's a @deprecated field, strip off the field marker. + doc = re.sub(r"\A\s*@deprecated:", r"", doc) + # Strip off any indentation. + doc = re.sub(r"(?m)^\s*", "", doc) + # Construct a 'name' string. + name = "Class %s" % dep_cls.__name__ + if cls != dep_cls: + name += " (base class for %s)" % cls.__name__ + # Put it all together. + msg = f"{name} has been deprecated. {doc}" + # Wrap it. + msg = "\n" + textwrap.fill(msg, initial_indent=" ", subsequent_indent=" ") + warnings.warn(msg, category=DeprecationWarning, stacklevel=2) + # Do the actual work of __new__. + return object.__new__(cls) + + +########################################################################## +# COUNTER, FOR UNIQUE NAMING +########################################################################## + + +class Counter: + """ + A counter that auto-increments each time its value is read. + """ + + def __init__(self, initial_value=0): + self._value = initial_value + + def get(self): + self._value += 1 + return self._value + + +########################################################################## +# Search for files/binaries +########################################################################## + + +def find_file_iter( + filename, + env_vars=(), + searchpath=(), + file_names=None, + url=None, + verbose=False, + finding_dir=False, +): + """ + Search for a file to be used by nltk. + + :param filename: The name or path of the file. + :param env_vars: A list of environment variable names to check. + :param file_names: A list of alternative file names to check. + :param searchpath: List of directories to search. + :param url: URL presented to user for download help. + :param verbose: Whether or not to print path when a file is found. 
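+
+    A usage sketch (the file name, environment variable and search path
+    below are purely illustrative and depend on the local installation)::
+
+        path = next(find_file_iter('gs', env_vars=('GHOSTSCRIPT_HOME',),
+                                   searchpath=('/usr/local/bin',)))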
+ """ + file_names = [filename] + (file_names or []) + assert isinstance(filename, str) + assert not isinstance(file_names, str) + assert not isinstance(searchpath, str) + if isinstance(env_vars, str): + env_vars = env_vars.split() + yielded = False + + # File exists, no magic + for alternative in file_names: + path_to_file = os.path.join(filename, alternative) + if os.path.isfile(path_to_file): + if verbose: + print(f"[Found {filename}: {path_to_file}]") + yielded = True + yield path_to_file + # Check the bare alternatives + if os.path.isfile(alternative): + if verbose: + print(f"[Found {filename}: {alternative}]") + yielded = True + yield alternative + # Check if the alternative is inside a 'file' directory + path_to_file = os.path.join(filename, "file", alternative) + if os.path.isfile(path_to_file): + if verbose: + print(f"[Found {filename}: {path_to_file}]") + yielded = True + yield path_to_file + + # Check environment variables + for env_var in env_vars: + if env_var in os.environ: + if finding_dir: # This is to file a directory instead of file + yielded = True + yield os.environ[env_var] + + for env_dir in os.environ[env_var].split(os.pathsep): + # Check if the environment variable contains a direct path to the bin + if os.path.isfile(env_dir): + if verbose: + print(f"[Found {filename}: {env_dir}]") + yielded = True + yield env_dir + # Check if the possible bin names exist inside the environment variable directories + for alternative in file_names: + path_to_file = os.path.join(env_dir, alternative) + if os.path.isfile(path_to_file): + if verbose: + print(f"[Found {filename}: {path_to_file}]") + yielded = True + yield path_to_file + # Check if the alternative is inside a 'file' directory + # path_to_file = os.path.join(env_dir, 'file', alternative) + + # Check if the alternative is inside a 'bin' directory + path_to_file = os.path.join(env_dir, "bin", alternative) + + if os.path.isfile(path_to_file): + if verbose: + print(f"[Found {filename}: {path_to_file}]") + yielded = True + yield path_to_file + + # Check the path list. + for directory in searchpath: + for alternative in file_names: + path_to_file = os.path.join(directory, alternative) + if os.path.isfile(path_to_file): + yielded = True + yield path_to_file + + # If we're on a POSIX system, then try using the 'which' command + # to find the file. + if os.name == "posix": + for alternative in file_names: + try: + p = subprocess.Popen( + ["which", alternative], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout, stderr = p.communicate() + path = _decode_stdoutdata(stdout).strip() + if path.endswith(alternative) and os.path.exists(path): + if verbose: + print(f"[Found {filename}: {path}]") + yielded = True + yield path + except (KeyboardInterrupt, SystemExit, OSError): + raise + finally: + pass + + if not yielded: + msg = ( + "NLTK was unable to find the %s file!" + "\nUse software specific " + "configuration parameters" % filename + ) + if env_vars: + msg += " or set the %s environment variable" % env_vars[0] + msg += "." 
+ if searchpath: + msg += "\n\n Searched in:" + msg += "".join("\n - %s" % d for d in searchpath) + if url: + msg += f"\n\n For more information on {filename}, see:\n <{url}>" + div = "=" * 75 + raise LookupError(f"\n\n{div}\n{msg}\n{div}") + + +def find_file( + filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False +): + return next( + find_file_iter(filename, env_vars, searchpath, file_names, url, verbose) + ) + + +def find_dir( + filename, env_vars=(), searchpath=(), file_names=None, url=None, verbose=False +): + return next( + find_file_iter( + filename, env_vars, searchpath, file_names, url, verbose, finding_dir=True + ) + ) + + +def find_binary_iter( + name, + path_to_bin=None, + env_vars=(), + searchpath=(), + binary_names=None, + url=None, + verbose=False, +): + """ + Search for a file to be used by nltk. + + :param name: The name or path of the file. + :param path_to_bin: The user-supplied binary location (deprecated) + :param env_vars: A list of environment variable names to check. + :param file_names: A list of alternative file names to check. + :param searchpath: List of directories to search. + :param url: URL presented to user for download help. + :param verbose: Whether or not to print path when a file is found. + """ + yield from find_file_iter( + path_to_bin or name, env_vars, searchpath, binary_names, url, verbose + ) + + +def find_binary( + name, + path_to_bin=None, + env_vars=(), + searchpath=(), + binary_names=None, + url=None, + verbose=False, +): + return next( + find_binary_iter( + name, path_to_bin, env_vars, searchpath, binary_names, url, verbose + ) + ) + + +def find_jar_iter( + name_pattern, + path_to_jar=None, + env_vars=(), + searchpath=(), + url=None, + verbose=False, + is_regex=False, +): + """ + Search for a jar that is used by nltk. + + :param name_pattern: The name of the jar file + :param path_to_jar: The user-supplied jar location, or None. + :param env_vars: A list of environment variable names to check + in addition to the CLASSPATH variable which is + checked by default. + :param searchpath: List of directories to search. + :param is_regex: Whether name is a regular expression. + """ + + assert isinstance(name_pattern, str) + assert not isinstance(searchpath, str) + if isinstance(env_vars, str): + env_vars = env_vars.split() + yielded = False + + # Make sure we check the CLASSPATH first + env_vars = ["CLASSPATH"] + list(env_vars) + + # If an explicit location was given, then check it, and yield it if + # it's present; otherwise, complain. 
+ if path_to_jar is not None: + if os.path.isfile(path_to_jar): + yielded = True + yield path_to_jar + else: + raise LookupError( + f"Could not find {name_pattern} jar file at {path_to_jar}" + ) + + # Check environment variables + for env_var in env_vars: + if env_var in os.environ: + if env_var == "CLASSPATH": + classpath = os.environ["CLASSPATH"] + for cp in classpath.split(os.path.pathsep): + cp = os.path.expanduser(cp) + if os.path.isfile(cp): + filename = os.path.basename(cp) + if ( + is_regex + and re.match(name_pattern, filename) + or (not is_regex and filename == name_pattern) + ): + if verbose: + print(f"[Found {name_pattern}: {cp}]") + yielded = True + yield cp + # The case where user put directory containing the jar file in the classpath + if os.path.isdir(cp): + if not is_regex: + if os.path.isfile(os.path.join(cp, name_pattern)): + if verbose: + print(f"[Found {name_pattern}: {cp}]") + yielded = True + yield os.path.join(cp, name_pattern) + else: + # Look for file using regular expression + for file_name in os.listdir(cp): + if re.match(name_pattern, file_name): + if verbose: + print( + "[Found %s: %s]" + % ( + name_pattern, + os.path.join(cp, file_name), + ) + ) + yielded = True + yield os.path.join(cp, file_name) + + else: + jar_env = os.path.expanduser(os.environ[env_var]) + jar_iter = ( + ( + os.path.join(jar_env, path_to_jar) + for path_to_jar in os.listdir(jar_env) + ) + if os.path.isdir(jar_env) + else (jar_env,) + ) + for path_to_jar in jar_iter: + if os.path.isfile(path_to_jar): + filename = os.path.basename(path_to_jar) + if ( + is_regex + and re.match(name_pattern, filename) + or (not is_regex and filename == name_pattern) + ): + if verbose: + print(f"[Found {name_pattern}: {path_to_jar}]") + yielded = True + yield path_to_jar + + # Check the path list. + for directory in searchpath: + if is_regex: + for filename in os.listdir(directory): + path_to_jar = os.path.join(directory, filename) + if os.path.isfile(path_to_jar): + if re.match(name_pattern, filename): + if verbose: + print(f"[Found {filename}: {path_to_jar}]") + yielded = True + yield path_to_jar + else: + path_to_jar = os.path.join(directory, name_pattern) + if os.path.isfile(path_to_jar): + if verbose: + print(f"[Found {name_pattern}: {path_to_jar}]") + yielded = True + yield path_to_jar + + if not yielded: + # If nothing was found, raise an error + msg = "NLTK was unable to find %s!" 
% name_pattern + if env_vars: + msg += " Set the %s environment variable" % env_vars[0] + msg = textwrap.fill(msg + ".", initial_indent=" ", subsequent_indent=" ") + if searchpath: + msg += "\n\n Searched in:" + msg += "".join("\n - %s" % d for d in searchpath) + if url: + msg += "\n\n For more information, on {}, see:\n <{}>".format( + name_pattern, + url, + ) + div = "=" * 75 + raise LookupError(f"\n\n{div}\n{msg}\n{div}") + + +def find_jar( + name_pattern, + path_to_jar=None, + env_vars=(), + searchpath=(), + url=None, + verbose=False, + is_regex=False, +): + return next( + find_jar_iter( + name_pattern, path_to_jar, env_vars, searchpath, url, verbose, is_regex + ) + ) + + +def find_jars_within_path(path_to_jars): + return [ + os.path.join(root, filename) + for root, dirnames, filenames in os.walk(path_to_jars) + for filename in fnmatch.filter(filenames, "*.jar") + ] + + +def _decode_stdoutdata(stdoutdata): + """Convert data read from stdout/stderr to unicode""" + if not isinstance(stdoutdata, bytes): + return stdoutdata + + encoding = getattr(sys.__stdout__, "encoding", locale.getpreferredencoding()) + if encoding is None: + return stdoutdata.decode() + return stdoutdata.decode(encoding) + + +########################################################################## +# Import Stdlib Module +########################################################################## + + +def import_from_stdlib(module): + """ + When python is run from within the nltk/ directory tree, the + current directory is included at the beginning of the search path. + Unfortunately, that means that modules within nltk can sometimes + shadow standard library modules. As an example, the stdlib + 'inspect' module will attempt to import the stdlib 'tokenize' + module, but will instead end up importing NLTK's 'tokenize' module + instead (causing the import to fail). + """ + old_path = sys.path + sys.path = [d for d in sys.path if d not in ("", ".")] + m = __import__(module) + sys.path = old_path + return m + + +########################################################################## +# Wrapper for ElementTree Elements +########################################################################## + + +class ElementWrapper: + """ + A wrapper around ElementTree Element objects whose main purpose is + to provide nicer __repr__ and __str__ methods. In addition, any + of the wrapped Element's methods that return other Element objects + are overridden to wrap those values before returning them. + + This makes Elements more convenient to work with in + interactive sessions and doctests, at the expense of some + efficiency. + """ + + # Prevent double-wrapping: + def __new__(cls, etree): + """ + Create and return a wrapper around a given Element object. + If ``etree`` is an ``ElementWrapper``, then ``etree`` is + returned as-is. + """ + if isinstance(etree, ElementWrapper): + return etree + else: + return object.__new__(ElementWrapper) + + def __init__(self, etree): + r""" + Initialize a new Element wrapper for ``etree``. + + If ``etree`` is a string, then it will be converted to an + Element object using ``ElementTree.fromstring()`` first: + + >>> ElementWrapper("") + \n"> + + """ + if isinstance(etree, str): + etree = ElementTree.fromstring(etree) + self.__dict__["_etree"] = etree + + def unwrap(self): + """ + Return the Element object wrapped by this wrapper. 
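+
+        A short round-trip sketch (the XML snippet is illustrative)::
+
+            wrapped = ElementWrapper('<a><b/></a>')  # parsed via ElementTree.fromstring
+            element = wrapped.unwrap()               # the underlying Element object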
+ """ + return self._etree + + ##//////////////////////////////////////////////////////////// + # { String Representation + ##//////////////////////////////////////////////////////////// + + def __repr__(self): + s = ElementTree.tostring(self._etree, encoding="utf8").decode("utf8") + if len(s) > 60: + e = s.rfind("<") + if (len(s) - e) > 30: + e = -20 + s = f"{s[:30]}...{s[e:]}" + return "" % s + + def __str__(self): + """ + :return: the result of applying ``ElementTree.tostring()`` to + the wrapped Element object. + """ + return ( + ElementTree.tostring(self._etree, encoding="utf8").decode("utf8").rstrip() + ) + + ##//////////////////////////////////////////////////////////// + # { Element interface Delegation (pass-through) + ##//////////////////////////////////////////////////////////// + + def __getattr__(self, attrib): + return getattr(self._etree, attrib) + + def __setattr__(self, attr, value): + return setattr(self._etree, attr, value) + + def __delattr__(self, attr): + return delattr(self._etree, attr) + + def __setitem__(self, index, element): + self._etree[index] = element + + def __delitem__(self, index): + del self._etree[index] + + def __setslice__(self, start, stop, elements): + self._etree[start:stop] = elements + + def __delslice__(self, start, stop): + del self._etree[start:stop] + + def __len__(self): + return len(self._etree) + + ##//////////////////////////////////////////////////////////// + # { Element interface Delegation (wrap result) + ##//////////////////////////////////////////////////////////// + + def __getitem__(self, index): + return ElementWrapper(self._etree[index]) + + def __getslice__(self, start, stop): + return [ElementWrapper(elt) for elt in self._etree[start:stop]] + + def getchildren(self): + return [ElementWrapper(elt) for elt in self._etree] + + def getiterator(self, tag=None): + return (ElementWrapper(elt) for elt in self._etree.getiterator(tag)) + + def makeelement(self, tag, attrib): + return ElementWrapper(self._etree.makeelement(tag, attrib)) + + def find(self, path): + elt = self._etree.find(path) + if elt is None: + return elt + else: + return ElementWrapper(elt) + + def findall(self, path): + return [ElementWrapper(elt) for elt in self._etree.findall(path)] + + +###################################################################### +# Helper for Handling Slicing +###################################################################### + + +def slice_bounds(sequence, slice_obj, allow_step=False): + """ + Given a slice, return the corresponding (start, stop) bounds, + taking into account None indices and negative indices. The + following guarantees are made for the returned start and stop values: + + - 0 <= start <= len(sequence) + - 0 <= stop <= len(sequence) + - start <= stop + + :raise ValueError: If ``slice_obj.step`` is not None. + :param allow_step: If true, then the slice object may have a + non-None step. If it does, then return a tuple + (start, stop, step). + """ + start, stop = (slice_obj.start, slice_obj.stop) + + # If allow_step is true, then include the step in our return + # value tuple. + if allow_step: + step = slice_obj.step + if step is None: + step = 1 + # Use a recursive call without allow_step to find the slice + # bounds. If step is negative, then the roles of start and + # stop (in terms of default values, etc), are swapped. 
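+        # For example (hypothetical values): for a sequence of length 10,
+        # slice(8, 2, -1) is resolved via slice_bounds(sequence, slice(2, 8)),
+        # giving (2, 8, -1) -- i.e. start and stop swapped before recursing.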
+ if step < 0: + start, stop = slice_bounds(sequence, slice(stop, start)) + else: + start, stop = slice_bounds(sequence, slice(start, stop)) + return start, stop, step + + # Otherwise, make sure that no non-default step value is used. + elif slice_obj.step not in (None, 1): + raise ValueError( + "slices with steps are not supported by %s" % sequence.__class__.__name__ + ) + + # Supply default offsets. + if start is None: + start = 0 + if stop is None: + stop = len(sequence) + + # Handle negative indices. + if start < 0: + start = max(0, len(sequence) + start) + if stop < 0: + stop = max(0, len(sequence) + stop) + + # Make sure stop doesn't go past the end of the list. Note that + # we avoid calculating len(sequence) if possible, because for lazy + # sequences, calculating the length of a sequence can be expensive. + if stop > 0: + try: + sequence[stop - 1] + except IndexError: + stop = len(sequence) + + # Make sure start isn't past stop. + start = min(start, stop) + + # That's all folks! + return start, stop + + +###################################################################### +# Permission Checking +###################################################################### + + +def is_writable(path): + # Ensure that it exists. + if not os.path.exists(path): + return False + + # If we're on a posix system, check its permissions. + if hasattr(os, "getuid"): + statdata = os.stat(path) + perm = stat.S_IMODE(statdata.st_mode) + # is it world-writable? + if perm & 0o002: + return True + # do we own it? + elif statdata.st_uid == os.getuid() and (perm & 0o200): + return True + # are we in a group that can write to it? + elif (statdata.st_gid in [os.getgid()] + os.getgroups()) and (perm & 0o020): + return True + # otherwise, we can't write to it. + else: + return False + + # Otherwise, we'll assume it's writable. + # [xx] should we do other checks on other platforms? + return True + + +###################################################################### +# NLTK Error reporting +###################################################################### + + +def raise_unorderable_types(ordering, a, b): + raise TypeError( + "unorderable types: %s() %s %s()" + % (type(a).__name__, ordering, type(b).__name__) + ) diff --git a/lib/python3.10/site-packages/nltk/jsontags.py b/lib/python3.10/site-packages/nltk/jsontags.py new file mode 100644 index 0000000000000000000000000000000000000000..58d73bf138d07236bc38979bd69266c5972b62b2 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/jsontags.py @@ -0,0 +1,65 @@ +# Natural Language Toolkit: JSON Encoder/Decoder Helpers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Xu +# +# URL: +# For license information, see LICENSE.TXT + +""" +Register JSON tags, so the nltk data loader knows what module and class to look for. + +NLTK uses simple '!' tags to mark the types of objects, but the fully-qualified +"tag:nltk.org,2011:" prefix is also accepted in case anyone ends up +using it. +""" + +import json + +json_tags = {} + +TAG_PREFIX = "!" + + +def register_tag(cls): + """ + Decorates a class to register it's json tag. 
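+
+    A minimal sketch of the expected protocol (the class and tag name are
+    illustrative)::
+
+        @register_tag
+        class Widget:
+            json_tag = 'Widget'
+
+            def encode_json_obj(self):
+                return {'size': 1}
+
+            @classmethod
+            def decode_json_obj(cls, obj):
+                return cls()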
+ """ + json_tags[TAG_PREFIX + getattr(cls, "json_tag")] = cls + return cls + + +class JSONTaggedEncoder(json.JSONEncoder): + def default(self, obj): + obj_tag = getattr(obj, "json_tag", None) + if obj_tag is None: + return super().default(obj) + obj_tag = TAG_PREFIX + obj_tag + obj = obj.encode_json_obj() + return {obj_tag: obj} + + +class JSONTaggedDecoder(json.JSONDecoder): + def decode(self, s): + return self.decode_obj(super().decode(s)) + + @classmethod + def decode_obj(cls, obj): + # Decode nested objects first. + if isinstance(obj, dict): + obj = {key: cls.decode_obj(val) for (key, val) in obj.items()} + elif isinstance(obj, list): + obj = list(cls.decode_obj(val) for val in obj) + # Check if we have a tagged object. + if not isinstance(obj, dict) or len(obj) != 1: + return obj + obj_tag = next(iter(obj.keys())) + if not obj_tag.startswith("!"): + return obj + if obj_tag not in json_tags: + raise ValueError("Unknown tag", obj_tag) + obj_cls = json_tags[obj_tag] + return obj_cls.decode_json_obj(obj[obj_tag]) + + +__all__ = ["register_tag", "json_tags", "JSONTaggedEncoder", "JSONTaggedDecoder"] diff --git a/lib/python3.10/site-packages/nltk/langnames.py b/lib/python3.10/site-packages/nltk/langnames.py new file mode 100644 index 0000000000000000000000000000000000000000..b7fa6b40a4b381b4b2c4f3ff42ee2450f3849465 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/langnames.py @@ -0,0 +1,730 @@ +# Natural Language Toolkit: Language Codes +# +# Copyright (C) 2022-2023 NLTK Project +# Author: Eric Kafe +# URL: +# For license information, see LICENSE.TXT +# +# iso639-3 language codes (C) https://iso639-3.sil.org/ + +""" +Translate between language names and language codes. + +The iso639-3 language codes were downloaded from the registration authority at +https://iso639-3.sil.org/ + +The iso639-3 codeset is evolving, so retired language codes are kept in the +"iso639retired" dictionary, which is used as fallback by the wrapper functions +"langname" and "langcode", in order to support the lookup of retired codes. + +The "langcode" function returns the current iso639-3 code if there is one, +and falls back to the retired code otherwise. 
As specified by BCP-47, +it returns the shortest (2-letter) code by default, but 3-letter codes +are also available: + + >>> import nltk.langnames as lgn + >>> lgn.langname('fri') #'fri' is a retired code + 'Western Frisian' + + The current code is different from the retired one: + >>> lgn.langcode('Western Frisian') + 'fy' + + >>> lgn.langcode('Western Frisian', typ = 3) + 'fry' + +""" + +import re +from warnings import warn + +from nltk.corpus import bcp47 + +codepattern = re.compile("[a-z][a-z][a-z]?") + + +def langname(tag, typ="full"): + """ + Convert a composite BCP-47 tag to a language name + + >>> from nltk.langnames import langname + >>> langname('ca-Latn-ES-valencia') + 'Catalan: Latin: Spain: Valencian' + + >>> langname('ca-Latn-ES-valencia', typ="short") + 'Catalan' + """ + tags = tag.split("-") + code = tags[0].lower() + if codepattern.fullmatch(code): + if code in iso639retired: # retired codes + return iso639retired[code] + elif code in iso639short: # 3-letter codes + code2 = iso639short[code] # convert to 2-letter code + warn(f"Shortening {code!r} to {code2!r}", stacklevel=2) + tag = "-".join([code2] + tags[1:]) + name = bcp47.name(tag) # parse according to BCP-47 + if typ == "full": + return name # include all subtags + elif name: + return name.split(":")[0] # only the language subtag + else: + warn(f"Could not find code in {code!r}", stacklevel=2) + + +def langcode(name, typ=2): + """ + Convert language name to iso639-3 language code. Returns the short 2-letter + code by default, if one is available, and the 3-letter code otherwise: + + >>> from nltk.langnames import langcode + >>> langcode('Modern Greek (1453-)') + 'el' + + Specify 'typ=3' to get the 3-letter code: + + >>> langcode('Modern Greek (1453-)', typ=3) + 'ell' + """ + if name in bcp47.langcode: + code = bcp47.langcode[name] + if typ == 3 and code in iso639long: + code = iso639long[code] # convert to 3-letter code + return code + elif name in iso639code_retired: + return iso639code_retired[name] + else: + warn(f"Could not find language in {name!r}", stacklevel=2) + + +# ======================================================================= +# Translate betwwen Wikidata Q-codes and BCP-47 codes or names +# ....................................................................... + + +def tag2q(tag): + """ + Convert BCP-47 tag to Wikidata Q-code + + >>> tag2q('nds-u-sd-demv') + 'Q4289225' + """ + return bcp47.wiki_q[tag] + + +def q2tag(qcode): + """ + Convert Wikidata Q-code to BCP-47 tag + + >>> q2tag('Q4289225') + 'nds-u-sd-demv' + """ + return wiki_bcp47[qcode] + + +def q2name(qcode, typ="full"): + """ + Convert Wikidata Q-code to BCP-47 (full or short) language name + + >>> q2name('Q4289225') + 'Low German: Mecklenburg-Vorpommern' + + >>> q2name('Q4289225', "short") + 'Low German' + """ + return langname(q2tag(qcode), typ) + + +def lang2q(name): + """ + Convert simple language name to Wikidata Q-code + + >>> lang2q('Low German') + 'Q25433' + """ + return tag2q(langcode(name)) + + +# ====================================================================== +# Data dictionaries +# ...................................................................... 
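+
+# A small illustration of how the tables below relate (the codes are taken
+# from the dictionaries defined in this section):
+#
+#     iso639short["fry"]   -> "fy"               (3-letter to 2-letter)
+#     iso639long["fy"]     -> "fry"              (inverse mapping)
+#     iso639retired["fri"] -> "Western Frisian"  (retired code fallback)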
+ + +def inverse_dict(dic): + """Return inverse mapping, but only if it is bijective""" + if len(dic.keys()) == len(set(dic.values())): + return {val: key for (key, val) in dic.items()} + else: + warn("This dictionary has no bijective inverse mapping.") + + +bcp47.load_wiki_q() # Wikidata conversion table needs to be loaded explicitly +wiki_bcp47 = inverse_dict(bcp47.wiki_q) + +iso639short = { + "aar": "aa", + "abk": "ab", + "afr": "af", + "aka": "ak", + "amh": "am", + "ara": "ar", + "arg": "an", + "asm": "as", + "ava": "av", + "ave": "ae", + "aym": "ay", + "aze": "az", + "bak": "ba", + "bam": "bm", + "bel": "be", + "ben": "bn", + "bis": "bi", + "bod": "bo", + "bos": "bs", + "bre": "br", + "bul": "bg", + "cat": "ca", + "ces": "cs", + "cha": "ch", + "che": "ce", + "chu": "cu", + "chv": "cv", + "cor": "kw", + "cos": "co", + "cre": "cr", + "cym": "cy", + "dan": "da", + "deu": "de", + "div": "dv", + "dzo": "dz", + "ell": "el", + "eng": "en", + "epo": "eo", + "est": "et", + "eus": "eu", + "ewe": "ee", + "fao": "fo", + "fas": "fa", + "fij": "fj", + "fin": "fi", + "fra": "fr", + "fry": "fy", + "ful": "ff", + "gla": "gd", + "gle": "ga", + "glg": "gl", + "glv": "gv", + "grn": "gn", + "guj": "gu", + "hat": "ht", + "hau": "ha", + "hbs": "sh", + "heb": "he", + "her": "hz", + "hin": "hi", + "hmo": "ho", + "hrv": "hr", + "hun": "hu", + "hye": "hy", + "ibo": "ig", + "ido": "io", + "iii": "ii", + "iku": "iu", + "ile": "ie", + "ina": "ia", + "ind": "id", + "ipk": "ik", + "isl": "is", + "ita": "it", + "jav": "jv", + "jpn": "ja", + "kal": "kl", + "kan": "kn", + "kas": "ks", + "kat": "ka", + "kau": "kr", + "kaz": "kk", + "khm": "km", + "kik": "ki", + "kin": "rw", + "kir": "ky", + "kom": "kv", + "kon": "kg", + "kor": "ko", + "kua": "kj", + "kur": "ku", + "lao": "lo", + "lat": "la", + "lav": "lv", + "lim": "li", + "lin": "ln", + "lit": "lt", + "ltz": "lb", + "lub": "lu", + "lug": "lg", + "mah": "mh", + "mal": "ml", + "mar": "mr", + "mkd": "mk", + "mlg": "mg", + "mlt": "mt", + "mon": "mn", + "mri": "mi", + "msa": "ms", + "mya": "my", + "nau": "na", + "nav": "nv", + "nbl": "nr", + "nde": "nd", + "ndo": "ng", + "nep": "ne", + "nld": "nl", + "nno": "nn", + "nob": "nb", + "nor": "no", + "nya": "ny", + "oci": "oc", + "oji": "oj", + "ori": "or", + "orm": "om", + "oss": "os", + "pan": "pa", + "pli": "pi", + "pol": "pl", + "por": "pt", + "pus": "ps", + "que": "qu", + "roh": "rm", + "ron": "ro", + "run": "rn", + "rus": "ru", + "sag": "sg", + "san": "sa", + "sin": "si", + "slk": "sk", + "slv": "sl", + "sme": "se", + "smo": "sm", + "sna": "sn", + "snd": "sd", + "som": "so", + "sot": "st", + "spa": "es", + "sqi": "sq", + "srd": "sc", + "srp": "sr", + "ssw": "ss", + "sun": "su", + "swa": "sw", + "swe": "sv", + "tah": "ty", + "tam": "ta", + "tat": "tt", + "tel": "te", + "tgk": "tg", + "tgl": "tl", + "tha": "th", + "tir": "ti", + "ton": "to", + "tsn": "tn", + "tso": "ts", + "tuk": "tk", + "tur": "tr", + "twi": "tw", + "uig": "ug", + "ukr": "uk", + "urd": "ur", + "uzb": "uz", + "ven": "ve", + "vie": "vi", + "vol": "vo", + "wln": "wa", + "wol": "wo", + "xho": "xh", + "yid": "yi", + "yor": "yo", + "zha": "za", + "zho": "zh", + "zul": "zu", +} + + +iso639retired = { + "fri": "Western Frisian", + "auv": "Auvergnat", + "gsc": "Gascon", + "lms": "Limousin", + "lnc": "Languedocien", + "prv": "Provençal", + "amd": "Amapá Creole", + "bgh": "Bogan", + "bnh": "Banawá", + "bvs": "Belgian Sign Language", + "ccy": "Southern Zhuang", + "cit": "Chittagonian", + "flm": "Falam Chin", + "jap": "Jaruára", + "kob": "Kohoroxitari", + "mob": 
"Moinba", + "mzf": "Aiku", + "nhj": "Tlalitzlipa Nahuatl", + "nhs": "Southeastern Puebla Nahuatl", + "occ": "Occidental", + "tmx": "Tomyang", + "tot": "Patla-Chicontla Totonac", + "xmi": "Miarrã", + "yib": "Yinglish", + "ztc": "Lachirioag Zapotec", + "atf": "Atuence", + "bqe": "Navarro-Labourdin Basque", + "bsz": "Souletin Basque", + "aex": "Amerax", + "ahe": "Ahe", + "aiz": "Aari", + "akn": "Amikoana", + "arf": "Arafundi", + "azr": "Adzera", + "bcx": "Pamona", + "bii": "Bisu", + "bke": "Bengkulu", + "blu": "Hmong Njua", + "boc": "Bakung Kenyah", + "bsd": "Sarawak Bisaya", + "bwv": "Bahau River Kenyah", + "bxt": "Buxinhua", + "byu": "Buyang", + "ccx": "Northern Zhuang", + "cru": "Carútana", + "dat": "Darang Deng", + "dyk": "Land Dayak", + "eni": "Enim", + "fiz": "Izere", + "gen": "Geman Deng", + "ggh": "Garreh-Ajuran", + "itu": "Itutang", + "kds": "Lahu Shi", + "knh": "Kayan River Kenyah", + "krg": "North Korowai", + "krq": "Krui", + "kxg": "Katingan", + "lmt": "Lematang", + "lnt": "Lintang", + "lod": "Berawan", + "mbg": "Northern Nambikuára", + "mdo": "Southwest Gbaya", + "mhv": "Arakanese", + "miv": "Mimi", + "mqd": "Madang", + "nky": "Khiamniungan Naga", + "nxj": "Nyadu", + "ogn": "Ogan", + "ork": "Orokaiva", + "paj": "Ipeka-Tapuia", + "pec": "Southern Pesisir", + "pen": "Penesak", + "plm": "Palembang", + "poj": "Lower Pokomo", + "pun": "Pubian", + "rae": "Ranau", + "rjb": "Rajbanshi", + "rws": "Rawas", + "sdd": "Semendo", + "sdi": "Sindang Kelingi", + "skl": "Selako", + "slb": "Kahumamahon Saluan", + "srj": "Serawai", + "suf": "Tarpia", + "suh": "Suba", + "suu": "Sungkai", + "szk": "Sizaki", + "tle": "Southern Marakwet", + "tnj": "Tanjong", + "ttx": "Tutong 1", + "ubm": "Upper Baram Kenyah", + "vky": "Kayu Agung", + "vmo": "Muko-Muko", + "wre": "Ware", + "xah": "Kahayan", + "xkm": "Mahakam Kenyah", + "xuf": "Kunfal", + "yio": "Dayao Yi", + "ymj": "Muji Yi", + "ypl": "Pula Yi", + "ypw": "Puwa Yi", + "ywm": "Wumeng Yi", + "yym": "Yuanjiang-Mojiang Yi", + "mly": "Malay (individual language)", + "muw": "Mundari", + "xst": "Silt'e", + "ope": "Old Persian", + "scc": "Serbian", + "scr": "Croatian", + "xsk": "Sakan", + "mol": "Moldavian", + "aay": "Aariya", + "acc": "Cubulco Achí", + "cbm": "Yepocapa Southwestern Cakchiquel", + "chs": "Chumash", + "ckc": "Northern Cakchiquel", + "ckd": "South Central Cakchiquel", + "cke": "Eastern Cakchiquel", + "ckf": "Southern Cakchiquel", + "cki": "Santa María De Jesús Cakchiquel", + "ckj": "Santo Domingo Xenacoj Cakchiquel", + "ckk": "Acatenango Southwestern Cakchiquel", + "ckw": "Western Cakchiquel", + "cnm": "Ixtatán Chuj", + "cti": "Tila Chol", + "cun": "Cunén Quiché", + "eml": "Emiliano-Romagnolo", + "eur": "Europanto", + "gmo": "Gamo-Gofa-Dawro", + "hsf": "Southeastern Huastec", + "hva": "San Luís Potosí Huastec", + "ixi": "Nebaj Ixil", + "ixj": "Chajul Ixil", + "jai": "Western Jacalteco", + "mms": "Southern Mam", + "mpf": "Tajumulco Mam", + "mtz": "Tacanec", + "mvc": "Central Mam", + "mvj": "Todos Santos Cuchumatán Mam", + "poa": "Eastern Pokomam", + "pob": "Western Pokomchí", + "pou": "Southern Pokomam", + "ppv": "Papavô", + "quj": "Joyabaj Quiché", + "qut": "West Central Quiché", + "quu": "Eastern Quiché", + "qxi": "San Andrés Quiché", + "sic": "Malinguat", + "stc": "Santa Cruz", + "tlz": "Toala'", + "tzb": "Bachajón Tzeltal", + "tzc": "Chamula Tzotzil", + "tze": "Chenalhó Tzotzil", + "tzs": "San Andrés Larrainzar Tzotzil", + "tzt": "Western Tzutujil", + "tzu": "Huixtán Tzotzil", + "tzz": "Zinacantán Tzotzil", + "vlr": "Vatrata", + "yus": "Chan 
Santa Cruz Maya", + "nfg": "Nyeng", + "nfk": "Shakara", + "agp": "Paranan", + "bhk": "Albay Bicolano", + "bkb": "Finallig", + "btb": "Beti (Cameroon)", + "cjr": "Chorotega", + "cmk": "Chimakum", + "drh": "Darkhat", + "drw": "Darwazi", + "gav": "Gabutamon", + "mof": "Mohegan-Montauk-Narragansett", + "mst": "Cataelano Mandaya", + "myt": "Sangab Mandaya", + "rmr": "Caló", + "sgl": "Sanglechi-Ishkashimi", + "sul": "Surigaonon", + "sum": "Sumo-Mayangna", + "tnf": "Tangshewi", + "wgw": "Wagawaga", + "ayx": "Ayi (China)", + "bjq": "Southern Betsimisaraka Malagasy", + "dha": "Dhanwar (India)", + "dkl": "Kolum So Dogon", + "mja": "Mahei", + "nbf": "Naxi", + "noo": "Nootka", + "tie": "Tingal", + "tkk": "Takpa", + "baz": "Tunen", + "bjd": "Bandjigali", + "ccq": "Chaungtha", + "cka": "Khumi Awa Chin", + "dap": "Nisi (India)", + "dwl": "Walo Kumbe Dogon", + "elp": "Elpaputih", + "gbc": "Garawa", + "gio": "Gelao", + "hrr": "Horuru", + "ibi": "Ibilo", + "jar": "Jarawa (Nigeria)", + "kdv": "Kado", + "kgh": "Upper Tanudan Kalinga", + "kpp": "Paku Karen", + "kzh": "Kenuzi-Dongola", + "lcq": "Luhu", + "mgx": "Omati", + "nln": "Durango Nahuatl", + "pbz": "Palu", + "pgy": "Pongyong", + "sca": "Sansu", + "tlw": "South Wemale", + "unp": "Worora", + "wiw": "Wirangu", + "ybd": "Yangbye", + "yen": "Yendang", + "yma": "Yamphe", + "daf": "Dan", + "djl": "Djiwarli", + "ggr": "Aghu Tharnggalu", + "ilw": "Talur", + "izi": "Izi-Ezaa-Ikwo-Mgbo", + "meg": "Mea", + "mld": "Malakhel", + "mnt": "Maykulan", + "mwd": "Mudbura", + "myq": "Forest Maninka", + "nbx": "Ngura", + "nlr": "Ngarla", + "pcr": "Panang", + "ppr": "Piru", + "tgg": "Tangga", + "wit": "Wintu", + "xia": "Xiandao", + "yiy": "Yir Yoront", + "yos": "Yos", + "emo": "Emok", + "ggm": "Gugu Mini", + "leg": "Lengua", + "lmm": "Lamam", + "mhh": "Maskoy Pidgin", + "puz": "Purum Naga", + "sap": "Sanapaná", + "yuu": "Yugh", + "aam": "Aramanik", + "adp": "Adap", + "aue": "ǂKxʼauǁʼein", + "bmy": "Bemba (Democratic Republic of Congo)", + "bxx": "Borna (Democratic Republic of Congo)", + "byy": "Buya", + "dzd": "Daza", + "gfx": "Mangetti Dune ǃXung", + "gti": "Gbati-ri", + "ime": "Imeraguen", + "kbf": "Kakauhua", + "koj": "Sara Dunjo", + "kwq": "Kwak", + "kxe": "Kakihum", + "lii": "Lingkhim", + "mwj": "Maligo", + "nnx": "Ngong", + "oun": "ǃOǃung", + "pmu": "Mirpur Panjabi", + "sgo": "Songa", + "thx": "The", + "tsf": "Southwestern Tamang", + "uok": "Uokha", + "xsj": "Subi", + "yds": "Yiddish Sign Language", + "ymt": "Mator-Taygi-Karagas", + "ynh": "Yangho", + "bgm": "Baga Mboteni", + "btl": "Bhatola", + "cbe": "Chipiajes", + "cbh": "Cagua", + "coy": "Coyaima", + "cqu": "Chilean Quechua", + "cum": "Cumeral", + "duj": "Dhuwal", + "ggn": "Eastern Gurung", + "ggo": "Southern Gondi", + "guv": "Gey", + "iap": "Iapama", + "ill": "Iranun", + "kgc": "Kasseng", + "kox": "Coxima", + "ktr": "Kota Marudu Tinagas", + "kvs": "Kunggara", + "kzj": "Coastal Kadazan", + "kzt": "Tambunan Dusun", + "nad": "Nijadali", + "nts": "Natagaimas", + "ome": "Omejes", + "pmc": "Palumata", + "pod": "Ponares", + "ppa": "Pao", + "pry": "Pray 3", + "rna": "Runa", + "svr": "Savara", + "tdu": "Tempasuk Dusun", + "thc": "Tai Hang Tong", + "tid": "Tidong", + "tmp": "Tai Mène", + "tne": "Tinoc Kallahan", + "toe": "Tomedes", + "xba": "Kamba (Brazil)", + "xbx": "Kabixí", + "xip": "Xipináwa", + "xkh": "Karahawyana", + "yri": "Yarí", + "jeg": "Jeng", + "kgd": "Kataang", + "krm": "Krim", + "prb": "Lua'", + "puk": "Pu Ko", + "rie": "Rien", + "rsi": "Rennellese Sign Language", + "skk": "Sok", + "snh": "Shinabo", + "lsg": 
"Lyons Sign Language", + "mwx": "Mediak", + "mwy": "Mosiro", + "ncp": "Ndaktup", + "ais": "Nataoran Amis", + "asd": "Asas", + "dit": "Dirari", + "dud": "Hun-Saare", + "lba": "Lui", + "llo": "Khlor", + "myd": "Maramba", + "myi": "Mina (India)", + "nns": "Ningye", + "aoh": "Arma", + "ayy": "Tayabas Ayta", + "bbz": "Babalia Creole Arabic", + "bpb": "Barbacoas", + "cca": "Cauca", + "cdg": "Chamari", + "dgu": "Degaru", + "drr": "Dororo", + "ekc": "Eastern Karnic", + "gli": "Guliguli", + "kjf": "Khalaj", + "kxl": "Nepali Kurux", + "kxu": "Kui (India)", + "lmz": "Lumbee", + "nxu": "Narau", + "plp": "Palpa", + "sdm": "Semandang", + "tbb": "Tapeba", + "xrq": "Karranga", + "xtz": "Tasmanian", + "zir": "Ziriya", + "thw": "Thudam", + "bic": "Bikaru", + "bij": "Vaghat-Ya-Bijim-Legeri", + "blg": "Balau", + "gji": "Geji", + "mvm": "Muya", + "ngo": "Ngoni", + "pat": "Papitalai", + "vki": "Ija-Zuba", + "wra": "Warapu", + "ajt": "Judeo-Tunisian Arabic", + "cug": "Chungmboko", + "lak": "Laka (Nigeria)", + "lno": "Lango (South Sudan)", + "pii": "Pini", + "smd": "Sama", + "snb": "Sebuyau", + "uun": "Kulon-Pazeh", + "wrd": "Warduji", + "wya": "Wyandot", +} + + +iso639long = inverse_dict(iso639short) + +iso639code_retired = inverse_dict(iso639retired) diff --git a/lib/python3.10/site-packages/nltk/lazyimport.py b/lib/python3.10/site-packages/nltk/lazyimport.py new file mode 100644 index 0000000000000000000000000000000000000000..ee0c8e4451fff3a19c3608d0d08e3422a77fd8f0 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/lazyimport.py @@ -0,0 +1,142 @@ +# This module is from mx/DateTime/LazyModule.py and is +# distributed under the terms of the eGenix.com Public License Agreement +# https://www.egenix.com/products/eGenix.com-Public-License-1.1.0.pdf + +""" Helper to enable simple lazy module import. + + 'Lazy' means the actual import is deferred until an attribute is + requested from the module's namespace. This has the advantage of + allowing all imports to be done at the top of a script (in a + prominent and visible place) without having a great impact + on startup time. + + Copyright (c) 1999-2005, Marc-Andre Lemburg; mailto:mal@lemburg.com + See the documentation for further information on copyrights, + or contact the author. All Rights Reserved. +""" + +### Constants + +_debug = 0 + +### + + +class LazyModule: + + """Lazy module class. + + Lazy modules are imported into the given namespaces whenever a + non-special attribute (there are some attributes like __doc__ + that class instances handle without calling __getattr__) is + requested. The module is then registered under the given name + in locals usually replacing the import wrapper instance. The + import itself is done using globals as global namespace. + + Example of creating a lazy load module: + + ISO = LazyModule('ISO',locals(),globals()) + + Later, requesting an attribute from ISO will load the module + automatically into the locals() namespace, overriding the + LazyModule instance: + + t = ISO.Week(1998,1,1) + + """ + + # Flag which indicates whether the LazyModule is initialized or not + __lazymodule_init = 0 + + # Name of the module to load + __lazymodule_name = "" + + # Flag which indicates whether the module was loaded or not + __lazymodule_loaded = 0 + + # Locals dictionary where to register the module + __lazymodule_locals = None + + # Globals dictionary to use for the module import + __lazymodule_globals = None + + def __init__(self, name, locals, globals=None): + + """Create a LazyModule instance wrapping module name. 
+ + The module will later on be registered in locals under the + given module name. + + globals is optional and defaults to locals. + + """ + self.__lazymodule_locals = locals + if globals is None: + globals = locals + self.__lazymodule_globals = globals + mainname = globals.get("__name__", "") + if mainname: + self.__name__ = mainname + "." + name + self.__lazymodule_name = name + else: + self.__name__ = self.__lazymodule_name = name + self.__lazymodule_init = 1 + + def __lazymodule_import(self): + + """Import the module now.""" + # Load and register module + local_name = self.__lazymodule_name # e.g. "toolbox" + full_name = self.__name__ # e.g. "nltk.toolbox" + if self.__lazymodule_loaded: + return self.__lazymodule_locals[local_name] + if _debug: + print("LazyModule: Loading module %r" % full_name) + self.__lazymodule_locals[local_name] = module = __import__( + full_name, self.__lazymodule_locals, self.__lazymodule_globals, "*" + ) + + # Fill namespace with all symbols from original module to + # provide faster access. + self.__dict__.update(module.__dict__) + + # Set import flag + self.__dict__["__lazymodule_loaded"] = 1 + + if _debug: + print("LazyModule: Module %r loaded" % full_name) + return module + + def __getattr__(self, name): + + """Import the module on demand and get the attribute.""" + if self.__lazymodule_loaded: + raise AttributeError(name) + if _debug: + print( + "LazyModule: " + "Module load triggered by attribute %r read access" % name + ) + module = self.__lazymodule_import() + return getattr(module, name) + + def __setattr__(self, name, value): + + """Import the module on demand and set the attribute.""" + if not self.__lazymodule_init: + self.__dict__[name] = value + return + if self.__lazymodule_loaded: + self.__lazymodule_locals[self.__lazymodule_name] = value + self.__dict__[name] = value + return + if _debug: + print( + "LazyModule: " + "Module load triggered by attribute %r write access" % name + ) + module = self.__lazymodule_import() + setattr(module, name, value) + + def __repr__(self): + return "" % self.__name__ diff --git a/lib/python3.10/site-packages/nltk/probability.py b/lib/python3.10/site-packages/nltk/probability.py new file mode 100644 index 0000000000000000000000000000000000000000..a6de70732ac33e375c42d5e675aac124ffeafdf6 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/probability.py @@ -0,0 +1,2578 @@ +# Natural Language Toolkit: Probability and Statistics +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (additions) +# Trevor Cohn (additions) +# Peter Ljunglöf (additions) +# Liang Dong (additions) +# Geoffrey Sampson (additions) +# Ilia Kurenkov (additions) +# +# URL: +# For license information, see LICENSE.TXT + +""" +Classes for representing and processing probabilistic information. + +The ``FreqDist`` class is used to encode "frequency distributions", +which count the number of times that each outcome of an experiment +occurs. + +The ``ProbDistI`` class defines a standard interface for "probability +distributions", which encode the probability of each outcome for an +experiment. There are two types of probability distribution: + + - "derived probability distributions" are created from frequency + distributions. They attempt to model the probability distribution + that generated the frequency distribution. + - "analytic probability distributions" are created directly from + parameters (such as variance). 
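+
+For example (a sketch using only classes defined in this module; the sample
+counts and probabilities below are made up)::
+
+    fd = FreqDist('abracadabra')                   # frequency distribution
+    pd = DictionaryProbDist({'a': 0.4, 'b': 0.6})  # analytic probability distribution
+    fd['a'], pd.prob('a')                          # -> (5, 0.4)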
+ +The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface +are used to encode conditional distributions. Conditional probability +distributions can be derived or analytic; but currently the only +implementation of the ``ConditionalProbDistI`` interface is +``ConditionalProbDist``, a derived distribution. + +""" + +import array +import math +import random +import warnings +from abc import ABCMeta, abstractmethod +from collections import Counter, defaultdict +from functools import reduce + +from nltk.internals import raise_unorderable_types + +_NINF = float("-1e300") + +##////////////////////////////////////////////////////// +## Frequency Distributions +##////////////////////////////////////////////////////// + + +class FreqDist(Counter): + """ + A frequency distribution for the outcomes of an experiment. A + frequency distribution records the number of times each outcome of + an experiment has occurred. For example, a frequency distribution + could be used to record the frequency of each word type in a + document. Formally, a frequency distribution can be defined as a + function mapping from each sample to the number of times that + sample occurred as an outcome. + + Frequency distributions are generally constructed by running a + number of experiments, and incrementing the count for a sample + every time it is an outcome of an experiment. For example, the + following code will produce a frequency distribution that encodes + how often each word occurs in a text: + + >>> from nltk.tokenize import word_tokenize + >>> from nltk.probability import FreqDist + >>> sent = 'This is an example sentence' + >>> fdist = FreqDist() + >>> for word in word_tokenize(sent): + ... fdist[word.lower()] += 1 + + An equivalent way to do this is with the initializer: + + >>> fdist = FreqDist(word.lower() for word in word_tokenize(sent)) + + """ + + def __init__(self, samples=None): + """ + Construct a new frequency distribution. If ``samples`` is + given, then the frequency distribution will be initialized + with the count of each object in ``samples``; otherwise, it + will be initialized to be empty. + + In particular, ``FreqDist()`` returns an empty frequency + distribution; and ``FreqDist(samples)`` first creates an empty + frequency distribution, and then calls ``update`` with the + list ``samples``. + + :param samples: The samples to initialize the frequency + distribution with. + :type samples: Sequence + """ + Counter.__init__(self, samples) + + # Cached number of samples in this FreqDist + self._N = None + + def N(self): + """ + Return the total number of sample outcomes that have been + recorded by this FreqDist. For the number of unique + sample values (or bins) with counts greater than zero, use + ``FreqDist.B()``. 
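+
+        For example (a small sketch with character samples)::
+
+            FreqDist('abracadabra').N()   # 11 sample outcomes
+            FreqDist('abracadabra').B()   # 5 distinct samples (bins)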
+ + :rtype: int + """ + if self._N is None: + # Not already cached, or cache has been invalidated + self._N = sum(self.values()) + return self._N + + def __setitem__(self, key, val): + """ + Override ``Counter.__setitem__()`` to invalidate the cached N + """ + self._N = None + super().__setitem__(key, val) + + def __delitem__(self, key): + """ + Override ``Counter.__delitem__()`` to invalidate the cached N + """ + self._N = None + super().__delitem__(key) + + def update(self, *args, **kwargs): + """ + Override ``Counter.update()`` to invalidate the cached N + """ + self._N = None + super().update(*args, **kwargs) + + def setdefault(self, key, val): + """ + Override ``Counter.setdefault()`` to invalidate the cached N + """ + self._N = None + super().setdefault(key, val) + + def B(self): + """ + Return the total number of sample values (or "bins") that + have counts greater than zero. For the total + number of sample outcomes recorded, use ``FreqDist.N()``. + (FreqDist.B() is the same as len(FreqDist).) + + :rtype: int + """ + return len(self) + + def hapaxes(self): + """ + Return a list of all samples that occur once (hapax legomena) + + :rtype: list + """ + return [item for item in self if self[item] == 1] + + def Nr(self, r, bins=None): + return self.r_Nr(bins)[r] + + def r_Nr(self, bins=None): + """ + Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0. + + :type bins: int + :param bins: The number of possible sample outcomes. ``bins`` + is used to calculate Nr(0). In particular, Nr(0) is + ``bins-self.B()``. If ``bins`` is not specified, it + defaults to ``self.B()`` (so Nr(0) will be 0). + :rtype: int + """ + + _r_Nr = defaultdict(int) + for count in self.values(): + _r_Nr[count] += 1 + + # Special case for Nr[0]: + _r_Nr[0] = bins - self.B() if bins is not None else 0 + + return _r_Nr + + def _cumulative_frequencies(self, samples): + """ + Return the cumulative frequencies of the specified samples. + If no samples are specified, all counts are returned, starting + with the largest. + + :param samples: the samples whose frequencies should be returned. + :type samples: any + :rtype: list(float) + """ + cf = 0.0 + for sample in samples: + cf += self[sample] + yield cf + + # slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs, + # here, freq() does probs + def freq(self, sample): + """ + Return the frequency of a given sample. The frequency of a + sample is defined as the count of that sample divided by the + total number of sample outcomes that have been recorded by + this FreqDist. The count of a sample is defined as the + number of times that sample outcome was recorded by this + FreqDist. Frequencies are always real numbers in the range + [0, 1]. + + :param sample: the sample whose frequency + should be returned. + :type sample: any + :rtype: float + """ + n = self.N() + if n == 0: + return 0 + return self[sample] / n + + def max(self): + """ + Return the sample with the greatest number of outcomes in this + frequency distribution. If two or more samples have the same + number of outcomes, return one of them; which sample is + returned is undefined. If no outcomes have occurred in this + frequency distribution, return None. + + :return: The sample with the maximum number of outcomes in this + frequency distribution. + :rtype: any or None + """ + if len(self) == 0: + raise ValueError( + "A FreqDist must have at least one sample before max is defined." 
+ ) + return self.most_common(1)[0][0] + + def plot( + self, *args, title="", cumulative=False, percents=False, show=True, **kwargs + ): + """ + Plot samples from the frequency distribution + displaying the most frequent sample first. If an integer + parameter is supplied, stop after this many samples have been + plotted. For a cumulative plot, specify cumulative=True. Additional + ``**kwargs`` are passed to matplotlib's plot function. + (Requires Matplotlib to be installed.) + + :param title: The title for the graph. + :type title: str + :param cumulative: Whether the plot is cumulative. (default = False) + :type cumulative: bool + :param percents: Whether the plot uses percents instead of counts. (default = False) + :type percents: bool + :param show: Whether to show the plot, or only return the ax. + :type show: bool + """ + try: + import matplotlib.pyplot as plt + except ImportError as e: + raise ValueError( + "The plot function requires matplotlib to be installed." + "See https://matplotlib.org/" + ) from e + + if len(args) == 0: + args = [len(self)] + samples = [item for item, _ in self.most_common(*args)] + + if cumulative: + freqs = list(self._cumulative_frequencies(samples)) + ylabel = "Cumulative " + else: + freqs = [self[sample] for sample in samples] + ylabel = "" + + if percents: + freqs = [f / self.N() * 100 for f in freqs] + ylabel += "Percents" + else: + ylabel += "Counts" + + ax = plt.gca() + ax.grid(True, color="silver") + + if "linewidth" not in kwargs: + kwargs["linewidth"] = 2 + if title: + ax.set_title(title) + + ax.plot(freqs, **kwargs) + ax.set_xticks(range(len(samples))) + ax.set_xticklabels([str(s) for s in samples], rotation=90) + ax.set_xlabel("Samples") + ax.set_ylabel(ylabel) + + if show: + plt.show() + + return ax + + def tabulate(self, *args, **kwargs): + """ + Tabulate the given samples from the frequency distribution (cumulative), + displaying the most frequent sample first. If an integer + parameter is supplied, stop after this many samples have been + plotted. + + :param samples: The samples to plot (default is all samples) + :type samples: list + :param cumulative: A flag to specify whether the freqs are cumulative (default = False) + :type title: bool + """ + if len(args) == 0: + args = [len(self)] + samples = _get_kwarg( + kwargs, "samples", [item for item, _ in self.most_common(*args)] + ) + + cumulative = _get_kwarg(kwargs, "cumulative", False) + if cumulative: + freqs = list(self._cumulative_frequencies(samples)) + else: + freqs = [self[sample] for sample in samples] + # percents = [f * 100 for f in freqs] only in ProbDist? + + width = max(len(f"{s}") for s in samples) + width = max(width, max(len("%d" % f) for f in freqs)) + + for i in range(len(samples)): + print("%*s" % (width, samples[i]), end=" ") + print() + for i in range(len(samples)): + print("%*d" % (width, freqs[i]), end=" ") + print() + + def copy(self): + """ + Create a copy of this frequency distribution. + + :rtype: FreqDist + """ + return self.__class__(self) + + # Mathematical operatiors + + def __add__(self, other): + """ + Add counts from two counters. + + >>> FreqDist('abbb') + FreqDist('bcc') + FreqDist({'b': 4, 'c': 2, 'a': 1}) + + """ + return self.__class__(super().__add__(other)) + + def __sub__(self, other): + """ + Subtract count, but keep only results with positive counts. 
+ + >>> FreqDist('abbbc') - FreqDist('bccd') + FreqDist({'b': 2, 'a': 1}) + + """ + return self.__class__(super().__sub__(other)) + + def __or__(self, other): + """ + Union is the maximum of value in either of the input counters. + + >>> FreqDist('abbb') | FreqDist('bcc') + FreqDist({'b': 3, 'c': 2, 'a': 1}) + + """ + return self.__class__(super().__or__(other)) + + def __and__(self, other): + """ + Intersection is the minimum of corresponding counts. + + >>> FreqDist('abbb') & FreqDist('bcc') + FreqDist({'b': 1}) + + """ + return self.__class__(super().__and__(other)) + + def __le__(self, other): + """ + Returns True if this frequency distribution is a subset of the other + and for no key the value exceeds the value of the same key from + the other frequency distribution. + + The <= operator forms partial order and satisfying the axioms + reflexivity, antisymmetry and transitivity. + + >>> FreqDist('a') <= FreqDist('a') + True + >>> a = FreqDist('abc') + >>> b = FreqDist('aabc') + >>> (a <= b, b <= a) + (True, False) + >>> FreqDist('a') <= FreqDist('abcd') + True + >>> FreqDist('abc') <= FreqDist('xyz') + False + >>> FreqDist('xyz') <= FreqDist('abc') + False + >>> c = FreqDist('a') + >>> d = FreqDist('aa') + >>> e = FreqDist('aaa') + >>> c <= d and d <= e and c <= e + True + """ + if not isinstance(other, FreqDist): + raise_unorderable_types("<=", self, other) + return set(self).issubset(other) and all( + self[key] <= other[key] for key in self + ) + + def __ge__(self, other): + if not isinstance(other, FreqDist): + raise_unorderable_types(">=", self, other) + return set(self).issuperset(other) and all( + self[key] >= other[key] for key in other + ) + + __lt__ = lambda self, other: self <= other and not self == other + __gt__ = lambda self, other: self >= other and not self == other + + def __repr__(self): + """ + Return a string representation of this FreqDist. + + :rtype: string + """ + return self.pformat() + + def pprint(self, maxlen=10, stream=None): + """ + Print a string representation of this FreqDist to 'stream' + + :param maxlen: The maximum number of items to print + :type maxlen: int + :param stream: The stream to print to. stdout by default + """ + print(self.pformat(maxlen=maxlen), file=stream) + + def pformat(self, maxlen=10): + """ + Return a string representation of this FreqDist. + + :param maxlen: The maximum number of items to display + :type maxlen: int + :rtype: string + """ + items = ["{!r}: {!r}".format(*item) for item in self.most_common(maxlen)] + if len(self) > maxlen: + items.append("...") + return "FreqDist({{{0}}})".format(", ".join(items)) + + def __str__(self): + """ + Return a string representation of this FreqDist. + + :rtype: string + """ + return "" % (len(self), self.N()) + + def __iter__(self): + """ + Return an iterator which yields tokens ordered by frequency. + + :rtype: iterator + """ + for token, _ in self.most_common(self.B()): + yield token + + +##////////////////////////////////////////////////////// +## Probability Distributions +##////////////////////////////////////////////////////// + + +class ProbDistI(metaclass=ABCMeta): + """ + A probability distribution for the outcomes of an experiment. A + probability distribution specifies how likely it is that an + experiment will have any given outcome. For example, a + probability distribution could be used to predict the probability + that a token in a document will have a given type. 
Formally, a + probability distribution can be defined as a function mapping from + samples to nonnegative real numbers, such that the sum of every + number in the function's range is 1.0. A ``ProbDist`` is often + used to model the probability distribution of the experiment used + to generate a frequency distribution. + """ + + SUM_TO_ONE = True + """True if the probabilities of the samples in this probability + distribution will always sum to one.""" + + @abstractmethod + def __init__(self): + """ + Classes inheriting from ProbDistI should implement __init__. + """ + + @abstractmethod + def prob(self, sample): + """ + Return the probability for a given sample. Probabilities + are always real numbers in the range [0, 1]. + + :param sample: The sample whose probability + should be returned. + :type sample: any + :rtype: float + """ + + def logprob(self, sample): + """ + Return the base 2 logarithm of the probability for a given sample. + + :param sample: The sample whose probability + should be returned. + :type sample: any + :rtype: float + """ + # Default definition, in terms of prob() + p = self.prob(sample) + return math.log(p, 2) if p != 0 else _NINF + + @abstractmethod + def max(self): + """ + Return the sample with the greatest probability. If two or + more samples have the same probability, return one of them; + which sample is returned is undefined. + + :rtype: any + """ + + @abstractmethod + def samples(self): + """ + Return a list of all samples that have nonzero probabilities. + Use ``prob`` to find the probability of each sample. + + :rtype: list + """ + + # cf self.SUM_TO_ONE + def discount(self): + """ + Return the ratio by which counts are discounted on average: c*/c + + :rtype: float + """ + return 0.0 + + # Subclasses should define more efficient implementations of this, + # where possible. + def generate(self): + """ + Return a randomly selected sample from this probability distribution. + The probability of returning each sample ``samp`` is equal to + ``self.prob(samp)``. + """ + p = random.random() + p_init = p + for sample in self.samples(): + p -= self.prob(sample) + if p <= 0: + return sample + # allow for some rounding error: + if p < 0.0001: + return sample + # we *should* never get here + if self.SUM_TO_ONE: + warnings.warn( + "Probability distribution %r sums to %r; generate()" + " is returning an arbitrary sample." % (self, p_init - p) + ) + return random.choice(list(self.samples())) + + +class UniformProbDist(ProbDistI): + """ + A probability distribution that assigns equal probability to each + sample in a given set; and a zero probability to all other + samples. + """ + + def __init__(self, samples): + """ + Construct a new uniform probability distribution, that assigns + equal probability to each sample in ``samples``. + + :param samples: The samples that should be given uniform + probability. + :type samples: list + :raise ValueError: If ``samples`` is empty. + """ + if len(samples) == 0: + raise ValueError( + "A Uniform probability distribution must " + "have at least one sample." 
+ ) + self._sampleset = set(samples) + self._prob = 1.0 / len(self._sampleset) + self._samples = list(self._sampleset) + + def prob(self, sample): + return self._prob if sample in self._sampleset else 0 + + def max(self): + return self._samples[0] + + def samples(self): + return self._samples + + def __repr__(self): + return "" % len(self._sampleset) + + +class RandomProbDist(ProbDistI): + """ + Generates a random probability distribution whereby each sample + will be between 0 and 1 with equal probability (uniform random distribution. + Also called a continuous uniform distribution). + """ + + def __init__(self, samples): + if len(samples) == 0: + raise ValueError( + "A probability distribution must " + "have at least one sample." + ) + self._probs = self.unirand(samples) + self._samples = list(self._probs.keys()) + + @classmethod + def unirand(cls, samples): + """ + The key function that creates a randomized initial distribution + that still sums to 1. Set as a dictionary of prob values so that + it can still be passed to MutableProbDist and called with identical + syntax to UniformProbDist + """ + samples = set(samples) + randrow = [random.random() for i in range(len(samples))] + total = sum(randrow) + for i, x in enumerate(randrow): + randrow[i] = x / total + + total = sum(randrow) + if total != 1: + # this difference, if present, is so small (near NINF) that it + # can be subtracted from any element without risking probs not (0 1) + randrow[-1] -= total - 1 + + return {s: randrow[i] for i, s in enumerate(samples)} + + def max(self): + if not hasattr(self, "_max"): + self._max = max((p, v) for (v, p) in self._probs.items())[1] + return self._max + + def prob(self, sample): + return self._probs.get(sample, 0) + + def samples(self): + return self._samples + + def __repr__(self): + return "" % len(self._probs) + + +class DictionaryProbDist(ProbDistI): + """ + A probability distribution whose probabilities are directly + specified by a given dictionary. The given dictionary maps + samples to probabilities. + """ + + def __init__(self, prob_dict=None, log=False, normalize=False): + """ + Construct a new probability distribution from the given + dictionary, which maps values to probabilities (or to log + probabilities, if ``log`` is true). If ``normalize`` is + true, then the probability values are scaled by a constant + factor such that they sum to 1. + + If called without arguments, the resulting probability + distribution assigns zero probability to all values. + """ + + self._prob_dict = prob_dict.copy() if prob_dict is not None else {} + self._log = log + + # Normalize the distribution, if requested. + if normalize: + if len(prob_dict) == 0: + raise ValueError( + "A DictionaryProbDist must have at least one sample " + + "before it can be normalized." 
+ ) + if log: + value_sum = sum_logs(list(self._prob_dict.values())) + if value_sum <= _NINF: + logp = math.log(1.0 / len(prob_dict), 2) + for x in prob_dict: + self._prob_dict[x] = logp + else: + for (x, p) in self._prob_dict.items(): + self._prob_dict[x] -= value_sum + else: + value_sum = sum(self._prob_dict.values()) + if value_sum == 0: + p = 1.0 / len(prob_dict) + for x in prob_dict: + self._prob_dict[x] = p + else: + norm_factor = 1.0 / value_sum + for (x, p) in self._prob_dict.items(): + self._prob_dict[x] *= norm_factor + + def prob(self, sample): + if self._log: + return 2 ** (self._prob_dict[sample]) if sample in self._prob_dict else 0 + else: + return self._prob_dict.get(sample, 0) + + def logprob(self, sample): + if self._log: + return self._prob_dict.get(sample, _NINF) + else: + if sample not in self._prob_dict: + return _NINF + elif self._prob_dict[sample] == 0: + return _NINF + else: + return math.log(self._prob_dict[sample], 2) + + def max(self): + if not hasattr(self, "_max"): + self._max = max((p, v) for (v, p) in self._prob_dict.items())[1] + return self._max + + def samples(self): + return self._prob_dict.keys() + + def __repr__(self): + return "" % len(self._prob_dict) + + +class MLEProbDist(ProbDistI): + """ + The maximum likelihood estimate for the probability distribution + of the experiment used to generate a frequency distribution. The + "maximum likelihood estimate" approximates the probability of + each sample as the frequency of that sample in the frequency + distribution. + """ + + def __init__(self, freqdist, bins=None): + """ + Use the maximum likelihood estimate to create a probability + distribution for the experiment used to generate ``freqdist``. + + :type freqdist: FreqDist + :param freqdist: The frequency distribution that the + probability estimates should be based on. + """ + self._freqdist = freqdist + + def freqdist(self): + """ + Return the frequency distribution that this probability + distribution is based on. + + :rtype: FreqDist + """ + return self._freqdist + + def prob(self, sample): + return self._freqdist.freq(sample) + + def max(self): + return self._freqdist.max() + + def samples(self): + return self._freqdist.keys() + + def __repr__(self): + """ + :rtype: str + :return: A string representation of this ``ProbDist``. + """ + return "" % self._freqdist.N() + + +class LidstoneProbDist(ProbDistI): + """ + The Lidstone estimate for the probability distribution of the + experiment used to generate a frequency distribution. The + "Lidstone estimate" is parameterized by a real number *gamma*, + which typically ranges from 0 to 1. The Lidstone estimate + approximates the probability of a sample with count *c* from an + experiment with *N* outcomes and *B* bins as + ``c+gamma)/(N+B*gamma)``. This is equivalent to adding + *gamma* to the count for each bin, and taking the maximum + likelihood estimate of the resulting frequency distribution. + """ + + SUM_TO_ONE = False + + def __init__(self, freqdist, gamma, bins=None): + """ + Use the Lidstone estimate to create a probability distribution + for the experiment used to generate ``freqdist``. + + :type freqdist: FreqDist + :param freqdist: The frequency distribution that the + probability estimates should be based on. + :type gamma: float + :param gamma: A real number used to parameterize the + estimate. The Lidstone estimate is equivalent to adding + *gamma* to the count for each bin, and taking the + maximum likelihood estimate of the resulting frequency + distribution. 
+ :type bins: int + :param bins: The number of sample values that can be generated + by the experiment that is described by the probability + distribution. This value must be correctly set for the + probabilities of the sample values to sum to one. If + ``bins`` is not specified, it defaults to ``freqdist.B()``. + """ + if (bins == 0) or (bins is None and freqdist.N() == 0): + name = self.__class__.__name__[:-8] + raise ValueError( + "A %s probability distribution " % name + "must have at least one bin." + ) + if (bins is not None) and (bins < freqdist.B()): + name = self.__class__.__name__[:-8] + raise ValueError( + "\nThe number of bins in a %s distribution " % name + + "(%d) must be greater than or equal to\n" % bins + + "the number of bins in the FreqDist used " + + "to create it (%d)." % freqdist.B() + ) + + self._freqdist = freqdist + self._gamma = float(gamma) + self._N = self._freqdist.N() + + if bins is None: + bins = freqdist.B() + self._bins = bins + + self._divisor = self._N + bins * gamma + if self._divisor == 0.0: + # In extreme cases we force the probability to be 0, + # which it will be, since the count will be 0: + self._gamma = 0 + self._divisor = 1 + + def freqdist(self): + """ + Return the frequency distribution that this probability + distribution is based on. + + :rtype: FreqDist + """ + return self._freqdist + + def prob(self, sample): + c = self._freqdist[sample] + return (c + self._gamma) / self._divisor + + def max(self): + # For Lidstone distributions, probability is monotonic with + # frequency, so the most probable sample is the one that + # occurs most frequently. + return self._freqdist.max() + + def samples(self): + return self._freqdist.keys() + + def discount(self): + gb = self._gamma * self._bins + return gb / (self._N + gb) + + def __repr__(self): + """ + Return a string representation of this ``ProbDist``. + + :rtype: str + """ + return "" % self._freqdist.N() + + +class LaplaceProbDist(LidstoneProbDist): + """ + The Laplace estimate for the probability distribution of the + experiment used to generate a frequency distribution. The + "Laplace estimate" approximates the probability of a sample with + count *c* from an experiment with *N* outcomes and *B* bins as + *(c+1)/(N+B)*. This is equivalent to adding one to the count for + each bin, and taking the maximum likelihood estimate of the + resulting frequency distribution. + """ + + def __init__(self, freqdist, bins=None): + """ + Use the Laplace estimate to create a probability distribution + for the experiment used to generate ``freqdist``. + + :type freqdist: FreqDist + :param freqdist: The frequency distribution that the + probability estimates should be based on. + :type bins: int + :param bins: The number of sample values that can be generated + by the experiment that is described by the probability + distribution. This value must be correctly set for the + probabilities of the sample values to sum to one. If + ``bins`` is not specified, it defaults to ``freqdist.B()``. + """ + LidstoneProbDist.__init__(self, freqdist, 1, bins) + + def __repr__(self): + """ + :rtype: str + :return: A string representation of this ``ProbDist``. + """ + return "" % self._freqdist.N() + + +class ELEProbDist(LidstoneProbDist): + """ + The expected likelihood estimate for the probability distribution + of the experiment used to generate a frequency distribution. 
The + "expected likelihood estimate" approximates the probability of a + sample with count *c* from an experiment with *N* outcomes and + *B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5 + to the count for each bin, and taking the maximum likelihood + estimate of the resulting frequency distribution. + """ + + def __init__(self, freqdist, bins=None): + """ + Use the expected likelihood estimate to create a probability + distribution for the experiment used to generate ``freqdist``. + + :type freqdist: FreqDist + :param freqdist: The frequency distribution that the + probability estimates should be based on. + :type bins: int + :param bins: The number of sample values that can be generated + by the experiment that is described by the probability + distribution. This value must be correctly set for the + probabilities of the sample values to sum to one. If + ``bins`` is not specified, it defaults to ``freqdist.B()``. + """ + LidstoneProbDist.__init__(self, freqdist, 0.5, bins) + + def __repr__(self): + """ + Return a string representation of this ``ProbDist``. + + :rtype: str + """ + return "" % self._freqdist.N() + + +class HeldoutProbDist(ProbDistI): + """ + The heldout estimate for the probability distribution of the + experiment used to generate two frequency distributions. These + two frequency distributions are called the "heldout frequency + distribution" and the "base frequency distribution." The + "heldout estimate" uses uses the "heldout frequency + distribution" to predict the probability of each sample, given its + frequency in the "base frequency distribution". + + In particular, the heldout estimate approximates the probability + for a sample that occurs *r* times in the base distribution as + the average frequency in the heldout distribution of all samples + that occur *r* times in the base distribution. + + This average frequency is *Tr[r]/(Nr[r].N)*, where: + + - *Tr[r]* is the total count in the heldout distribution for + all samples that occur *r* times in the base distribution. + - *Nr[r]* is the number of samples that occur *r* times in + the base distribution. + - *N* is the number of outcomes recorded by the heldout + frequency distribution. + + In order to increase the efficiency of the ``prob`` member + function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r* + when the ``HeldoutProbDist`` is created. + + :type _estimate: list(float) + :ivar _estimate: A list mapping from *r*, the number of + times that a sample occurs in the base distribution, to the + probability estimate for that sample. ``_estimate[r]`` is + calculated by finding the average frequency in the heldout + distribution of all samples that occur *r* times in the base + distribution. In particular, ``_estimate[r]`` = + *Tr[r]/(Nr[r].N)*. + :type _max_r: int + :ivar _max_r: The maximum number of times that any sample occurs + in the base distribution. ``_max_r`` is used to decide how + large ``_estimate`` must be. + """ + + SUM_TO_ONE = False + + def __init__(self, base_fdist, heldout_fdist, bins=None): + """ + Use the heldout estimate to create a probability distribution + for the experiment used to generate ``base_fdist`` and + ``heldout_fdist``. + + :type base_fdist: FreqDist + :param base_fdist: The base frequency distribution. + :type heldout_fdist: FreqDist + :param heldout_fdist: The heldout frequency distribution. + :type bins: int + :param bins: The number of sample values that can be generated + by the experiment that is described by the probability + distribution. 
This value must be correctly set for the + probabilities of the sample values to sum to one. If + ``bins`` is not specified, it defaults to ``freqdist.B()``. + """ + + self._base_fdist = base_fdist + self._heldout_fdist = heldout_fdist + + # The max number of times any sample occurs in base_fdist. + self._max_r = base_fdist[base_fdist.max()] + + # Calculate Tr, Nr, and N. + Tr = self._calculate_Tr() + r_Nr = base_fdist.r_Nr(bins) + Nr = [r_Nr[r] for r in range(self._max_r + 1)] + N = heldout_fdist.N() + + # Use Tr, Nr, and N to compute the probability estimate for + # each value of r. + self._estimate = self._calculate_estimate(Tr, Nr, N) + + def _calculate_Tr(self): + """ + Return the list *Tr*, where *Tr[r]* is the total count in + ``heldout_fdist`` for all samples that occur *r* + times in ``base_fdist``. + + :rtype: list(float) + """ + Tr = [0.0] * (self._max_r + 1) + for sample in self._heldout_fdist: + r = self._base_fdist[sample] + Tr[r] += self._heldout_fdist[sample] + return Tr + + def _calculate_estimate(self, Tr, Nr, N): + """ + Return the list *estimate*, where *estimate[r]* is the probability + estimate for any sample that occurs *r* times in the base frequency + distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*. + In the special case that *N[r]=0*, *estimate[r]* will never be used; + so we define *estimate[r]=None* for those cases. + + :rtype: list(float) + :type Tr: list(float) + :param Tr: the list *Tr*, where *Tr[r]* is the total count in + the heldout distribution for all samples that occur *r* + times in base distribution. + :type Nr: list(float) + :param Nr: The list *Nr*, where *Nr[r]* is the number of + samples that occur *r* times in the base distribution. + :type N: int + :param N: The total number of outcomes recorded by the heldout + frequency distribution. + """ + estimate = [] + for r in range(self._max_r + 1): + if Nr[r] == 0: + estimate.append(None) + else: + estimate.append(Tr[r] / (Nr[r] * N)) + return estimate + + def base_fdist(self): + """ + Return the base frequency distribution that this probability + distribution is based on. + + :rtype: FreqDist + """ + return self._base_fdist + + def heldout_fdist(self): + """ + Return the heldout frequency distribution that this + probability distribution is based on. + + :rtype: FreqDist + """ + return self._heldout_fdist + + def samples(self): + return self._base_fdist.keys() + + def prob(self, sample): + # Use our precomputed probability estimate. + r = self._base_fdist[sample] + return self._estimate[r] + + def max(self): + # Note: the Heldout estimation is *not* necessarily monotonic; + # so this implementation is currently broken. However, it + # should give the right answer *most* of the time. :) + return self._base_fdist.max() + + def discount(self): + raise NotImplementedError() + + def __repr__(self): + """ + :rtype: str + :return: A string representation of this ``ProbDist``. + """ + s = "" + return s % (self._base_fdist.N(), self._heldout_fdist.N()) + + +class CrossValidationProbDist(ProbDistI): + """ + The cross-validation estimate for the probability distribution of + the experiment used to generate a set of frequency distribution. + The "cross-validation estimate" for the probability of a sample + is found by averaging the held-out estimates for the sample in + each pair of frequency distributions. 
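+
+    For example, with two frequency distributions the cross-validation
+    estimate of a sample is the mean of its two heldout estimates (an
+    illustrative check; the tiny distributions below are arbitrary):
+
+    >>> fd1, fd2 = FreqDist('aab'), FreqDist('abb')
+    >>> cv = CrossValidationProbDist([fd1, fd2], 2)
+    >>> abs(cv.prob('a') - 0.5) < 1e-9
+    True
+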
+ """ + + SUM_TO_ONE = False + + def __init__(self, freqdists, bins): + """ + Use the cross-validation estimate to create a probability + distribution for the experiment used to generate + ``freqdists``. + + :type freqdists: list(FreqDist) + :param freqdists: A list of the frequency distributions + generated by the experiment. + :type bins: int + :param bins: The number of sample values that can be generated + by the experiment that is described by the probability + distribution. This value must be correctly set for the + probabilities of the sample values to sum to one. If + ``bins`` is not specified, it defaults to ``freqdist.B()``. + """ + self._freqdists = freqdists + + # Create a heldout probability distribution for each pair of + # frequency distributions in freqdists. + self._heldout_probdists = [] + for fdist1 in freqdists: + for fdist2 in freqdists: + if fdist1 is not fdist2: + probdist = HeldoutProbDist(fdist1, fdist2, bins) + self._heldout_probdists.append(probdist) + + def freqdists(self): + """ + Return the list of frequency distributions that this ``ProbDist`` is based on. + + :rtype: list(FreqDist) + """ + return self._freqdists + + def samples(self): + # [xx] nb: this is not too efficient + return set(sum((list(fd) for fd in self._freqdists), [])) + + def prob(self, sample): + # Find the average probability estimate returned by each + # heldout distribution. + prob = 0.0 + for heldout_probdist in self._heldout_probdists: + prob += heldout_probdist.prob(sample) + return prob / len(self._heldout_probdists) + + def discount(self): + raise NotImplementedError() + + def __repr__(self): + """ + Return a string representation of this ``ProbDist``. + + :rtype: str + """ + return "" % len(self._freqdists) + + +class WittenBellProbDist(ProbDistI): + """ + The Witten-Bell estimate of a probability distribution. This distribution + allocates uniform probability mass to as yet unseen events by using the + number of events that have only been seen once. The probability mass + reserved for unseen events is equal to *T / (N + T)* + where *T* is the number of observed event types and *N* is the total + number of observed events. This equates to the maximum likelihood estimate + of a new type event occurring. The remaining probability mass is discounted + such that all probability estimates sum to one, yielding: + + - *p = T / Z (N + T)*, if count = 0 + - *p = c / (N + T)*, otherwise + """ + + def __init__(self, freqdist, bins=None): + """ + Creates a distribution of Witten-Bell probability estimates. This + distribution allocates uniform probability mass to as yet unseen + events by using the number of events that have only been seen once. The + probability mass reserved for unseen events is equal to *T / (N + T)* + where *T* is the number of observed event types and *N* is the total + number of observed events. This equates to the maximum likelihood + estimate of a new type event occurring. The remaining probability mass + is discounted such that all probability estimates sum to one, + yielding: + + - *p = T / Z (N + T)*, if count = 0 + - *p = c / (N + T)*, otherwise + + The parameters *T* and *N* are taken from the ``freqdist`` parameter + (the ``B()`` and ``N()`` values). The normalizing factor *Z* is + calculated using these values along with the ``bins`` parameter. + + :param freqdist: The frequency counts upon which to base the + estimation. + :type freqdist: FreqDist + :param bins: The number of possible event types. 
This must be at least + as large as the number of bins in the ``freqdist``. If None, then + it's assumed to be equal to that of the ``freqdist`` + :type bins: int + """ + assert bins is None or bins >= freqdist.B(), ( + "bins parameter must not be less than %d=freqdist.B()" % freqdist.B() + ) + if bins is None: + bins = freqdist.B() + self._freqdist = freqdist + self._T = self._freqdist.B() + self._Z = bins - self._freqdist.B() + self._N = self._freqdist.N() + # self._P0 is P(0), precalculated for efficiency: + if self._N == 0: + # if freqdist is empty, we approximate P(0) by a UniformProbDist: + self._P0 = 1.0 / self._Z + else: + self._P0 = self._T / (self._Z * (self._N + self._T)) + + def prob(self, sample): + # inherit docs from ProbDistI + c = self._freqdist[sample] + return c / (self._N + self._T) if c != 0 else self._P0 + + def max(self): + return self._freqdist.max() + + def samples(self): + return self._freqdist.keys() + + def freqdist(self): + return self._freqdist + + def discount(self): + raise NotImplementedError() + + def __repr__(self): + """ + Return a string representation of this ``ProbDist``. + + :rtype: str + """ + return "" % self._freqdist.N() + + +##////////////////////////////////////////////////////// +## Good-Turing Probability Distributions +##////////////////////////////////////////////////////// + +# Good-Turing frequency estimation was contributed by Alan Turing and +# his statistical assistant I.J. Good, during their collaboration in +# the WWII. It is a statistical technique for predicting the +# probability of occurrence of objects belonging to an unknown number +# of species, given past observations of such objects and their +# species. (In drawing balls from an urn, the 'objects' would be balls +# and the 'species' would be the distinct colors of the balls (finite +# but unknown in number). +# +# Good-Turing method calculates the probability mass to assign to +# events with zero or low counts based on the number of events with +# higher counts. It does so by using the adjusted count *c\**: +# +# - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1 +# - *things with frequency zero in training* = N(1) for c == 0 +# +# where *c* is the original count, *N(i)* is the number of event types +# observed with count *i*. We can think the count of unseen as the count +# of frequency one (see Jurafsky & Martin 2nd Edition, p101). +# +# This method is problematic because the situation ``N(c+1) == 0`` +# is quite common in the original Good-Turing estimation; smoothing or +# interpolation of *N(i)* values is essential in practice. +# +# Bill Gale and Geoffrey Sampson present a simple and effective approach, +# Simple Good-Turing. As a smoothing curve they simply use a power curve: +# +# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic +# relationship) +# +# They estimate a and b by simple linear regression technique on the +# logarithmic form of the equation: +# +# log Nr = a + b*log(r) +# +# However, they suggest that such a simple curve is probably only +# appropriate for high values of r. For low values of r, they use the +# measured Nr directly. (see M&S, p.213) +# +# Gale and Sampson propose to use r while the difference between r and +# r* is 1.96 greater than the standard deviation, and switch to r* if +# it is less or equal: +# +# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr)) +# +# The 1.96 coefficient correspond to a 0.05 significance criterion, +# some implementations can use a coefficient of 1.65 for a 0.1 +# significance criterion. 
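+#
+# As a minimal illustrative sketch (the helper below is hypothetical, not part
+# of the NLTK API), the unsmoothed adjusted count c* can be computed directly
+# from a frequency-of-frequencies table; a None result is exactly the
+# N(c+1) == 0 gap that the smoothing implemented below is designed to fill.
+
+
+def _gt_adjusted_count_sketch(c, n_r):
+    """Return c* = (c + 1) * N(c + 1) / N(c), or None when it is undefined."""
+    # n_r maps a count r to the number of event types observed exactly r times,
+    # e.g. n_r = {1: 3, 2: 1} for a corpus with three hapaxes and one doubleton.
+    if not n_r.get(c) or not n_r.get(c + 1):
+        return None
+    return (c + 1) * n_r[c + 1] / n_r[c]
+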
+# + +##////////////////////////////////////////////////////// +## Simple Good-Turing Probablity Distributions +##////////////////////////////////////////////////////// + + +class SimpleGoodTuringProbDist(ProbDistI): + """ + SimpleGoodTuring ProbDist approximates from frequency to frequency of + frequency into a linear line under log space by linear regression. + Details of Simple Good-Turing algorithm can be found in: + + - Good Turing smoothing without tears" (Gale & Sampson 1995), + Journal of Quantitative Linguistics, vol. 2 pp. 217-237. + - "Speech and Language Processing (Jurafsky & Martin), + 2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c)) + - https://www.grsampson.net/RGoodTur.html + + Given a set of pair (xi, yi), where the xi denotes the frequency and + yi denotes the frequency of frequency, we want to minimize their + square variation. E(x) and E(y) represent the mean of xi and yi. + + - slope: b = sigma ((xi-E(x)(yi-E(y))) / sigma ((xi-E(x))(xi-E(x))) + - intercept: a = E(y) - b.E(x) + """ + + SUM_TO_ONE = False + + def __init__(self, freqdist, bins=None): + """ + :param freqdist: The frequency counts upon which to base the + estimation. + :type freqdist: FreqDist + :param bins: The number of possible event types. This must be + larger than the number of bins in the ``freqdist``. If None, + then it's assumed to be equal to ``freqdist``.B() + 1 + :type bins: int + """ + assert ( + bins is None or bins > freqdist.B() + ), "bins parameter must not be less than %d=freqdist.B()+1" % (freqdist.B() + 1) + if bins is None: + bins = freqdist.B() + 1 + self._freqdist = freqdist + self._bins = bins + r, nr = self._r_Nr() + self.find_best_fit(r, nr) + self._switch(r, nr) + self._renormalize(r, nr) + + def _r_Nr_non_zero(self): + r_Nr = self._freqdist.r_Nr() + del r_Nr[0] + return r_Nr + + def _r_Nr(self): + """ + Split the frequency distribution in two list (r, Nr), where Nr(r) > 0 + """ + nonzero = self._r_Nr_non_zero() + + if not nonzero: + return [], [] + return zip(*sorted(nonzero.items())) + + def find_best_fit(self, r, nr): + """ + Use simple linear regression to tune parameters self._slope and + self._intercept in the log-log space based on count and Nr(count) + (Work in log space to avoid floating point underflow.) + """ + # For higher sample frequencies the data points becomes horizontal + # along line Nr=1. To create a more evident linear model in log-log + # space, we average positive Nr values with the surrounding zero + # values. (Church and Gale, 1991) + + if not r or not nr: + # Empty r or nr? + return + + zr = [] + for j in range(len(r)): + i = r[j - 1] if j > 0 else 0 + k = 2 * r[j] - i if j == len(r) - 1 else r[j + 1] + zr_ = 2.0 * nr[j] / (k - i) + zr.append(zr_) + + log_r = [math.log(i) for i in r] + log_zr = [math.log(i) for i in zr] + + xy_cov = x_var = 0.0 + x_mean = sum(log_r) / len(log_r) + y_mean = sum(log_zr) / len(log_zr) + for (x, y) in zip(log_r, log_zr): + xy_cov += (x - x_mean) * (y - y_mean) + x_var += (x - x_mean) ** 2 + self._slope = xy_cov / x_var if x_var != 0 else 0.0 + if self._slope >= -1: + warnings.warn( + "SimpleGoodTuring did not find a proper best fit " + "line for smoothing probabilities of occurrences. " + "The probability estimates are likely to be " + "unreliable." + ) + self._intercept = y_mean - self._slope * x_mean + + def _switch(self, r, nr): + """ + Calculate the r frontier where we must switch from Nr to Sr + when estimating E[Nr]. 
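+        Following Gale & Sampson, the switch happens at the first r for which
+        the observed counts stop being consecutive, or for which the
+        unsmoothed estimate (r+1) * Nr(r+1) / Nr(r) and the smoothed estimate
+        (r+1) * Sr(r+1) / Sr(r) differ by no more than 1.96 standard
+        deviations.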
+ """ + for i, r_ in enumerate(r): + if len(r) == i + 1 or r[i + 1] != r_ + 1: + # We are at the end of r, or there is a gap in r + self._switch_at = r_ + break + + Sr = self.smoothedNr + smooth_r_star = (r_ + 1) * Sr(r_ + 1) / Sr(r_) + unsmooth_r_star = (r_ + 1) * nr[i + 1] / nr[i] + + std = math.sqrt(self._variance(r_, nr[i], nr[i + 1])) + if abs(unsmooth_r_star - smooth_r_star) <= 1.96 * std: + self._switch_at = r_ + break + + def _variance(self, r, nr, nr_1): + r = float(r) + nr = float(nr) + nr_1 = float(nr_1) + return (r + 1.0) ** 2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr) + + def _renormalize(self, r, nr): + """ + It is necessary to renormalize all the probability estimates to + ensure a proper probability distribution results. This can be done + by keeping the estimate of the probability mass for unseen items as + N(1)/N and renormalizing all the estimates for previously seen items + (as Gale and Sampson (1995) propose). (See M&S P.213, 1999) + """ + prob_cov = 0.0 + for r_, nr_ in zip(r, nr): + prob_cov += nr_ * self._prob_measure(r_) + if prob_cov: + self._renormal = (1 - self._prob_measure(0)) / prob_cov + + def smoothedNr(self, r): + """ + Return the number of samples with count r. + + :param r: The amount of frequency. + :type r: int + :rtype: float + """ + + # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic + # relationship) + # Estimate a and b by simple linear regression technique on + # the logarithmic form of the equation: log Nr = a + b*log(r) + + return math.exp(self._intercept + self._slope * math.log(r)) + + def prob(self, sample): + """ + Return the sample's probability. + + :param sample: sample of the event + :type sample: str + :rtype: float + """ + count = self._freqdist[sample] + p = self._prob_measure(count) + if count == 0: + if self._bins == self._freqdist.B(): + p = 0.0 + else: + p = p / (self._bins - self._freqdist.B()) + else: + p = p * self._renormal + return p + + def _prob_measure(self, count): + if count == 0 and self._freqdist.N() == 0: + return 1.0 + elif count == 0 and self._freqdist.N() != 0: + return self._freqdist.Nr(1) / self._freqdist.N() + + if self._switch_at > count: + Er_1 = self._freqdist.Nr(count + 1) + Er = self._freqdist.Nr(count) + else: + Er_1 = self.smoothedNr(count + 1) + Er = self.smoothedNr(count) + + r_star = (count + 1) * Er_1 / Er + return r_star / self._freqdist.N() + + def check(self): + prob_sum = 0.0 + for i in range(0, len(self._Nr)): + prob_sum += self._Nr[i] * self._prob_measure(i) / self._renormal + print("Probability Sum:", prob_sum) + # assert prob_sum != 1.0, "probability sum should be one!" + + def discount(self): + """ + This function returns the total mass of probability transfers from the + seen samples to the unseen samples. + """ + return self.smoothedNr(1) / self._freqdist.N() + + def max(self): + return self._freqdist.max() + + def samples(self): + return self._freqdist.keys() + + def freqdist(self): + return self._freqdist + + def __repr__(self): + """ + Return a string representation of this ``ProbDist``. + + :rtype: str + """ + return "" % self._freqdist.N() + + +class MutableProbDist(ProbDistI): + """ + An mutable probdist where the probabilities may be easily modified. This + simply copies an existing probdist, storing the probability values in a + mutable dictionary and providing an update method. + """ + + def __init__(self, prob_dist, samples, store_logs=True): + """ + Creates the mutable probdist based on the given prob_dist and using + the list of samples given. 
These values are stored as log + probabilities if the store_logs flag is set. + + :param prob_dist: the distribution from which to garner the + probabilities + :type prob_dist: ProbDist + :param samples: the complete set of samples + :type samples: sequence of any + :param store_logs: whether to store the probabilities as logarithms + :type store_logs: bool + """ + self._samples = samples + self._sample_dict = {samples[i]: i for i in range(len(samples))} + self._data = array.array("d", [0.0]) * len(samples) + for i in range(len(samples)): + if store_logs: + self._data[i] = prob_dist.logprob(samples[i]) + else: + self._data[i] = prob_dist.prob(samples[i]) + self._logs = store_logs + + def max(self): + # inherit documentation + return max((p, v) for (v, p) in self._sample_dict.items())[1] + + def samples(self): + # inherit documentation + return self._samples + + def prob(self, sample): + # inherit documentation + i = self._sample_dict.get(sample) + if i is None: + return 0.0 + return 2 ** (self._data[i]) if self._logs else self._data[i] + + def logprob(self, sample): + # inherit documentation + i = self._sample_dict.get(sample) + if i is None: + return float("-inf") + return self._data[i] if self._logs else math.log(self._data[i], 2) + + def update(self, sample, prob, log=True): + """ + Update the probability for the given sample. This may cause the object + to stop being the valid probability distribution - the user must + ensure that they update the sample probabilities such that all samples + have probabilities between 0 and 1 and that all probabilities sum to + one. + + :param sample: the sample for which to update the probability + :type sample: any + :param prob: the new probability + :type prob: float + :param log: is the probability already logged + :type log: bool + """ + i = self._sample_dict.get(sample) + assert i is not None + if self._logs: + self._data[i] = prob if log else math.log(prob, 2) + else: + self._data[i] = 2 ** (prob) if log else prob + + +##///////////////////////////////////////////////////// +## Kneser-Ney Probability Distribution +##////////////////////////////////////////////////////// + +# This method for calculating probabilities was introduced in 1995 by Reinhard +# Kneser and Hermann Ney. It was meant to improve the accuracy of language +# models that use backing-off to deal with sparse data. The authors propose two +# ways of doing so: a marginal distribution constraint on the back-off +# distribution and a leave-one-out distribution. For a start, the first one is +# implemented as a class below. +# +# The idea behind a back-off n-gram model is that we have a series of +# frequency distributions for our n-grams so that in case we have not seen a +# given n-gram during training (and as a result have a 0 probability for it) we +# can 'back off' (hence the name!) and try testing whether we've seen the +# n-1-gram part of the n-gram in training. +# +# The novelty of Kneser and Ney's approach was that they decided to fiddle +# around with the way this latter, backed off probability was being calculated +# whereas their peers seemed to focus on the primary probability. +# +# The implementation below uses one of the techniques described in their paper +# titled "Improved backing-off for n-gram language modeling." In the same paper +# another technique is introduced to attempt to smooth the back-off +# distribution as well as the primary one. There is also a much-cited +# modification of this method proposed by Chen and Goodman. 
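+#
+# A minimal usage sketch (illustrative only; the toy corpus and trigrams below
+# are arbitrary).  KneserNeyProbDist, defined below, is trained on a FreqDist
+# of trigrams:
+#
+#     from nltk.util import trigrams
+#
+#     words = "the cat sat on the mat the dog sat on the log".split()
+#     kn = KneserNeyProbDist(FreqDist(trigrams(words)))
+#     kn.prob(("sat", "on", "the"))  # seen trigram: discounted relative count
+#     kn.prob(("on", "the", "dog"))  # unseen trigram: backed-off estimate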
+# +# In order for the implementation of Kneser-Ney to be more efficient, some +# changes have been made to the original algorithm. Namely, the calculation of +# the normalizing function gamma has been significantly simplified and +# combined slightly differently with beta. None of these changes affect the +# nature of the algorithm, but instead aim to cut out unnecessary calculations +# and take advantage of storing and retrieving information in dictionaries +# where possible. + + +class KneserNeyProbDist(ProbDistI): + """ + Kneser-Ney estimate of a probability distribution. This is a version of + back-off that counts how likely an n-gram is provided the n-1-gram had + been seen in training. Extends the ProbDistI interface, requires a trigram + FreqDist instance to train on. Optionally, a different from default discount + value can be specified. The default discount is set to 0.75. + + """ + + def __init__(self, freqdist, bins=None, discount=0.75): + """ + :param freqdist: The trigram frequency distribution upon which to base + the estimation + :type freqdist: FreqDist + :param bins: Included for compatibility with nltk.tag.hmm + :type bins: int or float + :param discount: The discount applied when retrieving counts of + trigrams + :type discount: float (preferred, but can be set to int) + """ + + if not bins: + self._bins = freqdist.B() + else: + self._bins = bins + self._D = discount + + # cache for probability calculation + self._cache = {} + + # internal bigram and trigram frequency distributions + self._bigrams = defaultdict(int) + self._trigrams = freqdist + + # helper dictionaries used to calculate probabilities + self._wordtypes_after = defaultdict(float) + self._trigrams_contain = defaultdict(float) + self._wordtypes_before = defaultdict(float) + for w0, w1, w2 in freqdist: + self._bigrams[(w0, w1)] += freqdist[(w0, w1, w2)] + self._wordtypes_after[(w0, w1)] += 1 + self._trigrams_contain[w1] += 1 + self._wordtypes_before[(w1, w2)] += 1 + + def prob(self, trigram): + # sample must be a triple + if len(trigram) != 3: + raise ValueError("Expected an iterable with 3 members.") + trigram = tuple(trigram) + w0, w1, w2 = trigram + + if trigram in self._cache: + return self._cache[trigram] + else: + # if the sample trigram was seen during training + if trigram in self._trigrams: + prob = (self._trigrams[trigram] - self.discount()) / self._bigrams[ + (w0, w1) + ] + + # else if the 'rougher' environment was seen during training + elif (w0, w1) in self._bigrams and (w1, w2) in self._wordtypes_before: + aftr = self._wordtypes_after[(w0, w1)] + bfr = self._wordtypes_before[(w1, w2)] + + # the probability left over from alphas + leftover_prob = (aftr * self.discount()) / self._bigrams[(w0, w1)] + + # the beta (including normalization) + beta = bfr / (self._trigrams_contain[w1] - aftr) + + prob = leftover_prob * beta + + # else the sample was completely unseen during training + else: + prob = 0.0 + + self._cache[trigram] = prob + return prob + + def discount(self): + """ + Return the value by which counts are discounted. By default set to 0.75. + + :rtype: float + """ + return self._D + + def set_discount(self, discount): + """ + Set the value by which counts are discounted to the value of discount. 
+ + :param discount: the new value to discount counts by + :type discount: float (preferred, but int possible) + :rtype: None + """ + self._D = discount + + def samples(self): + return self._trigrams.keys() + + def max(self): + return self._trigrams.max() + + def __repr__(self): + """ + Return a string representation of this ProbDist + + :rtype: str + """ + return f">> from nltk.probability import ConditionalFreqDist + >>> from nltk.tokenize import word_tokenize + >>> sent = "the the the dog dog some other words that we do not care about" + >>> cfdist = ConditionalFreqDist() + >>> for word in word_tokenize(sent): + ... condition = len(word) + ... cfdist[condition][word] += 1 + + An equivalent way to do this is with the initializer: + + >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent)) + + The frequency distribution for each condition is accessed using + the indexing operator: + + >>> cfdist[3] + FreqDist({'the': 3, 'dog': 2, 'not': 1}) + >>> cfdist[3].freq('the') + 0.5 + >>> cfdist[3]['dog'] + 2 + + When the indexing operator is used to access the frequency + distribution for a condition that has not been accessed before, + ``ConditionalFreqDist`` creates a new empty FreqDist for that + condition. + + """ + + def __init__(self, cond_samples=None): + """ + Construct a new empty conditional frequency distribution. In + particular, the count for every sample, under every condition, + is zero. + + :param cond_samples: The samples to initialize the conditional + frequency distribution with + :type cond_samples: Sequence of (condition, sample) tuples + """ + defaultdict.__init__(self, FreqDist) + + if cond_samples: + for (cond, sample) in cond_samples: + self[cond][sample] += 1 + + def __reduce__(self): + kv_pairs = ((cond, self[cond]) for cond in self.conditions()) + return (self.__class__, (), None, None, kv_pairs) + + def conditions(self): + """ + Return a list of the conditions that have been accessed for + this ``ConditionalFreqDist``. Use the indexing operator to + access the frequency distribution for a given condition. + Note that the frequency distributions for some conditions + may contain zero sample outcomes. + + :rtype: list + """ + return list(self.keys()) + + def N(self): + """ + Return the total number of sample outcomes that have been + recorded by this ``ConditionalFreqDist``. + + :rtype: int + """ + return sum(fdist.N() for fdist in self.values()) + + def plot( + self, + *args, + samples=None, + title="", + cumulative=False, + percents=False, + conditions=None, + show=True, + **kwargs, + ): + """ + Plot the given samples from the conditional frequency distribution. + For a cumulative plot, specify cumulative=True. Additional ``*args`` and + ``**kwargs`` are passed to matplotlib's plot function. + (Requires Matplotlib to be installed.) + + :param samples: The samples to plot + :type samples: list + :param title: The title for the graph + :type title: str + :param cumulative: Whether the plot is cumulative. (default = False) + :type cumulative: bool + :param percents: Whether the plot uses percents instead of counts. (default = False) + :type percents: bool + :param conditions: The conditions to plot (default is all) + :type conditions: list + :param show: Whether to show the plot, or only return the ax. + :type show: bool + """ + try: + import matplotlib.pyplot as plt # import statement fix + except ImportError as e: + raise ValueError( + "The plot function requires matplotlib to be installed." 
+ "See https://matplotlib.org/" + ) from e + + if not conditions: + conditions = self.conditions() + else: + conditions = [c for c in conditions if c in self] + if not samples: + samples = sorted({v for c in conditions for v in self[c]}) + if "linewidth" not in kwargs: + kwargs["linewidth"] = 2 + ax = plt.gca() + if conditions: + freqs = [] + for condition in conditions: + if cumulative: + # freqs should be a list of list where each sub list will be a frequency of a condition + freq = list(self[condition]._cumulative_frequencies(samples)) + else: + freq = [self[condition][sample] for sample in samples] + + if percents: + freq = [f / self[condition].N() * 100 for f in freq] + + freqs.append(freq) + + if cumulative: + ylabel = "Cumulative " + legend_loc = "lower right" + else: + ylabel = "" + legend_loc = "upper right" + + if percents: + ylabel += "Percents" + else: + ylabel += "Counts" + + i = 0 + for freq in freqs: + kwargs["label"] = conditions[i] # label for each condition + i += 1 + ax.plot(freq, *args, **kwargs) + ax.legend(loc=legend_loc) + ax.grid(True, color="silver") + ax.set_xticks(range(len(samples))) + ax.set_xticklabels([str(s) for s in samples], rotation=90) + if title: + ax.set_title(title) + ax.set_xlabel("Samples") + ax.set_ylabel(ylabel) + + if show: + plt.show() + + return ax + + def tabulate(self, *args, **kwargs): + """ + Tabulate the given samples from the conditional frequency distribution. + + :param samples: The samples to plot + :type samples: list + :param conditions: The conditions to plot (default is all) + :type conditions: list + :param cumulative: A flag to specify whether the freqs are cumulative (default = False) + :type title: bool + """ + + cumulative = _get_kwarg(kwargs, "cumulative", False) + conditions = _get_kwarg(kwargs, "conditions", sorted(self.conditions())) + samples = _get_kwarg( + kwargs, + "samples", + sorted({v for c in conditions if c in self for v in self[c]}), + ) # this computation could be wasted + + width = max(len("%s" % s) for s in samples) + freqs = dict() + for c in conditions: + if cumulative: + freqs[c] = list(self[c]._cumulative_frequencies(samples)) + else: + freqs[c] = [self[c][sample] for sample in samples] + width = max(width, max(len("%d" % f) for f in freqs[c])) + + condition_size = max(len("%s" % c) for c in conditions) + print(" " * condition_size, end=" ") + for s in samples: + print("%*s" % (width, s), end=" ") + print() + for c in conditions: + print("%*s" % (condition_size, c), end=" ") + for f in freqs[c]: + print("%*d" % (width, f), end=" ") + print() + + # Mathematical operators + + def __add__(self, other): + """ + Add counts from two ConditionalFreqDists. + """ + if not isinstance(other, ConditionalFreqDist): + return NotImplemented + result = self.copy() + for cond in other.conditions(): + result[cond] += other[cond] + return result + + def __sub__(self, other): + """ + Subtract count, but keep only results with positive counts. + """ + if not isinstance(other, ConditionalFreqDist): + return NotImplemented + result = self.copy() + for cond in other.conditions(): + result[cond] -= other[cond] + if not result[cond]: + del result[cond] + return result + + def __or__(self, other): + """ + Union is the maximum of value in either of the input counters. 
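+
+        The union is taken condition by condition, using the ``FreqDist``
+        union defined above; for example:
+
+        >>> cfd1 = ConditionalFreqDist([(1, 'a'), (1, 'a'), (2, 'b')])
+        >>> cfd2 = ConditionalFreqDist([(1, 'a'), (2, 'b'), (2, 'b')])
+        >>> (cfd1 | cfd2)[2]
+        FreqDist({'b': 2})
+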
+ """ + if not isinstance(other, ConditionalFreqDist): + return NotImplemented + result = self.copy() + for cond in other.conditions(): + result[cond] |= other[cond] + return result + + def __and__(self, other): + """ + Intersection is the minimum of corresponding counts. + """ + if not isinstance(other, ConditionalFreqDist): + return NotImplemented + result = ConditionalFreqDist() + for cond in self.conditions(): + newfreqdist = self[cond] & other[cond] + if newfreqdist: + result[cond] = newfreqdist + return result + + # @total_ordering doesn't work here, since the class inherits from a builtin class + def __le__(self, other): + if not isinstance(other, ConditionalFreqDist): + raise_unorderable_types("<=", self, other) + return set(self.conditions()).issubset(other.conditions()) and all( + self[c] <= other[c] for c in self.conditions() + ) + + def __lt__(self, other): + if not isinstance(other, ConditionalFreqDist): + raise_unorderable_types("<", self, other) + return self <= other and self != other + + def __ge__(self, other): + if not isinstance(other, ConditionalFreqDist): + raise_unorderable_types(">=", self, other) + return other <= self + + def __gt__(self, other): + if not isinstance(other, ConditionalFreqDist): + raise_unorderable_types(">", self, other) + return other < self + + def deepcopy(self): + from copy import deepcopy + + return deepcopy(self) + + copy = deepcopy + + def __repr__(self): + """ + Return a string representation of this ``ConditionalFreqDist``. + + :rtype: str + """ + return "" % len(self) + + +class ConditionalProbDistI(dict, metaclass=ABCMeta): + """ + A collection of probability distributions for a single experiment + run under different conditions. Conditional probability + distributions are used to estimate the likelihood of each sample, + given the condition under which the experiment was run. For + example, a conditional probability distribution could be used to + estimate the probability of each word type in a document, given + the length of the word type. Formally, a conditional probability + distribution can be defined as a function that maps from each + condition to the ``ProbDist`` for the experiment under that + condition. + """ + + @abstractmethod + def __init__(self): + """ + Classes inheriting from ConditionalProbDistI should implement __init__. + """ + + def conditions(self): + """ + Return a list of the conditions that are represented by + this ``ConditionalProbDist``. Use the indexing operator to + access the probability distribution for a given condition. + + :rtype: list + """ + return list(self.keys()) + + def __repr__(self): + """ + Return a string representation of this ``ConditionalProbDist``. + + :rtype: str + """ + return "<%s with %d conditions>" % (type(self).__name__, len(self)) + + +class ConditionalProbDist(ConditionalProbDistI): + """ + A conditional probability distribution modeling the experiments + that were used to generate a conditional frequency distribution. + A ConditionalProbDist is constructed from a + ``ConditionalFreqDist`` and a ``ProbDist`` factory: + + - The ``ConditionalFreqDist`` specifies the frequency + distribution for each condition. + - The ``ProbDist`` factory is a function that takes a + condition's frequency distribution, and returns its + probability distribution. A ``ProbDist`` class's name (such as + ``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify + that class's constructor. 
+ + The first argument to the ``ProbDist`` factory is the frequency + distribution that it should model; and the remaining arguments are + specified by the ``factory_args`` parameter to the + ``ConditionalProbDist`` constructor. For example, the following + code constructs a ``ConditionalProbDist``, where the probability + distribution for each condition is an ``ELEProbDist`` with 10 bins: + + >>> from nltk.corpus import brown + >>> from nltk.probability import ConditionalFreqDist + >>> from nltk.probability import ConditionalProbDist, ELEProbDist + >>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000]) + >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10) + >>> cpdist['passed'].max() + 'VBD' + >>> cpdist['passed'].prob('VBD') #doctest: +ELLIPSIS + 0.423... + + """ + + def __init__(self, cfdist, probdist_factory, *factory_args, **factory_kw_args): + """ + Construct a new conditional probability distribution, based on + the given conditional frequency distribution and ``ProbDist`` + factory. + + :type cfdist: ConditionalFreqDist + :param cfdist: The ``ConditionalFreqDist`` specifying the + frequency distribution for each condition. + :type probdist_factory: class or function + :param probdist_factory: The function or class that maps + a condition's frequency distribution to its probability + distribution. The function is called with the frequency + distribution as its first argument, + ``factory_args`` as its remaining arguments, and + ``factory_kw_args`` as keyword arguments. + :type factory_args: (any) + :param factory_args: Extra arguments for ``probdist_factory``. + These arguments are usually used to specify extra + properties for the probability distributions of individual + conditions, such as the number of bins they contain. + :type factory_kw_args: (any) + :param factory_kw_args: Extra keyword arguments for ``probdist_factory``. + """ + self._probdist_factory = probdist_factory + self._factory_args = factory_args + self._factory_kw_args = factory_kw_args + + for condition in cfdist: + self[condition] = probdist_factory( + cfdist[condition], *factory_args, **factory_kw_args + ) + + def __missing__(self, key): + self[key] = self._probdist_factory( + FreqDist(), *self._factory_args, **self._factory_kw_args + ) + return self[key] + + +class DictionaryConditionalProbDist(ConditionalProbDistI): + """ + An alternative ConditionalProbDist that simply wraps a dictionary of + ProbDists rather than creating these from FreqDists. + """ + + def __init__(self, probdist_dict): + """ + :param probdist_dict: a dictionary containing the probdists indexed + by the conditions + :type probdist_dict: dict any -> probdist + """ + self.update(probdist_dict) + + def __missing__(self, key): + self[key] = DictionaryProbDist() + return self[key] + + +##////////////////////////////////////////////////////// +## Adding in log-space. +##////////////////////////////////////////////////////// + +# If the difference is bigger than this, then just take the bigger one: +_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2) + + +def add_logs(logx, logy): + """ + Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return + *log(x+y)*. Conceptually, this is the same as returning + ``log(2**(logx)+2**(logy))``, but the actual implementation + avoids overflow errors that could result from direct computation. 
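+
+    For example, since log2(0.25) = -2 and log2(0.25 + 0.25) = -1:
+
+    >>> add_logs(-2.0, -2.0)
+    -1.0
+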
+ """ + if logx < logy + _ADD_LOGS_MAX_DIFF: + return logy + if logy < logx + _ADD_LOGS_MAX_DIFF: + return logx + base = min(logx, logy) + return base + math.log(2 ** (logx - base) + 2 ** (logy - base), 2) + + +def sum_logs(logs): + return reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF + + +##////////////////////////////////////////////////////// +## Probabilistic Mix-in +##////////////////////////////////////////////////////// + + +class ProbabilisticMixIn: + """ + A mix-in class to associate probabilities with other classes + (trees, rules, etc.). To use the ``ProbabilisticMixIn`` class, + define a new class that derives from an existing class and from + ProbabilisticMixIn. You will need to define a new constructor for + the new class, which explicitly calls the constructors of both its + parent classes. For example: + + >>> from nltk.probability import ProbabilisticMixIn + >>> class A: + ... def __init__(self, x, y): self.data = (x,y) + ... + >>> class ProbabilisticA(A, ProbabilisticMixIn): + ... def __init__(self, x, y, **prob_kwarg): + ... A.__init__(self, x, y) + ... ProbabilisticMixIn.__init__(self, **prob_kwarg) + + See the documentation for the ProbabilisticMixIn + ``constructor<__init__>`` for information about the arguments it + expects. + + You should generally also redefine the string representation + methods, the comparison methods, and the hashing method. + """ + + def __init__(self, **kwargs): + """ + Initialize this object's probability. This initializer should + be called by subclass constructors. ``prob`` should generally be + the first argument for those constructors. + + :param prob: The probability associated with the object. + :type prob: float + :param logprob: The log of the probability associated with + the object. + :type logprob: float + """ + if "prob" in kwargs: + if "logprob" in kwargs: + raise TypeError("Must specify either prob or logprob " "(not both)") + else: + ProbabilisticMixIn.set_prob(self, kwargs["prob"]) + elif "logprob" in kwargs: + ProbabilisticMixIn.set_logprob(self, kwargs["logprob"]) + else: + self.__prob = self.__logprob = None + + def set_prob(self, prob): + """ + Set the probability associated with this object to ``prob``. + + :param prob: The new probability + :type prob: float + """ + self.__prob = prob + self.__logprob = None + + def set_logprob(self, logprob): + """ + Set the log probability associated with this object to + ``logprob``. I.e., set the probability associated with this + object to ``2**(logprob)``. + + :param logprob: The new log probability + :type logprob: float + """ + self.__logprob = logprob + self.__prob = None + + def prob(self): + """ + Return the probability associated with this object. + + :rtype: float + """ + if self.__prob is None: + if self.__logprob is None: + return None + self.__prob = 2 ** (self.__logprob) + return self.__prob + + def logprob(self): + """ + Return ``log(p)``, where ``p`` is the probability associated + with this object. 
+ + :rtype: float + """ + if self.__logprob is None: + if self.__prob is None: + return None + self.__logprob = math.log(self.__prob, 2) + return self.__logprob + + +class ImmutableProbabilisticMixIn(ProbabilisticMixIn): + def set_prob(self, prob): + raise ValueError("%s is immutable" % self.__class__.__name__) + + def set_logprob(self, prob): + raise ValueError("%s is immutable" % self.__class__.__name__) + + +## Helper function for processing keyword arguments + + +def _get_kwarg(kwargs, key, default): + if key in kwargs: + arg = kwargs[key] + del kwargs[key] + else: + arg = default + return arg + + +##////////////////////////////////////////////////////// +## Demonstration +##////////////////////////////////////////////////////// + + +def _create_rand_fdist(numsamples, numoutcomes): + """ + Create a new frequency distribution, with random samples. The + samples are numbers from 1 to ``numsamples``, and are generated by + summing two numbers, each of which has a uniform distribution. + """ + + fdist = FreqDist() + for x in range(numoutcomes): + y = random.randint(1, (1 + numsamples) // 2) + random.randint( + 0, numsamples // 2 + ) + fdist[y] += 1 + return fdist + + +def _create_sum_pdist(numsamples): + """ + Return the true probability distribution for the experiment + ``_create_rand_fdist(numsamples, x)``. + """ + fdist = FreqDist() + for x in range(1, (1 + numsamples) // 2 + 1): + for y in range(0, numsamples // 2 + 1): + fdist[x + y] += 1 + return MLEProbDist(fdist) + + +def demo(numsamples=6, numoutcomes=500): + """ + A demonstration of frequency distributions and probability + distributions. This demonstration creates three frequency + distributions with, and uses them to sample a random process with + ``numsamples`` samples. Each frequency distribution is sampled + ``numoutcomes`` times. These three frequency distributions are + then used to build six probability distributions. Finally, the + probability estimates of these distributions are compared to the + actual probability of each sample. + + :type numsamples: int + :param numsamples: The number of samples to use in each demo + frequency distributions. + :type numoutcomes: int + :param numoutcomes: The total number of outcomes for each + demo frequency distribution. These outcomes are divided into + ``numsamples`` bins. + :rtype: None + """ + + # Randomly sample a stochastic process three times. + fdist1 = _create_rand_fdist(numsamples, numoutcomes) + fdist2 = _create_rand_fdist(numsamples, numoutcomes) + fdist3 = _create_rand_fdist(numsamples, numoutcomes) + + # Use our samples to create probability distributions. + pdists = [ + MLEProbDist(fdist1), + LidstoneProbDist(fdist1, 0.5, numsamples), + HeldoutProbDist(fdist1, fdist2, numsamples), + HeldoutProbDist(fdist2, fdist1, numsamples), + CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples), + SimpleGoodTuringProbDist(fdist1), + SimpleGoodTuringProbDist(fdist1, 7), + _create_sum_pdist(numsamples), + ] + + # Find the probability of each sample. + vals = [] + for n in range(1, numsamples + 1): + vals.append(tuple([n, fdist1.freq(n)] + [pdist.prob(n) for pdist in pdists])) + + # Print the results in a formatted table. 
+ print( + "%d samples (1-%d); %d outcomes were sampled for each FreqDist" + % (numsamples, numsamples, numoutcomes) + ) + print("=" * 9 * (len(pdists) + 2)) + FORMATSTR = " FreqDist " + "%8s " * (len(pdists) - 1) + "| Actual" + print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1])) + print("-" * 9 * (len(pdists) + 2)) + FORMATSTR = "%3d %8.6f " + "%8.6f " * (len(pdists) - 1) + "| %8.6f" + for val in vals: + print(FORMATSTR % val) + + # Print the totals for each column (should all be 1.0) + zvals = list(zip(*vals)) + sums = [sum(val) for val in zvals[1:]] + print("-" * 9 * (len(pdists) + 2)) + FORMATSTR = "Total " + "%8.6f " * (len(pdists)) + "| %8.6f" + print(FORMATSTR % tuple(sums)) + print("=" * 9 * (len(pdists) + 2)) + + # Display the distributions themselves, if they're short enough. + if len("%s" % fdist1) < 70: + print(" fdist1: %s" % fdist1) + print(" fdist2: %s" % fdist2) + print(" fdist3: %s" % fdist3) + print() + + print("Generating:") + for pdist in pdists: + fdist = FreqDist(pdist.generate() for i in range(5000)) + print("{:>20} {}".format(pdist.__class__.__name__[:20], ("%s" % fdist)[:55])) + print() + + +def gt_demo(): + from nltk import corpus + + emma_words = corpus.gutenberg.words("austen-emma.txt") + fd = FreqDist(emma_words) + sgt = SimpleGoodTuringProbDist(fd) + print("{:>18} {:>8} {:>14}".format("word", "frequency", "SimpleGoodTuring")) + fd_keys_sorted = ( + key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True) + ) + for key in fd_keys_sorted: + print("%18s %8d %14e" % (key, fd[key], sgt.prob(key))) + + +if __name__ == "__main__": + demo(6, 10) + demo(5, 5000) + gt_demo() + +__all__ = [ + "ConditionalFreqDist", + "ConditionalProbDist", + "ConditionalProbDistI", + "CrossValidationProbDist", + "DictionaryConditionalProbDist", + "DictionaryProbDist", + "ELEProbDist", + "FreqDist", + "SimpleGoodTuringProbDist", + "HeldoutProbDist", + "ImmutableProbabilisticMixIn", + "LaplaceProbDist", + "LidstoneProbDist", + "MLEProbDist", + "MutableProbDist", + "KneserNeyProbDist", + "ProbDistI", + "ProbabilisticMixIn", + "UniformProbDist", + "WittenBellProbDist", + "add_logs", + "log_likelihood", + "sum_logs", + "entropy", +] diff --git a/lib/python3.10/site-packages/nltk/test/bleu.doctest b/lib/python3.10/site-packages/nltk/test/bleu.doctest new file mode 100644 index 0000000000000000000000000000000000000000..d7e6e41f5a17e6f048a7264c72f657615e8567cc --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/bleu.doctest @@ -0,0 +1,29 @@ +========== +BLEU tests +========== + +>>> from nltk.translate import bleu + +If the candidate has no alignment to any of the references, the BLEU score is 0. + +>>> bleu( +... ['The candidate has no alignment to any of the references'.split()], +... 'John loves Mary'.split(), +... (1,), +... ) +0 + +This is an implementation of the smoothing techniques +for segment-level BLEU scores that was presented in +Boxing Chen and Collin Cherry (2014) A Systematic Comparison of +Smoothing Techniques for Sentence-Level BLEU. In WMT14. +http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf +>>> from nltk.translate.bleu_score import sentence_bleu,SmoothingFunction + + +>>> sentence_bleu( +... ['It is a place of quiet contemplation .'.split()], +... 'It is .'.split(), +... smoothing_function=SmoothingFunction().method4, +... )*100 +4.4267... 
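+
+As a further sanity check (a minimal sketch; the sentence below is made up
+for illustration), a candidate that is identical to its single reference
+gets every n-gram precision equal to 1 and a brevity penalty of 1, so the
+default 4-gram BLEU score is 1.0:
+
+>>> reference = 'It is a place of quiet contemplation .'.split()
+>>> sentence_bleu([reference], reference)
+1.0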
diff --git a/lib/python3.10/site-packages/nltk/test/ccg.doctest b/lib/python3.10/site-packages/nltk/test/ccg.doctest new file mode 100644 index 0000000000000000000000000000000000000000..9c1e642c5e32ee0afe4c3d689d6461807a1db738 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/ccg.doctest @@ -0,0 +1,376 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +============================== +Combinatory Categorial Grammar +============================== + +Relative Clauses +---------------- + + >>> from nltk.ccg import chart, lexicon + +Construct a lexicon: + + >>> lex = lexicon.fromstring(''' + ... :- S, NP, N, VP + ... + ... Det :: NP/N + ... Pro :: NP + ... Modal :: S\\NP/VP + ... + ... TV :: VP/NP + ... DTV :: TV/NP + ... + ... the => Det + ... + ... that => Det + ... that => NP + ... + ... I => Pro + ... you => Pro + ... we => Pro + ... + ... chef => N + ... cake => N + ... children => N + ... dough => N + ... + ... will => Modal + ... should => Modal + ... might => Modal + ... must => Modal + ... + ... and => var\\.,var/.,var + ... + ... to => VP[to]/VP + ... + ... without => (VP\\VP)/VP[ing] + ... + ... be => TV + ... cook => TV + ... eat => TV + ... + ... cooking => VP[ing]/NP + ... + ... give => DTV + ... + ... is => (S\\NP)/NP + ... prefer => (S\\NP)/NP + ... + ... which => (N\\N)/(S/NP) + ... + ... persuade => (VP/VP[to])/NP + ... ''') + + >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet) + >>> for parse in parser.parse("you prefer that cake".split()): + ... chart.printCCGDerivation(parse) + ... break + ... + you prefer that cake + NP ((S\NP)/NP) (NP/N) N + --------------> + NP + ---------------------------> + (S\NP) + --------------------------------< + S + + >>> for parse in parser.parse("that is the cake which you prefer".split()): + ... chart.printCCGDerivation(parse) + ... break + ... + that is the cake which you prefer + NP ((S\NP)/NP) (NP/N) N ((N\N)/(S/NP)) NP ((S\NP)/NP) + ----->T + (S/(S\NP)) + ------------------>B + (S/NP) + ----------------------------------> + (N\N) + ----------------------------------------< + N + ------------------------------------------------> + NP + -------------------------------------------------------------> + (S\NP) + -------------------------------------------------------------------< + S + + +Some other sentences to try: +"that is the cake which we will persuade the chef to cook" +"that is the cake which we will persuade the chef to give the children" + + >>> sent = "that is the dough which you will eat without cooking".split() + >>> nosub_parser = chart.CCGChartParser(lex, chart.ApplicationRuleSet + + ... chart.CompositionRuleSet + chart.TypeRaiseRuleSet) + +Without Substitution (no output) + + >>> for parse in nosub_parser.parse(sent): + ... chart.printCCGDerivation(parse) + +With Substitution: + + >>> for parse in parser.parse(sent): + ... chart.printCCGDerivation(parse) + ... break + ... 
+ that is the dough which you will eat without cooking + NP ((S\NP)/NP) (NP/N) N ((N\N)/(S/NP)) NP ((S\NP)/VP) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP) + ----->T + (S/(S\NP)) + ------------------------------------->B + ((VP\VP)/NP) + ----------------------------------------------B + ((S\NP)/NP) + ---------------------------------------------------------------->B + (S/NP) + --------------------------------------------------------------------------------> + (N\N) + ---------------------------------------------------------------------------------------< + N + -----------------------------------------------------------------------------------------------> + NP + ------------------------------------------------------------------------------------------------------------> + (S\NP) + ------------------------------------------------------------------------------------------------------------------< + S + + +Conjunction +----------- + + >>> from nltk.ccg.chart import CCGChartParser, ApplicationRuleSet, CompositionRuleSet + >>> from nltk.ccg.chart import SubstitutionRuleSet, TypeRaiseRuleSet, printCCGDerivation + >>> from nltk.ccg import lexicon + +Lexicons for the tests: + + >>> test1_lex = ''' + ... :- S,N,NP,VP + ... I => NP + ... you => NP + ... will => S\\NP/VP + ... cook => VP/NP + ... which => (N\\N)/(S/NP) + ... and => var\\.,var/.,var + ... might => S\\NP/VP + ... eat => VP/NP + ... the => NP/N + ... mushrooms => N + ... parsnips => N''' + >>> test2_lex = ''' + ... :- N, S, NP, VP + ... articles => N + ... the => NP/N + ... and => var\\.,var/.,var + ... which => (N\\N)/(S/NP) + ... I => NP + ... anyone => NP + ... will => (S/VP)\\NP + ... file => VP/NP + ... without => (VP\\VP)/VP[ing] + ... forget => VP/NP + ... reading => VP[ing]/NP + ... ''' + +Tests handling of conjunctions. +Note that while the two derivations are different, they are semantically equivalent. + + >>> lex = lexicon.fromstring(test1_lex) + >>> parser = CCGChartParser(lex, ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet) + >>> for parse in parser.parse("I will cook and might eat the mushrooms and parsnips".split()): + ... 
printCCGDerivation(parse) + I will cook and might eat the mushrooms and parsnips + NP ((S\NP)/VP) (VP/NP) ((_var0\.,_var0)/.,_var0) ((S\NP)/VP) (VP/NP) (NP/N) N ((_var0\.,_var0)/.,_var0) N + ---------------------->B + ((S\NP)/NP) + ---------------------->B + ((S\NP)/NP) + -------------------------------------------------> + (((S\NP)/NP)\.,((S\NP)/NP)) + -----------------------------------------------------------------------< + ((S\NP)/NP) + -------------------------------------> + (N\.,N) + ------------------------------------------------< + N + --------------------------------------------------------> + NP + -------------------------------------------------------------------------------------------------------------------------------> + (S\NP) + -----------------------------------------------------------------------------------------------------------------------------------< + S + I will cook and might eat the mushrooms and parsnips + NP ((S\NP)/VP) (VP/NP) ((_var0\.,_var0)/.,_var0) ((S\NP)/VP) (VP/NP) (NP/N) N ((_var0\.,_var0)/.,_var0) N + ---------------------->B + ((S\NP)/NP) + ---------------------->B + ((S\NP)/NP) + -------------------------------------------------> + (((S\NP)/NP)\.,((S\NP)/NP)) + -----------------------------------------------------------------------< + ((S\NP)/NP) + ------------------------------------------------------------------------------->B + ((S\NP)/N) + -------------------------------------> + (N\.,N) + ------------------------------------------------< + N + -------------------------------------------------------------------------------------------------------------------------------> + (S\NP) + -----------------------------------------------------------------------------------------------------------------------------------< + S + + +Tests handling subject extraction. +Interesting to point that the two parses are clearly semantically different. + + >>> lex = lexicon.fromstring(test2_lex) + >>> parser = CCGChartParser(lex, ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet) + >>> for parse in parser.parse("articles which I will file and forget without reading".split()): + ... 
printCCGDerivation(parse) + articles which I will file and forget without reading + N ((N\N)/(S/NP)) NP ((S/VP)\NP) (VP/NP) ((_var0\.,_var0)/.,_var0) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP) + -----------------< + (S/VP) + ------------------------------------->B + ((VP\VP)/NP) + ---------------------------------------------- + ((VP/NP)\.,(VP/NP)) + ----------------------------------------------------------------------------------< + (VP/NP) + --------------------------------------------------------------------------------------------------->B + (S/NP) + -------------------------------------------------------------------------------------------------------------------> + (N\N) + -----------------------------------------------------------------------------------------------------------------------------< + N + articles which I will file and forget without reading + N ((N\N)/(S/NP)) NP ((S/VP)\NP) (VP/NP) ((_var0\.,_var0)/.,_var0) (VP/NP) ((VP\VP)/VP['ing']) (VP['ing']/NP) + -----------------< + (S/VP) + ------------------------------------> + ((VP/NP)\.,(VP/NP)) + ---------------------------------------------< + (VP/NP) + ------------------------------------->B + ((VP\VP)/NP) + ----------------------------------------------------------------------------------B + (S/NP) + -------------------------------------------------------------------------------------------------------------------> + (N\N) + -----------------------------------------------------------------------------------------------------------------------------< + N + + +Unicode support +--------------- + +Unicode words are supported. + + >>> from nltk.ccg import chart, lexicon + +Lexicons for the tests: + + >>> lex = lexicon.fromstring(''' + ... :- S, N, NP, PP + ... + ... AdjI :: N\\N + ... AdjD :: N/N + ... AdvD :: S/S + ... AdvI :: S\\S + ... Det :: NP/N + ... PrepNPCompl :: PP/NP + ... PrepNAdjN :: S\\S/N + ... PrepNAdjNP :: S\\S/NP + ... VPNP :: S\\NP/NP + ... VPPP :: S\\NP/PP + ... VPser :: S\\NP/AdjI + ... + ... auto => N + ... bebidas => N + ... cine => N + ... ley => N + ... libro => N + ... ministro => N + ... panadería => N + ... presidente => N + ... super => N + ... + ... el => Det + ... la => Det + ... las => Det + ... un => Det + ... + ... Ana => NP + ... Pablo => NP + ... + ... y => var\\.,var/.,var + ... + ... pero => (S/NP)\\(S/NP)/(S/NP) + ... + ... anunció => VPNP + ... compró => VPNP + ... cree => S\\NP/S[dep] + ... desmintió => VPNP + ... lee => VPNP + ... fueron => VPPP + ... + ... es => VPser + ... + ... interesante => AdjD + ... interesante => AdjI + ... nueva => AdjD + ... nueva => AdjI + ... + ... a => PrepNPCompl + ... en => PrepNAdjN + ... en => PrepNAdjNP + ... + ... ayer => AdvI + ... + ... que => (NP\\NP)/(S/NP) + ... que => S[dep]/S + ... ''') + + >>> parser = chart.CCGChartParser(lex, chart.DefaultRuleSet) + >>> for parse in parser.parse(u"el ministro anunció pero el presidente desmintió la nueva ley".split()): + ... printCCGDerivation(parse) # doctest: +SKIP + ... # it fails on python2.7 because of the unicode problem explained in https://github.com/nltk/nltk/pull/1354 + ... 
break + el ministro anunció pero el presidente desmintió la nueva ley + (NP/N) N ((S\NP)/NP) (((S/NP)\(S/NP))/(S/NP)) (NP/N) N ((S\NP)/NP) (NP/N) (N/N) N + ------------------> + NP + ------------------>T + (S/(S\NP)) + --------------------> + NP + -------------------->T + (S/(S\NP)) + --------------------------------->B + (S/NP) + -----------------------------------------------------------> + ((S/NP)\(S/NP)) + ------------> + N + --------------------> + NP + -------------------- + S diff --git a/lib/python3.10/site-packages/nltk/test/chat80.doctest b/lib/python3.10/site-packages/nltk/test/chat80.doctest new file mode 100644 index 0000000000000000000000000000000000000000..b17a95fb254208823711bb8285c48060a2a6ce3e --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/chat80.doctest @@ -0,0 +1,232 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +======= +Chat-80 +======= + +Chat-80 was a natural language system which allowed the user to +interrogate a Prolog knowledge base in the domain of world +geography. It was developed in the early '80s by Warren and Pereira; see +``_ for a description and +``_ for the source +files. + +The ``chat80`` module contains functions to extract data from the Chat-80 +relation files ('the world database'), and convert then into a format +that can be incorporated in the FOL models of +``nltk.sem.evaluate``. The code assumes that the Prolog +input files are available in the NLTK corpora directory. + +The Chat-80 World Database consists of the following files:: + + world0.pl + rivers.pl + cities.pl + countries.pl + contain.pl + borders.pl + +This module uses a slightly modified version of ``world0.pl``, in which +a set of Prolog rules have been omitted. The modified file is named +``world1.pl``. Currently, the file ``rivers.pl`` is not read in, since +it uses a list rather than a string in the second field. + +Reading Chat-80 Files +===================== + +Chat-80 relations are like tables in a relational database. The +relation acts as the name of the table; the first argument acts as the +'primary key'; and subsequent arguments are further fields in the +table. In general, the name of the table provides a label for a unary +predicate whose extension is all the primary keys. For example, +relations in ``cities.pl`` are of the following form:: + + 'city(athens,greece,1368).' + +Here, ``'athens'`` is the key, and will be mapped to a member of the +unary predicate *city*. + +By analogy with NLTK corpora, ``chat80`` defines a number of 'items' +which correspond to the relations. + + >>> from nltk.sem import chat80 + >>> print(chat80.items) + ('borders', 'circle_of_lat', 'circle_of_long', 'city', ...) + +The fields in the table are mapped to binary predicates. The first +argument of the predicate is the primary key, while the second +argument is the data in the relevant field. Thus, in the above +example, the third field is mapped to the binary predicate +*population_of*, whose extension is a set of pairs such as +``'(athens, 1368)'``. + +An exception to this general framework is required by the relations in +the files ``borders.pl`` and ``contains.pl``. These contain facts of the +following form:: + + 'borders(albania,greece).' + + 'contains0(africa,central_africa).' + +We do not want to form a unary concept out the element in +the first field of these records, and we want the label of the binary +relation just to be ``'border'``/``'contain'`` respectively. 
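+
+To make this concrete, here is a minimal stand-alone sketch (plain string
+handling only, not the ``chat80`` reader itself) showing that a record from
+``borders.pl`` contributes nothing more than a single binary fact:
+
+    >>> clause = 'borders(albania,greece).'
+    >>> rel, args = clause[:-2].split('(')
+    >>> (rel, tuple(args.split(',')))
+    ('borders', ('albania', 'greece'))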
+ +In order to drive the extraction process, we use 'relation metadata bundles' +which are Python dictionaries such as the following:: + + city = {'label': 'city', + 'closures': [], + 'schema': ['city', 'country', 'population'], + 'filename': 'cities.pl'} + +According to this, the file ``city['filename']`` contains a list of +relational tuples (or more accurately, the corresponding strings in +Prolog form) whose predicate symbol is ``city['label']`` and whose +relational schema is ``city['schema']``. The notion of a ``closure`` is +discussed in the next section. + +Concepts +======== +In order to encapsulate the results of the extraction, a class of +``Concept``\ s is introduced. A ``Concept`` object has a number of +attributes, in particular a ``prefLabel``, an arity and ``extension``. + + >>> c1 = chat80.Concept('dog', arity=1, extension=set(['d1', 'd2'])) + >>> print(c1) + Label = 'dog' + Arity = 1 + Extension = ['d1', 'd2'] + + + +The ``extension`` attribute makes it easier to inspect the output of +the extraction. + + >>> schema = ['city', 'country', 'population'] + >>> concepts = chat80.clause2concepts('cities.pl', 'city', schema) + >>> concepts + [Concept('city'), Concept('country_of'), Concept('population_of')] + >>> for c in concepts: + ... print("%s:\n\t%s" % (c.prefLabel, c.extension[:4])) + city: + ['athens', 'bangkok', 'barcelona', 'berlin'] + country_of: + [('athens', 'greece'), ('bangkok', 'thailand'), ('barcelona', 'spain'), ('berlin', 'east_germany')] + population_of: + [('athens', '1368'), ('bangkok', '1178'), ('barcelona', '1280'), ('berlin', '3481')] + +In addition, the ``extension`` can be further +processed: in the case of the ``'border'`` relation, we check that the +relation is **symmetric**, and in the case of the ``'contain'`` +relation, we carry out the **transitive closure**. The closure +properties associated with a concept is indicated in the relation +metadata, as indicated earlier. + + >>> borders = set([('a1', 'a2'), ('a2', 'a3')]) + >>> c2 = chat80.Concept('borders', arity=2, extension=borders) + >>> print(c2) + Label = 'borders' + Arity = 2 + Extension = [('a1', 'a2'), ('a2', 'a3')] + >>> c3 = chat80.Concept('borders', arity=2, closures=['symmetric'], extension=borders) + >>> c3.close() + >>> print(c3) + Label = 'borders' + Arity = 2 + Extension = [('a1', 'a2'), ('a2', 'a1'), ('a2', 'a3'), ('a3', 'a2')] + +The ``extension`` of a ``Concept`` object is then incorporated into a +``Valuation`` object. + +Persistence +=========== +The functions ``val_dump`` and ``val_load`` are provided to allow a +valuation to be stored in a persistent database and re-loaded, rather +than having to be re-computed each time. + +Individuals and Lexical Items +============================= +As well as deriving relations from the Chat-80 data, we also create a +set of individual constants, one for each entity in the domain. The +individual constants are string-identical to the entities. For +example, given a data item such as ``'zloty'``, we add to the valuation +a pair ``('zloty', 'zloty')``. In order to parse English sentences that +refer to these entities, we also create a lexical item such as the +following for each individual constant:: + + PropN[num=sg, sem=<\P.(P zloty)>] -> 'Zloty' + +The set of rules is written to the file ``chat_pnames.fcfg`` in the +current directory. + +SQL Query +========= + +The ``city`` relation is also available in RDB form and can be queried +using SQL statements. 
+ + >>> import nltk + >>> q = "SELECT City, Population FROM city_table WHERE Country = 'china' and Population > 1000" + >>> for answer in chat80.sql_query('corpora/city_database/city.db', q): + ... print("%-10s %4s" % answer) + canton 1496 + chungking 1100 + mukden 1551 + peking 2031 + shanghai 5407 + tientsin 1795 + +The (deliberately naive) grammar ``sql.fcfg`` translates from English +to SQL: + + >>> nltk.data.show_cfg('grammars/book_grammars/sql0.fcfg') + % start S + S[SEM=(?np + WHERE + ?vp)] -> NP[SEM=?np] VP[SEM=?vp] + VP[SEM=(?v + ?pp)] -> IV[SEM=?v] PP[SEM=?pp] + VP[SEM=(?v + ?ap)] -> IV[SEM=?v] AP[SEM=?ap] + NP[SEM=(?det + ?n)] -> Det[SEM=?det] N[SEM=?n] + PP[SEM=(?p + ?np)] -> P[SEM=?p] NP[SEM=?np] + AP[SEM=?pp] -> A[SEM=?a] PP[SEM=?pp] + NP[SEM='Country="greece"'] -> 'Greece' + NP[SEM='Country="china"'] -> 'China' + Det[SEM='SELECT'] -> 'Which' | 'What' + N[SEM='City FROM city_table'] -> 'cities' + IV[SEM=''] -> 'are' + A[SEM=''] -> 'located' + P[SEM=''] -> 'in' + +Given this grammar, we can express, and then execute, queries in English. + + >>> cp = nltk.parse.load_parser('grammars/book_grammars/sql0.fcfg') + >>> query = 'What cities are in China' + >>> for tree in cp.parse(query.split()): + ... answer = tree.label()['SEM'] + ... q = " ".join(answer) + ... print(q) + ... + SELECT City FROM city_table WHERE Country="china" + + >>> rows = chat80.sql_query('corpora/city_database/city.db', q) + >>> for r in rows: print("%s" % r, end=' ') + canton chungking dairen harbin kowloon mukden peking shanghai sian tientsin + + +Using Valuations +----------------- + +In order to convert such an extension into a valuation, we use the +``make_valuation()`` method; setting ``read=True`` creates and returns +a new ``Valuation`` object which contains the results. + + >>> val = chat80.make_valuation(concepts, read=True) + >>> 'calcutta' in val['city'] + True + >>> [town for (town, country) in val['country_of'] if country == 'india'] + ['bombay', 'calcutta', 'delhi', 'hyderabad', 'madras'] + >>> dom = val.domain + >>> g = nltk.sem.Assignment(dom) + >>> m = nltk.sem.Model(dom, val) + >>> m.evaluate(r'population_of(jakarta, 533)', g) + True diff --git a/lib/python3.10/site-packages/nltk/test/childes_fixt.py b/lib/python3.10/site-packages/nltk/test/childes_fixt.py new file mode 100644 index 0000000000000000000000000000000000000000..0c3b84fd5f089d55e30f10f8233ec7ce2cb5f1b7 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/childes_fixt.py @@ -0,0 +1,13 @@ +def setup_module(): + import pytest + + import nltk.data + + try: + nltk.data.find("corpora/childes/data-xml/Eng-USA-MOR/") + except LookupError as e: + pytest.skip( + "The CHILDES corpus is not found. 
" + "It should be manually downloaded and saved/unpacked " + "to [NLTK_Data_Dir]/corpora/childes/" + ) diff --git a/lib/python3.10/site-packages/nltk/test/classify_fixt.py b/lib/python3.10/site-packages/nltk/test/classify_fixt.py new file mode 100644 index 0000000000000000000000000000000000000000..17b037281aff04a7d9a1faf56ccd9b055e1a1071 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/classify_fixt.py @@ -0,0 +1,5 @@ +# most of classify.doctest requires numpy +def setup_module(): + import pytest + + pytest.importorskip("numpy") diff --git a/lib/python3.10/site-packages/nltk/test/concordance.doctest b/lib/python3.10/site-packages/nltk/test/concordance.doctest new file mode 100644 index 0000000000000000000000000000000000000000..8dbd81a01818b99681be51491bd3eaadd0c86e38 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/concordance.doctest @@ -0,0 +1,75 @@ +.. Copyright (C) 2001-2016 NLTK Project +.. For license information, see LICENSE.TXT + +================================== +Concordance Example +================================== + +A concordance view shows us every occurrence of a given +word, together with some context. Here we look up the word monstrous +in Moby Dick by entering text1 followed by a period, then the term +concordance, and then placing "monstrous" in parentheses: + +>>> from nltk.corpus import gutenberg +>>> from nltk.text import Text +>>> corpus = gutenberg.words('melville-moby_dick.txt') +>>> text = Text(corpus) + +>>> text.concordance("monstrous") +Displaying 11 of 11 matches: +ong the former , one was of a most monstrous size . ... This came towards us , +ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r +ll over with a heathenish array of monstrous clubs and spears . Some were thick +d as you gazed , and wondered what monstrous cannibal and savage could ever hav +that has survived the flood ; most monstrous and most mountainous ! That Himmal +they might scout at Moby Dick as a monstrous fable , or still worse and more de +th of Radney .'" CHAPTER 55 Of the Monstrous Pictures of Whales . I shall ere l +ing Scenes . In connexion with the monstrous pictures of whales , I am strongly +ere to enter upon those still more monstrous stories of them which are to be fo +ght have been rummaged out of this monstrous cabinet there is no telling . But +of Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead u + +>>> text.concordance("monstrous") +Displaying 11 of 11 matches: +ong the former , one was of a most monstrous size . ... This came towards us , +ON OF THE PSALMS . " Touching that monstrous bulk of the whale or ork we have r +ll over with a heathenish array of monstrous clubs and spears . Some were thick +... + +We can also search for a multi-word phrase by passing a list of strings: + +>>> text.concordance(["monstrous", "size"]) +Displaying 2 of 2 matches: +the former , one was of a most monstrous size . ... This came towards us , op +Whale - Bones ; for Whales of a monstrous size are oftentimes cast up dead upo + +================================= +Concordance List +================================= + +Often we need to store the results of concordance for further usage. 
+To do so, call the concordance function with the stdout argument set +to false: + +>>> from nltk.corpus import gutenberg +>>> from nltk.text import Text +>>> corpus = gutenberg.words('melville-moby_dick.txt') +>>> text = Text(corpus) +>>> con_list = text.concordance_list("monstrous") +>>> con_list[2].line +'ll over with a heathenish array of monstrous clubs and spears . Some were thick' +>>> len(con_list) +11 + +================================= +Patching Issue #2088 +================================= + +Patching https://github.com/nltk/nltk/issues/2088 +The left slice of the left context should be clip to 0 if the `i-context` < 0. + +>>> from nltk import Text, word_tokenize +>>> jane_eyre = 'Chapter 1\nTHERE was no possibility of taking a walk that day. We had been wandering, indeed, in the leafless shrubbery an hour in the morning; but since dinner (Mrs. Reed, when there was no company, dined early) the cold winter wind had brought with it clouds so sombre, and a rain so penetrating, that further outdoor exercise was now out of the question.' +>>> text = Text(word_tokenize(jane_eyre)) +>>> text.concordance_list('taking')[0].left +['Chapter', '1', 'THERE', 'was', 'no', 'possibility', 'of'] diff --git a/lib/python3.10/site-packages/nltk/test/corpus.doctest b/lib/python3.10/site-packages/nltk/test/corpus.doctest new file mode 100644 index 0000000000000000000000000000000000000000..4e650d850bbe5327266f159db637f563867ef2b3 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/corpus.doctest @@ -0,0 +1,2196 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +================ + Corpus Readers +================ + +The `nltk.corpus` package defines a collection of *corpus reader* +classes, which can be used to access the contents of a diverse set of +corpora. The list of available corpora is given at: + +https://www.nltk.org/nltk_data/ + +Each corpus reader class is specialized to handle a specific +corpus format. In addition, the `nltk.corpus` package automatically +creates a set of corpus reader instances that can be used to access +the corpora in the NLTK data package. +Section `Corpus Reader Objects`_ ("Corpus Reader Objects") describes +the corpus reader instances that can be used to read the corpora in +the NLTK data package. Section `Corpus Reader Classes`_ ("Corpus +Reader Classes") describes the corpus reader classes themselves, and +discusses the issues involved in creating new corpus reader objects +and new corpus reader classes. Section `Regression Tests`_ +("Regression Tests") contains regression tests for the corpus readers +and associated functions and classes. + +.. contents:: **Table of Contents** + :depth: 4 + :backlinks: none + +--------------------- +Corpus Reader Objects +--------------------- + +Overview +======== + +NLTK includes a diverse set of corpora which can be +read using the ``nltk.corpus`` package. Each corpus is accessed by +means of a "corpus reader" object from ``nltk.corpus``: + + >>> import nltk.corpus + >>> # The Brown corpus: + >>> print(str(nltk.corpus.brown).replace('\\\\','/')) + + >>> # The Penn Treebank Corpus: + >>> print(str(nltk.corpus.treebank).replace('\\\\','/')) + + >>> # The Name Genders Corpus: + >>> print(str(nltk.corpus.names).replace('\\\\','/')) + + >>> # The Inaugural Address Corpus: + >>> print(str(nltk.corpus.inaugural).replace('\\\\','/')) + + +Most corpora consist of a set of files, each containing a document (or +other pieces of text). 
A list of identifiers for these files is +accessed via the ``fileids()`` method of the corpus reader: + + >>> nltk.corpus.treebank.fileids() + ['wsj_0001.mrg', 'wsj_0002.mrg', 'wsj_0003.mrg', 'wsj_0004.mrg', ...] + >>> nltk.corpus.inaugural.fileids() + ['1789-Washington.txt', '1793-Washington.txt', '1797-Adams.txt', ...] + +Each corpus reader provides a variety of methods to read data from the +corpus, depending on the format of the corpus. For example, plaintext +corpora support methods to read the corpus as raw text, a list of +words, a list of sentences, or a list of paragraphs. + + >>> from nltk.corpus import inaugural + >>> inaugural.raw('1789-Washington.txt') + 'Fellow-Citizens of the Senate ...' + >>> inaugural.words('1789-Washington.txt') + ['Fellow', '-', 'Citizens', 'of', 'the', ...] + >>> inaugural.sents('1789-Washington.txt') + [['Fellow', '-', 'Citizens'...], ['Among', 'the', 'vicissitudes'...]...] + >>> inaugural.paras('1789-Washington.txt') + [[['Fellow', '-', 'Citizens'...]], + [['Among', 'the', 'vicissitudes'...], + ['On', 'the', 'one', 'hand', ',', 'I'...]...]...] + +Each of these reader methods may be given a single document's item +name or a list of document item names. When given a list of document +item names, the reader methods will concatenate together the contents +of the individual documents. + + >>> l1 = len(inaugural.words('1789-Washington.txt')) + >>> l2 = len(inaugural.words('1793-Washington.txt')) + >>> l3 = len(inaugural.words(['1789-Washington.txt', '1793-Washington.txt'])) + >>> print('%s+%s == %s' % (l1, l2, l3)) + 1538+147 == 1685 + +If the reader methods are called without any arguments, they will +typically load all documents in the corpus. + + >>> len(inaugural.words()) + 152901 + +If a corpus contains a README file, it can be accessed with a ``readme()`` method: + + >>> inaugural.readme()[:32] + 'C-Span Inaugural Address Corpus\n' + +Plaintext Corpora +================= + +Here are the first few words from each of NLTK's plaintext corpora: + + >>> nltk.corpus.abc.words() + ['PM', 'denies', 'knowledge', 'of', 'AWB', ...] + >>> nltk.corpus.genesis.words() + ['In', 'the', 'beginning', 'God', 'created', ...] + >>> nltk.corpus.gutenberg.words(fileids='austen-emma.txt') + ['[', 'Emma', 'by', 'Jane', 'Austen', '1816', ...] + >>> nltk.corpus.inaugural.words() + ['Fellow', '-', 'Citizens', 'of', 'the', ...] + >>> nltk.corpus.state_union.words() + ['PRESIDENT', 'HARRY', 'S', '.', 'TRUMAN', "'", ...] + >>> nltk.corpus.webtext.words() + ['Cookie', 'Manager', ':', '"', 'Don', "'", 't', ...] + +Tagged Corpora +============== + +In addition to the plaintext corpora, NLTK's data package also +contains a wide variety of annotated corpora. For example, the Brown +Corpus is annotated with part-of-speech tags, and defines additional +methods ``tagged_*()`` which words as `(word,tag)` tuples, rather +than just bare word strings. + + >>> from nltk.corpus import brown + >>> print(brown.words()) + ['The', 'Fulton', 'County', 'Grand', 'Jury', ...] + >>> print(brown.tagged_words()) + [('The', 'AT'), ('Fulton', 'NP-TL'), ...] + >>> print(brown.sents()) + [['The', 'Fulton', 'County'...], ['The', 'jury', 'further'...], ...] + >>> print(brown.tagged_sents()) + [[('The', 'AT'), ('Fulton', 'NP-TL')...], + [('The', 'AT'), ('jury', 'NN'), ('further', 'RBR')...]...] 
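+
+Since the tagged methods return ``(word, tag)`` tuples, they combine
+naturally with ``FreqDist``. Before moving on to paragraph-level access,
+here is a quick sketch that tallies the most common tags in the news
+category (the counts shown are indicative only, so the example is skipped
+when the doctests are run):
+
+    >>> from nltk import FreqDist
+    >>> tag_fd = FreqDist(tag for (word, tag) in brown.tagged_words(categories='news'))
+    >>> tag_fd.most_common(3) # doctest: +SKIP
+    [('NN', 13162), ('IN', 10616), ('AT', 8893)]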
+ >>> print(brown.paras(categories='reviews')) + [[['It', 'is', 'not', 'news', 'that', 'Nathan', 'Milstein'...], + ['Certainly', 'not', 'in', 'Orchestra', 'Hall', 'where'...]], + [['There', 'was', 'about', 'that', 'song', 'something', ...], + ['Not', 'the', 'noblest', 'performance', 'we', 'have', ...], ...], ...] + >>> print(brown.tagged_paras(categories='reviews')) + [[[('It', 'PPS'), ('is', 'BEZ'), ('not', '*'), ...], + [('Certainly', 'RB'), ('not', '*'), ('in', 'IN'), ...]], + [[('There', 'EX'), ('was', 'BEDZ'), ('about', 'IN'), ...], + [('Not', '*'), ('the', 'AT'), ('noblest', 'JJT'), ...], ...], ...] + +Similarly, the Indian Language POS-Tagged Corpus includes samples of +Indian text annotated with part-of-speech tags: + + >>> from nltk.corpus import indian + >>> print(indian.words()) # doctest: +SKIP + ['\xe0\xa6\xae\xe0\xa6\xb9\xe0\xa6\xbf\...', + '\xe0\xa6\xb8\xe0\xa6\xa8\xe0\xa7\x8d\xe0...', ...] + >>> print(indian.tagged_words()) # doctest: +SKIP + [('\xe0\xa6\xae\xe0\xa6\xb9\xe0\xa6\xbf...', 'NN'), + ('\xe0\xa6\xb8\xe0\xa6\xa8\xe0\xa7\x8d\xe0...', 'NN'), ...] + +Several tagged corpora support access to a simplified, universal tagset, e.g. where all nouns +tags are collapsed to a single category ``NOUN``: + + >>> print(brown.tagged_sents(tagset='universal')) + [[('The', 'DET'), ('Fulton', 'NOUN'), ('County', 'NOUN'), ('Grand', 'ADJ'), ('Jury', 'NOUN'), ...], + [('The', 'DET'), ('jury', 'NOUN'), ('further', 'ADV'), ('said', 'VERB'), ('in', 'ADP'), ...]...] + >>> from nltk.corpus import conll2000, switchboard + >>> print(conll2000.tagged_words(tagset='universal')) + [('Confidence', 'NOUN'), ('in', 'ADP'), ...] + +Use ``nltk.app.pos_concordance()`` to access a GUI for searching tagged corpora. + +Chunked Corpora +=============== + +The CoNLL corpora also provide chunk structures, which are encoded as +flat trees. The CoNLL 2000 Corpus includes phrasal chunks; and the +CoNLL 2002 Corpus includes named entity chunks. + + >>> from nltk.corpus import conll2000, conll2002 + >>> print(conll2000.sents()) + [['Confidence', 'in', 'the', 'pound', 'is', 'widely', ...], + ['Chancellor', 'of', 'the', 'Exchequer', ...], ...] + >>> for tree in conll2000.chunked_sents()[:2]: + ... print(tree) + (S + (NP Confidence/NN) + (PP in/IN) + (NP the/DT pound/NN) + (VP is/VBZ widely/RB expected/VBN to/TO take/VB) + (NP another/DT sharp/JJ dive/NN) + if/IN + ...) + (S + Chancellor/NNP + (PP of/IN) + (NP the/DT Exchequer/NNP) + ...) + >>> print(conll2002.sents()) + [['Sao', 'Paulo', '(', 'Brasil', ')', ',', ...], ['-'], ...] + >>> for tree in conll2002.chunked_sents()[:2]: + ... print(tree) + (S + (LOC Sao/NC Paulo/VMI) + (/Fpa + (LOC Brasil/NC) + )/Fpt + ...) + (S -/Fg) + +.. note:: Since the CONLL corpora do not contain paragraph break + information, these readers do not support the ``para()`` method.) + +.. warning:: if you call the conll corpora reader methods without any + arguments, they will return the contents of the entire corpus, + *including* the 'test' portions of the corpus.) + +SemCor is a subset of the Brown corpus tagged with WordNet senses and +named entities. Both kinds of lexical items include multiword units, +which are encoded as chunks (senses and part-of-speech tags pertain +to the entire chunk). + + >>> from nltk.corpus import semcor + >>> semcor.words() + ['The', 'Fulton', 'County', 'Grand', 'Jury', ...] + >>> semcor.chunks() + [['The'], ['Fulton', 'County', 'Grand', 'Jury'], ...] 
+ >>> semcor.sents() + [['The', 'Fulton', 'County', 'Grand', 'Jury', 'said', ...], + ['The', 'jury', 'further', 'said', ...], ...] + >>> semcor.chunk_sents() + [[['The'], ['Fulton', 'County', 'Grand', 'Jury'], ['said'], ... + ['.']], [['The'], ['jury'], ['further'], ['said'], ... ['.']], ...] + >>> list(map(str, semcor.tagged_chunks(tag='both')[:3])) + ['(DT The)', "(Lemma('group.n.01.group') (NE (NNP Fulton County Grand Jury)))", "(Lemma('state.v.01.say') (VB said))"] + >>> [[str(c) for c in s] for s in semcor.tagged_sents(tag='both')[:2]] + [['(DT The)', "(Lemma('group.n.01.group') (NE (NNP Fulton County Grand Jury)))", ... + '(None .)'], ['(DT The)', ... '(None .)']] + + +The IEER corpus is another chunked corpus. This corpus is unusual in +that each corpus item contains multiple documents. (This reflects the +fact that each corpus file contains multiple documents.) The IEER +corpus defines the `parsed_docs` method, which returns the documents +in a given item as `IEERDocument` objects: + + >>> from nltk.corpus import ieer + >>> ieer.fileids() + ['APW_19980314', 'APW_19980424', 'APW_19980429', + 'NYT_19980315', 'NYT_19980403', 'NYT_19980407'] + >>> docs = ieer.parsed_docs('APW_19980314') + >>> print(docs[0]) + + >>> print(docs[0].docno) + APW19980314.0391 + >>> print(docs[0].doctype) + NEWS STORY + >>> print(docs[0].date_time) + 03/14/1998 10:36:00 + >>> print(docs[0].headline) + (DOCUMENT Kenyans protest tax hikes) + >>> print(docs[0].text) + (DOCUMENT + (LOCATION NAIROBI) + , + (LOCATION Kenya) + ( + (ORGANIZATION AP) + ) + _ + (CARDINAL Thousands) + of + laborers, + ... + on + (DATE Saturday) + ...) + +Parsed Corpora +============== + +The Treebank corpora provide a syntactic parse for each sentence. The +NLTK data package includes a 10% sample of the Penn Treebank (in +``treebank``), as well as the Sinica Treebank (in ``sinica_treebank``). + +Reading the Penn Treebank (Wall Street Journal sample): + + >>> from nltk.corpus import treebank + >>> print(treebank.fileids()) + ['wsj_0001.mrg', 'wsj_0002.mrg', 'wsj_0003.mrg', 'wsj_0004.mrg', ...] + >>> print(treebank.words('wsj_0003.mrg')) + ['A', 'form', 'of', 'asbestos', 'once', 'used', ...] + >>> print(treebank.tagged_words('wsj_0003.mrg')) + [('A', 'DT'), ('form', 'NN'), ('of', 'IN'), ...] + >>> print(treebank.parsed_sents('wsj_0003.mrg')[0]) + (S + (S-TPC-1 + (NP-SBJ + (NP (NP (DT A) (NN form)) (PP (IN of) (NP (NN asbestos)))) + (RRC ...)...)...) + ... + (VP (VBD reported) (SBAR (-NONE- 0) (S (-NONE- *T*-1)))) + (. .)) + +If you have access to a full installation of the Penn Treebank, NLTK +can be configured to load it as well. Download the ``ptb`` package, +and in the directory ``nltk_data/corpora/ptb`` place the ``BROWN`` +and ``WSJ`` directories of the Treebank installation (symlinks work +as well). Then use the ``ptb`` module instead of ``treebank``: + + >>> from nltk.corpus import ptb + >>> print(ptb.fileids()) # doctest: +SKIP + ['BROWN/CF/CF01.MRG', 'BROWN/CF/CF02.MRG', 'BROWN/CF/CF03.MRG', 'BROWN/CF/CF04.MRG', ...] + >>> print(ptb.words('WSJ/00/WSJ_0003.MRG')) # doctest: +SKIP + ['A', 'form', 'of', 'asbestos', 'once', 'used', '*', ...] + >>> print(ptb.tagged_words('WSJ/00/WSJ_0003.MRG')) # doctest: +SKIP + [('A', 'DT'), ('form', 'NN'), ('of', 'IN'), ...] + +...and so forth, like ``treebank`` but with extended fileids. 
Categories +specified in ``allcats.txt`` can be used to filter by genre; they consist +of ``news`` (for WSJ articles) and names of the Brown subcategories +(``fiction``, ``humor``, ``romance``, etc.): + + >>> ptb.categories() # doctest: +SKIP + ['adventure', 'belles_lettres', 'fiction', 'humor', 'lore', 'mystery', 'news', 'romance', 'science_fiction'] + >>> print(ptb.fileids('news')) # doctest: +SKIP + ['WSJ/00/WSJ_0001.MRG', 'WSJ/00/WSJ_0002.MRG', 'WSJ/00/WSJ_0003.MRG', ...] + >>> print(ptb.words(categories=['humor','fiction'])) # doctest: +SKIP + ['Thirty-three', 'Scotty', 'did', 'not', 'go', 'back', ...] + +As PropBank and NomBank depend on the (WSJ portion of the) Penn Treebank, +the modules ``propbank_ptb`` and ``nombank_ptb`` are provided for access +to a full PTB installation. + +Reading the Sinica Treebank: + + >>> from nltk.corpus import sinica_treebank + >>> print(sinica_treebank.sents()) # doctest: +SKIP + [['\xe4\xb8\x80'], ['\xe5\x8f\x8b\xe6\x83\x85'], ...] + >>> sinica_treebank.parsed_sents()[25] # doctest: +SKIP + Tree('S', + [Tree('NP', + [Tree('Nba', ['\xe5\x98\x89\xe7\x8f\x8d'])]), + Tree('V\xe2\x80\xa7\xe5\x9c\xb0', + [Tree('VA11', ['\xe4\xb8\x8d\xe5\x81\x9c']), + Tree('DE', ['\xe7\x9a\x84'])]), + Tree('VA4', ['\xe5\x93\xad\xe6\xb3\xa3'])]) + +Reading the CoNLL 2007 Dependency Treebanks: + + >>> from nltk.corpus import conll2007 + >>> conll2007.sents('esp.train')[0] # doctest: +SKIP + ['El', 'aumento', 'del', 'índice', 'de', 'desempleo', ...] + >>> conll2007.parsed_sents('esp.train')[0] # doctest: +SKIP + + >>> print(conll2007.parsed_sents('esp.train')[0].tree()) # doctest: +SKIP + (fortaleció + (aumento El (del (índice (de (desempleo estadounidense))))) + hoy + considerablemente + (al + (euro + (cotizaba + , + que + (a (15.35 las GMT)) + se + (en (mercado el (de divisas) (de Fráncfort))) + (a 0,9452_dólares) + (frente_a , (0,9349_dólares los (de (mañana esta))))))) + .) + +Word Lists and Lexicons +======================= + +The NLTK data package also includes a number of lexicons and word +lists. These are accessed just like text corpora. The following +examples illustrate the use of the wordlist corpora: + + >>> from nltk.corpus import names, stopwords, words + >>> words.fileids() + ['en', 'en-basic'] + >>> words.words('en') + ['A', 'a', 'aa', 'aal', 'aalii', 'aam', 'Aani', 'aardvark', 'aardwolf', ...] + + >>> stopwords.fileids() # doctest: +SKIP + ['arabic', 'azerbaijani', 'bengali', 'danish', 'dutch', 'english', 'finnish', 'french', ...] + >>> sorted(stopwords.words('portuguese')) + ['a', 'ao', 'aos', 'aquela', 'aquelas', 'aquele', 'aqueles', ...] + >>> names.fileids() + ['female.txt', 'male.txt'] + >>> names.words('male.txt') + ['Aamir', 'Aaron', 'Abbey', 'Abbie', 'Abbot', 'Abbott', ...] + >>> names.words('female.txt') + ['Abagael', 'Abagail', 'Abbe', 'Abbey', 'Abbi', 'Abbie', ...] + +The CMU Pronunciation Dictionary corpus contains pronunciation +transcriptions for over 100,000 words. It can be accessed as a list +of entries (where each entry consists of a word, an identifier, and a +transcription) or as a dictionary from words to lists of +transcriptions. Transcriptions are encoded as tuples of phoneme +strings. 
+ + >>> from nltk.corpus import cmudict + >>> print(cmudict.entries()[653:659]) + [('acetate', ['AE1', 'S', 'AH0', 'T', 'EY2', 'T']), + ('acetic', ['AH0', 'S', 'EH1', 'T', 'IH0', 'K']), + ('acetic', ['AH0', 'S', 'IY1', 'T', 'IH0', 'K']), + ('aceto', ['AA0', 'S', 'EH1', 'T', 'OW0']), + ('acetochlor', ['AA0', 'S', 'EH1', 'T', 'OW0', 'K', 'L', 'AO2', 'R']), + ('acetone', ['AE1', 'S', 'AH0', 'T', 'OW2', 'N'])] + >>> # Load the entire cmudict corpus into a Python dictionary: + >>> transcr = cmudict.dict() + >>> print([transcr[w][0] for w in 'Natural Language Tool Kit'.lower().split()]) + [['N', 'AE1', 'CH', 'ER0', 'AH0', 'L'], + ['L', 'AE1', 'NG', 'G', 'W', 'AH0', 'JH'], + ['T', 'UW1', 'L'], + ['K', 'IH1', 'T']] + + +WordNet +======= + +Please see the separate WordNet howto. + +FrameNet +======== + +Please see the separate FrameNet howto. + +PropBank +======== + +Please see the separate PropBank howto. + +SentiWordNet +============ + +Please see the separate SentiWordNet howto. + +Categorized Corpora +=================== + +Several corpora included with NLTK contain documents that have been categorized for +topic, genre, polarity, etc. In addition to the standard corpus interface, these +corpora provide access to the list of categories and the mapping between the documents +and their categories (in both directions). Access the categories using the ``categories()`` +method, e.g.: + + >>> from nltk.corpus import brown, movie_reviews, reuters + >>> brown.categories() + ['adventure', 'belles_lettres', 'editorial', 'fiction', 'government', 'hobbies', 'humor', + 'learned', 'lore', 'mystery', 'news', 'religion', 'reviews', 'romance', 'science_fiction'] + >>> movie_reviews.categories() + ['neg', 'pos'] + >>> reuters.categories() + ['acq', 'alum', 'barley', 'bop', 'carcass', 'castor-oil', 'cocoa', + 'coconut', 'coconut-oil', 'coffee', 'copper', 'copra-cake', 'corn', + 'cotton', 'cotton-oil', 'cpi', 'cpu', 'crude', 'dfl', 'dlr', ...] + +This method has an optional argument that specifies a document or a list +of documents, allowing us to map from (one or more) documents to (one or more) categories: + + >>> brown.categories('ca01') + ['news'] + >>> brown.categories(['ca01','cb01']) + ['editorial', 'news'] + >>> reuters.categories('training/9865') + ['barley', 'corn', 'grain', 'wheat'] + >>> reuters.categories(['training/9865', 'training/9880']) + ['barley', 'corn', 'grain', 'money-fx', 'wheat'] + +We can go back the other way using the optional argument of the ``fileids()`` method: + + >>> reuters.fileids('barley') + ['test/15618', 'test/15649', 'test/15676', 'test/15728', 'test/15871', ...] + +Both the ``categories()`` and ``fileids()`` methods return a sorted list containing +no duplicates. + +In addition to mapping between categories and documents, these corpora permit +direct access to their contents via the categories. Instead of accessing a subset +of a corpus by specifying one or more fileids, we can identify one or more categories, e.g.: + + >>> brown.tagged_words(categories='news') + [('The', 'AT'), ('Fulton', 'NP-TL'), ...] + >>> brown.sents(categories=['editorial','reviews']) + [['Assembly', 'session', 'brought', 'much', 'good'], ['The', 'General', + 'Assembly', ',', 'which', 'adjourns', 'today', ',', 'has', 'performed', + 'in', 'an', 'atmosphere', 'of', 'crisis', 'and', 'struggle', 'from', + 'the', 'day', 'it', 'convened', '.'], ...] + +Note that it is an error to specify both documents and categories. 
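+
+For example, a call along the following lines fails (the exact exception
+message is an assumption about how the categorized readers report the
+clash, so the example is skipped when the doctests are run):
+
+    >>> brown.words(fileids=['ca01'], categories=['news']) # doctest: +SKIP
+    Traceback (most recent call last):
+      ...
+    ValueError: Specify fileids or categories, not both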
+ +In the context of a text categorization system, we can easily test if the +category assigned to a document is correct as follows: + + >>> def classify(doc): return 'news' # Trivial classifier + >>> doc = 'ca01' + >>> classify(doc) in brown.categories(doc) + True + + +Other Corpora +============= + +comparative_sentences +--------------------- +A list of sentences from various sources, especially reviews and articles. Each +line contains one sentence; sentences were separated by using a sentence tokenizer. +Comparative sentences have been annotated with their type, entities, features and +keywords. + + >>> from nltk.corpus import comparative_sentences + >>> comparison = comparative_sentences.comparisons()[0] + >>> comparison.text + ['its', 'fast-forward', 'and', 'rewind', 'work', 'much', 'more', 'smoothly', + 'and', 'consistently', 'than', 'those', 'of', 'other', 'models', 'i', "'ve", + 'had', '.'] + >>> comparison.entity_2 + 'models' + >>> (comparison.feature, comparison.keyword) + ('rewind', 'more') + >>> len(comparative_sentences.comparisons()) + 853 + +opinion_lexicon +--------------- +A list of positive and negative opinion words or sentiment words for English. + + >>> from nltk.corpus import opinion_lexicon + >>> opinion_lexicon.words()[:4] + ['2-faced', '2-faces', 'abnormal', 'abolish'] + +The OpinionLexiconCorpusReader also provides shortcuts to retrieve positive/negative +words: + + >>> opinion_lexicon.negative()[:4] + ['2-faced', '2-faces', 'abnormal', 'abolish'] + +Note that words from `words()` method in opinion_lexicon are sorted by file id, +not alphabetically: + + >>> opinion_lexicon.words()[0:10] + ['2-faced', '2-faces', 'abnormal', 'abolish', 'abominable', 'abominably', + 'abominate', 'abomination', 'abort', 'aborted'] + >>> sorted(opinion_lexicon.words())[0:10] + ['2-faced', '2-faces', 'a+', 'abnormal', 'abolish', 'abominable', 'abominably', + 'abominate', 'abomination', 'abort'] + +ppattach +-------- +The Prepositional Phrase Attachment corpus is a corpus of +prepositional phrase attachment decisions. Each instance in the +corpus is encoded as a ``PPAttachment`` object: + + >>> from nltk.corpus import ppattach + >>> ppattach.attachments('training') + [PPAttachment(sent='0', verb='join', noun1='board', + prep='as', noun2='director', attachment='V'), + PPAttachment(sent='1', verb='is', noun1='chairman', + prep='of', noun2='N.V.', attachment='N'), + ...] + >>> inst = ppattach.attachments('training')[0] + >>> (inst.sent, inst.verb, inst.noun1, inst.prep, inst.noun2) + ('0', 'join', 'board', 'as', 'director') + >>> inst.attachment + 'V' + +product_reviews_1 and product_reviews_2 +--------------------------------------- +These two datasets respectively contain annotated customer reviews of 5 and 9 +products from amazon.com. + + >>> from nltk.corpus import product_reviews_1 + >>> camera_reviews = product_reviews_1.reviews('Canon_G3.txt') + >>> review = camera_reviews[0] + >>> review.sents()[0] + ['i', 'recently', 'purchased', 'the', 'canon', 'powershot', 'g3', 'and', 'am', + 'extremely', 'satisfied', 'with', 'the', 'purchase', '.'] + >>> review.features() + [('canon powershot g3', '+3'), ('use', '+2'), ('picture', '+2'), + ('picture quality', '+1'), ('picture quality', '+1'), ('camera', '+2'), + ('use', '+2'), ('feature', '+1'), ('picture quality', '+3'), ('use', '+1'), + ('option', '+1')] + +It is also possible to reach the same information directly from the stream: + + >>> product_reviews_1.features('Canon_G3.txt') + [('canon powershot g3', '+3'), ('use', '+2'), ...] 
+ +We can compute stats for specific product features: + + >>> n_reviews = len([(feat,score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture']) + >>> tot = sum([int(score) for (feat,score) in product_reviews_1.features('Canon_G3.txt') if feat=='picture']) + >>> mean = tot / n_reviews + >>> print(n_reviews, tot, mean) + 15 24 1.6 + +pros_cons +--------- +A list of pros/cons sentences for determining context (aspect) dependent +sentiment words, which are then applied to sentiment analysis of comparative +sentences. + + >>> from nltk.corpus import pros_cons + >>> pros_cons.sents(categories='Cons') + [['East', 'batteries', '!', 'On', '-', 'off', 'switch', 'too', 'easy', + 'to', 'maneuver', '.'], ['Eats', '...', 'no', ',', 'GULPS', 'batteries'], + ...] + >>> pros_cons.words('IntegratedPros.txt') + ['Easy', 'to', 'use', ',', 'economical', '!', ...] + +semcor +------ +The Brown Corpus, annotated with WordNet senses. + + >>> from nltk.corpus import semcor + >>> semcor.words('brown2/tagfiles/br-n12.xml') + ['When', 'several', 'minutes', 'had', 'passed', ...] + +senseval +-------- +The Senseval 2 corpus is a word sense disambiguation corpus. Each +item in the corpus corresponds to a single ambiguous word. For each +of these words, the corpus contains a list of instances, corresponding +to occurrences of that word. Each instance provides the word; a list +of word senses that apply to the word occurrence; and the word's +context. + + >>> from nltk.corpus import senseval + >>> senseval.fileids() + ['hard.pos', 'interest.pos', 'line.pos', 'serve.pos'] + >>> senseval.instances('hard.pos') + ... + [SensevalInstance(word='hard-a', + position=20, + context=[('``', '``'), ('he', 'PRP'), ...('hard', 'JJ'), ...], + senses=('HARD1',)), + SensevalInstance(word='hard-a', + position=10, + context=[('clever', 'NNP'), ...('hard', 'JJ'), ('time', 'NN'), ...], + senses=('HARD1',)), ...] + +The following code looks at instances of the word 'interest', and +displays their local context (2 words on each side) and word sense(s): + + >>> for inst in senseval.instances('interest.pos')[:10]: + ... p = inst.position + ... left = ' '.join(w for (w,t) in inst.context[p-2:p]) + ... word = ' '.join(w for (w,t) in inst.context[p:p+1]) + ... right = ' '.join(w for (w,t) in inst.context[p+1:p+3]) + ... senses = ' '.join(inst.senses) + ... print('%20s |%10s | %-15s -> %s' % (left, word, right, senses)) + declines in | interest | rates . -> interest_6 + indicate declining | interest | rates because -> interest_6 + in short-term | interest | rates . -> interest_6 + 4 % | interest | in this -> interest_5 + company with | interests | in the -> interest_5 + , plus | interest | . -> interest_6 + set the | interest | rate on -> interest_6 + 's own | interest | , prompted -> interest_4 + principal and | interest | is the -> interest_6 + increase its | interest | to 70 -> interest_5 + +sentence_polarity +----------------- +The Sentence Polarity dataset contains 5331 positive and 5331 negative processed +sentences. + + >>> from nltk.corpus import sentence_polarity + >>> sentence_polarity.sents() + [['simplistic', ',', 'silly', 'and', 'tedious', '.'], ["it's", 'so', 'laddish', + 'and', 'juvenile', ',', 'only', 'teenage', 'boys', 'could', 'possibly', 'find', + 'it', 'funny', '.'], ...] 
+ >>> sentence_polarity.categories() + ['neg', 'pos'] + >>> sentence_polarity.sents()[1] + ["it's", 'so', 'laddish', 'and', 'juvenile', ',', 'only', 'teenage', 'boys', + 'could', 'possibly', 'find', 'it', 'funny', '.'] + +shakespeare +----------- +The Shakespeare corpus contains a set of Shakespeare plays, formatted +as XML files. These corpora are returned as ElementTree objects: + + >>> from nltk.corpus import shakespeare + >>> from xml.etree import ElementTree + >>> shakespeare.fileids() + ['a_and_c.xml', 'dream.xml', 'hamlet.xml', 'j_caesar.xml', ...] + >>> play = shakespeare.xml('dream.xml') + >>> print(play) + + >>> print('%s: %s' % (play[0].tag, play[0].text)) + TITLE: A Midsummer Night's Dream + >>> personae = [persona.text for persona in + ... play.findall('PERSONAE/PERSONA')] + >>> print(personae) + ['THESEUS, Duke of Athens.', 'EGEUS, father to Hermia.', ...] + >>> # Find and print speakers not listed as personae + >>> names = [persona.split(',')[0] for persona in personae] + >>> speakers = set(speaker.text for speaker in + ... play.findall('*/*/*/SPEAKER')) + >>> print(sorted(speakers.difference(names))) + ['ALL', 'COBWEB', 'DEMETRIUS', 'Fairy', 'HERNIA', 'LYSANDER', + 'Lion', 'MOTH', 'MUSTARDSEED', 'Moonshine', 'PEASEBLOSSOM', + 'Prologue', 'Pyramus', 'Thisbe', 'Wall'] + +subjectivity +------------ +The Subjectivity Dataset contains 5000 subjective and 5000 objective processed +sentences. + + >>> from nltk.corpus import subjectivity + >>> subjectivity.categories() + ['obj', 'subj'] + >>> subjectivity.sents()[23] + ['television', 'made', 'him', 'famous', ',', 'but', 'his', 'biggest', 'hits', + 'happened', 'off', 'screen', '.'] + >>> subjectivity.words(categories='subj') + ['smart', 'and', 'alert', ',', 'thirteen', ...] + +toolbox +------- +The Toolbox corpus distributed with NLTK contains a sample lexicon and +several sample texts from the Rotokas language. The Toolbox corpus +reader returns Toolbox files as XML ElementTree objects. The +following example loads the Rotokas dictionary, and figures out the +distribution of part-of-speech tags for reduplicated words. + +.. doctest: +SKIP + + >>> from nltk.corpus import toolbox + >>> from nltk.probability import FreqDist + >>> from xml.etree import ElementTree + >>> import re + >>> rotokas = toolbox.xml('rotokas.dic') + >>> redup_pos_freqdist = FreqDist() + >>> # Note: we skip over the first record, which is actually + >>> # the header. + >>> for record in rotokas[1:]: + ... lexeme = record.find('lx').text + ... if re.match(r'(.*)\1$', lexeme): + ... redup_pos_freqdist[record.find('ps').text] += 1 + >>> for item, count in redup_pos_freqdist.most_common(): + ... print(item, count) + V 41 + N 14 + ??? 4 + +This example displays some records from a Rotokas text: + +.. doctest: +SKIP + + >>> river = toolbox.xml('rotokas/river.txt', key='ref') + >>> for record in river.findall('record')[:3]: + ... for piece in record: + ... if len(piece.text) > 60: + ... print('%-6s %s...' % (piece.tag, piece.text[:57])) + ... else: + ... print('%-6s %s' % (piece.tag, piece.text)) + ref Paragraph 1 + t ``Viapau oisio ra ovaupasi ... + m viapau oisio ra ovau -pa -si ... + g NEG this way/like this and forget -PROG -2/3.DL... + p NEG ??? CONJ V.I -SUFF.V.3 -SUFF.V... + f ``No ken lus tingting wanema samting papa i bin tok,'' Na... + fe ``Don't forget what Dad said,'' yelled Naomi. + ref 2 + t Osa Ira ora Reviti viapau uvupasiva. + m osa Ira ora Reviti viapau uvu -pa -si ... + g as/like name and name NEG hear/smell -PROG -2/3... 
+ p CONJ N.PN CONJ N.PN NEG V.T -SUFF.V.3 -SUF... + f Tasol Ila na David no bin harim toktok. + fe But Ila and David took no notice. + ref 3 + t Ikaupaoro rokosiva ... + m ikau -pa -oro roko -si -va ... + g run/hurry -PROG -SIM go down -2/3.DL.M -RP ... + p V.T -SUFF.V.3 -SUFF.V.4 ADV -SUFF.V.4 -SUFF.VT.... + f Tupela i bin hariap i go long wara . + fe They raced to the river. + +timit +----- +The NLTK data package includes a fragment of the TIMIT +Acoustic-Phonetic Continuous Speech Corpus. This corpus is broken +down into small speech samples, each of which is available as a wave +file, a phonetic transcription, and a tokenized word list. + + >>> from nltk.corpus import timit + >>> print(timit.utteranceids()) + ['dr1-fvmh0/sa1', 'dr1-fvmh0/sa2', 'dr1-fvmh0/si1466', + 'dr1-fvmh0/si2096', 'dr1-fvmh0/si836', 'dr1-fvmh0/sx116', + 'dr1-fvmh0/sx206', 'dr1-fvmh0/sx26', 'dr1-fvmh0/sx296', ...] + + >>> item = timit.utteranceids()[5] + >>> print(timit.phones(item)) + ['h#', 'k', 'l', 'ae', 's', 'pcl', 'p', 'dh', 'ax', + 's', 'kcl', 'k', 'r', 'ux', 'ix', 'nx', 'y', 'ax', + 'l', 'eh', 'f', 'tcl', 't', 'hh', 'ae', 'n', 'dcl', + 'd', 'h#'] + >>> print(timit.words(item)) + ['clasp', 'the', 'screw', 'in', 'your', 'left', 'hand'] + >>> timit.play(item) # doctest: +SKIP + +The corpus reader can combine the word segmentation information with +the phonemes to produce a single tree structure: + + >>> for tree in timit.phone_trees(item): + ... print(tree) + (S + h# + (clasp k l ae s pcl p) + (the dh ax) + (screw s kcl k r ux) + (in ix nx) + (your y ax) + (left l eh f tcl t) + (hand hh ae n dcl d) + h#) + +The start time and stop time of each phoneme, word, and sentence are +also available: + + >>> print(timit.phone_times(item)) + [('h#', 0, 2190), ('k', 2190, 3430), ('l', 3430, 4326), ...] + >>> print(timit.word_times(item)) + [('clasp', 2190, 8804), ('the', 8804, 9734), ...] + >>> print(timit.sent_times(item)) + [('Clasp the screw in your left hand.', 0, 32154)] + +We can use these times to play selected pieces of a speech sample: + + >>> timit.play(item, 2190, 8804) # 'clasp' # doctest: +SKIP + +The corpus reader can also be queried for information about the +speaker and sentence identifier for a given speech sample: + + >>> print(timit.spkrid(item)) + dr1-fvmh0 + >>> print(timit.sentid(item)) + sx116 + >>> print(timit.spkrinfo(timit.spkrid(item))) + SpeakerInfo(id='VMH0', + sex='F', + dr='1', + use='TRN', + recdate='03/11/86', + birthdate='01/08/60', + ht='5\'05"', + race='WHT', + edu='BS', + comments='BEST NEW ENGLAND ACCENT SO FAR') + + >>> # List the speech samples from the same speaker: + >>> timit.utteranceids(spkrid=timit.spkrid(item)) + ['dr1-fvmh0/sa1', 'dr1-fvmh0/sa2', 'dr1-fvmh0/si1466', ...] + +twitter_samples +--------------- + +Twitter is well-known microblog service that allows public data to be +collected via APIs. NLTK's twitter corpus currently contains a sample of 20k Tweets +retrieved from the Twitter Streaming API. + + >>> from nltk.corpus import twitter_samples + >>> twitter_samples.fileids() + ['negative_tweets.json', 'positive_tweets.json', 'tweets.20150430-223406.json'] + +We follow standard practice in storing full Tweets as line-separated +JSON. These data structures can be accessed via `tweets.docs()`. However, in general it +is more practical to focus just on the text field of the Tweets, which +are accessed via the `strings()` method. 
+ + >>> twitter_samples.strings('tweets.20150430-223406.json')[:5] + ['RT @KirkKus: Indirect cost of the UK being in the EU is estimated to be costing Britain \xa3170 billion per year! #BetterOffOut #UKIP', ...] + +The default tokenizer for Tweets is specialised for 'casual' text, and +the `tokenized()` method returns a list of lists of tokens. + + >>> twitter_samples.tokenized('tweets.20150430-223406.json')[:5] + [['RT', '@KirkKus', ':', 'Indirect', 'cost', 'of', 'the', 'UK', 'being', 'in', ...], + ['VIDEO', ':', 'Sturgeon', 'on', 'post-election', 'deals', 'http://t.co/BTJwrpbmOY'], ...] + +rte +--- +The RTE (Recognizing Textual Entailment) corpus was derived from the +RTE1, RTE2 and RTE3 datasets (dev and test data), and consists of a +list of XML-formatted 'text'/'hypothesis' pairs. + + >>> from nltk.corpus import rte + >>> print(rte.fileids()) + ['rte1_dev.xml', 'rte1_test.xml', 'rte2_dev.xml', ..., 'rte3_test.xml'] + >>> rtepairs = rte.pairs(['rte2_test.xml', 'rte3_test.xml']) + >>> print(rtepairs) + [, , , ...] + +In the gold standard test sets, each pair is labeled according to +whether or not the text 'entails' the hypothesis; the +entailment value is mapped to an integer 1 (True) or 0 (False). + + >>> rtepairs[5] + + >>> rtepairs[5].text + 'His wife Strida won a seat in parliament after forging an alliance + with the main anti-Syrian coalition in the recent election.' + >>> rtepairs[5].hyp + 'Strida elected to parliament.' + >>> rtepairs[5].value + 1 + +The RTE corpus also supports an ``xml()`` method which produces ElementTrees. + + >>> xmltree = rte.xml('rte3_dev.xml') + >>> xmltree # doctest: +SKIP + + >>> xmltree[7].findtext('t') + "Mrs. Bush's approval ratings have remained very high, above 80%, + even as her husband's have recently dropped below 50%." + +verbnet +------- +The VerbNet corpus is a lexicon that divides verbs into classes, based +on their syntax-semantics linking behavior. The basic elements in the +lexicon are verb lemmas, such as 'abandon' and 'accept', and verb +classes, which have identifiers such as 'remove-10.1' and +'admire-31.2-1'. These class identifiers consist of a representative +verb selected from the class, followed by a numerical identifier. 
The +list of verb lemmas, and the list of class identifiers, can be +retrieved with the following methods: + + >>> from nltk.corpus import verbnet + >>> verbnet.lemmas()[20:25] + ['accelerate', 'accept', 'acclaim', 'accompany', 'accrue'] + >>> verbnet.classids()[:5] + ['accompany-51.7', 'admire-31.2', 'admire-31.2-1', 'admit-65', 'adopt-93'] + +The `classids()` method may also be used to retrieve the classes that +a given lemma belongs to: + + >>> verbnet.classids('accept') + ['approve-77', 'characterize-29.2-1-1', 'obtain-13.5.2'] + +The `classids()` method may additionally be used to retrieve all classes +within verbnet if nothing is passed: + + >>> verbnet.classids() + ['accompany-51.7', 'admire-31.2', 'admire-31.2-1', 'admit-65', 'adopt-93', 'advise-37.9', 'advise-37.9-1', 'allow-64', 'amalgamate-22.2', 'amalgamate-22.2-1', 'amalgamate-22.2-1-1', 'amalgamate-22.2-2', 'amalgamate-22.2-2-1', 'amalgamate-22.2-3', 'amalgamate-22.2-3-1', 'amalgamate-22.2-3-1-1', 'amalgamate-22.2-3-2', 'amuse-31.1', 'animal_sounds-38', 'appeal-31.4', 'appeal-31.4-1', 'appeal-31.4-2', 'appeal-31.4-3', 'appear-48.1.1', 'appoint-29.1', 'approve-77', 'assessment-34', 'assuming_position-50', 'avoid-52', 'banish-10.2', 'battle-36.4', 'battle-36.4-1', 'begin-55.1', 'begin-55.1-1', 'being_dressed-41.3.3', 'bend-45.2', 'berry-13.7', 'bill-54.5', 'body_internal_motion-49', 'body_internal_states-40.6', 'braid-41.2.2', 'break-45.1', 'breathe-40.1.2', 'breathe-40.1.2-1', 'bring-11.3', 'bring-11.3-1', 'build-26.1', 'build-26.1-1', 'bulge-47.5.3', 'bump-18.4', 'bump-18.4-1', 'butter-9.9', 'calibratable_cos-45.6', 'calibratable_cos-45.6-1', 'calve-28', 'captain-29.8', 'captain-29.8-1', 'captain-29.8-1-1', 'care-88', 'care-88-1', 'carry-11.4', 'carry-11.4-1', 'carry-11.4-1-1', 'carve-21.2', 'carve-21.2-1', 'carve-21.2-2', 'change_bodily_state-40.8.4', 'characterize-29.2', 'characterize-29.2-1', 'characterize-29.2-1-1', 'characterize-29.2-1-2', 'chase-51.6', 'cheat-10.6', 'cheat-10.6-1', 'cheat-10.6-1-1', 'chew-39.2', 'chew-39.2-1', 'chew-39.2-2', 'chit_chat-37.6', 'clear-10.3', 'clear-10.3-1', 'cling-22.5', 'coil-9.6', 'coil-9.6-1', 'coloring-24', 'complain-37.8', 'complete-55.2', 'concealment-16', 'concealment-16-1', 'confess-37.10', 'confine-92', 'confine-92-1', 'conjecture-29.5', 'conjecture-29.5-1', 'conjecture-29.5-2', 'consider-29.9', 'consider-29.9-1', 'consider-29.9-1-1', 'consider-29.9-1-1-1', 'consider-29.9-2', 'conspire-71', 'consume-66', 'consume-66-1', 'contiguous_location-47.8', 'contiguous_location-47.8-1', 'contiguous_location-47.8-2', 'continue-55.3', 'contribute-13.2', 'contribute-13.2-1', 'contribute-13.2-1-1', 'contribute-13.2-1-1-1', 'contribute-13.2-2', 'contribute-13.2-2-1', 'convert-26.6.2', 'convert-26.6.2-1', 'cooking-45.3', 'cooperate-73', 'cooperate-73-1', 'cooperate-73-2', 'cooperate-73-3', 'cope-83', 'cope-83-1', 'cope-83-1-1', 'correlate-86', 'correspond-36.1', 'correspond-36.1-1', 'correspond-36.1-1-1', 'cost-54.2', 'crane-40.3.2', 'create-26.4', 'create-26.4-1', 'curtsey-40.3.3', 'cut-21.1', 'cut-21.1-1', 'debone-10.8', 'declare-29.4', 'declare-29.4-1', 'declare-29.4-1-1', 'declare-29.4-1-1-1', 'declare-29.4-1-1-2', 'declare-29.4-1-1-3', 'declare-29.4-2', 'dedicate-79', 'defend-85', 'destroy-44', 'devour-39.4', 'devour-39.4-1', 'devour-39.4-2', 'differ-23.4', 'dine-39.5', 'disappearance-48.2', 'disassemble-23.3', 'discover-84', 'discover-84-1', 'discover-84-1-1', 'dress-41.1.1', 'dressing_well-41.3.2', 'drive-11.5', 'drive-11.5-1', 'dub-29.3', 'dub-29.3-1', 'eat-39.1', 'eat-39.1-1', 
'eat-39.1-2', 'enforce-63', 'engender-27', 'entity_specific_cos-45.5', 'entity_specific_modes_being-47.2', 'equip-13.4.2', 'equip-13.4.2-1', 'equip-13.4.2-1-1', 'escape-51.1', 'escape-51.1-1', 'escape-51.1-2', 'escape-51.1-2-1', 'exceed-90', 'exchange-13.6', 'exchange-13.6-1', 'exchange-13.6-1-1', 'exhale-40.1.3', 'exhale-40.1.3-1', 'exhale-40.1.3-2', 'exist-47.1', 'exist-47.1-1', 'exist-47.1-1-1', 'feeding-39.7', 'ferret-35.6', 'fill-9.8', 'fill-9.8-1', 'fit-54.3', 'flinch-40.5', 'floss-41.2.1', 'focus-87', 'forbid-67', 'force-59', 'force-59-1', 'free-80', 'free-80-1', 'fulfilling-13.4.1', 'fulfilling-13.4.1-1', 'fulfilling-13.4.1-2', 'funnel-9.3', 'funnel-9.3-1', 'funnel-9.3-2', 'funnel-9.3-2-1', 'future_having-13.3', 'get-13.5.1', 'get-13.5.1-1', 'give-13.1', 'give-13.1-1', 'gobble-39.3', 'gobble-39.3-1', 'gobble-39.3-2', 'gorge-39.6', 'groom-41.1.2', 'grow-26.2', 'help-72', 'help-72-1', 'herd-47.5.2', 'hiccup-40.1.1', 'hit-18.1', 'hit-18.1-1', 'hold-15.1', 'hold-15.1-1', 'hunt-35.1', 'hurt-40.8.3', 'hurt-40.8.3-1', 'hurt-40.8.3-1-1', 'hurt-40.8.3-2', 'illustrate-25.3', 'image_impression-25.1', 'indicate-78', 'indicate-78-1', 'indicate-78-1-1', 'inquire-37.1.2', 'instr_communication-37.4', 'investigate-35.4', 'judgement-33', 'keep-15.2', 'knead-26.5', 'learn-14', 'learn-14-1', 'learn-14-2', 'learn-14-2-1', 'leave-51.2', 'leave-51.2-1', 'lecture-37.11', 'lecture-37.11-1', 'lecture-37.11-1-1', 'lecture-37.11-2', 'light_emission-43.1', 'limit-76', 'linger-53.1', 'linger-53.1-1', 'lodge-46', 'long-32.2', 'long-32.2-1', 'long-32.2-2', 'manner_speaking-37.3', 'marry-36.2', 'marvel-31.3', 'marvel-31.3-1', 'marvel-31.3-2', 'marvel-31.3-3', 'marvel-31.3-4', 'marvel-31.3-5', 'marvel-31.3-6', 'marvel-31.3-7', 'marvel-31.3-8', 'marvel-31.3-9', 'masquerade-29.6', 'masquerade-29.6-1', 'masquerade-29.6-2', 'matter-91', 'meander-47.7', 'meet-36.3', 'meet-36.3-1', 'meet-36.3-2', 'mine-10.9', 'mix-22.1', 'mix-22.1-1', 'mix-22.1-1-1', 'mix-22.1-2', 'mix-22.1-2-1', 'modes_of_being_with_motion-47.3', 'murder-42.1', 'murder-42.1-1', 'neglect-75', 'neglect-75-1', 'neglect-75-1-1', 'neglect-75-2', 'nonvehicle-51.4.2', 'nonverbal_expression-40.2', 'obtain-13.5.2', 'obtain-13.5.2-1', 'occurrence-48.3', 'order-60', 'order-60-1', 'orphan-29.7', 'other_cos-45.4', 'pain-40.8.1', 'pay-68', 'peer-30.3', 'pelt-17.2', 'performance-26.7', 'performance-26.7-1', 'performance-26.7-1-1', 'performance-26.7-2', 'performance-26.7-2-1', 'pit-10.7', 'pocket-9.10', 'pocket-9.10-1', 'poison-42.2', 'poke-19', 'pour-9.5', 'preparing-26.3', 'preparing-26.3-1', 'preparing-26.3-2', 'price-54.4', 'push-12', 'push-12-1', 'push-12-1-1', 'put-9.1', 'put-9.1-1', 'put-9.1-2', 'put_direction-9.4', 'put_spatial-9.2', 'put_spatial-9.2-1', 'reach-51.8', 'reflexive_appearance-48.1.2', 'refrain-69', 'register-54.1', 'rely-70', 'remove-10.1', 'risk-94', 'risk-94-1', 'roll-51.3.1', 'rummage-35.5', 'run-51.3.2', 'rush-53.2', 'say-37.7', 'say-37.7-1', 'say-37.7-1-1', 'say-37.7-2', 'scribble-25.2', 'search-35.2', 'see-30.1', 'see-30.1-1', 'see-30.1-1-1', 'send-11.1', 'send-11.1-1', 'separate-23.1', 'separate-23.1-1', 'separate-23.1-2', 'settle-89', 'shake-22.3', 'shake-22.3-1', 'shake-22.3-1-1', 'shake-22.3-2', 'shake-22.3-2-1', 'sight-30.2', 'simple_dressing-41.3.1', 'slide-11.2', 'slide-11.2-1-1', 'smell_emission-43.3', 'snooze-40.4', 'sound_emission-43.2', 'sound_existence-47.4', 'spank-18.3', 'spatial_configuration-47.6', 'split-23.2', 'spray-9.7', 'spray-9.7-1', 'spray-9.7-1-1', 'spray-9.7-2', 'stalk-35.3', 'steal-10.5', 'stimulus_subject-30.4', 
'stop-55.4', 'stop-55.4-1', 'substance_emission-43.4', 'succeed-74', 'succeed-74-1', 'succeed-74-1-1', 'succeed-74-2', 'suffocate-40.7', 'suspect-81', 'swarm-47.5.1', 'swarm-47.5.1-1', 'swarm-47.5.1-2', 'swarm-47.5.1-2-1', 'swat-18.2', 'talk-37.5', 'tape-22.4', 'tape-22.4-1', 'tell-37.2', 'throw-17.1', 'throw-17.1-1', 'throw-17.1-1-1', 'tingle-40.8.2', 'touch-20', 'touch-20-1', 'transcribe-25.4', 'transfer_mesg-37.1.1', 'transfer_mesg-37.1.1-1', 'transfer_mesg-37.1.1-1-1', 'try-61', 'turn-26.6.1', 'turn-26.6.1-1', 'urge-58', 'vehicle-51.4.1', 'vehicle-51.4.1-1', 'waltz-51.5', 'want-32.1', 'want-32.1-1', 'want-32.1-1-1', 'weather-57', 'weekend-56', 'wink-40.3.1', 'wink-40.3.1-1', 'wipe_instr-10.4.2', 'wipe_instr-10.4.2-1', 'wipe_manner-10.4.1', 'wipe_manner-10.4.1-1', 'wish-62', 'withdraw-82', 'withdraw-82-1', 'withdraw-82-2', 'withdraw-82-3'] + +The primary object in the lexicon is a class record, which is stored +as an ElementTree xml object. The class record for a given class +identifier is returned by the `vnclass()` method: + + >>> verbnet.vnclass('remove-10.1') + + +The `vnclass()` method also accepts "short" identifiers, such as '10.1': + + >>> verbnet.vnclass('10.1') + + +See the Verbnet documentation, or the Verbnet files, for information +about the structure of this xml. As an example, we can retrieve a +list of thematic roles for a given Verbnet class: + + >>> vn_31_2 = verbnet.vnclass('admire-31.2') + >>> for themrole in vn_31_2.findall('THEMROLES/THEMROLE'): + ... print(themrole.attrib['type'], end=' ') + ... for selrestr in themrole.findall('SELRESTRS/SELRESTR'): + ... print('[%(Value)s%(type)s]' % selrestr.attrib, end=' ') + ... print() + Theme + Experiencer [+animate] + Predicate + +The Verbnet corpus also provides a variety of pretty printing +functions that can be used to display the xml contents in a more +concise form. The simplest such method is `pprint()`: + + >>> print(verbnet.pprint('57')) + weather-57 + Subclasses: (none) + Members: blow clear drizzle fog freeze gust hail howl lightning mist + mizzle pelt pour precipitate rain roar shower sleet snow spit spot + sprinkle storm swelter teem thaw thunder + Thematic roles: + * Theme[+concrete +force] + Frames: + Intransitive (Expletive Subject) + Example: It's raining. + Syntax: LEX[it] LEX[[+be]] VERB + Semantics: + * weather(during(E), Weather_type, ?Theme) + NP (Expletive Subject, Theme Object) + Example: It's raining cats and dogs. + Syntax: LEX[it] LEX[[+be]] VERB NP[Theme] + Semantics: + * weather(during(E), Weather_type, Theme) + PP (Expletive Subject, Theme-PP) + Example: It was pelting with rain. + Syntax: LEX[it[+be]] VERB PREP[with] NP[Theme] + Semantics: + * weather(during(E), Weather_type, Theme) + +Verbnet gives us frames that link the syntax and semantics using an example. +These frames are part of the corpus and we can use `frames()` to get a frame +for a given verbnet class. 
+ + >>> frame = verbnet.frames('57') + >>> frame == [{'example': "It's raining.", 'description': {'primary': 'Intransitive', 'secondary': 'Expletive Subject'}, 'syntax': [{'pos_tag': 'LEX', 'modifiers': {'value': 'it', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'LEX', 'modifiers': {'value': '[+be]', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'VERB', 'modifiers': {'value': '', 'selrestrs': [], 'synrestrs': []}}], 'semantics': [{'predicate_value': 'weather', 'arguments': [{'type': 'Event', 'value': 'during(E)'}, {'type': 'VerbSpecific', 'value': 'Weather_type'}, {'type': 'ThemRole', 'value': '?Theme'}], 'negated': False}]}, {'example': "It's raining cats and dogs.", 'description': {'primary': 'NP', 'secondary': 'Expletive Subject, Theme Object'}, 'syntax': [{'pos_tag': 'LEX', 'modifiers': {'value': 'it', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'LEX', 'modifiers': {'value': '[+be]', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'VERB', 'modifiers': {'value': '', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'NP', 'modifiers': {'value': 'Theme', 'selrestrs': [], 'synrestrs': []}}], 'semantics': [{'predicate_value': 'weather', 'arguments': [{'type': 'Event', 'value': 'during(E)'}, {'type': 'VerbSpecific', 'value': 'Weather_type'}, {'type': 'ThemRole', 'value': 'Theme'}], 'negated': False}]}, {'example': 'It was pelting with rain.', 'description': {'primary': 'PP', 'secondary': 'Expletive Subject, Theme-PP'}, 'syntax': [{'pos_tag': 'LEX', 'modifiers': {'value': 'it[+be]', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'VERB', 'modifiers': {'value': '', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'PREP', 'modifiers': {'value': 'with', 'selrestrs': [], 'synrestrs': []}}, {'pos_tag': 'NP', 'modifiers': {'value': 'Theme', 'selrestrs': [], 'synrestrs': []}}], 'semantics': [{'predicate_value': 'weather', 'arguments': [{'type': 'Event', 'value': 'during(E)'}, {'type': 'VerbSpecific', 'value': 'Weather_type'}, {'type': 'ThemRole', 'value': 'Theme'}], 'negated': False}]}] + True + +Verbnet corpus lets us access thematic roles individually using `themroles()`. + + >>> themroles = verbnet.themroles('57') + >>> themroles == [{'modifiers': [{'type': 'concrete', 'value': '+'}, {'type': 'force', 'value': '+'}], 'type': 'Theme'}] + True + +Verbnet classes may also have subclasses sharing similar syntactic and semantic properties +while having differences with the superclass. The Verbnet corpus allows us to access these +subclasses using `subclasses()`. + + >>> print(verbnet.subclasses('9.1')) #Testing for 9.1 since '57' does not have subclasses + ['put-9.1-1', 'put-9.1-2'] + + +nps_chat +-------- + +The NPS Chat Corpus, Release 1.0 consists of over 10,000 posts in age-specific +chat rooms, which have been anonymized, POS-tagged and dialogue-act tagged. + + >>> print(nltk.corpus.nps_chat.words()) + ['now', 'im', 'left', 'with', 'this', 'gay', ...] + >>> print(nltk.corpus.nps_chat.tagged_words()) + [('now', 'RB'), ('im', 'PRP'), ('left', 'VBD'), ...] + >>> print(nltk.corpus.nps_chat.tagged_posts()) + [[('now', 'RB'), ('im', 'PRP'), ('left', 'VBD'), ('with', 'IN'), + ('this', 'DT'), ('gay', 'JJ'), ('name', 'NN')], [(':P', 'UH')], ...] + +We can access the XML elements corresponding to individual posts. These elements +have ``class`` and ``user`` attributes that we can access using ``p.attrib['class']`` +and ``p.attrib['user']``. They also have text content, accessed using ``p.text``. + + >>> print(nltk.corpus.nps_chat.xml_posts()) + [, , ...] 
+ >>> posts = nltk.corpus.nps_chat.xml_posts() + >>> sorted(nltk.FreqDist(p.attrib['class'] for p in posts).keys()) + ['Accept', 'Bye', 'Clarify', 'Continuer', 'Emotion', 'Emphasis', + 'Greet', 'Other', 'Reject', 'Statement', 'System', 'nAnswer', + 'whQuestion', 'yAnswer', 'ynQuestion'] + >>> posts[0].text + 'now im left with this gay name' + +In addition to the above methods for accessing tagged text, we can navigate +the XML structure directly, as follows: + + >>> tokens = posts[0].findall('terminals/t') + >>> [t.attrib['pos'] + "/" + t.attrib['word'] for t in tokens] + ['RB/now', 'PRP/im', 'VBD/left', 'IN/with', 'DT/this', 'JJ/gay', 'NN/name'] + +multext_east +------------ + +The Multext-East Corpus consists of POS-tagged versions of George Orwell's book +1984 in 12 languages: English, Czech, Hungarian, Macedonian, Slovenian, Serbian, +Slovak, Romanian, Estonian, Farsi, Bulgarian and Polish. +The corpus can be accessed using the usual methods for tagged corpora. The tagset +can be transformed from the Multext-East specific MSD tags to the Universal tagset +using the "tagset" parameter of all functions returning tagged parts of the corpus. + + >>> print(nltk.corpus.multext_east.words("oana-en.xml")) + ['It', 'was', 'a', 'bright', ...] + >>> print(nltk.corpus.multext_east.tagged_words("oana-en.xml")) + [('It', '#Pp3ns'), ('was', '#Vmis3s'), ('a', '#Di'), ...] + >>> print(nltk.corpus.multext_east.tagged_sents("oana-en.xml", "universal")) + [[('It', 'PRON'), ('was', 'VERB'), ('a', 'DET'), ...] + + + +--------------------- +Corpus Reader Classes +--------------------- + +NLTK's *corpus reader* classes are used to access the contents of a +diverse set of corpora. Each corpus reader class is specialized to +handle a specific corpus format. Examples include the +`PlaintextCorpusReader`, which handles corpora that consist of a set +of unannotated text files, and the `BracketParseCorpusReader`, which +handles corpora that consist of files containing +parenthesis-delineated parse trees. + +Automatically Created Corpus Reader Instances +============================================= + +When the `nltk.corpus` module is imported, it automatically creates a +set of corpus reader instances that can be used to access the corpora +in the NLTK data distribution. Here is a small sample of those +corpus reader instances: + + >>> import nltk + >>> nltk.corpus.brown + + >>> nltk.corpus.treebank + + >>> nltk.corpus.names + + >>> nltk.corpus.genesis + + >>> nltk.corpus.inaugural + + +This sample illustrates that different corpus reader classes are used +to read different corpora; but that the same corpus reader class may +be used for more than one corpus (e.g., ``genesis`` and ``inaugural``). + +Creating New Corpus Reader Instances +==================================== + +Although the `nltk.corpus` module automatically creates corpus reader +instances for the corpora in the NLTK data distribution, you may +sometimes need to create your own corpus reader. In particular, you +would need to create your own corpus reader if you want... + +- To access a corpus that is not included in the NLTK data + distribution. + +- To access a full copy of a corpus for which the NLTK data + distribution only provides a sample. + +- To access a corpus using a customized corpus reader (e.g., with + a customized tokenizer). + +To create a new corpus reader, you will first need to look up the +signature for that corpus reader's constructor. 
Different corpus +readers have different constructor signatures, but most of the +constructor signatures have the basic form:: + + SomeCorpusReader(root, files, ...options...) + +Where ``root`` is an absolute path to the directory containing the +corpus data files; ``files`` is either a list of file names (relative +to ``root``) or a regexp specifying which files should be included; +and ``options`` are additional reader-specific options. For example, +we can create a customized corpus reader for the genesis corpus that +uses a different sentence tokenizer as follows: + + >>> # Find the directory where the corpus lives. + >>> genesis_dir = nltk.data.find('corpora/genesis') + >>> # Create our custom sentence tokenizer. + >>> my_sent_tokenizer = nltk.RegexpTokenizer('[^.!?]+') + >>> # Create the new corpus reader object. + >>> my_genesis = nltk.corpus.PlaintextCorpusReader( + ... genesis_dir, r'.*\.txt', sent_tokenizer=my_sent_tokenizer) + >>> # Use the new corpus reader object. + >>> print(my_genesis.sents('english-kjv.txt')[0]) + ['In', 'the', 'beginning', 'God', 'created', 'the', 'heaven', + 'and', 'the', 'earth'] + +If you wish to read your own plaintext corpus, which is stored in the +directory '/usr/share/some-corpus', then you can create a corpus +reader for it with:: + + >>> my_corpus = nltk.corpus.PlaintextCorpusReader( + ... '/usr/share/some-corpus', r'.*\.txt') # doctest: +SKIP + +For a complete list of corpus reader subclasses, see the API +documentation for `nltk.corpus.reader`. + +Corpus Types +============ + +Corpora vary widely in the types of content they include. This is +reflected in the fact that the base class `CorpusReader` only defines +a few general-purpose methods for listing and accessing the files that +make up a corpus. It is up to the subclasses to define *data access +methods* that provide access to the information in the corpus. +However, corpus reader subclasses should be consistent in their +definitions of these data access methods wherever possible. + +At a high level, corpora can be divided into three basic types: + +- A *token corpus* contains information about specific occurrences of + language use (or linguistic tokens), such as dialogues or written + texts. Examples of token corpora are collections of written text + and collections of speech. + +- A *type corpus*, or *lexicon*, contains information about a coherent + set of lexical items (or linguistic types). Examples of lexicons + are dictionaries and word lists. + +- A *language description corpus* contains information about a set of + non-lexical linguistic constructs, such as grammar rules. + +However, many individual corpora blur the distinctions between these +types. For example, corpora that are primarily lexicons may include +token data in the form of example sentences; and corpora that are +primarily token corpora may be accompanied by one or more word lists +or other lexical data sets. + +Because corpora vary so widely in their information content, we have +decided that it would not be wise to use separate corpus reader base +classes for different corpus types. Instead, we simply try to make +the corpus readers consistent wherever possible, but let them differ +where the underlying data itself differs. + +Common Corpus Reader Methods +============================ + +As mentioned above, there are only a handful of methods that all +corpus readers are guaranteed to implement. These methods provide +access to the files that contain the corpus data. 
Every corpus is +assumed to consist of one or more files, all located in a common root +directory (or in subdirectories of that root directory). The absolute +path to the root directory is stored in the ``root`` property: + + >>> import os + >>> str(nltk.corpus.genesis.root).replace(os.path.sep,'/') + '.../nltk_data/corpora/genesis' + +Each file within the corpus is identified by a platform-independent +identifier, which is basically a path string that uses ``/`` as the +path separator. I.e., this identifier can be converted to a relative +path as follows: + + >>> some_corpus_file_id = nltk.corpus.reuters.fileids()[0] + >>> import os.path + >>> os.path.normpath(some_corpus_file_id).replace(os.path.sep,'/') + 'test/14826' + +To get a list of all data files that make up a corpus, use the +``fileids()`` method. In some corpora, these files will not all contain +the same type of data; for example, for the ``nltk.corpus.timit`` +corpus, ``fileids()`` will return a list including text files, word +segmentation files, phonetic transcription files, sound files, and +metadata files. For corpora with diverse file types, the ``fileids()`` +method will often take one or more optional arguments, which can be +used to get a list of the files with a specific file type: + + >>> nltk.corpus.timit.fileids() + ['dr1-fvmh0/sa1.phn', 'dr1-fvmh0/sa1.txt', 'dr1-fvmh0/sa1.wav', ...] + >>> nltk.corpus.timit.fileids('phn') + ['dr1-fvmh0/sa1.phn', 'dr1-fvmh0/sa2.phn', 'dr1-fvmh0/si1466.phn', ...] + +In some corpora, the files are divided into distinct categories. For +these corpora, the ``fileids()`` method takes an optional argument, +which can be used to get a list of the files within a specific category: + + >>> nltk.corpus.brown.fileids('hobbies') + ['ce01', 'ce02', 'ce03', 'ce04', 'ce05', 'ce06', 'ce07', ...] + +The ``abspath()`` method can be used to find the absolute path to a +corpus file, given its file identifier: + + >>> str(nltk.corpus.brown.abspath('ce06')).replace(os.path.sep,'/') + '.../corpora/brown/ce06' + +The ``abspaths()`` method can be used to find the absolute paths for +one corpus file, a list of corpus files, or (if no fileids are specified), +all corpus files. + +This method is mainly useful as a helper method when defining corpus +data access methods, since data access methods can usually be called +with a string argument (to get a view for a specific file), with a +list argument (to get a view for a specific list of files), or with no +argument (to get a view for the whole corpus). + +Data Access Methods +=================== + +Individual corpus reader subclasses typically extend this basic set of +file-access methods with one or more *data access methods*, which provide +easy access to the data contained in the corpus. The signatures for +data access methods often have the basic form:: + + corpus_reader.some_data access(fileids=None, ...options...) + +Where ``fileids`` can be a single file identifier string (to get a view +for a specific file); a list of file identifier strings (to get a view +for a specific list of files); or None (to get a view for the entire +corpus). 
Some of the common data access methods, and their return +types, are: + + - I{corpus}.words(): list of str + - I{corpus}.sents(): list of (list of str) + - I{corpus}.paras(): list of (list of (list of str)) + - I{corpus}.tagged_words(): list of (str,str) tuple + - I{corpus}.tagged_sents(): list of (list of (str,str)) + - I{corpus}.tagged_paras(): list of (list of (list of (str,str))) + - I{corpus}.chunked_sents(): list of (Tree w/ (str,str) leaves) + - I{corpus}.parsed_sents(): list of (Tree with str leaves) + - I{corpus}.parsed_paras(): list of (list of (Tree with str leaves)) + - I{corpus}.xml(): A single xml ElementTree + - I{corpus}.raw(): str (unprocessed corpus contents) + +For example, the `words()` method is supported by many different +corpora, and returns a flat list of word strings: + + >>> nltk.corpus.brown.words() + ['The', 'Fulton', 'County', 'Grand', 'Jury', ...] + >>> nltk.corpus.treebank.words() + ['Pierre', 'Vinken', ',', '61', 'years', 'old', ...] + >>> nltk.corpus.conll2002.words() + ['Sao', 'Paulo', '(', 'Brasil', ')', ',', '23', ...] + >>> nltk.corpus.genesis.words() + ['In', 'the', 'beginning', 'God', 'created', ...] + +On the other hand, the `tagged_words()` method is only supported by +corpora that include part-of-speech annotations: + + >>> nltk.corpus.brown.tagged_words() + [('The', 'AT'), ('Fulton', 'NP-TL'), ...] + >>> nltk.corpus.treebank.tagged_words() + [('Pierre', 'NNP'), ('Vinken', 'NNP'), ...] + >>> nltk.corpus.conll2002.tagged_words() + [('Sao', 'NC'), ('Paulo', 'VMI'), ('(', 'Fpa'), ...] + >>> nltk.corpus.genesis.tagged_words() + Traceback (most recent call last): + ... + AttributeError: 'PlaintextCorpusReader' object has no attribute 'tagged_words' + +Although most corpus readers use file identifiers to index their +content, some corpora use different identifiers instead. For example, +the data access methods for the ``timit`` corpus uses *utterance +identifiers* to select which corpus items should be returned: + + >>> nltk.corpus.timit.utteranceids() + ['dr1-fvmh0/sa1', 'dr1-fvmh0/sa2', 'dr1-fvmh0/si1466', ...] + >>> nltk.corpus.timit.words('dr1-fvmh0/sa2') + ["don't", 'ask', 'me', 'to', 'carry', 'an', 'oily', 'rag', 'like', 'that'] + +Attempting to call ``timit``\ 's data access methods with a file +identifier will result in an exception: + + >>> nltk.corpus.timit.fileids() + ['dr1-fvmh0/sa1.phn', 'dr1-fvmh0/sa1.txt', 'dr1-fvmh0/sa1.wav', ...] + >>> nltk.corpus.timit.words('dr1-fvmh0/sa1.txt') # doctest: +SKIP + Traceback (most recent call last): + ... + IOError: No such file or directory: '.../dr1-fvmh0/sa1.txt.wrd' + +As another example, the ``propbank`` corpus defines the ``roleset()`` +method, which expects a roleset identifier, not a file identifier: + + >>> roleset = nltk.corpus.propbank.roleset('eat.01') + >>> from xml.etree import ElementTree as ET + >>> print(ET.tostring(roleset).decode('utf8')) + + + ...... + ... + ... + +Stream Backed Corpus Views +========================== +An important feature of NLTK's corpus readers is that many of them +access the underlying data files using "corpus views." A *corpus +view* is an object that acts like a simple data structure (such as a +list), but does not store the data elements in memory; instead, data +elements are read from the underlying data files on an as-needed +basis. + +By only loading items from the file on an as-needed basis, corpus +views maintain both memory efficiency and responsiveness. 
The memory +efficiency of corpus readers is important because some corpora contain +very large amounts of data, and storing the entire data set in memory +could overwhelm many machines. The responsiveness is important when +experimenting with corpora in interactive sessions and in in-class +demonstrations. + +The most common corpus view is the `StreamBackedCorpusView`, which +acts as a read-only list of tokens. Two additional corpus view +classes, `ConcatenatedCorpusView` and `LazySubsequence`, make it +possible to create concatenations and take slices of +`StreamBackedCorpusView` objects without actually storing the +resulting list-like object's elements in memory. + +In the future, we may add additional corpus views that act like other +basic data structures, such as dictionaries. + +Writing New Corpus Readers +========================== + +In order to add support for new corpus formats, it is necessary to +define new corpus reader classes. For many corpus formats, writing +new corpus readers is relatively straight-forward. In this section, +we'll describe what's involved in creating a new corpus reader. If +you do create a new corpus reader, we encourage you to contribute it +back to the NLTK project. + +Don't Reinvent the Wheel +------------------------ +Before you start writing a new corpus reader, you should check to be +sure that the desired format can't be read using an existing corpus +reader with appropriate constructor arguments. For example, although +the `TaggedCorpusReader` assumes that words and tags are separated by +``/`` characters by default, an alternative tag-separation character +can be specified via the ``sep`` constructor argument. You should +also check whether the new corpus format can be handled by subclassing +an existing corpus reader, and tweaking a few methods or variables. + +Design +------ +If you decide to write a new corpus reader from scratch, then you +should first decide which data access methods you want the reader to +provide, and what their signatures should be. You should look at +existing corpus readers that process corpora with similar data +contents, and try to be consistent with those corpus readers whenever +possible. + +You should also consider what sets of identifiers are appropriate for +the corpus format. Where it's practical, file identifiers should be +used. However, for some corpora, it may make sense to use additional +sets of identifiers. Each set of identifiers should have a distinct +name (e.g., fileids, utteranceids, rolesets); and you should be consistent +in using that name to refer to that identifier. Do not use parameter +names like ``id``, which leave it unclear what type of identifier is +required. + +Once you've decided what data access methods and identifiers are +appropriate for your corpus, you should decide if there are any +customizable parameters that you'd like the corpus reader to handle. +These parameters make it possible to use a single corpus reader to +handle a wider variety of corpora. The ``sep`` argument for +`TaggedCorpusReader`, mentioned above, is an example of a customizable +corpus reader parameter. + +Implementation +-------------- + +Constructor +~~~~~~~~~~~ +If your corpus reader implements any customizable parameters, then +you'll need to override the constructor. Typically, the new +constructor will first call its base class's constructor, and then +store the customizable parameters. 
For example, the +`ConllChunkCorpusReader`\ 's constructor is defined as follows: + + >>> def __init__(self, root, fileids, chunk_types, encoding='utf8', + ... tagset=None, separator=None): + ... ConllCorpusReader.__init__( + ... self, root, fileids, ('words', 'pos', 'chunk'), + ... chunk_types=chunk_types, encoding=encoding, + ... tagset=tagset, separator=separator) + +If your corpus reader does not implement any customization parameters, +then you can often just inherit the base class's constructor. + +Data Access Methods +~~~~~~~~~~~~~~~~~~~ + +The most common type of data access method takes an argument +identifying which files to access, and returns a view covering those +files. This argument may be a single file identifier string (to get a +view for a specific file); a list of file identifier strings (to get a +view for a specific list of files); or None (to get a view for the +entire corpus). The method's implementation converts this argument to +a list of path names using the `abspaths()` method, which handles all +three value types (string, list, and None): + + >>> print(str(nltk.corpus.brown.abspaths()).replace('\\\\','/')) + [FileSystemPathPointer('.../corpora/brown/ca01'), + FileSystemPathPointer('.../corpora/brown/ca02'), ...] + >>> print(str(nltk.corpus.brown.abspaths('ce06')).replace('\\\\','/')) + [FileSystemPathPointer('.../corpora/brown/ce06')] + >>> print(str(nltk.corpus.brown.abspaths(['ce06', 'ce07'])).replace('\\\\','/')) + [FileSystemPathPointer('.../corpora/brown/ce06'), + FileSystemPathPointer('.../corpora/brown/ce07')] + +An example of this type of method is the `words()` method, defined by +the `PlaintextCorpusReader` as follows: + + >>> def words(self, fileids=None): + ... return concat([self.CorpusView(fileid, self._read_word_block) + ... for fileid in self.abspaths(fileids)]) + +This method first uses `abspaths()` to convert ``fileids`` to a list of +absolute paths. It then creates a corpus view for each file, using +the `PlaintextCorpusReader._read_word_block()` method to read elements +from the data file (see the discussion of corpus views below). +Finally, it combines these corpus views using the +`nltk.corpus.reader.util.concat()` function. + +When writing a corpus reader for a corpus that is never expected to be +very large, it can sometimes be appropriate to read the files +directly, rather than using a corpus view. For example, the +`WordListCorpusView` class defines its `words()` method as follows: + + >>> def words(self, fileids=None): + ... return concat([[w for w in open(fileid).read().split('\n') if w] + ... for fileid in self.abspaths(fileids)]) + +(This is usually more appropriate for lexicons than for token corpora.) + +If the type of data returned by a data access method is one for which +NLTK has a conventional representation (e.g., words, tagged words, and +parse trees), then you should use that representation. Otherwise, you +may find it necessary to define your own representation. For data +structures that are relatively corpus-specific, it's usually best to +define new classes for these elements. For example, the ``propbank`` +corpus defines the `PropbankInstance` class to store the semantic role +labeling instances described by the corpus; and the ``ppattach`` +corpus defines the `PPAttachment` class to store the prepositional +attachment instances described by the corpus. + +Corpus Views +~~~~~~~~~~~~ +.. (Much of the content for this section is taken from the + StreamBackedCorpusView docstring.) 
+ +The heart of a `StreamBackedCorpusView` is its *block reader* +function, which reads zero or more tokens from a stream, and returns +them as a list. A very simple example of a block reader is: + + >>> def simple_block_reader(stream): + ... return stream.readline().split() + +This simple block reader reads a single line at a time, and returns a +single token (consisting of a string) for each whitespace-separated +substring on the line. A `StreamBackedCorpusView` built from this +block reader will act like a read-only list of all the +whitespace-separated tokens in an underlying file. + +When deciding how to define the block reader for a given corpus, +careful consideration should be given to the size of blocks handled by +the block reader. Smaller block sizes will increase the memory +requirements of the corpus view's internal data structures (by 2 +integers per block). On the other hand, larger block sizes may +decrease performance for random access to the corpus. (But note that +larger block sizes will *not* decrease performance for iteration.) + +Internally, the `StreamBackedCorpusView` class maintains a partial +mapping from token index to file position, with one entry per block. +When a token with a given index *i* is requested, the corpus view +constructs it as follows: + +1. First, it searches the toknum/filepos mapping for the token index + closest to (but less than or equal to) *i*. + +2. Then, starting at the file position corresponding to that index, it + reads one block at a time using the block reader until it reaches + the requested token. + +The toknum/filepos mapping is created lazily: it is initially empty, +but every time a new block is read, the block's initial token is added +to the mapping. (Thus, the toknum/filepos map has one entry per +block.) + +You can create your own corpus view in one of two ways: + +1. Call the `StreamBackedCorpusView` constructor, and provide your + block reader function via the ``block_reader`` argument. + +2. Subclass `StreamBackedCorpusView`, and override the + `read_block()` method. + +The first option is usually easier, but the second option can allow +you to write a single `read_block` method whose behavior can be +customized by different parameters to the subclass's constructor. For +an example of this design pattern, see the `TaggedCorpusView` class, +which is used by `TaggedCorpusView`. + +---------------- +Regression Tests +---------------- + +The following helper functions are used to create and then delete +testing corpora that are stored in temporary directories. These +testing corpora are used to make sure the readers work correctly. + + >>> import tempfile, os.path, textwrap + >>> def make_testcorpus(ext='', **fileids): + ... root = tempfile.mkdtemp() + ... for fileid, contents in fileids.items(): + ... fileid += ext + ... f = open(os.path.join(root, fileid), 'w') + ... f.write(textwrap.dedent(contents)) + ... f.close() + ... return root + >>> def del_testcorpus(root): + ... for fileid in os.listdir(root): + ... os.remove(os.path.join(root, fileid)) + ... os.rmdir(root) + +Plaintext Corpus Reader +======================= +The plaintext corpus reader is used to access corpora that consist of +unprocessed plaintext data. It assumes that paragraph breaks are +indicated by blank lines. Sentences and words can be tokenized using +the default tokenizers, or by custom tokenizers specified as +parameters to the constructor. + + >>> root = make_testcorpus(ext='.txt', + ... a="""\ + ... This is the first sentence. Here is another + ... 
sentence! And here's a third sentence. + ... + ... This is the second paragraph. Tokenization is currently + ... fairly simple, so the period in Mr. gets tokenized. + ... """, + ... b="""This is the second file.""") + + >>> from nltk.corpus.reader.plaintext import PlaintextCorpusReader + +The list of documents can be specified explicitly, or implicitly (using a +regexp). The ``ext`` argument specifies a file extension. + + >>> corpus = PlaintextCorpusReader(root, ['a.txt', 'b.txt']) + >>> corpus.fileids() + ['a.txt', 'b.txt'] + >>> corpus = PlaintextCorpusReader(root, r'.*\.txt') + >>> corpus.fileids() + ['a.txt', 'b.txt'] + +The directory containing the corpus is corpus.root: + + >>> str(corpus.root) == str(root) + True + +We can get a list of words, or the raw string: + + >>> corpus.words() + ['This', 'is', 'the', 'first', 'sentence', '.', ...] + >>> corpus.raw()[:40] + 'This is the first sentence. Here is ano' + +Check that reading individual documents works, and reading all documents at +once works: + + >>> len(corpus.words()), [len(corpus.words(d)) for d in corpus.fileids()] + (46, [40, 6]) + >>> corpus.words('a.txt') + ['This', 'is', 'the', 'first', 'sentence', '.', ...] + >>> corpus.words('b.txt') + ['This', 'is', 'the', 'second', 'file', '.'] + >>> corpus.words()[:4], corpus.words()[-4:] + (['This', 'is', 'the', 'first'], ['the', 'second', 'file', '.']) + +We're done with the test corpus: + + >>> del_testcorpus(root) + +Test the plaintext corpora that come with nltk: + + >>> from nltk.corpus import abc, genesis, inaugural + >>> from nltk.corpus import state_union, webtext + >>> for corpus in (abc, genesis, inaugural, state_union, + ... webtext): + ... print(str(corpus).replace('\\\\','/')) + ... print(' ', repr(corpus.fileids())[:60]) + ... print(' ', repr(corpus.words()[:10])[:60]) + + ['rural.txt', 'science.txt'] + ['PM', 'denies', 'knowledge', 'of', 'AWB', ... + + ['english-kjv.txt', 'english-web.txt', 'finnish.txt', ... + ['In', 'the', 'beginning', 'God', 'created', 'the', ... + + ['1789-Washington.txt', '1793-Washington.txt', ... + ['Fellow', '-', 'Citizens', 'of', 'the', 'Senate', ... + + ['1945-Truman.txt', '1946-Truman.txt', ... + ['PRESIDENT', 'HARRY', 'S', '.', 'TRUMAN', "'", ... + + ['firefox.txt', 'grail.txt', 'overheard.txt', ... + ['Cookie', 'Manager', ':', '"', 'Don', "'", 't', ... + + +Tagged Corpus Reader +==================== +The Tagged Corpus reader can give us words, sentences, and paragraphs, +each tagged or untagged. All of the read methods can take one item +(in which case they return the contents of that file) or a list of +documents (in which case they concatenate the contents of those files). +By default, they apply to all documents in the corpus. + + >>> root = make_testcorpus( + ... a="""\ + ... This/det is/verb the/det first/adj sentence/noun ./punc + ... Here/det is/verb another/adj sentence/noun ./punc + ... Note/verb that/comp you/pron can/verb use/verb \ + ... any/noun tag/noun set/noun + ... + ... This/det is/verb the/det second/adj paragraph/noun ./punc + ... word/n without/adj a/det tag/noun :/: hello ./punc + ... """, + ... b="""\ + ... This/det is/verb the/det second/adj file/noun ./punc + ... """) + + >>> from nltk.corpus.reader.tagged import TaggedCorpusReader + >>> corpus = TaggedCorpusReader(root, list('ab')) + >>> corpus.fileids() + ['a', 'b'] + >>> str(corpus.root) == str(root) + True + >>> corpus.words() + ['This', 'is', 'the', 'first', 'sentence', '.', ...] 
+ >>> corpus.sents() + [['This', 'is', 'the', 'first', ...], ['Here', 'is', 'another'...], ...] + >>> corpus.paras() + [[['This', ...], ['Here', ...], ...], [['This', ...], ...], ...] + >>> corpus.tagged_words() + [('This', 'DET'), ('is', 'VERB'), ('the', 'DET'), ...] + >>> corpus.tagged_sents() + [[('This', 'DET'), ('is', 'VERB'), ...], [('Here', 'DET'), ...], ...] + >>> corpus.tagged_paras() + [[[('This', 'DET'), ...], ...], [[('This', 'DET'), ...], ...], ...] + >>> corpus.raw()[:40] + 'This/det is/verb the/det first/adj sente' + >>> len(corpus.words()), [len(corpus.words(d)) for d in corpus.fileids()] + (38, [32, 6]) + >>> len(corpus.sents()), [len(corpus.sents(d)) for d in corpus.fileids()] + (6, [5, 1]) + >>> len(corpus.paras()), [len(corpus.paras(d)) for d in corpus.fileids()] + (3, [2, 1]) + >>> print(corpus.words('a')) + ['This', 'is', 'the', 'first', 'sentence', '.', ...] + >>> print(corpus.words('b')) + ['This', 'is', 'the', 'second', 'file', '.'] + >>> del_testcorpus(root) + +The Brown Corpus uses the tagged corpus reader: + + >>> from nltk.corpus import brown + >>> brown.fileids() + ['ca01', 'ca02', 'ca03', 'ca04', 'ca05', 'ca06', 'ca07', ...] + >>> brown.categories() + ['adventure', 'belles_lettres', 'editorial', 'fiction', 'government', 'hobbies', 'humor', + 'learned', 'lore', 'mystery', 'news', 'religion', 'reviews', 'romance', 'science_fiction'] + >>> print(repr(brown.root).replace('\\\\','/')) + FileSystemPathPointer('.../corpora/brown') + >>> brown.words() + ['The', 'Fulton', 'County', 'Grand', 'Jury', ...] + >>> brown.sents() + [['The', 'Fulton', 'County', 'Grand', ...], ...] + >>> brown.paras() + [[['The', 'Fulton', 'County', ...]], [['The', 'jury', ...]], ...] + >>> brown.tagged_words() + [('The', 'AT'), ('Fulton', 'NP-TL'), ...] + >>> brown.tagged_sents() + [[('The', 'AT'), ('Fulton', 'NP-TL'), ('County', 'NN-TL'), ...], ...] + >>> brown.tagged_paras() + [[[('The', 'AT'), ...]], [[('The', 'AT'), ...]], ...] + +Verbnet Corpus Reader +===================== + +Make sure we're picking up the right number of elements: + + >>> from nltk.corpus import verbnet + >>> len(verbnet.lemmas()) + 3621 + >>> len(verbnet.wordnetids()) + 4953 + >>> len(verbnet.classids()) + 429 + +Selecting classids based on various selectors: + + >>> verbnet.classids(lemma='take') + ['bring-11.3', 'characterize-29.2', 'convert-26.6.2', 'cost-54.2', + 'fit-54.3', 'performance-26.7-2', 'steal-10.5'] + >>> verbnet.classids(wordnetid='lead%2:38:01') + ['accompany-51.7'] + >>> verbnet.classids(fileid='approve-77.xml') + ['approve-77'] + >>> verbnet.classids(classid='admire-31.2') # subclasses + ['admire-31.2-1'] + +vnclass() accepts filenames, long ids, and short ids: + + >>> a = ElementTree.tostring(verbnet.vnclass('admire-31.2.xml')) + >>> b = ElementTree.tostring(verbnet.vnclass('admire-31.2')) + >>> c = ElementTree.tostring(verbnet.vnclass('31.2')) + >>> a == b == c + True + +fileids() can be used to get files based on verbnet class ids: + + >>> verbnet.fileids('admire-31.2') + ['admire-31.2.xml'] + >>> verbnet.fileids(['admire-31.2', 'obtain-13.5.2']) + ['admire-31.2.xml', 'obtain-13.5.2.xml'] + >>> verbnet.fileids('badidentifier') + Traceback (most recent call last): + . . . 
+ ValueError: vnclass identifier 'badidentifier' not found + +longid() and shortid() can be used to convert identifiers: + + >>> verbnet.longid('31.2') + 'admire-31.2' + >>> verbnet.longid('admire-31.2') + 'admire-31.2' + >>> verbnet.shortid('31.2') + '31.2' + >>> verbnet.shortid('admire-31.2') + '31.2' + >>> verbnet.longid('badidentifier') + Traceback (most recent call last): + . . . + ValueError: vnclass identifier 'badidentifier' not found + >>> verbnet.shortid('badidentifier') + Traceback (most recent call last): + . . . + ValueError: vnclass identifier 'badidentifier' not found + +Corpus View Regression Tests +============================ + +Select some corpus files to play with: + + >>> import nltk.data + >>> # A very short file (160 chars): + >>> f1 = nltk.data.find('corpora/inaugural/README') + >>> # A relatively short file (791 chars): + >>> f2 = nltk.data.find('corpora/inaugural/1793-Washington.txt') + >>> # A longer file (32k chars): + >>> f3 = nltk.data.find('corpora/inaugural/1909-Taft.txt') + >>> fileids = [f1, f2, f3] + + +Concatenation +------------- +Check that concatenation works as intended. + + >>> from nltk.corpus.reader.util import * + + >>> c1 = StreamBackedCorpusView(f1, read_whitespace_block, encoding='utf-8') + >>> c2 = StreamBackedCorpusView(f2, read_whitespace_block, encoding='utf-8') + >>> c3 = StreamBackedCorpusView(f3, read_whitespace_block, encoding='utf-8') + >>> c123 = c1+c2+c3 + >>> print(c123) + ['C-Span', 'Inaugural', 'Address', 'Corpus', 'US', ...] + + >>> l1 = f1.open(encoding='utf-8').read().split() + >>> l2 = f2.open(encoding='utf-8').read().split() + >>> l3 = f3.open(encoding='utf-8').read().split() + >>> l123 = l1+l2+l3 + + >>> list(c123) == l123 + True + + >>> (c1+c2+c3)[100] == l123[100] + True + +Slicing +------- +First, do some tests with fairly small slices. These will all +generate tuple values. + + >>> from nltk.util import LazySubsequence + >>> c1 = StreamBackedCorpusView(f1, read_whitespace_block, encoding='utf-8') + >>> l1 = f1.open(encoding='utf-8').read().split() + >>> print(len(c1)) + 21 + >>> len(c1) < LazySubsequence.MIN_SIZE + True + +Choose a list of indices, based on the length, that covers the +important corner cases: + + >>> indices = [-60, -30, -22, -21, -20, -1, + ... 0, 1, 10, 20, 21, 22, 30, 60] + +Test slicing with explicit start & stop value: + + >>> for s in indices: + ... for e in indices: + ... assert list(c1[s:e]) == l1[s:e] + +Test slicing with stop=None: + + >>> for s in indices: + ... assert list(c1[s:]) == l1[s:] + +Test slicing with start=None: + + >>> for e in indices: + ... assert list(c1[:e]) == l1[:e] + +Test slicing with start=stop=None: + + >>> list(c1[:]) == list(l1[:]) + True + +Next, we'll do some tests with much longer slices. These will +generate LazySubsequence objects. + + >>> c3 = StreamBackedCorpusView(f3, read_whitespace_block, encoding='utf-8') + >>> l3 = f3.open(encoding='utf-8').read().split() + >>> print(len(c3)) + 5430 + >>> len(c3) > LazySubsequence.MIN_SIZE*2 + True + +Choose a list of indices, based on the length, that covers the +important corner cases: + + >>> indices = [-12000, -6000, -5431, -5430, -5429, -3000, -200, -1, + ... 0, 1, 200, 3000, 5000, 5429, 5430, 5431, 6000, 12000] + +Test slicing with explicit start & stop value: + + >>> for s in indices: + ... for e in indices: + ... assert list(c3[s:e]) == l3[s:e] + +Test slicing with stop=None: + + >>> for s in indices: + ... assert list(c3[s:]) == l3[s:] + +Test slicing with start=None: + + >>> for e in indices: + ... 
assert list(c3[:e]) == l3[:e] + +Test slicing with start=stop=None: + + >>> list(c3[:]) == list(l3[:]) + True + +Multiple Iterators +------------------ +If multiple iterators are created for the same corpus view, their +iteration can be interleaved: + + >>> c3 = StreamBackedCorpusView(f3, read_whitespace_block) + >>> iterators = [c3.iterate_from(n) for n in [0,15,30,45]] + >>> for i in range(15): + ... for iterator in iterators: + ... print('%-15s' % next(iterator), end=' ') + ... print() + My a duties in + fellow heavy of a + citizens: weight the proper + Anyone of office sense + who responsibility. upon of + has If which the + taken not, he obligation + the he is which + oath has about the + I no to oath + have conception enter, imposes. + just of or The + taken the he office + must powers is of + feel and lacking an + +SeekableUnicodeStreamReader +=========================== + +The file-like objects provided by the ``codecs`` module unfortunately +suffer from a bug that prevents them from working correctly with +corpus view objects. In particular, although the expose ``seek()`` +and ``tell()`` methods, those methods do not exhibit the expected +behavior, because they are not synchronized with the internal buffers +that are kept by the file-like objects. For example, the ``tell()`` +method will return the file position at the end of the buffers (whose +contents have not yet been returned by the stream); and therefore this +file position can not be used to return to the 'current' location in +the stream (since ``seek()`` has no way to reconstruct the buffers). + +To get around these problems, we define a new class, +`SeekableUnicodeStreamReader`, to act as a file-like interface to +files containing encoded unicode data. This class is loosely based on +the ``codecs.StreamReader`` class. To construct a new reader, we call +the constructor with an underlying stream and an encoding name: + + >>> from io import StringIO, BytesIO + >>> from nltk.data import SeekableUnicodeStreamReader + >>> stream = BytesIO(b"""\ + ... This is a test file. + ... It is encoded in ascii. + ... """.decode('ascii').encode('ascii')) + >>> reader = SeekableUnicodeStreamReader(stream, 'ascii') + +`SeekableUnicodeStreamReader`\ s support all of the normal operations +supplied by a read-only stream. Note that all of the read operations +return ``unicode`` objects (not ``str`` objects). + + >>> reader.read() # read the entire file. + 'This is a test file.\nIt is encoded in ascii.\n' + >>> reader.seek(0) # rewind to the start. + >>> reader.read(5) # read at most 5 bytes. + 'This ' + >>> reader.readline() # read to the end of the line. + 'is a test file.\n' + >>> reader.seek(0) # rewind to the start. + >>> for line in reader: + ... print(repr(line)) # iterate over lines + 'This is a test file.\n' + 'It is encoded in ascii.\n' + >>> reader.seek(0) # rewind to the start. + >>> reader.readlines() # read a list of line strings + ['This is a test file.\n', 'It is encoded in ascii.\n'] + >>> reader.close() + +Size argument to ``read()`` +--------------------------- +The ``size`` argument to ``read()`` specifies the maximum number of +*bytes* to read, not the maximum number of *characters*. Thus, for +encodings that use multiple bytes per character, it may return fewer +characters than the ``size`` argument: + + >>> stream = BytesIO(b"""\ + ... This is a test file. + ... It is encoded in utf-16. + ... 
""".decode('ascii').encode('utf-16')) + >>> reader = SeekableUnicodeStreamReader(stream, 'utf-16') + >>> reader.read(10) + 'This ' + +If a read block ends in the middle of the byte string encoding a +single character, then that byte string is stored in an internal +buffer, and re-used on the next call to ``read()``. However, if the +size argument is too small to read even a single character, even +though at least one character is available, then the ``read()`` method +will read additional bytes until it can return a single character. +This ensures that the ``read()`` method does not return an empty +string, which could be mistaken for indicating the end of the file. + + >>> reader.seek(0) # rewind to the start. + >>> reader.read(1) # we actually need to read 4 bytes + 'T' + >>> int(reader.tell()) + 4 + +The ``readline()`` method may read more than a single line of text, in +which case it stores the text that it does not return in a buffer. If +this buffer is not empty, then its contents will be included in the +value returned by the next call to ``read()``, regardless of the +``size`` argument, since they are available without reading any new +bytes from the stream: + + >>> reader.seek(0) # rewind to the start. + >>> reader.readline() # stores extra text in a buffer + 'This is a test file.\n' + >>> print(reader.linebuffer) # examine the buffer contents + ['It is encoded i'] + >>> reader.read(0) # returns the contents of the buffer + 'It is encoded i' + >>> print(reader.linebuffer) # examine the buffer contents + None + +Seek and Tell +------------- +In addition to these basic read operations, +`SeekableUnicodeStreamReader` also supports the ``seek()`` and +``tell()`` operations. However, some care must still be taken when +using these operations. In particular, the only file offsets that +should be passed to ``seek()`` are ``0`` and any offset that has been +returned by ``tell``. + + >>> stream = BytesIO(b"""\ + ... This is a test file. + ... It is encoded in utf-16. + ... """.decode('ascii').encode('utf-16')) + >>> reader = SeekableUnicodeStreamReader(stream, 'utf-16') + >>> reader.read(20) + 'This is a ' + >>> pos = reader.tell(); print(pos) + 22 + >>> reader.read(20) + 'test file.' + >>> reader.seek(pos) # rewind to the position from tell. + >>> reader.read(20) + 'test file.' + +The ``seek()`` and ``tell()`` methods work property even when +``readline()`` is used. + + >>> stream = BytesIO(b"""\ + ... This is a test file. + ... It is encoded in utf-16. + ... """.decode('ascii').encode('utf-16')) + >>> reader = SeekableUnicodeStreamReader(stream, 'utf-16') + >>> reader.readline() + 'This is a test file.\n' + >>> pos = reader.tell(); print(pos) + 44 + >>> reader.readline() + 'It is encoded in utf-16.\n' + >>> reader.seek(pos) # rewind to the position from tell. + >>> reader.readline() + 'It is encoded in utf-16.\n' + + +Squashed Bugs +============= + +svn 5276 fixed a bug in the comment-stripping behavior of +parse_sexpr_block. + + >>> from io import StringIO + >>> from nltk.corpus.reader.util import read_sexpr_block + >>> f = StringIO(b""" + ... (a b c) + ... # This line is a comment. + ... (d e f\ng h)""".decode('ascii')) + >>> print(read_sexpr_block(f, block_size=38, comment_char='#')) + ['(a b c)'] + >>> print(read_sexpr_block(f, block_size=38, comment_char='#')) + ['(d e f\ng h)'] + +svn 5277 fixed a bug in parse_sexpr_block, which would cause it to +enter an infinite loop if a file ended mid-sexpr, or ended with a +token that was not followed by whitespace. 
A related bug caused +an infinite loop if the corpus ended in an unmatched close paren -- +this was fixed in svn 5279 + + >>> f = StringIO(b""" + ... This file ends mid-sexpr + ... (hello (world""".decode('ascii')) + >>> for i in range(3): print(read_sexpr_block(f)) + ['This', 'file', 'ends', 'mid-sexpr'] + ['(hello (world'] + [] + + >>> f = StringIO(b"This file has no trailing whitespace.".decode('ascii')) + >>> for i in range(3): print(read_sexpr_block(f)) + ['This', 'file', 'has', 'no', 'trailing'] + ['whitespace.'] + [] + + >>> # Bug fixed in 5279: + >>> f = StringIO(b"a b c)".decode('ascii')) + >>> for i in range(3): print(read_sexpr_block(f)) + ['a', 'b'] + ['c)'] + [] + + +svn 5624 & 5265 fixed a bug in ConcatenatedCorpusView, which caused it +to return the wrong items when indexed starting at any index beyond +the first file. + + >>> import nltk + >>> sents = nltk.corpus.brown.sents() + >>> print(sents[6000]) + ['Cholesterol', 'and', 'thyroid'] + >>> print(sents[6000]) + ['Cholesterol', 'and', 'thyroid'] + +svn 5728 fixed a bug in Categorized*CorpusReader, which caused them +to return words from *all* files when just one file was specified. + + >>> from nltk.corpus import reuters + >>> reuters.words('training/13085') + ['SNYDER', '&', 'lt', ';', 'SOI', '>', 'MAKES', ...] + >>> reuters.words('training/5082') + ['SHEPPARD', 'RESOURCES', 'TO', 'MERGE', 'WITH', ...] + +svn 7227 fixed a bug in the qc corpus reader, which prevented +access to its tuples() method + + >>> from nltk.corpus import qc + >>> qc.tuples('test.txt') + [('NUM:dist', 'How far is it from Denver to Aspen ?'), ('LOC:city', 'What county is Modesto , California in ?'), ...] + +Ensure that KEYWORD from `comparative_sents.py` no longer contains a ReDoS vulnerability. + + >>> import re + >>> import time + >>> from nltk.corpus.reader.comparative_sents import KEYWORD + >>> sizes = { + ... "short": 4000, + ... "long": 40000 + ... } + >>> exec_times = { + ... "short": [], + ... "long": [], + ... } + >>> for size_name, size in sizes.items(): + ... for j in range(9): + ... start_t = time.perf_counter() + ... payload = "( " + "(" * size + ... output = KEYWORD.findall(payload) + ... exec_times[size_name].append(time.perf_counter() - start_t) + ... exec_times[size_name] = sorted(exec_times[size_name])[4] # Get the median + +Ideally, the execution time of such a regular expression is linear +in the length of the input. As such, we would expect exec_times["long"] +to be roughly 10 times as big as exec_times["short"]. +With the ReDoS in place, it took roughly 80 times as long. +For now, we accept values below 30 (times as long), due to the potential +for variance. This ensures that the ReDoS has certainly been reduced, +if not removed. + + >>> exec_times["long"] / exec_times["short"] < 30 # doctest: +SKIP + True diff --git a/lib/python3.10/site-packages/nltk/test/data.doctest b/lib/python3.10/site-packages/nltk/test/data.doctest new file mode 100644 index 0000000000000000000000000000000000000000..0f54657d00c1e719518ca4f8034c1a91d483835c --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/data.doctest @@ -0,0 +1,387 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +========================================= + Loading Resources From the Data Package +========================================= + + >>> import nltk.data + +Overview +~~~~~~~~ +The `nltk.data` module contains functions that can be used to load +NLTK resource files, such as corpora, grammars, and saved processing +objects. 
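+
+Where these resources are searched for is controlled by the list
+``nltk.data.path``, which holds the names of the data directories that
+the loading functions below consult, in order.  The check and the
+extra directory in the following sketch are additions to this file
+(``/tmp/my_nltk_data`` is a hypothetical location), not part of the
+original examples:
+
+    >>> isinstance(nltk.data.path, list)
+    True
+    >>> # Prepend a private directory so that it is searched first.
+    >>> nltk.data.path.insert(0, '/tmp/my_nltk_data') # doctest: +SKIP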
+ +Loading Data Files +~~~~~~~~~~~~~~~~~~ +Resources are loaded using the function `nltk.data.load()`, which +takes as its first argument a URL specifying what file should be +loaded. The ``nltk:`` protocol loads files from the NLTK data +distribution: + + >>> tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle') + >>> tokenizer.tokenize('Hello. This is a test. It works!') + ['Hello.', 'This is a test.', 'It works!'] + +It is important to note that there should be no space following the +colon (':') in the URL; 'nltk: tokenizers/punkt/english.pickle' will +not work! + +The ``nltk:`` protocol is used by default if no protocol is specified: + + >>> nltk.data.load('tokenizers/punkt/english.pickle') + + +But it is also possible to load resources from ``http:``, ``ftp:``, +and ``file:`` URLs: + + >>> # Load a grammar from the NLTK webpage. + >>> cfg = nltk.data.load('https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg') + >>> print(cfg) # doctest: +ELLIPSIS + Grammar with 14 productions (start state = S) + S -> NP VP + PP -> P NP + ... + P -> 'on' + P -> 'in' + + >>> # Load a grammar using an absolute path. + >>> url = 'file:%s' % nltk.data.find('grammars/sample_grammars/toy.cfg') + >>> url.replace('\\', '/') + 'file:...toy.cfg' + >>> print(nltk.data.load(url)) + Grammar with 14 productions (start state = S) + S -> NP VP + PP -> P NP + ... + P -> 'on' + P -> 'in' + +The second argument to the `nltk.data.load()` function specifies the +file format, which determines how the file's contents are processed +before they are returned by ``load()``. The formats that are +currently supported by the data module are described by the dictionary +`nltk.data.FORMATS`: + + >>> for format, descr in sorted(nltk.data.FORMATS.items()): + ... print('{0:<7} {1:}'.format(format, descr)) + cfg A context free grammar. + fcfg A feature CFG. + fol A list of first order logic expressions, parsed with + nltk.sem.logic.Expression.fromstring. + json A serialized python object, stored using the json module. + logic A list of first order logic expressions, parsed with + nltk.sem.logic.LogicParser. Requires an additional logic_parser + parameter + pcfg A probabilistic CFG. + pickle A serialized python object, stored using the pickle + module. + raw The raw (byte string) contents of a file. + text The raw (unicode string) contents of a file. + val A semantic valuation, parsed by + nltk.sem.Valuation.fromstring. + yaml A serialized python object, stored using the yaml module. + +`nltk.data.load()` will raise a ValueError if a bad format name is +specified: + + >>> nltk.data.load('grammars/sample_grammars/toy.cfg', 'bar') + Traceback (most recent call last): + . . . + ValueError: Unknown format type! + +By default, the ``"auto"`` format is used, which chooses a format +based on the filename's extension. The mapping from file extensions +to format names is specified by `nltk.data.AUTO_FORMATS`: + + >>> for ext, format in sorted(nltk.data.AUTO_FORMATS.items()): + ... print('.%-7s -> %s' % (ext, format)) + .cfg -> cfg + .fcfg -> fcfg + .fol -> fol + .json -> json + .logic -> logic + .pcfg -> pcfg + .pickle -> pickle + .text -> text + .txt -> text + .val -> val + .yaml -> yaml + +If `nltk.data.load()` is unable to determine the format based on the +filename's extension, it will raise a ValueError: + + >>> nltk.data.load('foo.bar') + Traceback (most recent call last): + . . . 
+ ValueError: Could not determine format for foo.bar based on its file + extension; use the "format" argument to specify the format explicitly. + +Note that by explicitly specifying the ``format`` argument, you can +override the load method's default processing behavior. For example, +to get the raw contents of any file, simply use ``format="raw"``: + + >>> s = nltk.data.load('grammars/sample_grammars/toy.cfg', 'text') + >>> print(s) + S -> NP VP + PP -> P NP + NP -> Det N | NP PP + VP -> V NP | VP PP + ... + +Making Local Copies +~~~~~~~~~~~~~~~~~~~ +.. This will not be visible in the html output: create a tempdir to + play in. + >>> import tempfile, os + >>> tempdir = tempfile.mkdtemp() + >>> old_dir = os.path.abspath('.') + >>> os.chdir(tempdir) + +The function `nltk.data.retrieve()` copies a given resource to a local +file. This can be useful, for example, if you want to edit one of the +sample grammars. + + >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg') + Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy.cfg' + + >>> # Simulate editing the grammar. + >>> with open('toy.cfg') as inp: + ... s = inp.read().replace('NP', 'DP') + >>> with open('toy.cfg', 'w') as out: + ... _bytes_written = out.write(s) + + >>> # Load the edited grammar, & display it. + >>> cfg = nltk.data.load('file:///' + os.path.abspath('toy.cfg')) + >>> print(cfg) + Grammar with 14 productions (start state = S) + S -> DP VP + PP -> P DP + ... + P -> 'on' + P -> 'in' + +The second argument to `nltk.data.retrieve()` specifies the filename +for the new copy of the file. By default, the source file's filename +is used. + + >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg', 'mytoy.cfg') + Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'mytoy.cfg' + >>> os.path.isfile('./mytoy.cfg') + True + >>> nltk.data.retrieve('grammars/sample_grammars/np.fcfg') + Retrieving 'nltk:grammars/sample_grammars/np.fcfg', saving to 'np.fcfg' + >>> os.path.isfile('./np.fcfg') + True + +If a file with the specified (or default) filename already exists in +the current directory, then `nltk.data.retrieve()` will raise a +ValueError exception. It will *not* overwrite the file: + + >>> os.path.isfile('./toy.cfg') + True + >>> nltk.data.retrieve('grammars/sample_grammars/toy.cfg') + Traceback (most recent call last): + . . . + ValueError: File '...toy.cfg' already exists! + +.. This will not be visible in the html output: clean up the tempdir. + >>> os.chdir(old_dir) + >>> for f in os.listdir(tempdir): + ... os.remove(os.path.join(tempdir, f)) + >>> os.rmdir(tempdir) + +Finding Files in the NLTK Data Package +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The `nltk.data.find()` function searches the NLTK data package for a +given file, and returns a pointer to that file. This pointer can +either be a `FileSystemPathPointer` (whose `path` attribute gives the +absolute path of the file); or a `ZipFilePathPointer`, specifying a +zipfile and the name of an entry within that zipfile. Both pointer +types define the `open()` method, which can be used to read the string +contents of the file. 
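+
+If the requested file cannot be located in any of the data
+directories, ``find()`` raises a ``LookupError`` whose message
+suggests running the NLTK downloader.  The short sketch below is an
+addition to this file; the resource name is deliberately bogus so that
+the lookup is guaranteed to fail:
+
+    >>> try:
+    ...     nltk.data.find('corpora/no_such_corpus_xyz/missing.txt')
+    ... except LookupError:
+    ...     print('resource not installed')
+    resource not installed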
+ + >>> path = nltk.data.find('corpora/abc/rural.txt') + >>> str(path) + '...rural.txt' + >>> print(path.open().read(60).decode()) + PM denies knowledge of AWB kickbacks + The Prime Minister has + +Alternatively, the `nltk.data.load()` function can be used with the +keyword argument ``format="raw"``: + + >>> s = nltk.data.load('corpora/abc/rural.txt', format='raw')[:60] + >>> print(s.decode()) + PM denies knowledge of AWB kickbacks + The Prime Minister has + +Alternatively, you can use the keyword argument ``format="text"``: + + >>> s = nltk.data.load('corpora/abc/rural.txt', format='text')[:60] + >>> print(s) + PM denies knowledge of AWB kickbacks + The Prime Minister has + +Resource Caching +~~~~~~~~~~~~~~~~ + +NLTK uses a weakref dictionary to maintain a cache of resources that +have been loaded. If you load a resource that is already stored in +the cache, then the cached copy will be returned. This behavior can +be seen by the trace output generated when verbose=True: + + >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True) + <> + >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', verbose=True) + <> + +If you wish to load a resource from its source, bypassing the cache, +use the ``cache=False`` argument to `nltk.data.load()`. This can be +useful, for example, if the resource is loaded from a local file, and +you are actively editing that file: + + >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg',cache=False,verbose=True) + <> + +The cache *no longer* uses weak references. A resource will not be +automatically expunged from the cache when no more objects are using +it. In the following example, when we clear the variable ``feat0``, +the reference count for the feature grammar object drops to zero. +However, the object remains cached: + + >>> del feat0 + >>> feat0 = nltk.data.load('grammars/book_grammars/feat0.fcfg', + ... verbose=True) + <> + +You can clear the entire contents of the cache, using +`nltk.data.clear_cache()`: + + >>> nltk.data.clear_cache() + +Retrieving other Data Sources +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + >>> formulas = nltk.data.load('grammars/book_grammars/background.fol') + >>> for f in formulas: print(str(f)) + all x.(boxerdog(x) -> dog(x)) + all x.(boxer(x) -> person(x)) + all x.-(dog(x) & person(x)) + all x.(married(x) <-> exists y.marry(x,y)) + all x.(bark(x) -> dog(x)) + all x y.(marry(x,y) -> (person(x) & person(y))) + -(Vincent = Mia) + -(Vincent = Fido) + -(Mia = Fido) + +Regression Tests +~~~~~~~~~~~~~~~~ +Create a temp dir for tests that write files: + + >>> import tempfile, os + >>> tempdir = tempfile.mkdtemp() + >>> old_dir = os.path.abspath('.') + >>> os.chdir(tempdir) + +The `retrieve()` function accepts all url types: + + >>> urls = ['https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg', + ... 'file:%s' % nltk.data.find('grammars/sample_grammars/toy.cfg'), + ... 'nltk:grammars/sample_grammars/toy.cfg', + ... 'grammars/sample_grammars/toy.cfg'] + >>> for i, url in enumerate(urls): + ... nltk.data.retrieve(url, 'toy-%d.cfg' % i) + Retrieving 'https://raw.githubusercontent.com/nltk/nltk/develop/nltk/test/toy.cfg', saving to 'toy-0.cfg' + Retrieving 'file:...toy.cfg', saving to 'toy-1.cfg' + Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy-2.cfg' + Retrieving 'nltk:grammars/sample_grammars/toy.cfg', saving to 'toy-3.cfg' + +Clean up the temp dir: + + >>> os.chdir(old_dir) + >>> for f in os.listdir(tempdir): + ... 
os.remove(os.path.join(tempdir, f)) + >>> os.rmdir(tempdir) + +Lazy Loader +----------- +A lazy loader is a wrapper object that defers loading a resource until +it is accessed or used in any way. This is mainly intended for +internal use by NLTK's corpus readers. + + >>> # Create a lazy loader for toy.cfg. + >>> ll = nltk.data.LazyLoader('grammars/sample_grammars/toy.cfg') + + >>> # Show that it's not loaded yet: + >>> object.__repr__(ll) + '' + + >>> # printing it is enough to cause it to be loaded: + >>> print(ll) + + + >>> # Show that it's now been loaded: + >>> object.__repr__(ll) + '' + + + >>> # Test that accessing an attribute also loads it: + >>> ll = nltk.data.LazyLoader('grammars/sample_grammars/toy.cfg') + >>> ll.start() + S + >>> object.__repr__(ll) + '' + +Buffered Gzip Reading and Writing +--------------------------------- +Write performance to gzip-compressed is extremely poor when the files become large. +File creation can become a bottleneck in those cases. + +Read performance from large gzipped pickle files was improved in data.py by +buffering the reads. A similar fix can be applied to writes by buffering +the writes to a StringIO object first. + +This is mainly intended for internal use. The test simply tests that reading +and writing work as intended and does not test how much improvement buffering +provides. + + >>> from io import StringIO + >>> test = nltk.data.BufferedGzipFile('testbuf.gz', 'wb', size=2**10) + >>> ans = [] + >>> for i in range(10000): + ... ans.append(str(i).encode('ascii')) + ... test.write(str(i).encode('ascii')) + >>> test.close() + >>> test = nltk.data.BufferedGzipFile('testbuf.gz', 'rb') + >>> test.read() == b''.join(ans) + True + >>> test.close() + >>> import os + >>> os.unlink('testbuf.gz') + +JSON Encoding and Decoding +-------------------------- +JSON serialization is used instead of pickle for some classes. + + >>> from nltk import jsontags + >>> from nltk.jsontags import JSONTaggedEncoder, JSONTaggedDecoder, register_tag + >>> @jsontags.register_tag + ... class JSONSerializable: + ... json_tag = 'JSONSerializable' + ... + ... def __init__(self, n): + ... self.n = n + ... + ... def encode_json_obj(self): + ... return self.n + ... + ... @classmethod + ... def decode_json_obj(cls, obj): + ... n = obj + ... return cls(n) + ... + >>> JSONTaggedEncoder().encode(JSONSerializable(1)) + '{"!JSONSerializable": 1}' + >>> JSONTaggedDecoder().decode('{"!JSONSerializable": 1}').n + 1 diff --git a/lib/python3.10/site-packages/nltk/test/dependency.doctest b/lib/python3.10/site-packages/nltk/test/dependency.doctest new file mode 100644 index 0000000000000000000000000000000000000000..f621fac48e3682e7d65ade4819dd53e56e6b9780 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/dependency.doctest @@ -0,0 +1,241 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +=================== +Dependency Grammars +=================== + + >>> from nltk.grammar import DependencyGrammar + >>> from nltk.parse import ( + ... DependencyGraph, + ... ProjectiveDependencyParser, + ... NonprojectiveDependencyParser, + ... ) + +CoNLL Data +---------- + + >>> treebank_data = """Pierre NNP 2 NMOD + ... Vinken NNP 8 SUB + ... , , 2 P + ... 61 CD 5 NMOD + ... years NNS 6 AMOD + ... old JJ 2 NMOD + ... , , 2 P + ... will MD 0 ROOT + ... join VB 8 VC + ... the DT 11 NMOD + ... board NN 9 OBJ + ... as IN 9 VMOD + ... a DT 15 NMOD + ... nonexecutive JJ 15 NMOD + ... director NN 12 PMOD + ... Nov. NNP 9 VMOD + ... 29 CD 16 NMOD + ... . . 
9 VMOD + ... """ + + >>> dg = DependencyGraph(treebank_data) + >>> dg.tree().pprint() + (will + (Vinken Pierre , (old (years 61)) ,) + (join (board the) (as (director a nonexecutive)) (Nov. 29) .)) + >>> for head, rel, dep in dg.triples(): + ... print( + ... '({h[0]}, {h[1]}), {r}, ({d[0]}, {d[1]})' + ... .format(h=head, r=rel, d=dep) + ... ) + (will, MD), SUB, (Vinken, NNP) + (Vinken, NNP), NMOD, (Pierre, NNP) + (Vinken, NNP), P, (,, ,) + (Vinken, NNP), NMOD, (old, JJ) + (old, JJ), AMOD, (years, NNS) + (years, NNS), NMOD, (61, CD) + (Vinken, NNP), P, (,, ,) + (will, MD), VC, (join, VB) + (join, VB), OBJ, (board, NN) + (board, NN), NMOD, (the, DT) + (join, VB), VMOD, (as, IN) + (as, IN), PMOD, (director, NN) + (director, NN), NMOD, (a, DT) + (director, NN), NMOD, (nonexecutive, JJ) + (join, VB), VMOD, (Nov., NNP) + (Nov., NNP), NMOD, (29, CD) + (join, VB), VMOD, (., .) + +Using a custom cell extractor. + + >>> def custom_extractor(cells): + ... _, tag, head, rel = cells + ... return 'spam', 'spam', tag, tag, '', head, rel + >>> dg = DependencyGraph(treebank_data, cell_extractor=custom_extractor) + >>> dg.tree().pprint() + (spam + (spam spam spam (spam (spam spam)) spam) + (spam (spam spam) (spam (spam spam spam)) (spam spam) spam)) + +Custom cell extractors can take in and return an index. + + >>> def custom_extractor(cells, index): + ... word, tag, head, rel = cells + ... return (index, '{}-{}'.format(word, index), word, + ... tag, tag, '', head, rel) + >>> dg = DependencyGraph(treebank_data, cell_extractor=custom_extractor) + >>> dg.tree().pprint() + (will-8 + (Vinken-2 Pierre-1 ,-3 (old-6 (years-5 61-4)) ,-7) + (join-9 + (board-11 the-10) + (as-12 (director-15 a-13 nonexecutive-14)) + (Nov.-16 29-17) + .-18)) + +Using the dependency-parsed version of the Penn Treebank corpus sample. + + >>> from nltk.corpus import dependency_treebank + >>> t = dependency_treebank.parsed_sents()[0] + >>> print(t.to_conll(3)) + Pierre NNP 2 + Vinken NNP 8 + , , 2 + 61 CD 5 + years NNS 6 + old JJ 2 + , , 2 + will MD 0 + join VB 8 + the DT 11 + board NN 9 + as IN 9 + a DT 15 + nonexecutive JJ 15 + director NN 12 + Nov. NNP 9 + 29 CD 16 + . . 8 + +Using the output of zpar (like Malt-TAB but with zero-based indexing) + + >>> zpar_data = """ + ... Pierre NNP 1 NMOD + ... Vinken NNP 7 SUB + ... , , 1 P + ... 61 CD 4 NMOD + ... years NNS 5 AMOD + ... old JJ 1 NMOD + ... , , 1 P + ... will MD -1 ROOT + ... join VB 7 VC + ... the DT 10 NMOD + ... board NN 8 OBJ + ... as IN 8 VMOD + ... a DT 14 NMOD + ... nonexecutive JJ 14 NMOD + ... director NN 11 PMOD + ... Nov. NNP 8 VMOD + ... 29 CD 15 NMOD + ... . . 7 P + ... """ + + >>> zdg = DependencyGraph(zpar_data, zero_based=True) + >>> print(zdg.tree()) + (will + (Vinken Pierre , (old (years 61)) ,) + (join (board the) (as (director a nonexecutive)) (Nov. 29)) + .) + + +Projective Dependency Parsing +----------------------------- + + >>> grammar = DependencyGrammar.fromstring(""" + ... 'fell' -> 'price' | 'stock' + ... 'price' -> 'of' 'the' + ... 'of' -> 'stock' + ... 'stock' -> 'the' + ... """) + >>> print(grammar) + Dependency grammar with 5 productions + 'fell' -> 'price' + 'fell' -> 'stock' + 'price' -> 'of' 'the' + 'of' -> 'stock' + 'stock' -> 'the' + + >>> dp = ProjectiveDependencyParser(grammar) + >>> for t in sorted(dp.parse(['the', 'price', 'of', 'the', 'stock', 'fell'])): + ... 
print(t) + (fell (price the (of (stock the)))) + (fell (price the of) (stock the)) + (fell (price the of the) stock) + +Non-Projective Dependency Parsing +--------------------------------- + + >>> grammar = DependencyGrammar.fromstring(""" + ... 'taught' -> 'play' | 'man' + ... 'man' -> 'the' + ... 'play' -> 'golf' | 'dog' | 'to' + ... 'dog' -> 'his' + ... """) + >>> print(grammar) + Dependency grammar with 7 productions + 'taught' -> 'play' + 'taught' -> 'man' + 'man' -> 'the' + 'play' -> 'golf' + 'play' -> 'dog' + 'play' -> 'to' + 'dog' -> 'his' + + >>> dp = NonprojectiveDependencyParser(grammar) + >>> g, = dp.parse(['the', 'man', 'taught', 'his', 'dog', 'to', 'play', 'golf']) + + >>> print(g.root['word']) + taught + + >>> for _, node in sorted(g.nodes.items()): + ... if node['word'] is not None: + ... print('{address} {word}: {d}'.format(d=node['deps'][''], **node)) + 1 the: [] + 2 man: [1] + 3 taught: [2, 7] + 4 his: [] + 5 dog: [4] + 6 to: [] + 7 play: [5, 6, 8] + 8 golf: [] + + >>> print(g.tree()) + (taught (man the) (play (dog his) to golf)) + +Integration with MALT parser +============================ + +In case the top relation is different from the default, we can set it. In case +of MALT parser, it's set to `'null'`. + +>>> dg_str = """1 I _ NN NN _ 2 nn _ _ +... 2 shot _ NN NN _ 0 null _ _ +... 3 an _ AT AT _ 2 dep _ _ +... 4 elephant _ NN NN _ 7 nn _ _ +... 5 in _ NN NN _ 7 nn _ _ +... 6 my _ NN NN _ 7 nn _ _ +... 7 pajamas _ NNS NNS _ 3 dobj _ _ +... """ +>>> dg = DependencyGraph(dg_str, top_relation_label='null') + +>>> len(dg.nodes) +8 + +>>> dg.root['word'], dg.root['address'] +('shot', 2) + +>>> print(dg.to_conll(10)) +1 I _ NN NN _ 2 nn _ _ +2 shot _ NN NN _ 0 null _ _ +3 an _ AT AT _ 2 dep _ _ +4 elephant _ NN NN _ 7 nn _ _ +5 in _ NN NN _ 7 nn _ _ +6 my _ NN NN _ 7 nn _ _ +7 pajamas _ NNS NNS _ 3 dobj _ _ diff --git a/lib/python3.10/site-packages/nltk/test/discourse.doctest b/lib/python3.10/site-packages/nltk/test/discourse.doctest new file mode 100644 index 0000000000000000000000000000000000000000..1e37ca56440809055871b656d59fb0f7fd634f2c --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/discourse.doctest @@ -0,0 +1,552 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +================== +Discourse Checking +================== + + >>> from nltk import * + >>> from nltk.sem import logic + >>> logic._counter._value = 0 + +Setup +===== + + >>> from nltk.test.childes_fixt import setup_module + >>> setup_module() + +Introduction +============ + +The NLTK discourse module makes it possible to test consistency and +redundancy of simple discourses, using theorem-proving and +model-building from `nltk.inference`. + +The ``DiscourseTester`` constructor takes a list of sentences as a +parameter. + + >>> dt = DiscourseTester(['a boxer walks', 'every boxer chases a girl']) + +The ``DiscourseTester`` parses each sentence into a list of logical +forms. Once we have created ``DiscourseTester`` object, we can +inspect various properties of the discourse. First off, we might want +to double-check what sentences are currently stored as the discourse. + + >>> dt.sentences() + s0: a boxer walks + s1: every boxer chases a girl + +As you will see, each sentence receives an identifier `s`\ :subscript:`i`. 
+We might also want to check what grammar the ``DiscourseTester`` is +using (by default, ``book_grammars/discourse.fcfg``): + + >>> dt.grammar() + % start S + # Grammar Rules + S[SEM = ] -> NP[NUM=?n,SEM=?subj] VP[NUM=?n,SEM=?vp] + NP[NUM=?n,SEM= ] -> Det[NUM=?n,SEM=?det] Nom[NUM=?n,SEM=?nom] + NP[LOC=?l,NUM=?n,SEM=?np] -> PropN[LOC=?l,NUM=?n,SEM=?np] + ... + +A different grammar can be invoked by using the optional ``gramfile`` +parameter when a ``DiscourseTester`` object is created. + +Readings and Threads +==================== + +Depending on +the grammar used, we may find some sentences have more than one +logical form. To check this, use the ``readings()`` method. Given a +sentence identifier of the form `s`\ :subscript:`i`, each reading of +that sentence is given an identifier `s`\ :sub:`i`-`r`\ :sub:`j`. + + + >>> dt.readings() + + s0 readings: + + s0-r0: exists z1.(boxer(z1) & walk(z1)) + s0-r1: exists z1.(boxerdog(z1) & walk(z1)) + + s1 readings: + + s1-r0: all z2.(boxer(z2) -> exists z3.(girl(z3) & chase(z2,z3))) + s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2))) + + +In this case, the only source of ambiguity lies in the word *boxer*, +which receives two translations: ``boxer`` and ``boxerdog``. The +intention is that one of these corresponds to the ``person`` sense and +one to the ``dog`` sense. In principle, we would also expect to see a +quantifier scope ambiguity in ``s1``. However, the simple grammar we +are using, namely `sem4.fcfg `_, doesn't support quantifier +scope ambiguity. + +We can also investigate the readings of a specific sentence: + + >>> dt.readings('a boxer walks') + The sentence 'a boxer walks' has these readings: + exists x.(boxer(x) & walk(x)) + exists x.(boxerdog(x) & walk(x)) + +Given that each sentence is two-ways ambiguous, we potentially have +four different discourse 'threads', taking all combinations of +readings. To see these, specify the ``threaded=True`` parameter on +the ``readings()`` method. Again, each thread is assigned an +identifier of the form `d`\ :sub:`i`. Following the identifier is a +list of the readings that constitute that thread. + + >>> dt.readings(threaded=True) + d0: ['s0-r0', 's1-r0'] + d1: ['s0-r0', 's1-r1'] + d2: ['s0-r1', 's1-r0'] + d3: ['s0-r1', 's1-r1'] + +Of course, this simple-minded approach doesn't scale: a discourse with, say, three +sentences, each of which has 3 readings, will generate 27 different +threads. It is an interesting exercise to consider how to manage +discourse ambiguity more efficiently. + +Checking Consistency +==================== + +Now, we can check whether some or all of the discourse threads are +consistent, using the ``models()`` method. With no parameter, this +method will try to find a model for every discourse thread in the +current discourse. However, we can also specify just one thread, say ``d1``. + + >>> dt.models('d1') + -------------------------------------------------------------------------------- + Model for Discourse Thread d1 + -------------------------------------------------------------------------------- + % number = 1 + % seconds = 0 + + % Interpretation of size 2 + + c1 = 0. + + f1(0) = 0. + f1(1) = 0. + + boxer(0). + - boxer(1). + + - boxerdog(0). + - boxerdog(1). + + - girl(0). + - girl(1). + + walk(0). + - walk(1). + + - chase(0,0). + - chase(0,1). + - chase(1,0). + - chase(1,1). 
+ + Consistent discourse: d1 ['s0-r0', 's1-r1']: + s0-r0: exists z1.(boxer(z1) & walk(z1)) + s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2))) + + +There are various formats for rendering **Mace4** models --- here, +we have used the 'cooked' format (which is intended to be +human-readable). There are a number of points to note. + +#. The entities in the domain are all treated as non-negative + integers. In this case, there are only two entities, ``0`` and + ``1``. + +#. The ``-`` symbol indicates negation. So ``0`` is the only + ``boxerdog`` and the only thing that ``walk``\ s. Nothing is a + ``boxer``, or a ``girl`` or in the ``chase`` relation. Thus the + universal sentence is vacuously true. + +#. ``c1`` is an introduced constant that denotes ``0``. + +#. ``f1`` is a Skolem function, but it plays no significant role in + this model. + + +We might want to now add another sentence to the discourse, and there +is method ``add_sentence()`` for doing just this. + + >>> dt.add_sentence('John is a boxer') + >>> dt.sentences() + s0: a boxer walks + s1: every boxer chases a girl + s2: John is a boxer + +We can now test all the properties as before; here, we just show a +couple of them. + + >>> dt.readings() + + s0 readings: + + s0-r0: exists z1.(boxer(z1) & walk(z1)) + s0-r1: exists z1.(boxerdog(z1) & walk(z1)) + + s1 readings: + + s1-r0: all z1.(boxer(z1) -> exists z2.(girl(z2) & chase(z1,z2))) + s1-r1: all z1.(boxerdog(z1) -> exists z2.(girl(z2) & chase(z1,z2))) + + s2 readings: + + s2-r0: boxer(John) + s2-r1: boxerdog(John) + >>> dt.readings(threaded=True) + d0: ['s0-r0', 's1-r0', 's2-r0'] + d1: ['s0-r0', 's1-r0', 's2-r1'] + d2: ['s0-r0', 's1-r1', 's2-r0'] + d3: ['s0-r0', 's1-r1', 's2-r1'] + d4: ['s0-r1', 's1-r0', 's2-r0'] + d5: ['s0-r1', 's1-r0', 's2-r1'] + d6: ['s0-r1', 's1-r1', 's2-r0'] + d7: ['s0-r1', 's1-r1', 's2-r1'] + +If you are interested in a particular thread, the ``expand_threads()`` +method will remind you of what readings it consists of: + + >>> thread = dt.expand_threads('d1') + >>> for rid, reading in thread: + ... print(rid, str(reading.normalize())) + s0-r0 exists z1.(boxer(z1) & walk(z1)) + s1-r0 all z1.(boxer(z1) -> exists z2.(girl(z2) & chase(z1,z2))) + s2-r1 boxerdog(John) + +Suppose we have already defined a discourse, as follows: + + >>> dt = DiscourseTester(['A student dances', 'Every student is a person']) + +Now, when we add a new sentence, is it consistent with what we already +have? The `` consistchk=True`` parameter of ``add_sentence()`` allows +us to check: + + >>> dt.add_sentence('No person dances', consistchk=True) + Inconsistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0']: + s0-r0: exists z1.(student(z1) & dance(z1)) + s1-r0: all z1.(student(z1) -> person(z1)) + s2-r0: -exists z1.(person(z1) & dance(z1)) + + >>> dt.readings() + + s0 readings: + + s0-r0: exists z1.(student(z1) & dance(z1)) + + s1 readings: + + s1-r0: all z1.(student(z1) -> person(z1)) + + s2 readings: + + s2-r0: -exists z1.(person(z1) & dance(z1)) + +So let's retract the inconsistent sentence: + + >>> dt.retract_sentence('No person dances', verbose=True) + Current sentences are + s0: A student dances + s1: Every student is a person + +We can now verify that result is consistent. + + >>> dt.models() + -------------------------------------------------------------------------------- + Model for Discourse Thread d0 + -------------------------------------------------------------------------------- + % number = 1 + % seconds = 0 + + % Interpretation of size 2 + + c1 = 0. 
+ + dance(0). + - dance(1). + + person(0). + - person(1). + + student(0). + - student(1). + + Consistent discourse: d0 ['s0-r0', 's1-r0']: + s0-r0: exists z1.(student(z1) & dance(z1)) + s1-r0: all z1.(student(z1) -> person(z1)) + + +Checking Informativity +====================== + +Let's assume that we are still trying to extend the discourse *A +student dances.* *Every student is a person.* We add a new sentence, +but this time, we check whether it is informative with respect to what +has gone before. + + >>> dt.add_sentence('A person dances', informchk=True) + Sentence 'A person dances' under reading 'exists x.(person(x) & dance(x))': + Not informative relative to thread 'd0' + +In fact, we are just checking whether the new sentence is entailed by +the preceding discourse. + + >>> dt.models() + -------------------------------------------------------------------------------- + Model for Discourse Thread d0 + -------------------------------------------------------------------------------- + % number = 1 + % seconds = 0 + + % Interpretation of size 2 + + c1 = 0. + + c2 = 0. + + dance(0). + - dance(1). + + person(0). + - person(1). + + student(0). + - student(1). + + Consistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0']: + s0-r0: exists z1.(student(z1) & dance(z1)) + s1-r0: all z1.(student(z1) -> person(z1)) + s2-r0: exists z1.(person(z1) & dance(z1)) + + + + +Adding Background Knowledge +=========================== + +Let's build a new discourse, and look at the readings of the component sentences: + + >>> dt = DiscourseTester(['Vincent is a boxer', 'Fido is a boxer', 'Vincent is married', 'Fido barks']) + >>> dt.readings() + + s0 readings: + + s0-r0: boxer(Vincent) + s0-r1: boxerdog(Vincent) + + s1 readings: + + s1-r0: boxer(Fido) + s1-r1: boxerdog(Fido) + + s2 readings: + + s2-r0: married(Vincent) + + s3 readings: + + s3-r0: bark(Fido) + +This gives us a lot of threads: + + >>> dt.readings(threaded=True) + d0: ['s0-r0', 's1-r0', 's2-r0', 's3-r0'] + d1: ['s0-r0', 's1-r1', 's2-r0', 's3-r0'] + d2: ['s0-r1', 's1-r0', 's2-r0', 's3-r0'] + d3: ['s0-r1', 's1-r1', 's2-r0', 's3-r0'] + + +We can eliminate some of the readings, and hence some of the threads, +by adding background information. + + >>> import nltk.data + >>> bg = nltk.data.load('grammars/book_grammars/background.fol') + >>> dt.add_background(bg) + >>> dt.background() + all x.(boxerdog(x) -> dog(x)) + all x.(boxer(x) -> person(x)) + all x.-(dog(x) & person(x)) + all x.(married(x) <-> exists y.marry(x,y)) + all x.(bark(x) -> dog(x)) + all x y.(marry(x,y) -> (person(x) & person(y))) + -(Vincent = Mia) + -(Vincent = Fido) + -(Mia = Fido) + +The background information allows us to reject three of the threads as +inconsistent. To see what remains, use the ``filter=True`` parameter +on ``readings()``. + + >>> dt.readings(filter=True) + d1: ['s0-r0', 's1-r1', 's2-r0', 's3-r0'] + +The ``models()`` method gives us more information about the surviving thread. + + >>> dt.models() + -------------------------------------------------------------------------------- + Model for Discourse Thread d0 + -------------------------------------------------------------------------------- + No model found! + + -------------------------------------------------------------------------------- + Model for Discourse Thread d1 + -------------------------------------------------------------------------------- + % number = 1 + % seconds = 0 + + % Interpretation of size 3 + + Fido = 0. + + Mia = 1. + + Vincent = 2. + + f1(0) = 0. + f1(1) = 0. + f1(2) = 2. + + bark(0). 
+ - bark(1). + - bark(2). + + - boxer(0). + - boxer(1). + boxer(2). + + boxerdog(0). + - boxerdog(1). + - boxerdog(2). + + dog(0). + - dog(1). + - dog(2). + + - married(0). + - married(1). + married(2). + + - person(0). + - person(1). + person(2). + + - marry(0,0). + - marry(0,1). + - marry(0,2). + - marry(1,0). + - marry(1,1). + - marry(1,2). + - marry(2,0). + - marry(2,1). + marry(2,2). + + -------------------------------------------------------------------------------- + Model for Discourse Thread d2 + -------------------------------------------------------------------------------- + No model found! + + -------------------------------------------------------------------------------- + Model for Discourse Thread d3 + -------------------------------------------------------------------------------- + No model found! + + Inconsistent discourse: d0 ['s0-r0', 's1-r0', 's2-r0', 's3-r0']: + s0-r0: boxer(Vincent) + s1-r0: boxer(Fido) + s2-r0: married(Vincent) + s3-r0: bark(Fido) + + Consistent discourse: d1 ['s0-r0', 's1-r1', 's2-r0', 's3-r0']: + s0-r0: boxer(Vincent) + s1-r1: boxerdog(Fido) + s2-r0: married(Vincent) + s3-r0: bark(Fido) + + Inconsistent discourse: d2 ['s0-r1', 's1-r0', 's2-r0', 's3-r0']: + s0-r1: boxerdog(Vincent) + s1-r0: boxer(Fido) + s2-r0: married(Vincent) + s3-r0: bark(Fido) + + Inconsistent discourse: d3 ['s0-r1', 's1-r1', 's2-r0', 's3-r0']: + s0-r1: boxerdog(Vincent) + s1-r1: boxerdog(Fido) + s2-r0: married(Vincent) + s3-r0: bark(Fido) + + + +.. This will not be visible in the html output: create a tempdir to + play in. + >>> import tempfile, os + >>> tempdir = tempfile.mkdtemp() + >>> old_dir = os.path.abspath('.') + >>> os.chdir(tempdir) + +In order to play around with your own version of background knowledge, +you might want to start off with a local copy of ``background.fol``: + + >>> nltk.data.retrieve('grammars/book_grammars/background.fol') + Retrieving 'nltk:grammars/book_grammars/background.fol', saving to 'background.fol' + +After you have modified the file, the ``load_fol()`` function will parse +the strings in the file into expressions of ``nltk.sem.logic``. + + >>> from nltk.inference.discourse import load_fol + >>> mybg = load_fol(open('background.fol').read()) + +The result can be loaded as an argument of ``add_background()`` in the +manner shown earlier. + +.. This will not be visible in the html output: clean up the tempdir. + >>> os.chdir(old_dir) + >>> for f in os.listdir(tempdir): + ... os.remove(os.path.join(tempdir, f)) + >>> os.rmdir(tempdir) + >>> nltk.data.clear_cache() + + +Regression Testing from book +============================ + + >>> logic._counter._value = 0 + + >>> from nltk.tag import RegexpTagger + >>> tagger = RegexpTagger( + ... [('^(chases|runs)$', 'VB'), + ... ('^(a)$', 'ex_quant'), + ... ('^(every)$', 'univ_quant'), + ... ('^(dog|boy)$', 'NN'), + ... ('^(He)$', 'PRP') + ... 
]) + >>> rc = DrtGlueReadingCommand(depparser=MaltParser(tagger=tagger)) + >>> dt = DiscourseTester(map(str.split, ['Every dog chases a boy', 'He runs']), rc) + >>> dt.readings() + + s0 readings: + + s0-r0: ([z2],[boy(z2), (([z5],[dog(z5)]) -> ([],[chases(z5,z2)]))]) + s0-r1: ([],[(([z1],[dog(z1)]) -> ([z2],[boy(z2), chases(z1,z2)]))]) + + s1 readings: + + s1-r0: ([z1],[PRO(z1), runs(z1)]) + >>> dt.readings(show_thread_readings=True) + d0: ['s0-r0', 's1-r0'] : ([z1,z2],[boy(z1), (([z3],[dog(z3)]) -> ([],[chases(z3,z1)])), (z2 = z1), runs(z2)]) + d1: ['s0-r1', 's1-r0'] : INVALID: AnaphoraResolutionException + >>> dt.readings(filter=True, show_thread_readings=True) + d0: ['s0-r0', 's1-r0'] : ([z1,z3],[boy(z1), (([z2],[dog(z2)]) -> ([],[chases(z2,z1)])), (z3 = z1), runs(z3)]) + + >>> logic._counter._value = 0 + + >>> from nltk.parse import FeatureEarleyChartParser + >>> from nltk.sem.drt import DrtParser + >>> grammar = nltk.data.load('grammars/book_grammars/drt.fcfg', logic_parser=DrtParser()) + >>> parser = FeatureEarleyChartParser(grammar, trace=0) + >>> trees = parser.parse('Angus owns a dog'.split()) + >>> print(list(trees)[0].label()['SEM'].simplify().normalize()) + ([z1,z2],[Angus(z1), dog(z2), own(z1,z2)]) diff --git a/lib/python3.10/site-packages/nltk/test/featgram.doctest b/lib/python3.10/site-packages/nltk/test/featgram.doctest new file mode 100644 index 0000000000000000000000000000000000000000..99e2735e8682ec270dc3039be39c2b3f2e3dc193 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/featgram.doctest @@ -0,0 +1,610 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +========================= + Feature Grammar Parsing +========================= + +.. definitions from nltk_book/definitions.rst + +.. role:: feat + :class: feature +.. role:: fval + :class: fval +.. |rarr| unicode:: U+2192 .. right arrow +.. |dot| unicode:: U+2022 .. bullet +.. |pi| unicode:: U+03C0 + +Grammars can be parsed from strings. + + >>> import nltk + >>> from nltk import grammar, parse + >>> g = """ + ... % start DP + ... DP[AGR=?a] -> D[AGR=?a] N[AGR=?a] + ... D[AGR=[NUM='sg', PERS=3]] -> 'this' | 'that' + ... D[AGR=[NUM='pl', PERS=3]] -> 'these' | 'those' + ... D[AGR=[NUM='pl', PERS=1]] -> 'we' + ... D[AGR=[PERS=2]] -> 'you' + ... N[AGR=[NUM='sg', GND='m']] -> 'boy' + ... N[AGR=[NUM='pl', GND='m']] -> 'boys' + ... N[AGR=[NUM='sg', GND='f']] -> 'girl' + ... N[AGR=[NUM='pl', GND='f']] -> 'girls' + ... N[AGR=[NUM='sg']] -> 'student' + ... N[AGR=[NUM='pl']] -> 'students' + ... """ + >>> grammar = grammar.FeatureGrammar.fromstring(g) + >>> tokens = 'these girls'.split() + >>> parser = parse.FeatureEarleyChartParser(grammar) + >>> trees = parser.parse(tokens) + >>> for tree in trees: print(tree) + (DP[AGR=[GND='f', NUM='pl', PERS=3]] + (D[AGR=[NUM='pl', PERS=3]] these) + (N[AGR=[GND='f', NUM='pl']] girls)) + +In general, when we are trying to develop even a very small grammar, +it is convenient to put the rules in a file where they can be edited, +tested and revised. Let's assume that we have saved feat0cfg as a file named +``'feat0.fcfg'`` and placed it in the NLTK ``data`` directory. 
We can +inspect it as follows: + + >>> nltk.data.show_cfg('grammars/book_grammars/feat0.fcfg') + % start S + # ################### + # Grammar Productions + # ################### + # S expansion productions + S -> NP[NUM=?n] VP[NUM=?n] + # NP expansion productions + NP[NUM=?n] -> N[NUM=?n] + NP[NUM=?n] -> PropN[NUM=?n] + NP[NUM=?n] -> Det[NUM=?n] N[NUM=?n] + NP[NUM=pl] -> N[NUM=pl] + # VP expansion productions + VP[TENSE=?t, NUM=?n] -> IV[TENSE=?t, NUM=?n] + VP[TENSE=?t, NUM=?n] -> TV[TENSE=?t, NUM=?n] NP + # ################### + # Lexical Productions + # ################### + Det[NUM=sg] -> 'this' | 'every' + Det[NUM=pl] -> 'these' | 'all' + Det -> 'the' | 'some' | 'several' + PropN[NUM=sg]-> 'Kim' | 'Jody' + N[NUM=sg] -> 'dog' | 'girl' | 'car' | 'child' + N[NUM=pl] -> 'dogs' | 'girls' | 'cars' | 'children' + IV[TENSE=pres, NUM=sg] -> 'disappears' | 'walks' + TV[TENSE=pres, NUM=sg] -> 'sees' | 'likes' + IV[TENSE=pres, NUM=pl] -> 'disappear' | 'walk' + TV[TENSE=pres, NUM=pl] -> 'see' | 'like' + IV[TENSE=past] -> 'disappeared' | 'walked' + TV[TENSE=past] -> 'saw' | 'liked' + +Assuming we have saved feat0cfg as a file named +``'feat0.fcfg'``, the function ``parse.load_parser`` allows us to +read the grammar into NLTK, ready for use in parsing. + + + >>> cp = parse.load_parser('grammars/book_grammars/feat0.fcfg', trace=1) + >>> sent = 'Kim likes children' + >>> tokens = sent.split() + >>> tokens + ['Kim', 'likes', 'children'] + >>> trees = cp.parse(tokens) + |.Kim .like.chil.| + |[----] . .| [0:1] 'Kim' + |. [----] .| [1:2] 'likes' + |. . [----]| [2:3] 'children' + |[----] . .| [0:1] PropN[NUM='sg'] -> 'Kim' * + |[----] . .| [0:1] NP[NUM='sg'] -> PropN[NUM='sg'] * + |[----> . .| [0:1] S[] -> NP[NUM=?n] * VP[NUM=?n] {?n: 'sg'} + |. [----] .| [1:2] TV[NUM='sg', TENSE='pres'] -> 'likes' * + |. [----> .| [1:2] VP[NUM=?n, TENSE=?t] -> TV[NUM=?n, TENSE=?t] * NP[] {?n: 'sg', ?t: 'pres'} + |. . [----]| [2:3] N[NUM='pl'] -> 'children' * + |. . [----]| [2:3] NP[NUM='pl'] -> N[NUM='pl'] * + |. . [---->| [2:3] S[] -> NP[NUM=?n] * VP[NUM=?n] {?n: 'pl'} + |. [---------]| [1:3] VP[NUM='sg', TENSE='pres'] -> TV[NUM='sg', TENSE='pres'] NP[] * + |[==============]| [0:3] S[] -> NP[NUM='sg'] VP[NUM='sg'] * + >>> for tree in trees: print(tree) + (S[] + (NP[NUM='sg'] (PropN[NUM='sg'] Kim)) + (VP[NUM='sg', TENSE='pres'] + (TV[NUM='sg', TENSE='pres'] likes) + (NP[NUM='pl'] (N[NUM='pl'] children)))) + +The parser works directly with +the underspecified productions given by the grammar. That is, the +Predictor rule does not attempt to compile out all admissible feature +combinations before trying to expand the non-terminals on the left hand +side of a production. However, when the Scanner matches an input word +against a lexical production that has been predicted, the new edge will +typically contain fully specified features; e.g., the edge +[PropN[`num`:feat: = `sg`:fval:] |rarr| 'Kim', (0, 1)]. Recall from +Chapter 8 that the Fundamental (or Completer) Rule in +standard CFGs is used to combine an incomplete edge that's expecting a +nonterminal *B* with a following, complete edge whose left hand side +matches *B*. In our current setting, rather than checking for a +complete match, we test whether the expected category *B* will +unify with the left hand side *B'* of a following complete +edge. 
We will explain in more detail in Section 9.2 how +unification works; for the moment, it is enough to know that as a +result of unification, any variable values of features in *B* will be +instantiated by constant values in the corresponding feature structure +in *B'*, and these instantiated values will be used in the new edge +added by the Completer. This instantiation can be seen, for example, +in the edge +[NP [`num`:feat:\ =\ `sg`:fval:] |rarr| PropN[`num`:feat:\ =\ `sg`:fval:] |dot|, (0, 1)] +in Example 9.2, where the feature `num`:feat: has been assigned the value `sg`:fval:. + +Feature structures in NLTK are ... Atomic feature values can be strings or +integers. + + >>> fs1 = nltk.FeatStruct(TENSE='past', NUM='sg') + >>> print(fs1) + [ NUM = 'sg' ] + [ TENSE = 'past' ] + +We can think of a feature structure as being like a Python dictionary, +and access its values by indexing in the usual way. + + >>> fs1 = nltk.FeatStruct(PER=3, NUM='pl', GND='fem') + >>> print(fs1['GND']) + fem + +We can also define feature structures which have complex values, as +discussed earlier. + + >>> fs2 = nltk.FeatStruct(POS='N', AGR=fs1) + >>> print(fs2) + [ [ GND = 'fem' ] ] + [ AGR = [ NUM = 'pl' ] ] + [ [ PER = 3 ] ] + [ ] + [ POS = 'N' ] + >>> print(fs2['AGR']) + [ GND = 'fem' ] + [ NUM = 'pl' ] + [ PER = 3 ] + >>> print(fs2['AGR']['PER']) + 3 + +Feature structures can also be constructed using the ``parse()`` +method of the ``nltk.FeatStruct`` class. Note that in this case, atomic +feature values do not need to be enclosed in quotes. + + >>> f1 = nltk.FeatStruct("[NUMBER = sg]") + >>> f2 = nltk.FeatStruct("[PERSON = 3]") + >>> print(nltk.unify(f1, f2)) + [ NUMBER = 'sg' ] + [ PERSON = 3 ] + + >>> f1 = nltk.FeatStruct("[A = [B = b, D = d]]") + >>> f2 = nltk.FeatStruct("[A = [C = c, D = d]]") + >>> print(nltk.unify(f1, f2)) + [ [ B = 'b' ] ] + [ A = [ C = 'c' ] ] + [ [ D = 'd' ] ] + + +Feature Structures as Graphs +---------------------------- + +Feature structures are not inherently tied to linguistic objects; they are +general purpose structures for representing knowledge. For example, we +could encode information about a person in a feature structure: + + >>> person01 = nltk.FeatStruct("[NAME=Lee, TELNO='01 27 86 42 96',AGE=33]") + >>> print(person01) + [ AGE = 33 ] + [ NAME = 'Lee' ] + [ TELNO = '01 27 86 42 96' ] + +There are a number of notations for representing reentrancy in +matrix-style representations of feature structures. In NLTK, we adopt +the following convention: the first occurrence of a shared feature structure +is prefixed with an integer in parentheses, such as ``(1)``, and any +subsequent reference to that structure uses the notation +``->(1)``, as shown below. + + + >>> fs = nltk.FeatStruct("""[NAME=Lee, ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'], + ... SPOUSE=[NAME=Kim, ADDRESS->(1)]]""") + >>> print(fs) + [ ADDRESS = (1) [ NUMBER = 74 ] ] + [ [ STREET = 'rue Pascal' ] ] + [ ] + [ NAME = 'Lee' ] + [ ] + [ SPOUSE = [ ADDRESS -> (1) ] ] + [ [ NAME = 'Kim' ] ] + +There can be any number of tags within a single feature structure. 
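+
+Before the multi-tag example below, one extra check (an addition to
+this file, not part of the original examples): structure sharing is a
+matter of object identity, so the two paths in the structure just
+defined that lead to the value tagged ``(1)`` pick out the very same
+object.
+
+    >>> fs['ADDRESS'] is fs['SPOUSE']['ADDRESS']
+    True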
+ + >>> fs3 = nltk.FeatStruct("[A=(1)[B=b], C=(2)[], D->(1), E->(2)]") + >>> print(fs3) + [ A = (1) [ B = 'b' ] ] + [ ] + [ C = (2) [] ] + [ ] + [ D -> (1) ] + [ E -> (2) ] + >>> fs1 = nltk.FeatStruct(NUMBER=74, STREET='rue Pascal') + >>> fs2 = nltk.FeatStruct(CITY='Paris') + >>> print(nltk.unify(fs1, fs2)) + [ CITY = 'Paris' ] + [ NUMBER = 74 ] + [ STREET = 'rue Pascal' ] + +Unification is symmetric: + + >>> nltk.unify(fs1, fs2) == nltk.unify(fs2, fs1) + True + +Unification is commutative: + + >>> fs3 = nltk.FeatStruct(TELNO='01 27 86 42 96') + >>> nltk.unify(nltk.unify(fs1, fs2), fs3) == nltk.unify(fs1, nltk.unify(fs2, fs3)) + True + +Unification between *FS*:math:`_0` and *FS*:math:`_1` will fail if the +two feature structures share a path |pi|, +but the value of |pi| in *FS*:math:`_0` is a distinct +atom from the value of |pi| in *FS*:math:`_1`. In NLTK, +this is implemented by setting the result of unification to be +``None``. + + >>> fs0 = nltk.FeatStruct(A='a') + >>> fs1 = nltk.FeatStruct(A='b') + >>> print(nltk.unify(fs0, fs1)) + None + +Now, if we look at how unification interacts with structure-sharing, +things become really interesting. + + + + >>> fs0 = nltk.FeatStruct("""[NAME=Lee, + ... ADDRESS=[NUMBER=74, + ... STREET='rue Pascal'], + ... SPOUSE= [NAME=Kim, + ... ADDRESS=[NUMBER=74, + ... STREET='rue Pascal']]]""") + >>> print(fs0) + [ ADDRESS = [ NUMBER = 74 ] ] + [ [ STREET = 'rue Pascal' ] ] + [ ] + [ NAME = 'Lee' ] + [ ] + [ [ ADDRESS = [ NUMBER = 74 ] ] ] + [ SPOUSE = [ [ STREET = 'rue Pascal' ] ] ] + [ [ ] ] + [ [ NAME = 'Kim' ] ] + + + >>> fs1 = nltk.FeatStruct("[SPOUSE=[ADDRESS=[CITY=Paris]]]") + >>> print(nltk.unify(fs0, fs1)) + [ ADDRESS = [ NUMBER = 74 ] ] + [ [ STREET = 'rue Pascal' ] ] + [ ] + [ NAME = 'Lee' ] + [ ] + [ [ [ CITY = 'Paris' ] ] ] + [ [ ADDRESS = [ NUMBER = 74 ] ] ] + [ SPOUSE = [ [ STREET = 'rue Pascal' ] ] ] + [ [ ] ] + [ [ NAME = 'Kim' ] ] + + >>> fs2 = nltk.FeatStruct("""[NAME=Lee, ADDRESS=(1)[NUMBER=74, STREET='rue Pascal'], + ... SPOUSE=[NAME=Kim, ADDRESS->(1)]]""") + + + >>> print(fs2) + [ ADDRESS = (1) [ NUMBER = 74 ] ] + [ [ STREET = 'rue Pascal' ] ] + [ ] + [ NAME = 'Lee' ] + [ ] + [ SPOUSE = [ ADDRESS -> (1) ] ] + [ [ NAME = 'Kim' ] ] + + + >>> print(nltk.unify(fs2, fs1)) + [ [ CITY = 'Paris' ] ] + [ ADDRESS = (1) [ NUMBER = 74 ] ] + [ [ STREET = 'rue Pascal' ] ] + [ ] + [ NAME = 'Lee' ] + [ ] + [ SPOUSE = [ ADDRESS -> (1) ] ] + [ [ NAME = 'Kim' ] ] + + + >>> fs1 = nltk.FeatStruct("[ADDRESS1=[NUMBER=74, STREET='rue Pascal']]") + >>> fs2 = nltk.FeatStruct("[ADDRESS1=?x, ADDRESS2=?x]") + >>> print(fs2) + [ ADDRESS1 = ?x ] + [ ADDRESS2 = ?x ] + >>> print(nltk.unify(fs1, fs2)) + [ ADDRESS1 = (1) [ NUMBER = 74 ] ] + [ [ STREET = 'rue Pascal' ] ] + [ ] + [ ADDRESS2 -> (1) ] + + + + + >>> sent = 'who do you claim that you like' + >>> tokens = sent.split() + >>> cp = parse.load_parser('grammars/book_grammars/feat1.fcfg', trace=1) + >>> trees = cp.parse(tokens) + |.w.d.y.c.t.y.l.| + |[-] . . . . . .| [0:1] 'who' + |. [-] . . . . .| [1:2] 'do' + |. . [-] . . . .| [2:3] 'you' + |. . . [-] . . .| [3:4] 'claim' + |. . . . [-] . .| [4:5] 'that' + |. . . . . [-] .| [5:6] 'you' + |. . . . . . [-]| [6:7] 'like' + |# . . . . . . .| [0:0] NP[]/NP[] -> * + |. # . . . . . .| [1:1] NP[]/NP[] -> * + |. . # . . . . .| [2:2] NP[]/NP[] -> * + |. . . # . . . .| [3:3] NP[]/NP[] -> * + |. . . . # . . .| [4:4] NP[]/NP[] -> * + |. . . . . # . .| [5:5] NP[]/NP[] -> * + |. . . . . . # .| [6:6] NP[]/NP[] -> * + |. . . . . . . #| [7:7] NP[]/NP[] -> * + |[-] . . . . . 
.| [0:1] NP[+WH] -> 'who' * + |[-> . . . . . .| [0:1] S[-INV] -> NP[] * VP[] {} + |[-> . . . . . .| [0:1] S[-INV]/?x[] -> NP[] * VP[]/?x[] {} + |[-> . . . . . .| [0:1] S[-INV] -> NP[] * S[]/NP[] {} + |. [-] . . . . .| [1:2] V[+AUX] -> 'do' * + |. [-> . . . . .| [1:2] S[+INV] -> V[+AUX] * NP[] VP[] {} + |. [-> . . . . .| [1:2] S[+INV]/?x[] -> V[+AUX] * NP[] VP[]/?x[] {} + |. [-> . . . . .| [1:2] VP[] -> V[+AUX] * VP[] {} + |. [-> . . . . .| [1:2] VP[]/?x[] -> V[+AUX] * VP[]/?x[] {} + |. . [-] . . . .| [2:3] NP[-WH] -> 'you' * + |. . [-> . . . .| [2:3] S[-INV] -> NP[] * VP[] {} + |. . [-> . . . .| [2:3] S[-INV]/?x[] -> NP[] * VP[]/?x[] {} + |. . [-> . . . .| [2:3] S[-INV] -> NP[] * S[]/NP[] {} + |. [---> . . . .| [1:3] S[+INV] -> V[+AUX] NP[] * VP[] {} + |. [---> . . . .| [1:3] S[+INV]/?x[] -> V[+AUX] NP[] * VP[]/?x[] {} + |. . . [-] . . .| [3:4] V[-AUX, SUBCAT='clause'] -> 'claim' * + |. . . [-> . . .| [3:4] VP[] -> V[-AUX, SUBCAT='clause'] * SBar[] {} + |. . . [-> . . .| [3:4] VP[]/?x[] -> V[-AUX, SUBCAT='clause'] * SBar[]/?x[] {} + |. . . . [-] . .| [4:5] Comp[] -> 'that' * + |. . . . [-> . .| [4:5] SBar[] -> Comp[] * S[-INV] {} + |. . . . [-> . .| [4:5] SBar[]/?x[] -> Comp[] * S[-INV]/?x[] {} + |. . . . . [-] .| [5:6] NP[-WH] -> 'you' * + |. . . . . [-> .| [5:6] S[-INV] -> NP[] * VP[] {} + |. . . . . [-> .| [5:6] S[-INV]/?x[] -> NP[] * VP[]/?x[] {} + |. . . . . [-> .| [5:6] S[-INV] -> NP[] * S[]/NP[] {} + |. . . . . . [-]| [6:7] V[-AUX, SUBCAT='trans'] -> 'like' * + |. . . . . . [->| [6:7] VP[] -> V[-AUX, SUBCAT='trans'] * NP[] {} + |. . . . . . [->| [6:7] VP[]/?x[] -> V[-AUX, SUBCAT='trans'] * NP[]/?x[] {} + |. . . . . . [-]| [6:7] VP[]/NP[] -> V[-AUX, SUBCAT='trans'] NP[]/NP[] * + |. . . . . [---]| [5:7] S[-INV]/NP[] -> NP[] VP[]/NP[] * + |. . . . [-----]| [4:7] SBar[]/NP[] -> Comp[] S[-INV]/NP[] * + |. . . [-------]| [3:7] VP[]/NP[] -> V[-AUX, SUBCAT='clause'] SBar[]/NP[] * + |. . [---------]| [2:7] S[-INV]/NP[] -> NP[] VP[]/NP[] * + |. [-----------]| [1:7] S[+INV]/NP[] -> V[+AUX] NP[] VP[]/NP[] * + |[=============]| [0:7] S[-INV] -> NP[] S[]/NP[] * + + >>> trees = list(trees) + >>> for tree in trees: print(tree) + (S[-INV] + (NP[+WH] who) + (S[+INV]/NP[] + (V[+AUX] do) + (NP[-WH] you) + (VP[]/NP[] + (V[-AUX, SUBCAT='clause'] claim) + (SBar[]/NP[] + (Comp[] that) + (S[-INV]/NP[] + (NP[-WH] you) + (VP[]/NP[] (V[-AUX, SUBCAT='trans'] like) (NP[]/NP[] ))))))) + +A different parser should give the same parse trees, but perhaps in a different order: + + >>> cp2 = parse.load_parser('grammars/book_grammars/feat1.fcfg', trace=1, + ... parser=parse.FeatureEarleyChartParser) + >>> trees2 = cp2.parse(tokens) + |.w.d.y.c.t.y.l.| + |[-] . . . . . .| [0:1] 'who' + |. [-] . . . . .| [1:2] 'do' + |. . [-] . . . .| [2:3] 'you' + |. . . [-] . . .| [3:4] 'claim' + |. . . . [-] . .| [4:5] 'that' + |. . . . . [-] .| [5:6] 'you' + |. . . . . . [-]| [6:7] 'like' + |> . . . . . . .| [0:0] S[-INV] -> * NP[] VP[] {} + |> . . . . . . .| [0:0] S[-INV]/?x[] -> * NP[] VP[]/?x[] {} + |> . . . . . . .| [0:0] S[-INV] -> * NP[] S[]/NP[] {} + |> . . . . . . .| [0:0] S[-INV] -> * Adv[+NEG] S[+INV] {} + |> . . . . . . .| [0:0] S[+INV] -> * V[+AUX] NP[] VP[] {} + |> . . . . . . .| [0:0] S[+INV]/?x[] -> * V[+AUX] NP[] VP[]/?x[] {} + |> . . . . . . .| [0:0] NP[+WH] -> * 'who' {} + |[-] . . . . . .| [0:1] NP[+WH] -> 'who' * + |[-> . . . . . .| [0:1] S[-INV] -> NP[] * VP[] {} + |[-> . . . . . .| [0:1] S[-INV]/?x[] -> NP[] * VP[]/?x[] {} + |[-> . . . . . .| [0:1] S[-INV] -> NP[] * S[]/NP[] {} + |. > . . . . . 
.| [1:1] S[-INV]/?x[] -> * NP[] VP[]/?x[] {} + |. > . . . . . .| [1:1] S[+INV]/?x[] -> * V[+AUX] NP[] VP[]/?x[] {} + |. > . . . . . .| [1:1] V[+AUX] -> * 'do' {} + |. > . . . . . .| [1:1] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {} + |. > . . . . . .| [1:1] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {} + |. > . . . . . .| [1:1] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {} + |. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='intrans'] {} + |. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='trans'] NP[] {} + |. > . . . . . .| [1:1] VP[] -> * V[-AUX, SUBCAT='clause'] SBar[] {} + |. > . . . . . .| [1:1] VP[] -> * V[+AUX] VP[] {} + |. [-] . . . . .| [1:2] V[+AUX] -> 'do' * + |. [-> . . . . .| [1:2] S[+INV]/?x[] -> V[+AUX] * NP[] VP[]/?x[] {} + |. [-> . . . . .| [1:2] VP[]/?x[] -> V[+AUX] * VP[]/?x[] {} + |. [-> . . . . .| [1:2] VP[] -> V[+AUX] * VP[] {} + |. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='intrans'] {} + |. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='trans'] NP[] {} + |. . > . . . . .| [2:2] VP[] -> * V[-AUX, SUBCAT='clause'] SBar[] {} + |. . > . . . . .| [2:2] VP[] -> * V[+AUX] VP[] {} + |. . > . . . . .| [2:2] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {} + |. . > . . . . .| [2:2] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {} + |. . > . . . . .| [2:2] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {} + |. . > . . . . .| [2:2] NP[-WH] -> * 'you' {} + |. . [-] . . . .| [2:3] NP[-WH] -> 'you' * + |. [---> . . . .| [1:3] S[+INV]/?x[] -> V[+AUX] NP[] * VP[]/?x[] {} + |. . . > . . . .| [3:3] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {} + |. . . > . . . .| [3:3] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {} + |. . . > . . . .| [3:3] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {} + |. . . > . . . .| [3:3] V[-AUX, SUBCAT='clause'] -> * 'claim' {} + |. . . [-] . . .| [3:4] V[-AUX, SUBCAT='clause'] -> 'claim' * + |. . . [-> . . .| [3:4] VP[]/?x[] -> V[-AUX, SUBCAT='clause'] * SBar[]/?x[] {} + |. . . . > . . .| [4:4] SBar[]/?x[] -> * Comp[] S[-INV]/?x[] {} + |. . . . > . . .| [4:4] Comp[] -> * 'that' {} + |. . . . [-] . .| [4:5] Comp[] -> 'that' * + |. . . . [-> . .| [4:5] SBar[]/?x[] -> Comp[] * S[-INV]/?x[] {} + |. . . . . > . .| [5:5] S[-INV]/?x[] -> * NP[] VP[]/?x[] {} + |. . . . . > . .| [5:5] NP[-WH] -> * 'you' {} + |. . . . . [-] .| [5:6] NP[-WH] -> 'you' * + |. . . . . [-> .| [5:6] S[-INV]/?x[] -> NP[] * VP[]/?x[] {} + |. . . . . . > .| [6:6] VP[]/?x[] -> * V[-AUX, SUBCAT='trans'] NP[]/?x[] {} + |. . . . . . > .| [6:6] VP[]/?x[] -> * V[-AUX, SUBCAT='clause'] SBar[]/?x[] {} + |. . . . . . > .| [6:6] VP[]/?x[] -> * V[+AUX] VP[]/?x[] {} + |. . . . . . > .| [6:6] V[-AUX, SUBCAT='trans'] -> * 'like' {} + |. . . . . . [-]| [6:7] V[-AUX, SUBCAT='trans'] -> 'like' * + |. . . . . . [->| [6:7] VP[]/?x[] -> V[-AUX, SUBCAT='trans'] * NP[]/?x[] {} + |. . . . . . . #| [7:7] NP[]/NP[] -> * + |. . . . . . [-]| [6:7] VP[]/NP[] -> V[-AUX, SUBCAT='trans'] NP[]/NP[] * + |. . . . . [---]| [5:7] S[-INV]/NP[] -> NP[] VP[]/NP[] * + |. . . . [-----]| [4:7] SBar[]/NP[] -> Comp[] S[-INV]/NP[] * + |. . . [-------]| [3:7] VP[]/NP[] -> V[-AUX, SUBCAT='clause'] SBar[]/NP[] * + |. 
[-----------]| [1:7] S[+INV]/NP[] -> V[+AUX] NP[] VP[]/NP[] * + |[=============]| [0:7] S[-INV] -> NP[] S[]/NP[] * + + >>> sorted(trees) == sorted(trees2) + True + + +Let's load a German grammar: + + >>> cp = parse.load_parser('grammars/book_grammars/german.fcfg', trace=0) + >>> sent = 'die Katze sieht den Hund' + >>> tokens = sent.split() + >>> trees = cp.parse(tokens) + >>> for tree in trees: print(tree) + (S[] + (NP[AGR=[GND='fem', NUM='sg', PER=3], CASE='nom'] + (Det[AGR=[GND='fem', NUM='sg', PER=3], CASE='nom'] die) + (N[AGR=[GND='fem', NUM='sg', PER=3]] Katze)) + (VP[AGR=[NUM='sg', PER=3]] + (TV[AGR=[NUM='sg', PER=3], OBJCASE='acc'] sieht) + (NP[AGR=[GND='masc', NUM='sg', PER=3], CASE='acc'] + (Det[AGR=[GND='masc', NUM='sg', PER=3], CASE='acc'] den) + (N[AGR=[GND='masc', NUM='sg', PER=3]] Hund)))) + +Grammar with Binding Operators +------------------------------ +The bindop.fcfg grammar is a semantic grammar that uses lambda +calculus. Each element has a core semantics, which is a single lambda +calculus expression; and a set of binding operators, which bind +variables. + +In order to make the binding operators work right, they need to +instantiate their bound variable every time they are added to the +chart. To do this, we use a special subclass of `Chart`, called +`InstantiateVarsChart`. + + >>> from nltk.parse.featurechart import InstantiateVarsChart + >>> cp = parse.load_parser('grammars/sample_grammars/bindop.fcfg', trace=1, + ... chart_class=InstantiateVarsChart) + >>> print(cp.grammar()) + Grammar with 15 productions (start state = S[]) + S[SEM=[BO={?b1+?b2}, CORE=]] -> NP[SEM=[BO=?b1, CORE=?subj]] VP[SEM=[BO=?b2, CORE=?vp]] + VP[SEM=[BO={?b1+?b2}, CORE=]] -> TV[SEM=[BO=?b1, CORE=?v]] NP[SEM=[BO=?b2, CORE=?obj]] + VP[SEM=?s] -> IV[SEM=?s] + NP[SEM=[BO={?b1+?b2+{bo(?det(?n),@x)}}, CORE=<@x>]] -> Det[SEM=[BO=?b1, CORE=?det]] N[SEM=[BO=?b2, CORE=?n]] + Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] -> 'a' + N[SEM=[BO={/}, CORE=]] -> 'dog' + N[SEM=[BO={/}, CORE=]] -> 'cat' + N[SEM=[BO={/}, CORE=]] -> 'mouse' + IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'barks' + IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'eats' + IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'walks' + TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'feeds' + TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'walks' + NP[SEM=[BO={bo(\P.P(John),@x)}, CORE=<@x>]] -> 'john' + NP[SEM=[BO={bo(\P.P(John),@x)}, CORE=<@x>]] -> 'alex' + +A simple intransitive sentence: + + >>> from nltk.sem import logic + >>> logic._counter._value = 100 + + >>> trees = cp.parse('john barks'.split()) + |. john.barks.| + |[-----] .| [0:1] 'john' + |. [-----]| [1:2] 'barks' + |[-----] .| [0:1] NP[SEM=[BO={bo(\P.P(John),z101)}, CORE=]] -> 'john' * + |[-----> .| [0:1] S[SEM=[BO={?b1+?b2}, CORE=]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.P(John),z2)}, ?subj: } + |. [-----]| [1:2] IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> 'barks' * + |. [-----]| [1:2] VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] -> IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] * + |[===========]| [0:2] S[SEM=[BO={bo(\P.P(John),z2)}, CORE=]] -> NP[SEM=[BO={bo(\P.P(John),z2)}, CORE=]] VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] * + >>> for tree in trees: print(tree) + (S[SEM=[BO={bo(\P.P(John),z2)}, CORE=]] + (NP[SEM=[BO={bo(\P.P(John),z101)}, CORE=]] john) + (VP[SEM=[BO={/}, CORE=<\x.bark(x)>]] + (IV[SEM=[BO={/}, CORE=<\x.bark(x)>]] barks))) + +A transitive sentence: + + >>> trees = cp.parse('john feeds a dog'.split()) + |.joh.fee. a .dog.| + |[---] . . .| [0:1] 'john' + |. [---] . 
.| [1:2] 'feeds' + |. . [---] .| [2:3] 'a' + |. . . [---]| [3:4] 'dog' + |[---] . . .| [0:1] NP[SEM=[BO={bo(\P.P(John),z102)}, CORE=]] -> 'john' * + |[---> . . .| [0:1] S[SEM=[BO={?b1+?b2}, CORE=]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.P(John),z2)}, ?subj: } + |. [---] . .| [1:2] TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] -> 'feeds' * + |. [---> . .| [1:2] VP[SEM=[BO={?b1+?b2}, CORE=]] -> TV[SEM=[BO=?b1, CORE=?v]] * NP[SEM=[BO=?b2, CORE=?obj]] {?b1: {/}, ?v: } + |. . [---] .| [2:3] Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] -> 'a' * + |. . [---> .| [2:3] NP[SEM=[BO={?b1+?b2+{bo(?det(?n),@x)}}, CORE=<@x>]] -> Det[SEM=[BO=?b1, CORE=?det]] * N[SEM=[BO=?b2, CORE=?n]] {?b1: {/}, ?det: } + |. . . [---]| [3:4] N[SEM=[BO={/}, CORE=]] -> 'dog' * + |. . [-------]| [2:4] NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z103)}, CORE=]] -> Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] N[SEM=[BO={/}, CORE=]] * + |. . [------->| [2:4] S[SEM=[BO={?b1+?b2}, CORE=]] -> NP[SEM=[BO=?b1, CORE=?subj]] * VP[SEM=[BO=?b2, CORE=?vp]] {?b1: {bo(\P.exists x.(dog(x) & P(x)),z2)}, ?subj: } + |. [-----------]| [1:4] VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]] -> TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=]] * + |[===============]| [0:4] S[SEM=[BO={bo(\P.P(John),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=]] -> NP[SEM=[BO={bo(\P.P(John),z2)}, CORE=]] VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=<\y.feed(y,z3)>]] * + + >>> for tree in trees: print(tree) + (S[SEM=[BO={bo(\P.P(John),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=]] + (NP[SEM=[BO={bo(\P.P(John),z102)}, CORE=]] john) + (VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]] + (TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds) + (NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z103)}, CORE=]] + (Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a) + (N[SEM=[BO={/}, CORE=]] dog)))) + +Turn down the verbosity: + + >>> cp = parse.load_parser('grammars/sample_grammars/bindop.fcfg', trace=0, + ... chart_class=InstantiateVarsChart) + +Reuse the same lexical item twice: + + >>> trees = cp.parse('john feeds john'.split()) + >>> for tree in trees: print(tree) + (S[SEM=[BO={bo(\P.P(John),z2), bo(\P.P(John),z3)}, CORE=]] + (NP[SEM=[BO={bo(\P.P(John),z104)}, CORE=]] john) + (VP[SEM=[BO={bo(\P.P(John),z2)}, CORE=<\y.feed(y,z2)>]] + (TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds) + (NP[SEM=[BO={bo(\P.P(John),z105)}, CORE=]] john))) + + >>> trees = cp.parse('a dog feeds a dog'.split()) + >>> for tree in trees: print(tree) + (S[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2), bo(\P.exists x.(dog(x) & P(x)),z3)}, CORE=]] + (NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z106)}, CORE=]] + (Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a) + (N[SEM=[BO={/}, CORE=]] dog)) + (VP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z2)}, CORE=<\y.feed(y,z2)>]] + (TV[SEM=[BO={/}, CORE=<\x y.feed(y,x)>]] feeds) + (NP[SEM=[BO={bo(\P.exists x.(dog(x) & P(x)),z107)}, CORE=]] + (Det[SEM=[BO={/}, CORE=<\Q P.exists x.(Q(x) & P(x))>]] a) + (N[SEM=[BO={/}, CORE=]] dog)))) diff --git a/lib/python3.10/site-packages/nltk/test/featstruct.doctest b/lib/python3.10/site-packages/nltk/test/featstruct.doctest new file mode 100644 index 0000000000000000000000000000000000000000..e6062d4fb31a9894d7cae48d9d834e78e5e37a6a --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/featstruct.doctest @@ -0,0 +1,1229 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. 
For license information, see LICENSE.TXT + +================================== + Feature Structures & Unification +================================== + >>> from nltk.featstruct import FeatStruct + >>> from nltk.sem.logic import Variable, VariableExpression, Expression + +.. note:: For now, featstruct uses the older lambdalogic semantics + module. Eventually, it should be updated to use the new first + order predicate logic module. + +Overview +~~~~~~~~ +A feature structure is a mapping from feature identifiers to feature +values, where feature values can be simple values (like strings or +ints), nested feature structures, or variables: + + >>> fs1 = FeatStruct(number='singular', person=3) + >>> print(fs1) + [ number = 'singular' ] + [ person = 3 ] + +Feature structure may be nested: + + >>> fs2 = FeatStruct(type='NP', agr=fs1) + >>> print(fs2) + [ agr = [ number = 'singular' ] ] + [ [ person = 3 ] ] + [ ] + [ type = 'NP' ] + +Variables are used to indicate that two features should be assigned +the same value. For example, the following feature structure requires +that the feature fs3['agr']['number'] be bound to the same value as the +feature fs3['subj']['number']. + + >>> fs3 = FeatStruct(agr=FeatStruct(number=Variable('?n')), + ... subj=FeatStruct(number=Variable('?n'))) + >>> print(fs3) + [ agr = [ number = ?n ] ] + [ ] + [ subj = [ number = ?n ] ] + +Feature structures are typically used to represent partial information +about objects. A feature name that is not mapped to a value stands +for a feature whose value is unknown (*not* a feature without a +value). Two feature structures that represent (potentially +overlapping) information about the same object can be combined by +*unification*. + + >>> print(fs2.unify(fs3)) + [ agr = [ number = 'singular' ] ] + [ [ person = 3 ] ] + [ ] + [ subj = [ number = 'singular' ] ] + [ ] + [ type = 'NP' ] + +When two inconsistent feature structures are unified, the unification +fails and returns ``None``. + + >>> fs4 = FeatStruct(agr=FeatStruct(person=1)) + >>> print(fs4.unify(fs2)) + None + >>> print(fs2.unify(fs4)) + None + +.. + >>> del fs1, fs2, fs3, fs4 # clean-up + +Feature Structure Types +----------------------- +There are actually two types of feature structure: + +- *feature dictionaries*, implemented by `FeatDict`, act like + Python dictionaries. Feature identifiers may be strings or + instances of the `Feature` class. +- *feature lists*, implemented by `FeatList`, act like Python + lists. Feature identifiers are integers. + +When you construct a feature structure using the `FeatStruct` +constructor, it will automatically decide which type is appropriate: + + >>> type(FeatStruct(number='singular')) + + >>> type(FeatStruct([1,2,3])) + + +Usually, we will just use feature dictionaries; but sometimes feature +lists can be useful too. Two feature lists will unify with each other +only if they have equal lengths, and all of their feature values +match. If you wish to write a feature list that contains 'unknown' +values, you must use variables: + + >>> fs1 = FeatStruct([1,2,Variable('?y')]) + >>> fs2 = FeatStruct([1,Variable('?x'),3]) + >>> fs1.unify(fs2) + [1, 2, 3] + +.. + >>> del fs1, fs2 # clean-up + +Parsing Feature Structure Strings +--------------------------------- +Feature structures can be constructed directly from strings. Often, +this is more convenient than constructing them directly. NLTK can +parse most feature strings to produce the corresponding feature +structures. 
(But you must restrict your base feature values to +strings, ints, logic expressions (`nltk.sem.logic.Expression`), and a +few other types discussed below). + +Feature dictionaries are written like Python dictionaries, except that +keys are not put in quotes; and square brackets (``[]``) are used +instead of braces (``{}``): + + >>> FeatStruct('[tense="past", agr=[number="sing", person=3]]') + [agr=[number='sing', person=3], tense='past'] + +If a feature value is a single alphanumeric word, then it does not +need to be quoted -- it will be automatically treated as a string: + + >>> FeatStruct('[tense=past, agr=[number=sing, person=3]]') + [agr=[number='sing', person=3], tense='past'] + +Feature lists are written like python lists: + + >>> FeatStruct('[1, 2, 3]') + [1, 2, 3] + +The expression ``[]`` is treated as an empty feature dictionary, not +an empty feature list: + + >>> type(FeatStruct('[]')) + + +Feature Paths +------------- +Features can be specified using *feature paths*, or tuples of feature +identifiers that specify path through the nested feature structures to +a value. + + >>> fs1 = FeatStruct('[x=1, y=[1,2,[z=3]]]') + >>> fs1['y'] + [1, 2, [z=3]] + >>> fs1['y', 2] + [z=3] + >>> fs1['y', 2, 'z'] + 3 + +.. + >>> del fs1 # clean-up + +Reentrance +---------- +Feature structures may contain reentrant feature values. A *reentrant +feature value* is a single feature structure that can be accessed via +multiple feature paths. + + >>> fs1 = FeatStruct(x='val') + >>> fs2 = FeatStruct(a=fs1, b=fs1) + >>> print(fs2) + [ a = (1) [ x = 'val' ] ] + [ ] + [ b -> (1) ] + >>> fs2 + [a=(1)[x='val'], b->(1)] + +As you can see, reentrane is displayed by marking a feature structure +with a unique identifier, in this case ``(1)``, the first time it is +encountered; and then using the special form ``var -> id`` whenever it +is encountered again. You can use the same notation to directly +create reentrant feature structures from strings. + + >>> FeatStruct('[a=(1)[], b->(1), c=[d->(1)]]') + [a=(1)[], b->(1), c=[d->(1)]] + +Reentrant feature structures may contain cycles: + + >>> fs3 = FeatStruct('(1)[a->(1)]') + >>> fs3['a', 'a', 'a', 'a'] + (1)[a->(1)] + >>> fs3['a', 'a', 'a', 'a'] is fs3 + True + +Unification preserves the reentrance relations imposed by both of the +unified feature structures. In the feature structure resulting from +unification, any modifications to a reentrant feature value will be +visible using any of its feature paths. + + >>> fs3.unify(FeatStruct('[a=[b=12], c=33]')) + (1)[a->(1), b=12, c=33] + +.. + >>> del fs1, fs2, fs3 # clean-up + +Feature Structure Equality +-------------------------- +Two feature structures are considered equal if they assign the same +values to all features, *and* they contain the same reentrances. + + >>> fs1 = FeatStruct('[a=(1)[x=1], b->(1)]') + >>> fs2 = FeatStruct('[a=(1)[x=1], b->(1)]') + >>> fs3 = FeatStruct('[a=[x=1], b=[x=1]]') + >>> fs1 == fs1, fs1 is fs1 + (True, True) + >>> fs1 == fs2, fs1 is fs2 + (True, False) + >>> fs1 == fs3, fs1 is fs3 + (False, False) + +Note that this differs from how Python dictionaries and lists define +equality -- in particular, Python dictionaries and lists ignore +reentrance relations. To test two feature structures for equality +while ignoring reentrance relations, use the `equal_values()` method: + + >>> fs1.equal_values(fs1) + True + >>> fs1.equal_values(fs2) + True + >>> fs1.equal_values(fs3) + True + +.. 
+ >>> del fs1, fs2, fs3 # clean-up + +Feature Value Sets & Feature Value Tuples +----------------------------------------- +`nltk.featstruct` defines two new data types that are intended to be +used as feature values: `FeatureValueTuple` and `FeatureValueSet`. +Both of these types are considered base values -- i.e., unification +does *not* apply to them. However, variable binding *does* apply to +any values that they contain. + +Feature value tuples are written with parentheses: + + >>> fs1 = FeatStruct('[x=(?x, ?y)]') + >>> fs1 + [x=(?x, ?y)] + >>> fs1.substitute_bindings({Variable('?x'): 1, Variable('?y'): 2}) + [x=(1, 2)] + +Feature sets are written with braces: + + >>> fs1 = FeatStruct('[x={?x, ?y}]') + >>> fs1 + [x={?x, ?y}] + >>> fs1.substitute_bindings({Variable('?x'): 1, Variable('?y'): 2}) + [x={1, 2}] + +In addition to the basic feature value tuple & set classes, nltk +defines feature value unions (for sets) and feature value +concatenations (for tuples). These are written using '+', and can be +used to combine sets & tuples: + + >>> fs1 = FeatStruct('[x=((1, 2)+?z), z=?z]') + >>> fs1 + [x=((1, 2)+?z), z=?z] + >>> fs1.unify(FeatStruct('[z=(3, 4, 5)]')) + [x=(1, 2, 3, 4, 5), z=(3, 4, 5)] + +Thus, feature value tuples and sets can be used to build up tuples +and sets of values over the course of unification. For example, when +parsing sentences using a semantic feature grammar, feature sets or +feature tuples can be used to build a list of semantic predicates as +the sentence is parsed. + +As was mentioned above, unification does not apply to feature value +tuples and sets. One reason for this that it's impossible to define a +single correct answer for unification when concatenation is used. +Consider the following example: + + >>> fs1 = FeatStruct('[x=(1, 2, 3, 4)]') + >>> fs2 = FeatStruct('[x=(?a+?b), a=?a, b=?b]') + +If unification applied to feature tuples, then the unification +algorithm would have to arbitrarily choose how to divide the tuple +(1,2,3,4) into two parts. Instead, the unification algorithm refuses +to make this decision, and simply unifies based on value. Because +(1,2,3,4) is not equal to (?a+?b), fs1 and fs2 will not unify: + + >>> print(fs1.unify(fs2)) + None + +If you need a list-like structure that unification does apply to, use +`FeatList`. + +.. + >>> del fs1, fs2 # clean-up + +Light-weight Feature Structures +------------------------------- +Many of the functions defined by `nltk.featstruct` can be applied +directly to simple Python dictionaries and lists, rather than to +full-fledged `FeatDict` and `FeatList` objects. In other words, +Python ``dicts`` and ``lists`` can be used as "light-weight" feature +structures. + + >>> # Note: pprint prints dicts sorted + >>> from pprint import pprint + >>> from nltk.featstruct import unify + >>> pprint(unify(dict(x=1, y=dict()), dict(a='a', y=dict(b='b')))) + {'a': 'a', 'x': 1, 'y': {'b': 'b'}} + +However, you should keep in mind the following caveats: + +- Python dictionaries & lists ignore reentrance when checking for + equality between values. But two FeatStructs with different + reentrances are considered nonequal, even if all their base + values are equal. + +- FeatStructs can be easily frozen, allowing them to be used as + keys in hash tables. Python dictionaries and lists can not. + +- FeatStructs display reentrance in their string representations; + Python dictionaries and lists do not. + +- FeatStructs may *not* be mixed with Python dictionaries and lists + (e.g., when performing unification). 
+ +- FeatStructs provide a number of useful methods, such as `walk()` + and `cyclic()`, which are not available for Python dicts & lists. + +In general, if your feature structures will contain any reentrances, +or if you plan to use them as dictionary keys, it is strongly +recommended that you use full-fledged `FeatStruct` objects. + +Custom Feature Values +--------------------- +The abstract base class `CustomFeatureValue` can be used to define new +base value types that have custom unification methods. For example, +the following feature value type encodes a range, and defines +unification as taking the intersection on the ranges: + + >>> from functools import total_ordering + >>> from nltk.featstruct import CustomFeatureValue, UnificationFailure + >>> @total_ordering + ... class Range(CustomFeatureValue): + ... def __init__(self, low, high): + ... assert low <= high + ... self.low = low + ... self.high = high + ... def unify(self, other): + ... if not isinstance(other, Range): + ... return UnificationFailure + ... low = max(self.low, other.low) + ... high = min(self.high, other.high) + ... if low <= high: return Range(low, high) + ... else: return UnificationFailure + ... def __repr__(self): + ... return '(%s>> fs1 = FeatStruct(x=Range(5,8), y=FeatStruct(z=Range(7,22))) + >>> print(fs1.unify(FeatStruct(x=Range(6, 22)))) + [ x = (6>> print(fs1.unify(FeatStruct(x=Range(9, 12)))) + None + >>> print(fs1.unify(FeatStruct(x=12))) + None + >>> print(fs1.unify(FeatStruct('[x=?x, y=[z=?x]]'))) + [ x = (7>> fs1 = FeatStruct(a=1, b=2, c=3) + >>> fs2 = FeatStruct(x=fs1, y='x') + +Feature structures support all dictionary methods (excluding the class +method `dict.fromkeys()`). Non-mutating methods: + + >>> sorted(fs2.keys()) # keys() + ['x', 'y'] + >>> sorted(fs2.values()) # values() + [[a=1, b=2, c=3], 'x'] + >>> sorted(fs2.items()) # items() + [('x', [a=1, b=2, c=3]), ('y', 'x')] + >>> sorted(fs2) # __iter__() + ['x', 'y'] + >>> 'a' in fs2, 'x' in fs2 # __contains__() + (False, True) + >>> fs2.has_key('a'), fs2.has_key('x') # has_key() + (False, True) + >>> fs2['x'], fs2['y'] # __getitem__() + ([a=1, b=2, c=3], 'x') + >>> fs2['a'] # __getitem__() + Traceback (most recent call last): + . . . + KeyError: 'a' + >>> fs2.get('x'), fs2.get('y'), fs2.get('a') # get() + ([a=1, b=2, c=3], 'x', None) + >>> fs2.get('x', 'hello'), fs2.get('a', 'hello') # get() + ([a=1, b=2, c=3], 'hello') + >>> len(fs1), len(fs2) # __len__ + (3, 2) + >>> fs2.copy() # copy() + [x=[a=1, b=2, c=3], y='x'] + >>> fs2.copy() is fs2 # copy() + False + +Note: by default, `FeatStruct.copy()` does a deep copy. Use +`FeatStruct.copy(deep=False)` for a shallow copy. + +.. + >>> del fs1, fs2 # clean-up. 
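+
+The difference between a deep and a shallow copy matters as soon as a feature
+structure contains nested values: a deep copy duplicates the nested structures, while
+a shallow copy re-uses them. A minimal sketch of this behaviour, using throw-away
+structures rather than the ones above::
+
+    from nltk.featstruct import FeatStruct
+
+    inner = FeatStruct(a=1)
+    outer = FeatStruct(x=inner)
+
+    deep = outer.copy()               # default deep=True: nested values are duplicated
+    shallow = outer.copy(deep=False)  # nested values are shared with the original
+
+    print(deep['x'] is inner)         # prints: False
+    print(shallow['x'] is inner)      # prints: True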
+ +Dictionary access methods (mutating) +------------------------------------ + >>> fs1 = FeatStruct(a=1, b=2, c=3) + >>> fs2 = FeatStruct(x=fs1, y='x') + +Setting features (`__setitem__()`) + + >>> fs1['c'] = 5 + >>> fs1 + [a=1, b=2, c=5] + >>> fs1['x'] = 12 + >>> fs1 + [a=1, b=2, c=5, x=12] + >>> fs2['x', 'a'] = 2 + >>> fs2 + [x=[a=2, b=2, c=5, x=12], y='x'] + >>> fs1 + [a=2, b=2, c=5, x=12] + +Deleting features (`__delitem__()`) + + >>> del fs1['x'] + >>> fs1 + [a=2, b=2, c=5] + >>> del fs2['x', 'a'] + >>> fs1 + [b=2, c=5] + +`setdefault()`: + + >>> fs1.setdefault('b', 99) + 2 + >>> fs1 + [b=2, c=5] + >>> fs1.setdefault('x', 99) + 99 + >>> fs1 + [b=2, c=5, x=99] + +`update()`: + + >>> fs2.update({'a':'A', 'b':'B'}, c='C') + >>> fs2 + [a='A', b='B', c='C', x=[b=2, c=5, x=99], y='x'] + +`pop()`: + + >>> fs2.pop('a') + 'A' + >>> fs2 + [b='B', c='C', x=[b=2, c=5, x=99], y='x'] + >>> fs2.pop('a') + Traceback (most recent call last): + . . . + KeyError: 'a' + >>> fs2.pop('a', 'foo') + 'foo' + >>> fs2 + [b='B', c='C', x=[b=2, c=5, x=99], y='x'] + +`clear()`: + + >>> fs1.clear() + >>> fs1 + [] + >>> fs2 + [b='B', c='C', x=[], y='x'] + +`popitem()`: + + >>> sorted([fs2.popitem() for i in range(len(fs2))]) + [('b', 'B'), ('c', 'C'), ('x', []), ('y', 'x')] + >>> fs2 + [] + +Once a feature structure has been frozen, it may not be mutated. + + >>> fs1 = FeatStruct('[x=1, y=2, z=[a=3]]') + >>> fs1.freeze() + >>> fs1.frozen() + True + >>> fs1['z'].frozen() + True + + >>> fs1['x'] = 5 + Traceback (most recent call last): + . . . + ValueError: Frozen FeatStructs may not be modified. + >>> del fs1['x'] + Traceback (most recent call last): + . . . + ValueError: Frozen FeatStructs may not be modified. + >>> fs1.clear() + Traceback (most recent call last): + . . . + ValueError: Frozen FeatStructs may not be modified. + >>> fs1.pop('x') + Traceback (most recent call last): + . . . + ValueError: Frozen FeatStructs may not be modified. + >>> fs1.popitem() + Traceback (most recent call last): + . . . + ValueError: Frozen FeatStructs may not be modified. + >>> fs1.setdefault('x') + Traceback (most recent call last): + . . . + ValueError: Frozen FeatStructs may not be modified. + >>> fs1.update(z=22) + Traceback (most recent call last): + . . . + ValueError: Frozen FeatStructs may not be modified. + +.. + >>> del fs1, fs2 # clean-up. + +Feature Paths +------------- +Make sure that __getitem__ with feature paths works as intended: + + >>> fs1 = FeatStruct(a=1, b=2, + ... c=FeatStruct( + ... d=FeatStruct(e=12), + ... f=FeatStruct(g=55, h='hello'))) + >>> fs1[()] + [a=1, b=2, c=[d=[e=12], f=[g=55, h='hello']]] + >>> fs1['a'], fs1[('a',)] + (1, 1) + >>> fs1['c','d','e'] + 12 + >>> fs1['c','f','g'] + 55 + +Feature paths that select unknown features raise KeyError: + + >>> fs1['c', 'f', 'e'] + Traceback (most recent call last): + . . . + KeyError: ('c', 'f', 'e') + >>> fs1['q', 'p'] + Traceback (most recent call last): + . . . + KeyError: ('q', 'p') + +Feature paths that try to go 'through' a feature that's not a feature +structure raise KeyError: + + >>> fs1['a', 'b'] + Traceback (most recent call last): + . . . 
+ KeyError: ('a', 'b') + +Feature paths can go through reentrant structures: + + >>> fs2 = FeatStruct('(1)[a=[b=[c->(1), d=5], e=11]]') + >>> fs2['a', 'b', 'c', 'a', 'e'] + 11 + >>> fs2['a', 'b', 'c', 'a', 'b', 'd'] + 5 + >>> fs2[tuple('abcabcabcabcabcabcabcabcabcabca')] + (1)[b=[c=[a->(1)], d=5], e=11] + +Indexing requires strings, `Feature`\s, or tuples; other types raise a +TypeError: + + >>> fs2[12] + Traceback (most recent call last): + . . . + TypeError: Expected feature name or path. Got 12. + >>> fs2[list('abc')] + Traceback (most recent call last): + . . . + TypeError: Expected feature name or path. Got ['a', 'b', 'c']. + +Feature paths can also be used with `get()`, `has_key()`, and +`__contains__()`. + + >>> fpath1 = tuple('abcabc') + >>> fpath2 = tuple('abcabz') + >>> fs2.get(fpath1), fs2.get(fpath2) + ((1)[a=[b=[c->(1), d=5], e=11]], None) + >>> fpath1 in fs2, fpath2 in fs2 + (True, False) + >>> fs2.has_key(fpath1), fs2.has_key(fpath2) + (True, False) + +.. + >>> del fs1, fs2 # clean-up + +Reading Feature Structures +-------------------------- + +Empty feature struct: + + >>> FeatStruct('[]') + [] + +Test features with integer values: + + >>> FeatStruct('[a=12, b=-33, c=0]') + [a=12, b=-33, c=0] + +Test features with string values. Either single or double quotes may +be used. Strings are evaluated just like python strings -- in +particular, you can use escape sequences and 'u' and 'r' prefixes, and +triple-quoted strings. + + >>> FeatStruct('[a="", b="hello", c="\'", d=\'\', e=\'"\']') + [a='', b='hello', c="'", d='', e='"'] + >>> FeatStruct(r'[a="\\", b="\"", c="\x6f\\y", d="12"]') + [a='\\', b='"', c='o\\y', d='12'] + >>> FeatStruct(r'[b=r"a\b\c"]') + [b='a\\b\\c'] + >>> FeatStruct('[x="""a"""]') + [x='a'] + +Test parsing of reentrant feature structures. + + >>> FeatStruct('[a=(1)[], b->(1)]') + [a=(1)[], b->(1)] + >>> FeatStruct('[a=(1)[x=1, y=2], b->(1)]') + [a=(1)[x=1, y=2], b->(1)] + +Test parsing of cyclic feature structures. + + >>> FeatStruct('[a=(1)[b->(1)]]') + [a=(1)[b->(1)]] + >>> FeatStruct('(1)[a=[b=[c->(1)]]]') + (1)[a=[b=[c->(1)]]] + +Strings of the form "+name" and "-name" may be used to specify boolean +values. + + >>> FeatStruct('[-bar, +baz, +foo]') + [-bar, +baz, +foo] + +None, True, and False are recognized as values: + + >>> FeatStruct('[bar=True, baz=False, foo=None]') + [+bar, -baz, foo=None] + +Special features: + + >>> FeatStruct('NP/VP') + NP[]/VP[] + >>> FeatStruct('?x/?x') + ?x[]/?x[] + >>> print(FeatStruct('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]')) + [ *type* = 'VP' ] + [ ] + [ [ *type* = 'NP' ] ] + [ *slash* = [ agr = ?x ] ] + [ [ pl = True ] ] + [ ] + [ agr = ?x ] + [ fin = True ] + [ tense = 'past' ] + +Here the slash feature gets coerced: + + >>> FeatStruct('[*slash*=a, x=b, *type*="NP"]') + NP[x='b']/a[] + + >>> FeatStruct('NP[sem=]/NP') + NP[sem=]/NP[] + >>> FeatStruct('S[sem=]') + S[sem=] + >>> print(FeatStruct('NP[sem=]/NP')) + [ *type* = 'NP' ] + [ ] + [ *slash* = [ *type* = 'NP' ] ] + [ ] + [ sem = ] + +Playing with ranges: + + >>> from nltk.featstruct import RangeFeature, FeatStructReader + >>> width = RangeFeature('width') + >>> reader = FeatStructReader([width]) + >>> fs1 = reader.fromstring('[*width*=-5:12]') + >>> fs2 = reader.fromstring('[*width*=2:123]') + >>> fs3 = reader.fromstring('[*width*=-7:-2]') + >>> fs1.unify(fs2) + [*width*=(2, 12)] + >>> fs1.unify(fs3) + [*width*=(-5, -2)] + >>> print(fs2.unify(fs3)) # no overlap in width. 
+ None + +The slash feature has a default value of 'False': + + >>> print(FeatStruct('NP[]/VP').unify(FeatStruct('NP[]'), trace=1)) + + Unification trace: + / NP[]/VP[] + |\ NP[] + | + | Unify feature: *type* + | / 'NP' + | |\ 'NP' + | | + | +-->'NP' + | + | Unify feature: *slash* + | / VP[] + | |\ False + | | + X X <-- FAIL + None + +The demo structures from category.py. They all parse, but they don't +do quite the right thing, -- ?x vs x. + + >>> FeatStruct(pos='n', agr=FeatStruct(number='pl', gender='f')) + [agr=[gender='f', number='pl'], pos='n'] + >>> FeatStruct(r'NP[sem=]/NP') + NP[sem=]/NP[] + >>> FeatStruct(r'S[sem=]') + S[sem=] + >>> FeatStruct('?x/?x') + ?x[]/?x[] + >>> FeatStruct('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]') + VP[agr=?x, +fin, tense='past']/NP[agr=?x, +pl] + >>> FeatStruct('S[sem = ]') + S[sem=] + + >>> FeatStruct('S') + S[] + +The parser also includes support for reading sets and tuples. + + >>> FeatStruct('[x={1,2,2,2}, y={/}]') + [x={1, 2}, y={/}] + >>> FeatStruct('[x=(1,2,2,2), y=()]') + [x=(1, 2, 2, 2), y=()] + >>> print(FeatStruct('[x=(1,[z=(1,2,?x)],?z,{/})]')) + [ x = (1, [ z = (1, 2, ?x) ], ?z, {/}) ] + +Note that we can't put a featstruct inside a tuple, because doing so +would hash it, and it's not frozen yet: + + >>> print(FeatStruct('[x={[]}]')) + Traceback (most recent call last): + . . . + TypeError: FeatStructs must be frozen before they can be hashed. + +There's a special syntax for taking the union of sets: "{...+...}". +The elements should only be variables or sets. + + >>> FeatStruct('[x={?a+?b+{1,2,3}}]') + [x={?a+?b+{1, 2, 3}}] + +There's a special syntax for taking the concatenation of tuples: +"(...+...)". The elements should only be variables or tuples. + + >>> FeatStruct('[x=(?a+?b+(1,2,3))]') + [x=(?a+?b+(1, 2, 3))] + +Parsing gives helpful messages if your string contains an error. + + >>> FeatStruct('[a=, b=5]]') + Traceback (most recent call last): + . . . + ValueError: Error parsing feature structure + [a=, b=5]] + ^ Expected value + >>> FeatStruct('[a=12 22, b=33]') + Traceback (most recent call last): + . . . + ValueError: Error parsing feature structure + [a=12 22, b=33] + ^ Expected comma + >>> FeatStruct('[a=5] [b=6]') + Traceback (most recent call last): + . . . + ValueError: Error parsing feature structure + [a=5] [b=6] + ^ Expected end of string + >>> FeatStruct(' *++*') + Traceback (most recent call last): + . . . + ValueError: Error parsing feature structure + *++* + ^ Expected open bracket or identifier + >>> FeatStruct('[x->(1)]') + Traceback (most recent call last): + . . . + ValueError: Error parsing feature structure + [x->(1)] + ^ Expected bound identifier + >>> FeatStruct('[x->y]') + Traceback (most recent call last): + . . . + ValueError: Error parsing feature structure + [x->y] + ^ Expected identifier + >>> FeatStruct('') + Traceback (most recent call last): + . . . 
+ ValueError: Error parsing feature structure + + ^ Expected open bracket or identifier + + +Unification +----------- +Very simple unifications give the expected results: + + >>> FeatStruct().unify(FeatStruct()) + [] + >>> FeatStruct(number='singular').unify(FeatStruct()) + [number='singular'] + >>> FeatStruct().unify(FeatStruct(number='singular')) + [number='singular'] + >>> FeatStruct(number='singular').unify(FeatStruct(person=3)) + [number='singular', person=3] + +Merging nested structures: + + >>> fs1 = FeatStruct('[A=[B=b]]') + >>> fs2 = FeatStruct('[A=[C=c]]') + >>> fs1.unify(fs2) + [A=[B='b', C='c']] + >>> fs2.unify(fs1) + [A=[B='b', C='c']] + +A basic case of reentrant unification + + >>> fs4 = FeatStruct('[A=(1)[B=b], E=[F->(1)]]') + >>> fs5 = FeatStruct("[A=[C='c'], E=[F=[D='d']]]") + >>> fs4.unify(fs5) + [A=(1)[B='b', C='c', D='d'], E=[F->(1)]] + >>> fs5.unify(fs4) + [A=(1)[B='b', C='c', D='d'], E=[F->(1)]] + +More than 2 paths to a value + + >>> fs1 = FeatStruct("[a=[],b=[],c=[],d=[]]") + >>> fs2 = FeatStruct('[a=(1)[], b->(1), c->(1), d->(1)]') + >>> fs1.unify(fs2) + [a=(1)[], b->(1), c->(1), d->(1)] + +fs1[a] gets unified with itself + + >>> fs1 = FeatStruct('[x=(1)[], y->(1)]') + >>> fs2 = FeatStruct('[x=(1)[], y->(1)]') + >>> fs1.unify(fs2) + [x=(1)[], y->(1)] + +Bound variables should get forwarded appropriately + + >>> fs1 = FeatStruct('[A=(1)[X=x], B->(1), C=?cvar, D=?dvar]') + >>> fs2 = FeatStruct('[A=(1)[Y=y], B=(2)[Z=z], C->(1), D->(2)]') + >>> fs1.unify(fs2) + [A=(1)[X='x', Y='y', Z='z'], B->(1), C->(1), D->(1)] + >>> fs2.unify(fs1) + [A=(1)[X='x', Y='y', Z='z'], B->(1), C->(1), D->(1)] + +Cyclic structure created by unification. + + >>> fs1 = FeatStruct('[F=(1)[], G->(1)]') + >>> fs2 = FeatStruct('[F=[H=(2)[]], G->(2)]') + >>> fs3 = fs1.unify(fs2) + >>> fs3 + [F=(1)[H->(1)], G->(1)] + >>> fs3['F'] is fs3['G'] + True + >>> fs3['F'] is fs3['G']['H'] + True + >>> fs3['F'] is fs3['G']['H']['H'] + True + >>> fs3['F'] is fs3['F']['H']['H']['H']['H']['H']['H']['H']['H'] + True + +Cyclic structure created w/ variables. + + >>> fs1 = FeatStruct('[F=[H=?x]]') + >>> fs2 = FeatStruct('[F=?x]') + >>> fs3 = fs1.unify(fs2, rename_vars=False) + >>> fs3 + [F=(1)[H->(1)]] + >>> fs3['F'] is fs3['F']['H'] + True + >>> fs3['F'] is fs3['F']['H']['H'] + True + >>> fs3['F'] is fs3['F']['H']['H']['H']['H']['H']['H']['H']['H'] + True + +Unifying w/ a cyclic feature structure. + + >>> fs4 = FeatStruct('[F=[H=[H=[H=(1)[]]]], K->(1)]') + >>> fs3.unify(fs4) + [F=(1)[H->(1)], K->(1)] + >>> fs4.unify(fs3) + [F=(1)[H->(1)], K->(1)] + +Variable bindings should preserve reentrance. + + >>> bindings = {} + >>> fs1 = FeatStruct("[a=?x]") + >>> fs2 = fs1.unify(FeatStruct("[a=[]]"), bindings) + >>> fs2['a'] is bindings[Variable('?x')] + True + >>> fs2.unify(FeatStruct("[b=?x]"), bindings) + [a=(1)[], b->(1)] + +Aliased variable tests + + >>> fs1 = FeatStruct("[a=?x, b=?x]") + >>> fs2 = FeatStruct("[b=?y, c=?y]") + >>> bindings = {} + >>> fs3 = fs1.unify(fs2, bindings) + >>> fs3 + [a=?x, b=?x, c=?x] + >>> bindings + {Variable('?y'): Variable('?x')} + >>> fs3.unify(FeatStruct("[a=1]")) + [a=1, b=1, c=1] + +If we keep track of the bindings, then we can use the same variable +over multiple calls to unify. + + >>> bindings = {} + >>> fs1 = FeatStruct('[a=?x]') + >>> fs2 = fs1.unify(FeatStruct('[a=[]]'), bindings) + >>> fs2.unify(FeatStruct('[b=?x]'), bindings) + [a=(1)[], b->(1)] + >>> bindings + {Variable('?x'): []} + +.. 
+ >>> del fs1, fs2, fs3, fs4, fs5 # clean-up + +Unification Bindings +-------------------- + + >>> bindings = {} + >>> fs1 = FeatStruct('[a=?x]') + >>> fs2 = FeatStruct('[a=12]') + >>> fs3 = FeatStruct('[b=?x]') + >>> fs1.unify(fs2, bindings) + [a=12] + >>> bindings + {Variable('?x'): 12} + >>> fs3.substitute_bindings(bindings) + [b=12] + >>> fs3 # substitute_bindings didn't mutate fs3. + [b=?x] + >>> fs2.unify(fs3, bindings) + [a=12, b=12] + + >>> bindings = {} + >>> fs1 = FeatStruct('[a=?x, b=1]') + >>> fs2 = FeatStruct('[a=5, b=?x]') + >>> fs1.unify(fs2, bindings) + [a=5, b=1] + >>> sorted(bindings.items()) + [(Variable('?x'), 5), (Variable('?x2'), 1)] + +.. + >>> del fs1, fs2, fs3 # clean-up + +Expressions +----------- + + >>> e = Expression.fromstring('\\P y.P(z,y)') + >>> fs1 = FeatStruct(x=e, y=Variable('z')) + >>> fs2 = FeatStruct(y=VariableExpression(Variable('John'))) + >>> fs1.unify(fs2) + [x=<\P y.P(John,y)>, y=] + +Remove Variables +---------------- + + >>> FeatStruct('[a=?x, b=12, c=[d=?y]]').remove_variables() + [b=12, c=[]] + >>> FeatStruct('(1)[a=[b=?x,c->(1)]]').remove_variables() + (1)[a=[c->(1)]] + +Equality & Hashing +------------------ +The `equal_values` method checks whether two feature structures assign +the same value to every feature. If the optional argument +``check_reentrances`` is supplied, then it also returns false if there +is any difference in the reentrances. + + >>> a = FeatStruct('(1)[x->(1)]') + >>> b = FeatStruct('(1)[x->(1)]') + >>> c = FeatStruct('(1)[x=[x->(1)]]') + >>> d = FeatStruct('[x=(1)[x->(1)]]') + >>> e = FeatStruct('(1)[x=[x->(1), y=1], y=1]') + >>> def compare(x,y): + ... assert x.equal_values(y, True) == y.equal_values(x, True) + ... assert x.equal_values(y, False) == y.equal_values(x, False) + ... if x.equal_values(y, True): + ... assert x.equal_values(y, False) + ... print('equal values, same reentrance') + ... elif x.equal_values(y, False): + ... print('equal values, different reentrance') + ... else: + ... print('different values') + + >>> compare(a, a) + equal values, same reentrance + >>> compare(a, b) + equal values, same reentrance + >>> compare(a, c) + equal values, different reentrance + >>> compare(a, d) + equal values, different reentrance + >>> compare(c, d) + equal values, different reentrance + >>> compare(a, e) + different values + >>> compare(c, e) + different values + >>> compare(d, e) + different values + >>> compare(e, e) + equal values, same reentrance + +Feature structures may not be hashed until they are frozen: + + >>> hash(a) + Traceback (most recent call last): + . . . + TypeError: FeatStructs must be frozen before they can be hashed. + >>> a.freeze() + >>> v = hash(a) + +Feature structures define hash consistently. The following example +looks at the hash value for each (fs1,fs2) pair; if their hash values +are not equal, then they must not be equal. If their hash values are +equal, then display a message, and indicate whether their values are +indeed equal. Note that c and d currently have the same hash value, +even though they are not equal. That is not a bug, strictly speaking, +but it wouldn't be a bad thing if it changed. + + >>> for fstruct in (a, b, c, d, e): + ... fstruct.freeze() + >>> for fs1_name in 'abcde': + ... for fs2_name in 'abcde': + ... fs1 = locals()[fs1_name] + ... fs2 = locals()[fs2_name] + ... if hash(fs1) != hash(fs2): + ... assert fs1 != fs2 + ... else: + ... print('%s and %s have the same hash value,' % + ... (fs1_name, fs2_name)) + ... if fs1 == fs2: print('and are equal') + ... 
else: print('and are not equal') + a and a have the same hash value, and are equal + a and b have the same hash value, and are equal + b and a have the same hash value, and are equal + b and b have the same hash value, and are equal + c and c have the same hash value, and are equal + c and d have the same hash value, and are not equal + d and c have the same hash value, and are not equal + d and d have the same hash value, and are equal + e and e have the same hash value, and are equal + +.. + >>> del a, b, c, d, e, v # clean-up + +Tracing +------- + + >>> fs1 = FeatStruct('[a=[b=(1)[], c=?x], d->(1), e=[f=?x]]') + >>> fs2 = FeatStruct('[a=(1)[c="C"], e=[g->(1)]]') + >>> fs1.unify(fs2, trace=True) + + Unification trace: + / [a=[b=(1)[], c=?x], d->(1), e=[f=?x]] + |\ [a=(1)[c='C'], e=[g->(1)]] + | + | Unify feature: a + | / [b=[], c=?x] + | |\ [c='C'] + | | + | | Unify feature: a.c + | | / ?x + | | |\ 'C' + | | | + | | +-->Variable('?x') + | | + | +-->[b=[], c=?x] + | Bindings: {?x: 'C'} + | + | Unify feature: e + | / [f=?x] + | |\ [g=[c='C']] + | | + | +-->[f=?x, g=[b=[], c=?x]] + | Bindings: {?x: 'C'} + | + +-->[a=(1)[b=(2)[], c='C'], d->(2), e=[f='C', g->(1)]] + Bindings: {?x: 'C'} + [a=(1)[b=(2)[], c='C'], d->(2), e=[f='C', g->(1)]] + >>> + >>> fs1 = FeatStruct('[a=?x, b=?z, c=?z]') + >>> fs2 = FeatStruct('[a=?y, b=?y, c=?q]') + >>> #fs1.unify(fs2, trace=True) + >>> + +.. + >>> del fs1, fs2 # clean-up + +Unification on Dicts & Lists +---------------------------- +It's possible to do unification on dictionaries: + + >>> from nltk.featstruct import unify + >>> pprint(unify(dict(x=1, y=dict(z=2)), dict(x=1, q=5)), width=1) + {'q': 5, 'x': 1, 'y': {'z': 2}} + +It's possible to do unification on lists as well: + + >>> unify([1, 2, 3], [1, Variable('x'), 3]) + [1, 2, 3] + +Mixing dicts and lists is fine: + + >>> pprint(unify([dict(x=1, y=dict(z=2)),3], [dict(x=1, q=5),3]), + ... width=1) + [{'q': 5, 'x': 1, 'y': {'z': 2}}, 3] + +Mixing dicts and FeatStructs is discouraged: + + >>> unify(dict(x=1), FeatStruct(x=1)) + Traceback (most recent call last): + . . . + ValueError: Mixing FeatStruct objects with Python dicts and lists is not supported. + +But you can do it if you really want, by explicitly stating that both +dictionaries and FeatStructs should be treated as feature structures: + + >>> unify(dict(x=1), FeatStruct(x=1), fs_class=(dict, FeatStruct)) + {'x': 1} + +Finding Conflicts +----------------- + + >>> from nltk.featstruct import conflicts + >>> fs1 = FeatStruct('[a=[b=(1)[c=2], d->(1), e=[f->(1)]]]') + >>> fs2 = FeatStruct('[a=[b=[c=[x=5]], d=[c=2], e=[f=[c=3]]]]') + >>> for path in conflicts(fs1, fs2): + ... print('%-8s: %r vs %r' % ('.'.join(path), fs1[path], fs2[path])) + a.b.c : 2 vs [x=5] + a.e.f.c : 2 vs 3 + +.. + >>> del fs1, fs2 # clean-up + +Retracting Bindings +------------------- + + >>> from nltk.featstruct import retract_bindings + >>> bindings = {} + >>> fs1 = FeatStruct('[a=?x, b=[c=?y]]') + >>> fs2 = FeatStruct('[a=(1)[c=[d=1]], b->(1)]') + >>> fs3 = fs1.unify(fs2, bindings) + >>> print(fs3) + [ a = (1) [ c = [ d = 1 ] ] ] + [ ] + [ b -> (1) ] + >>> pprint(bindings) + {Variable('?x'): [c=[d=1]], Variable('?y'): [d=1]} + >>> retract_bindings(fs3, bindings) + [a=?x, b=?x] + >>> pprint(bindings) + {Variable('?x'): [c=?y], Variable('?y'): [d=1]} + +Squashed Bugs +~~~~~~~~~~~~~ +In svn rev 5167, unifying two feature structures that used the same +variable would cause those variables to become aliased in the output. 
+ + >>> fs1 = FeatStruct('[a=?x]') + >>> fs2 = FeatStruct('[b=?x]') + >>> fs1.unify(fs2) + [a=?x, b=?x2] + +There was a bug in svn revision 5172 that caused `rename_variables` to +rename variables to names that are already used. + + >>> FeatStruct('[a=?x, b=?x2]').rename_variables( + ... vars=[Variable('?x')]) + [a=?x3, b=?x2] + >>> fs1 = FeatStruct('[a=?x]') + >>> fs2 = FeatStruct('[a=?x, b=?x2]') + >>> fs1.unify(fs2) + [a=?x, b=?x2] + +There was a bug in svn rev 5167 that caused us to get the following +example wrong. Basically the problem was that we only followed +'forward' pointers for other, not self, when unifying two feature +structures. (nb: this test assumes that features are unified in +alphabetical order -- if they are not, it might pass even if the bug +is present.) + + >>> fs1 = FeatStruct('[a=[x=1], b=?x, c=?x]') + >>> fs2 = FeatStruct('[a=(1)[], b->(1), c=[x=2]]') + >>> print(fs1.unify(fs2)) + None + +.. + >>> del fs1, fs2 # clean-up diff --git a/lib/python3.10/site-packages/nltk/test/framenet.doctest b/lib/python3.10/site-packages/nltk/test/framenet.doctest new file mode 100644 index 0000000000000000000000000000000000000000..337c348b923a0d3a95c2576f10da6347e7085e7a --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/framenet.doctest @@ -0,0 +1,288 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +======== +FrameNet +======== + +The FrameNet corpus is a lexical database of English that is both human- +and machine-readable, based on annotating examples of how words are used +in actual texts. FrameNet is based on a theory of meaning called Frame +Semantics, deriving from the work of Charles J. Fillmore and colleagues. +The basic idea is straightforward: that the meanings of most words can +best be understood on the basis of a semantic frame: a description of a +type of event, relation, or entity and the participants in it. For +example, the concept of cooking typically involves a person doing the +cooking (Cook), the food that is to be cooked (Food), something to hold +the food while cooking (Container) and a source of heat +(Heating_instrument). In the FrameNet project, this is represented as a +frame called Apply_heat, and the Cook, Food, Heating_instrument and +Container are called frame elements (FEs). Words that evoke this frame, +such as fry, bake, boil, and broil, are called lexical units (LUs) of +the Apply_heat frame. The job of FrameNet is to define the frames +and to annotate sentences to show how the FEs fit syntactically around +the word that evokes the frame. + +------ +Frames +------ + +A Frame is a script-like conceptual structure that describes a +particular type of situation, object, or event along with the +participants and props that are needed for that Frame. For +example, the "Apply_heat" frame describes a common situation +involving a Cook, some Food, and a Heating_Instrument, and is +evoked by words such as bake, blanch, boil, broil, brown, +simmer, steam, etc. + +We call the roles of a Frame "frame elements" (FEs) and the +frame-evoking words are called "lexical units" (LUs). + +FrameNet includes relations between Frames. Several types of +relations are defined, of which the most important are: + +- Inheritance: An IS-A relation. The child frame is a subtype + of the parent frame, and each FE in the parent is bound to + a corresponding FE in the child. An example is the + "Revenge" frame which inherits from the + "Rewards_and_punishments" frame. 
+ +- Using: The child frame presupposes the parent frame as + background, e.g the "Speed" frame "uses" (or presupposes) + the "Motion" frame; however, not all parent FEs need to be + bound to child FEs. + +- Subframe: The child frame is a subevent of a complex event + represented by the parent, e.g. the "Criminal_process" frame + has subframes of "Arrest", "Arraignment", "Trial", and + "Sentencing". + +- Perspective_on: The child frame provides a particular + perspective on an un-perspectivized parent frame. A pair of + examples consists of the "Hiring" and "Get_a_job" frames, + which perspectivize the "Employment_start" frame from the + Employer's and the Employee's point of view, respectively. + +To get a list of all of the Frames in FrameNet, you can use the +`frames()` function. If you supply a regular expression pattern to the +`frames()` function, you will get a list of all Frames whose names match +that pattern: + + >>> from pprint import pprint + >>> from operator import itemgetter + >>> from nltk.corpus import framenet as fn + >>> from nltk.corpus.reader.framenet import PrettyList + >>> x = fn.frames(r'(?i)crim') + >>> x.sort(key=itemgetter('ID')) + >>> x + [, , ...] + >>> PrettyList(sorted(x, key=itemgetter('ID'))) + [, , ...] + +To get the details of a particular Frame, you can use the `frame()` +function passing in the frame number: + + >>> from pprint import pprint + >>> from nltk.corpus import framenet as fn + >>> f = fn.frame(202) + >>> f.ID + 202 + >>> f.name + 'Arrest' + >>> f.definition + "Authorities charge a Suspect, who is under suspicion of having committed a crime..." + >>> len(f.lexUnit) + 11 + >>> pprint(sorted([x for x in f.FE])) + ['Authorities', + 'Charges', + 'Co-participant', + 'Manner', + 'Means', + 'Offense', + 'Place', + 'Purpose', + 'Source_of_legal_authority', + 'Suspect', + 'Time', + 'Type'] + >>> pprint(f.frameRelations) + [ Child=Arrest>, Component=Arrest>, ...] + +The `frame()` function shown above returns a dict object containing +detailed information about the Frame. See the documentation on the +`frame()` function for the specifics. + +You can also search for Frames by their Lexical Units (LUs). The +`frames_by_lemma()` function returns a list of all frames that contain +LUs in which the 'name' attribute of the LU matches the given regular +expression. Note that LU names are composed of "lemma.POS", where the +"lemma" part can be made up of either a single lexeme (e.g. 'run') or +multiple lexemes (e.g. 'a little') (see below). + + >>> PrettyList(sorted(fn.frames_by_lemma(r'(?i)a little'), key=itemgetter('ID'))) + [, ] + +------------- +Lexical Units +------------- + +A lexical unit (LU) is a pairing of a word with a meaning. For +example, the "Apply_heat" Frame describes a common situation +involving a Cook, some Food, and a Heating Instrument, and is +_evoked_ by words such as bake, blanch, boil, broil, brown, +simmer, steam, etc. These frame-evoking words are the LUs in the +Apply_heat frame. Each sense of a polysemous word is a different +LU. + +We have used the word "word" in talking about LUs. The reality +is actually rather complex. When we say that the word "bake" is +polysemous, we mean that the lemma "bake.v" (which has the +word-forms "bake", "bakes", "baked", and "baking") is linked to +three different frames: + +- Apply_heat: "Michelle baked the potatoes for 45 minutes." + +- Cooking_creation: "Michelle baked her mother a cake for her birthday." + +- Absorb_heat: "The potatoes have to bake for more than 30 minutes." 
+ +These constitute three different LUs, with different +definitions. + +Multiword expressions such as "given name" and hyphenated words +like "shut-eye" can also be LUs. Idiomatic phrases such as +"middle of nowhere" and "give the slip (to)" are also defined as +LUs in the appropriate frames ("Isolated_places" and "Evading", +respectively), and their internal structure is not analyzed. + +Framenet provides multiple annotated examples of each sense of a +word (i.e. each LU). Moreover, the set of examples +(approximately 20 per LU) illustrates all of the combinatorial +possibilities of the lexical unit. + +Each LU is linked to a Frame, and hence to the other words which +evoke that Frame. This makes the FrameNet database similar to a +thesaurus, grouping together semantically similar words. + +In the simplest case, frame-evoking words are verbs such as +"fried" in: + + "Matilde fried the catfish in a heavy iron skillet." + +Sometimes event nouns may evoke a Frame. For example, +"reduction" evokes "Cause_change_of_scalar_position" in: + + "...the reduction of debt levels to $665 million from $2.6 billion." + +Adjectives may also evoke a Frame. For example, "asleep" may +evoke the "Sleep" frame as in: + + "They were asleep for hours." + +Many common nouns, such as artifacts like "hat" or "tower", +typically serve as dependents rather than clearly evoking their +own frames. + +Details for a specific lexical unit can be obtained using this class's +`lus()` function, which takes an optional regular expression +pattern that will be matched against the name of the lexical unit: + + >>> from pprint import pprint + >>> PrettyList(sorted(fn.lus(r'(?i)a little'), key=itemgetter('ID'))) + [, , ...] + +You can obtain detailed information on a particular LU by calling the +`lu()` function and passing in an LU's 'ID' number: + + >>> from pprint import pprint + >>> from nltk.corpus import framenet as fn + >>> fn.lu(256).name + 'foresee.v' + >>> fn.lu(256).definition + 'COD: be aware of beforehand; predict.' + >>> fn.lu(256).frame.name + 'Expectation' + >>> fn.lu(256).lexemes[0].name + 'foresee' + +Note that LU names take the form of a dotted string (e.g. "run.v" or "a +little.adv") in which a lemma precedes the "." and a part of speech +(POS) follows the dot. The lemma may be composed of a single lexeme +(e.g. "run") or of multiple lexemes (e.g. "a little"). The list of +POSs used in the LUs is: + +v - verb +n - noun +a - adjective +adv - adverb +prep - preposition +num - numbers +intj - interjection +art - article +c - conjunction +scon - subordinating conjunction + +For more detailed information about the info that is contained in the +dict that is returned by the `lu()` function, see the documentation on +the `lu()` function. + +------------------- +Annotated Documents +------------------- + +The FrameNet corpus contains a small set of annotated documents. A list +of these documents can be obtained by calling the `docs()` function: + + >>> from pprint import pprint + >>> from nltk.corpus import framenet as fn + >>> d = fn.docs('BellRinging')[0] + >>> d.corpname + 'PropBank' + >>> d.sentence[49] + full-text sentence (...) in BellRinging: + + + [POS] 17 tags + + [POS_tagset] PENN + + [text] + [annotationSet] + + `` I live in hopes that the ringers themselves will be drawn into + ***** ******* ***** + Desir Cause_t Cause + [1] [3] [2] + + that fuller life . 
+ ****** + Comple + [4] + (Desir=Desiring, Cause_t=Cause_to_make_noise, Cause=Cause_motion, Comple=Completeness) + + + >>> d.sentence[49].annotationSet[1] + annotation set (...): + + [status] MANUAL + + [LU] (6605) hope.n in Desiring + + [frame] (366) Desiring + + [GF] 2 relations + + [PT] 2 phrases + + [text] + [Target] + [FE] + [Noun] + + `` I live in hopes that the ringers themselves will be drawn into + - ^^^^ ^^ ***** ---------------------------------------------- + E supp su Event + + that fuller life . + ----------------- + + (E=Experiencer, su=supp) + + diff --git a/lib/python3.10/site-packages/nltk/test/generate.doctest b/lib/python3.10/site-packages/nltk/test/generate.doctest new file mode 100644 index 0000000000000000000000000000000000000000..eee322d6d7811e46c5d4c17e7d2daf0ef2e314c2 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/generate.doctest @@ -0,0 +1,78 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +=============================================== +Generating sentences from context-free grammars +=============================================== + +An example grammar: + + >>> from nltk.parse.generate import generate, demo_grammar + >>> from nltk import CFG + >>> grammar = CFG.fromstring(demo_grammar) + >>> print(grammar) + Grammar with 13 productions (start state = S) + S -> NP VP + NP -> Det N + PP -> P NP + VP -> 'slept' + VP -> 'saw' NP + VP -> 'walked' PP + Det -> 'the' + Det -> 'a' + N -> 'man' + N -> 'park' + N -> 'dog' + P -> 'in' + P -> 'with' + +The first 10 generated sentences: + + >>> for sentence in generate(grammar, n=10): + ... print(' '.join(sentence)) + the man slept + the man saw the man + the man saw the park + the man saw the dog + the man saw a man + the man saw a park + the man saw a dog + the man walked in the man + the man walked in the park + the man walked in the dog + +All sentences of max depth 4: + + >>> for sentence in generate(grammar, depth=4): + ... print(' '.join(sentence)) + the man slept + the park slept + the dog slept + a man slept + a park slept + a dog slept + +The number of sentences of different max depths: + + >>> len(list(generate(grammar, depth=3))) + 0 + >>> len(list(generate(grammar, depth=4))) + 6 + >>> len(list(generate(grammar, depth=5))) + 42 + >>> len(list(generate(grammar, depth=6))) + 114 + >>> len(list(generate(grammar))) + 114 + +Infinite grammars will throw a RecursionError when not bounded by some ``depth``: + + >>> grammar = CFG.fromstring(""" + ... S -> A B + ... A -> B + ... B -> "b" | A + ... """) + >>> list(generate(grammar)) + Traceback (most recent call last): + ... + RuntimeError: The grammar has rule(s) that yield infinite recursion! diff --git a/lib/python3.10/site-packages/nltk/test/gensim.doctest b/lib/python3.10/site-packages/nltk/test/gensim.doctest new file mode 100644 index 0000000000000000000000000000000000000000..65d0c6a53f4ac5d209a8557bc4cec37e98ca1e4d --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/gensim.doctest @@ -0,0 +1,141 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +======================================= +Demonstrate word embedding using Gensim +======================================= + + >>> from nltk.test.gensim_fixt import setup_module + >>> setup_module() + +We demonstrate three functions: +- Train the word embeddings using brown corpus; +- Load the pre-trained model and perform simple tasks; and +- Pruning the pre-trained binary model. 
+
+    >>> import gensim
+
+---------------
+Train the model
+---------------
+
+Here we train a word embedding using the Brown Corpus:
+
+    >>> from nltk.corpus import brown
+    >>> train_set = brown.sents()[:10000]
+    >>> model = gensim.models.Word2Vec(train_set)
+
+Training may take some time, so once the model is trained it can be saved and reloaded:
+
+    >>> model.save('brown.embedding')
+    >>> new_model = gensim.models.Word2Vec.load('brown.embedding')
+
+The model maps each word in its vocabulary to an embedding vector, and we can easily look up the vector for a given word.
+
+    >>> len(new_model.wv['university'])
+    100
+
+Gensim also provides a number of supporting functions for working with word embeddings.
+For example, to compute the cosine similarity between two words:
+
+    >>> new_model.wv.similarity('university','school') > 0.3
+    True
+
+---------------------------
+Using the pre-trained model
+---------------------------
+
+NLTK includes a pruned sample of a pre-trained model that was trained on 100 billion words from the Google News dataset.
+The full model is available from https://code.google.com/p/word2vec/ (about 3 GB).
+
+    >>> from nltk.data import find
+    >>> word2vec_sample = str(find('models/word2vec_sample/pruned.word2vec.txt'))
+    >>> model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_sample, binary=False)
+
+We pruned the model to only include the most common words (~44k words).
+
+    >>> len(model)
+    43981
+
+Each word is represented by a vector of 300 dimensions:
+
+    >>> len(model['university'])
+    300
+
+Finding the top n words most similar to a target word is simple. The result is a list of the n most similar words together with their similarity scores.
+
+    >>> model.most_similar(positive=['university'], topn = 3)
+    [('universities', 0.70039...), ('faculty', 0.67809...), ('undergraduate', 0.65870...)]
+
+Finding the word that does not belong in a list is also supported (although it would be simple to implement yourself).
+
+    >>> model.doesnt_match('breakfast cereal dinner lunch'.split())
+    'cereal'
+
+Mikolov et al. (2013) showed that word embeddings capture many syntactic and semantic regularities. For example,
+the vector 'King - Man + Woman' is close to 'Queen' and 'Germany - Berlin + Paris' is close to 'France'.
+
+    >>> model.most_similar(positive=['woman','king'], negative=['man'], topn = 1)
+    [('queen', 0.71181...)]
+
+    >>> model.most_similar(positive=['Paris','Germany'], negative=['Berlin'], topn = 1)
+    [('France', 0.78840...)]
+
+We can visualize the word embeddings using t-SNE (https://lvdmaaten.github.io/tsne/). For this demonstration, we visualize the first 1000 words.
+ +| import numpy as np +| labels = [] +| count = 0 +| max_count = 1000 +| X = np.zeros(shape=(max_count,len(model['university']))) +| +| for term in model.index_to_key: +| X[count] = model[term] +| labels.append(term) +| count+= 1 +| if count >= max_count: break +| +| # It is recommended to use PCA first to reduce to ~50 dimensions +| from sklearn.decomposition import PCA +| pca = PCA(n_components=50) +| X_50 = pca.fit_transform(X) +| +| # Using TSNE to further reduce to 2 dimensions +| from sklearn.manifold import TSNE +| model_tsne = TSNE(n_components=2, random_state=0) +| Y = model_tsne.fit_transform(X_50) +| +| # Show the scatter plot +| import matplotlib.pyplot as plt +| plt.scatter(Y[:,0], Y[:,1], 20) +| +| # Add labels +| for label, x, y in zip(labels, Y[:, 0], Y[:, 1]): +| plt.annotate(label, xy = (x,y), xytext = (0, 0), textcoords = 'offset points', size = 10) +| +| plt.show() + +------------------------------ +Prune the trained binary model +------------------------------ + +Here is the supporting code to extract part of the binary model (GoogleNews-vectors-negative300.bin.gz) from https://code.google.com/p/word2vec/ +We use this code to get the `word2vec_sample` model. + +| import gensim +| # Load the binary model +| model = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary = True) +| +| # Only output word that appear in the Brown corpus +| from nltk.corpus import brown +| words = set(brown.words()) +| print(len(words)) +| +| # Output presented word to a temporary file +| out_file = 'pruned.word2vec.txt' +| with open(out_file,'w') as f: +| word_presented = words.intersection(model.index_to_key) +| f.write('{} {}\n'.format(len(word_presented),len(model['word']))) +| +| for word in word_presented: +| f.write('{} {}\n'.format(word, ' '.join(str(value) for value in model[word]))) diff --git a/lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py b/lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py new file mode 100644 index 0000000000000000000000000000000000000000..ad278231a9c9798936f9c8236dc8c16ed4437a28 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/gluesemantics_malt_fixt.py @@ -0,0 +1,9 @@ +def setup_module(): + import pytest + + from nltk.parse.malt import MaltParser + + try: + depparser = MaltParser() + except (AssertionError, LookupError) as e: + pytest.skip("MaltParser is not available") diff --git a/lib/python3.10/site-packages/nltk/test/grammar.doctest b/lib/python3.10/site-packages/nltk/test/grammar.doctest new file mode 100644 index 0000000000000000000000000000000000000000..5d8f96d5265a0a8d185edbdcff328b932d542343 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/grammar.doctest @@ -0,0 +1,69 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +=============== +Grammar Parsing +=============== + +Grammars can be parsed from strings: + + >>> from nltk import CFG + >>> grammar = CFG.fromstring(""" + ... S -> NP VP + ... PP -> P NP + ... NP -> Det N | NP PP + ... VP -> V NP | VP PP + ... Det -> 'a' | 'the' + ... N -> 'dog' | 'cat' + ... V -> 'chased' | 'sat' + ... P -> 'on' | 'in' + ... """) + >>> grammar + + >>> grammar.start() + S + >>> grammar.productions() + [S -> NP VP, PP -> P NP, NP -> Det N, NP -> NP PP, VP -> V NP, VP -> VP PP, + Det -> 'a', Det -> 'the', N -> 'dog', N -> 'cat', V -> 'chased', V -> 'sat', + P -> 'on', P -> 'in'] + +Probabilistic CFGs: + + >>> from nltk import PCFG + >>> toy_pcfg1 = PCFG.fromstring(""" + ... 
S -> NP VP [1.0] + ... NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] + ... Det -> 'the' [0.8] | 'my' [0.2] + ... N -> 'man' [0.5] | 'telescope' [0.5] + ... VP -> VP PP [0.1] | V NP [0.7] | V [0.2] + ... V -> 'ate' [0.35] | 'saw' [0.65] + ... PP -> P NP [1.0] + ... P -> 'with' [0.61] | 'under' [0.39] + ... """) + +Chomsky Normal Form grammar (Test for bug 474) + + >>> g = CFG.fromstring("VP^ -> VBP NP^") + >>> g.productions()[0].lhs() + VP^ + +Grammars can contain both empty strings and empty productions: + + >>> from nltk.grammar import CFG + >>> from nltk.parse.generate import generate + >>> grammar = CFG.fromstring(""" + ... S -> A B + ... A -> 'a' + ... # An empty string: + ... B -> 'b' | '' + ... """) + >>> list(generate(grammar)) + [['a', 'b'], ['a', '']] + >>> grammar = CFG.fromstring(""" + ... S -> A B + ... A -> 'a' + ... # An empty production: + ... B -> 'b' | + ... """) + >>> list(generate(grammar)) + [['a', 'b'], ['a']] diff --git a/lib/python3.10/site-packages/nltk/test/grammartestsuites.doctest b/lib/python3.10/site-packages/nltk/test/grammartestsuites.doctest new file mode 100644 index 0000000000000000000000000000000000000000..2d008b70f6fedd55c537188f2b69f688df873201 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/grammartestsuites.doctest @@ -0,0 +1,109 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +========================== + Test Suites for Grammars +========================== + +Sentences in the test suite are divided into two classes: + +- grammatical (*accept*) and +- ungrammatical (*reject*). + +If a sentence should parse according to the grammar, the value of +``trees`` will be a non-empty list. If a sentence should be rejected +according to the grammar, then the value of ``trees`` will be ``None``. + + >>> from nltk.parse import TestGrammar + >>> germantest1 = {} + >>> germantest1['doc'] = "Tests for person agreement" + >>> germantest1['accept'] = [ + ... 'ich komme', + ... 'ich sehe mich', + ... 'du kommst', + ... 'du siehst mich', + ... 'sie kommt', + ... 'sie sieht mich', + ... 'ihr kommt', + ... 'wir kommen', + ... 'sie kommen', + ... 'du magst mich', + ... 'er mag mich', + ... 'du folgst mir', + ... 'sie hilft mir', + ... ] + >>> germantest1['reject'] = [ + ... 'ich kommt', + ... 'ich kommst', + ... 'ich siehst mich', + ... 'du komme', + ... 'du sehe mich', + ... 'du kommt', + ... 'er komme', + ... 'er siehst mich', + ... 'wir komme', + ... 'wir kommst', + ... 'die Katzen kommst', + ... 'sie komme', + ... 'sie kommst', + ... 'du mag mich', + ... 'er magst mich', + ... 'du folgt mir', + ... 'sie hilfst mir', + ... ] + >>> germantest2 = {} + >>> germantest2['doc'] = "Tests for number agreement" + >>> germantest2['accept'] = [ + ... 'der Hund kommt', + ... 'die Hunde kommen', + ... 'ich komme', + ... 'wir kommen', + ... 'ich sehe die Katzen', + ... 'ich folge den Katzen', + ... 'ich sehe die Katzen', + ... 'ich folge den Katzen', + ... 'wir sehen die Katzen', + ... 'wir folgen den Katzen' + ... ] + >>> germantest2['reject'] = [ + ... 'ich kommen', + ... 'wir komme', + ... 'der Hunde kommt', + ... 'der Hunde kommen', + ... 'die Katzen kommt', + ... 'ich sehe der Hunde', + ... 'ich folge den Hund', + ... 'ich sehen der Hunde', + ... 'ich folgen den Hund', + ... 'wir sehe die Katzen', + ... 'wir folge den Katzen' + ... ] + >>> germantest3 = {} + >>> germantest3['doc'] = "Tests for case government and subcategorization" + >>> germantest3['accept'] = [ + ... 'der Hund sieht mich', + ... 
'der Hund kommt', + ... 'ich sehe den Hund', + ... 'ich helfe dem Hund', + ... ] + >>> germantest3['reject'] = [ + ... 'ich sehe', + ... 'ich helfe', + ... 'ich komme den Hund', + ... 'ich sehe den Hund die Katzen', + ... 'du hilfst mich', + ... 'du siehst mir', + ... 'du siehst ich', + ... 'der Hunde kommt mich', + ... 'die Hunde sehe die Hunde', + ... 'der Hund sehe die Hunde', + ... 'ich hilft den Hund', + ... 'ich hilft der Hund', + ... 'ich sehe dem Hund', + ... ] + >>> germantestsuites = [germantest1, germantest2, germantest3] + >>> tester = TestGrammar('grammars/book_grammars/german.fcfg', germantestsuites) + >>> tester.run() + Tests for person agreement: All tests passed! + Tests for number agreement: All tests passed! + Tests for case government and subcategorization: All tests passed! diff --git a/lib/python3.10/site-packages/nltk/test/japanese.doctest b/lib/python3.10/site-packages/nltk/test/japanese.doctest new file mode 100644 index 0000000000000000000000000000000000000000..61bbc06a4a65311695f61e2b2891a03a58181d04 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/japanese.doctest @@ -0,0 +1,48 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +============================ +Japanese Language Processing +============================ + + >>> from nltk import * + +------------- +Corpus Access +------------- + +KNB Corpus +---------- + + >>> from nltk.corpus import knbc + +Access the words: this should produce a list of strings: + + >>> type(knbc.words()[0]) is not bytes + True + +Access the sentences: this should produce a list of lists of strings: + + >>> type(knbc.sents()[0][0]) is not bytes + True + +Access the tagged words: this should produce a list of word, tag pairs: + + >>> type(knbc.tagged_words()[0]) + <... 'tuple'> + +Access the tagged sentences: this should produce a list of lists of word, tag pairs: + + >>> type(knbc.tagged_sents()[0][0]) + <... 'tuple'> + + +JEITA Corpus +------------ + + >>> from nltk.corpus import jeita + +Access the tagged words: this should produce a list of word, tag pairs, where a tag is a string: + + >>> type(jeita.tagged_words()[0][1]) is not bytes + True diff --git a/lib/python3.10/site-packages/nltk/test/lm.doctest b/lib/python3.10/site-packages/nltk/test/lm.doctest new file mode 100644 index 0000000000000000000000000000000000000000..9668582b3f90f8bcc5ee48f72b0a41d2da9660e5 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/lm.doctest @@ -0,0 +1,135 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +.. -*- coding: utf-8 -*- + + +Regression Tests +================ + + +Issue 167 +--------- +https://github.com/nltk/nltk/issues/167 + + >>> from nltk.corpus import brown + >>> from nltk.lm.preprocessing import padded_everygram_pipeline + >>> ngram_order = 3 + >>> train_data, vocab_data = padded_everygram_pipeline( + ... ngram_order, + ... brown.sents(categories="news") + ... ) + + >>> from nltk.lm import WittenBellInterpolated + >>> lm = WittenBellInterpolated(ngram_order) + >>> lm.fit(train_data, vocab_data) + + + + +Sentence containing an unseen word should result in infinite entropy because +Witten-Bell is based ultimately on MLE, which cannot handle unseen ngrams. +Crucially, it shouldn't raise any exceptions for unseen words. 
+ + >>> from nltk.util import ngrams + >>> sent = ngrams("This is a sentence with the word aaddvark".split(), 3) + >>> lm.entropy(sent) + inf + +If we remove all unseen ngrams from the sentence, we'll get a non-infinite value +for the entropy. + + >>> sent = ngrams("This is a sentence".split(), 3) + >>> round(lm.entropy(sent), 14) + 10.23701322869105 + + +Issue 367 +--------- +https://github.com/nltk/nltk/issues/367 + +Reproducing Dan Blanchard's example: +https://github.com/nltk/nltk/issues/367#issuecomment-14646110 + + >>> from nltk.lm import Lidstone, Vocabulary + >>> word_seq = list('aaaababaaccbacb') + >>> ngram_order = 2 + >>> from nltk.util import everygrams + >>> train_data = [everygrams(word_seq, max_len=ngram_order)] + >>> V = Vocabulary(['a', 'b', 'c', '']) + >>> lm = Lidstone(0.2, ngram_order, vocabulary=V) + >>> lm.fit(train_data) + +For doctest to work we have to sort the vocabulary keys. + + >>> V_keys = sorted(V) + >>> round(sum(lm.score(w, ("b",)) for w in V_keys), 6) + 1.0 + >>> round(sum(lm.score(w, ("a",)) for w in V_keys), 6) + 1.0 + + >>> [lm.score(w, ("b",)) for w in V_keys] + [0.05, 0.05, 0.8, 0.05, 0.05] + >>> [round(lm.score(w, ("a",)), 4) for w in V_keys] + [0.0222, 0.0222, 0.4667, 0.2444, 0.2444] + + +Here's reproducing @afourney's comment: +https://github.com/nltk/nltk/issues/367#issuecomment-15686289 + + >>> sent = ['foo', 'foo', 'foo', 'foo', 'bar', 'baz'] + >>> ngram_order = 3 + >>> from nltk.lm.preprocessing import padded_everygram_pipeline + >>> train_data, vocab_data = padded_everygram_pipeline(ngram_order, [sent]) + >>> from nltk.lm import Lidstone + >>> lm = Lidstone(0.2, ngram_order) + >>> lm.fit(train_data, vocab_data) + +The vocabulary includes the "UNK" symbol as well as two padding symbols. + + >>> len(lm.vocab) + 6 + >>> word = "foo" + >>> context = ("bar", "baz") + +The raw counts. + + >>> lm.context_counts(context)[word] + 0 + >>> lm.context_counts(context).N() + 1 + +Counts with Lidstone smoothing. + + >>> lm.context_counts(context)[word] + lm.gamma + 0.2 + >>> lm.context_counts(context).N() + len(lm.vocab) * lm.gamma + 2.2 + +Without any backoff, just using Lidstone smoothing, P("foo" | "bar", "baz") should be: +0.2 / 2.2 ~= 0.090909 + + >>> round(lm.score(word, context), 6) + 0.090909 + + +Issue 380 +--------- +https://github.com/nltk/nltk/issues/380 + +Reproducing setup akin to this comment: +https://github.com/nltk/nltk/issues/380#issue-12879030 + +For speed take only the first 100 sentences of reuters. Shouldn't affect the test. + + >>> from nltk.corpus import reuters + >>> sents = reuters.sents()[:100] + >>> ngram_order = 3 + >>> from nltk.lm.preprocessing import padded_everygram_pipeline + >>> train_data, vocab_data = padded_everygram_pipeline(ngram_order, sents) + + >>> from nltk.lm import Lidstone + >>> lm = Lidstone(0.2, ngram_order) + >>> lm.fit(train_data, vocab_data) + >>> lm.score("said", ("",)) < 1 + True diff --git a/lib/python3.10/site-packages/nltk/test/meteor.doctest b/lib/python3.10/site-packages/nltk/test/meteor.doctest new file mode 100644 index 0000000000000000000000000000000000000000..d7d924004601091811d6b58d52aa549849ada659 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/meteor.doctest @@ -0,0 +1,54 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +.. 
-*- coding: utf-8 -*- + +============= +METEOR tests +============= + +No Alignment test +------------------ + + >>> from nltk.translate import meteor + >>> from nltk import word_tokenize + +If the candidate has no alignment to any of the references, the METEOR score is 0. + + >>> round(meteor( + ... [word_tokenize('The candidate has no alignment to any of the references')], + ... word_tokenize('John loves Mary') + ... ), 4) + 0.0 + +Tests based on wikipedia examples +--------------------------------- + +Testing on `wikipedia examples `_ + + >>> same_res = round(meteor( + ... [word_tokenize('The cat sat on the mat')], + ... word_tokenize('The cat sat on the mat') + ... ), 4) + >>> abs(same_res - 0.9977) < 1e-2 + True + + >>> meteor( + ... [word_tokenize('The cat sat on the mat')], + ... word_tokenize('on the mat sat the cat') + ... ) + 0.5 + + >>> round(meteor( + ... [word_tokenize('The cat sat on the mat')], + ... word_tokenize('The cat was sat on the mat') + ... ), 4) + 0.9654 + +Test corresponding to issue #2751, where METEOR score > 1 + + >>> round(meteor( + ... [word_tokenize('create or update a vm set')], + ... word_tokenize('creates or updates a virtual machine scale set') + ... ), 4) + 0.7806 diff --git a/lib/python3.10/site-packages/nltk/test/nonmonotonic.doctest b/lib/python3.10/site-packages/nltk/test/nonmonotonic.doctest new file mode 100644 index 0000000000000000000000000000000000000000..a570e05119733733829fd7db7813f7edfdc92f2e --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/nonmonotonic.doctest @@ -0,0 +1,293 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +====================== +Nonmonotonic Reasoning +====================== + + >>> from nltk.test.setup_fixt import check_binary + >>> check_binary('mace4') + + >>> from nltk import * + >>> from nltk.inference.nonmonotonic import * + >>> from nltk.sem import logic + >>> logic._counter._value = 0 + >>> read_expr = logic.Expression.fromstring + +------------------------ +Closed Domain Assumption +------------------------ + +The only entities in the domain are those found in the assumptions or goal. +If the domain only contains "A" and "B", then the expression "exists x.P(x)" can +be replaced with "P(A) | P(B)" and an expression "all x.P(x)" can be replaced +with "P(A) & P(B)". 
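+
+As an illustration of this closure step (a minimal sketch using ``nltk.sem.logic``
+only; the variable names are ours, and the ``ClosedDomainProver`` used below
+performs this rewriting of the assumptions automatically), the two replacements
+for a domain containing just "A" and "B" can be built by hand:
+
+| from nltk.sem import logic
+| read_expr = logic.Expression.fromstring
+| domain = ['A', 'B']
+| # "exists x.P(x)" closes to a disjunction over the domain ...
+| print(read_expr(' | '.join('P(%s)' % e for e in domain)))   # (P(A) | P(B))
+| # ... and "all x.P(x)" closes to a conjunction over the domain
+| print(read_expr(' & '.join('P(%s)' % e for e in domain)))   # (P(A) & P(B))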
+ + >>> p1 = read_expr(r'all x.(man(x) -> mortal(x))') + >>> p2 = read_expr(r'man(Socrates)') + >>> c = read_expr(r'mortal(Socrates)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> prover.prove() + True + >>> cdp = ClosedDomainProver(prover) + >>> for a in cdp.assumptions(): print(a) # doctest: +SKIP + (man(Socrates) -> mortal(Socrates)) + man(Socrates) + >>> cdp.prove() + True + + >>> p1 = read_expr(r'exists x.walk(x)') + >>> p2 = read_expr(r'man(Socrates)') + >>> c = read_expr(r'walk(Socrates)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> prover.prove() + False + >>> cdp = ClosedDomainProver(prover) + >>> for a in cdp.assumptions(): print(a) # doctest: +SKIP + walk(Socrates) + man(Socrates) + >>> cdp.prove() + True + + >>> p1 = read_expr(r'exists x.walk(x)') + >>> p2 = read_expr(r'man(Socrates)') + >>> p3 = read_expr(r'-walk(Bill)') + >>> c = read_expr(r'walk(Socrates)') + >>> prover = Prover9Command(c, [p1,p2,p3]) + >>> prover.prove() + False + >>> cdp = ClosedDomainProver(prover) + >>> for a in cdp.assumptions(): print(a) # doctest: +SKIP + (walk(Socrates) | walk(Bill)) + man(Socrates) + -walk(Bill) + >>> cdp.prove() + True + + >>> p1 = read_expr(r'walk(Socrates)') + >>> p2 = read_expr(r'walk(Bill)') + >>> c = read_expr(r'all x.walk(x)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> prover.prove() + False + >>> cdp = ClosedDomainProver(prover) + >>> for a in cdp.assumptions(): print(a) # doctest: +SKIP + walk(Socrates) + walk(Bill) + >>> print(cdp.goal()) # doctest: +SKIP + (walk(Socrates) & walk(Bill)) + >>> cdp.prove() + True + + >>> p1 = read_expr(r'girl(mary)') + >>> p2 = read_expr(r'dog(rover)') + >>> p3 = read_expr(r'all x.(girl(x) -> -dog(x))') + >>> p4 = read_expr(r'all x.(dog(x) -> -girl(x))') + >>> p5 = read_expr(r'chase(mary, rover)') + >>> c = read_expr(r'exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))') + >>> prover = Prover9Command(c, [p1,p2,p3,p4,p5]) + >>> print(prover.prove()) + False + >>> cdp = ClosedDomainProver(prover) + >>> for a in cdp.assumptions(): print(a) # doctest: +SKIP + girl(mary) + dog(rover) + ((girl(rover) -> -dog(rover)) & (girl(mary) -> -dog(mary))) + ((dog(rover) -> -girl(rover)) & (dog(mary) -> -girl(mary))) + chase(mary,rover) + >>> print(cdp.goal()) # doctest: +SKIP + ((dog(rover) & (girl(rover) -> chase(rover,rover)) & (girl(mary) -> chase(mary,rover))) | (dog(mary) & (girl(rover) -> chase(rover,mary)) & (girl(mary) -> chase(mary,mary)))) + >>> print(cdp.prove()) + True + +----------------------- +Unique Names Assumption +----------------------- + +No two entities in the domain represent the same entity unless it can be +explicitly proven that they do. Therefore, if the domain contains "A" and "B", +then add the assumption "-(A = B)" if it is not the case that +" \|- (A = B)". 
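+
+The pairwise inequalities introduced by this assumption can be sketched as
+follows (an illustrative snippet only; the ``UniqueNamesProver`` used below
+collects the constants from the assumptions itself and only keeps an
+inequality when the corresponding equality is not provable):
+
+| from itertools import combinations
+| from nltk.sem import logic
+| read_expr = logic.Expression.fromstring
+| constants = ['Socrates', 'Bill', 'William']
+| # candidate "-(A = B)" assumptions, one per unordered pair of names
+| for a, b in combinations(constants, 2):
+|     print(read_expr('-(%s = %s)' % (a, b)))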
+ + >>> p1 = read_expr(r'man(Socrates)') + >>> p2 = read_expr(r'man(Bill)') + >>> c = read_expr(r'exists x.exists y.-(x = y)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> prover.prove() + False + >>> unp = UniqueNamesProver(prover) + >>> for a in unp.assumptions(): print(a) # doctest: +SKIP + man(Socrates) + man(Bill) + -(Socrates = Bill) + >>> unp.prove() + True + + >>> p1 = read_expr(r'all x.(walk(x) -> (x = Socrates))') + >>> p2 = read_expr(r'Bill = William') + >>> p3 = read_expr(r'Bill = Billy') + >>> c = read_expr(r'-walk(William)') + >>> prover = Prover9Command(c, [p1,p2,p3]) + >>> prover.prove() + False + >>> unp = UniqueNamesProver(prover) + >>> for a in unp.assumptions(): print(a) # doctest: +SKIP + all x.(walk(x) -> (x = Socrates)) + (Bill = William) + (Bill = Billy) + -(William = Socrates) + -(Billy = Socrates) + -(Socrates = Bill) + >>> unp.prove() + True + +----------------------- +Closed World Assumption +----------------------- + +The only entities that have certain properties are those that is it stated +have the properties. We accomplish this assumption by "completing" predicates. + +If the assumptions contain "P(A)", then "all x.(P(x) -> (x=A))" is the completion +of "P". If the assumptions contain "all x.(ostrich(x) -> bird(x))", then +"all x.(bird(x) -> ostrich(x))" is the completion of "bird". If the +assumptions don't contain anything that are "P", then "all x.-P(x)" is the +completion of "P". + + >>> p1 = read_expr(r'walk(Socrates)') + >>> p2 = read_expr(r'-(Socrates = Bill)') + >>> c = read_expr(r'-walk(Bill)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> prover.prove() + False + >>> cwp = ClosedWorldProver(prover) + >>> for a in cwp.assumptions(): print(a) # doctest: +SKIP + walk(Socrates) + -(Socrates = Bill) + all z1.(walk(z1) -> (z1 = Socrates)) + >>> cwp.prove() + True + + >>> p1 = read_expr(r'see(Socrates, John)') + >>> p2 = read_expr(r'see(John, Mary)') + >>> p3 = read_expr(r'-(Socrates = John)') + >>> p4 = read_expr(r'-(John = Mary)') + >>> c = read_expr(r'-see(Socrates, Mary)') + >>> prover = Prover9Command(c, [p1,p2,p3,p4]) + >>> prover.prove() + False + >>> cwp = ClosedWorldProver(prover) + >>> for a in cwp.assumptions(): print(a) # doctest: +SKIP + see(Socrates,John) + see(John,Mary) + -(Socrates = John) + -(John = Mary) + all z3 z4.(see(z3,z4) -> (((z3 = Socrates) & (z4 = John)) | ((z3 = John) & (z4 = Mary)))) + >>> cwp.prove() + True + + >>> p1 = read_expr(r'all x.(ostrich(x) -> bird(x))') + >>> p2 = read_expr(r'bird(Tweety)') + >>> p3 = read_expr(r'-ostrich(Sam)') + >>> p4 = read_expr(r'Sam != Tweety') + >>> c = read_expr(r'-bird(Sam)') + >>> prover = Prover9Command(c, [p1,p2,p3,p4]) + >>> prover.prove() + False + >>> cwp = ClosedWorldProver(prover) + >>> for a in cwp.assumptions(): print(a) # doctest: +SKIP + all x.(ostrich(x) -> bird(x)) + bird(Tweety) + -ostrich(Sam) + -(Sam = Tweety) + all z7.-ostrich(z7) + all z8.(bird(z8) -> ((z8 = Tweety) | ostrich(z8))) + >>> print(cwp.prove()) + True + +----------------------- +Multi-Decorator Example +----------------------- + +Decorators can be nested to utilize multiple assumptions. 
+ + >>> p1 = read_expr(r'see(Socrates, John)') + >>> p2 = read_expr(r'see(John, Mary)') + >>> c = read_expr(r'-see(Socrates, Mary)') + >>> prover = Prover9Command(c, [p1,p2]) + >>> print(prover.prove()) + False + >>> cmd = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover))) + >>> print(cmd.prove()) + True + +----------------- +Default Reasoning +----------------- + >>> logic._counter._value = 0 + >>> premises = [] + +define the taxonomy + + >>> premises.append(read_expr(r'all x.(elephant(x) -> animal(x))')) + >>> premises.append(read_expr(r'all x.(bird(x) -> animal(x))')) + >>> premises.append(read_expr(r'all x.(dove(x) -> bird(x))')) + >>> premises.append(read_expr(r'all x.(ostrich(x) -> bird(x))')) + >>> premises.append(read_expr(r'all x.(flying_ostrich(x) -> ostrich(x))')) + +default the properties using abnormalities + + >>> premises.append(read_expr(r'all x.((animal(x) & -Ab1(x)) -> -fly(x))')) #normal animals don't fly + >>> premises.append(read_expr(r'all x.((bird(x) & -Ab2(x)) -> fly(x))')) #normal birds fly + >>> premises.append(read_expr(r'all x.((ostrich(x) & -Ab3(x)) -> -fly(x))')) #normal ostriches don't fly + +specify abnormal entities + + >>> premises.append(read_expr(r'all x.(bird(x) -> Ab1(x))')) #flight + >>> premises.append(read_expr(r'all x.(ostrich(x) -> Ab2(x))')) #non-flying bird + >>> premises.append(read_expr(r'all x.(flying_ostrich(x) -> Ab3(x))')) #flying ostrich + +define entities + + >>> premises.append(read_expr(r'elephant(el)')) + >>> premises.append(read_expr(r'dove(do)')) + >>> premises.append(read_expr(r'ostrich(os)')) + +print the augmented assumptions list + + >>> prover = Prover9Command(None, premises) + >>> command = UniqueNamesProver(ClosedWorldProver(prover)) + >>> for a in command.assumptions(): print(a) # doctest: +SKIP + all x.(elephant(x) -> animal(x)) + all x.(bird(x) -> animal(x)) + all x.(dove(x) -> bird(x)) + all x.(ostrich(x) -> bird(x)) + all x.(flying_ostrich(x) -> ostrich(x)) + all x.((animal(x) & -Ab1(x)) -> -fly(x)) + all x.((bird(x) & -Ab2(x)) -> fly(x)) + all x.((ostrich(x) & -Ab3(x)) -> -fly(x)) + all x.(bird(x) -> Ab1(x)) + all x.(ostrich(x) -> Ab2(x)) + all x.(flying_ostrich(x) -> Ab3(x)) + elephant(el) + dove(do) + ostrich(os) + all z1.(animal(z1) -> (elephant(z1) | bird(z1))) + all z2.(Ab1(z2) -> bird(z2)) + all z3.(bird(z3) -> (dove(z3) | ostrich(z3))) + all z4.(dove(z4) -> (z4 = do)) + all z5.(Ab2(z5) -> ostrich(z5)) + all z6.(Ab3(z6) -> flying_ostrich(z6)) + all z7.(ostrich(z7) -> ((z7 = os) | flying_ostrich(z7))) + all z8.-flying_ostrich(z8) + all z9.(elephant(z9) -> (z9 = el)) + -(el = os) + -(el = do) + -(os = do) + + >>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('-fly(el)'), premises))).prove() + True + >>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('fly(do)'), premises))).prove() + True + >>> UniqueNamesProver(ClosedWorldProver(Prover9Command(read_expr('-fly(os)'), premises))).prove() + True diff --git a/lib/python3.10/site-packages/nltk/test/paice.doctest b/lib/python3.10/site-packages/nltk/test/paice.doctest new file mode 100644 index 0000000000000000000000000000000000000000..3759a44bd17ae6234b970b87ee39d6424e6d6f2c --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/paice.doctest @@ -0,0 +1,35 @@ + +===================================================== +PAICE's evaluation statistics for stemming algorithms +===================================================== + +Given a list of words with their real lemmas and stems according to stemming algorithm under 
evaluation, +counts Understemming Index (UI), Overstemming Index (OI), Stemming Weight (SW) and Error-rate relative to truncation (ERRT). + + >>> from nltk.metrics import Paice + + +------------------------------------- +Understemming and Overstemming values +------------------------------------- + + >>> lemmas = {'kneel': ['kneel', 'knelt'], + ... 'range': ['range', 'ranged'], + ... 'ring': ['ring', 'rang', 'rung']} + >>> stems = {'kneel': ['kneel'], + ... 'knelt': ['knelt'], + ... 'rang': ['rang', 'range', 'ranged'], + ... 'ring': ['ring'], + ... 'rung': ['rung']} + >>> p = Paice(lemmas, stems) + >>> p.gumt, p.gdmt, p.gwmt, p.gdnt + (4.0, 5.0, 2.0, 16.0) + + >>> p.ui, p.oi, p.sw + (0.8..., 0.125..., 0.15625...) + + >>> p.errt + 1.0 + + >>> [('{0:.3f}'.format(a), '{0:.3f}'.format(b)) for a, b in p.coords] + [('0.000', '1.000'), ('0.000', '0.375'), ('0.600', '0.125'), ('0.800', '0.125')] diff --git a/lib/python3.10/site-packages/nltk/test/parse.doctest b/lib/python3.10/site-packages/nltk/test/parse.doctest new file mode 100644 index 0000000000000000000000000000000000000000..13e107e3faa103ea345da5733aa84fe09163e10d --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/parse.doctest @@ -0,0 +1,933 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +========= + Parsing +========= + +Unit tests for the Context Free Grammar class +--------------------------------------------- + + >>> import pickle + >>> import subprocess + >>> import sys + >>> from nltk import Nonterminal, nonterminals, Production, CFG + + >>> nt1 = Nonterminal('NP') + >>> nt2 = Nonterminal('VP') + + >>> nt1.symbol() + 'NP' + + >>> nt1 == Nonterminal('NP') + True + + >>> nt1 == nt2 + False + + >>> S, NP, VP, PP = nonterminals('S, NP, VP, PP') + >>> N, V, P, DT = nonterminals('N, V, P, DT') + + >>> prod1 = Production(S, [NP, VP]) + >>> prod2 = Production(NP, [DT, NP]) + + >>> prod1.lhs() + S + + >>> prod1.rhs() + (NP, VP) + + >>> prod1 == Production(S, [NP, VP]) + True + + >>> prod1 == prod2 + False + + >>> grammar = CFG.fromstring(""" + ... S -> NP VP + ... PP -> P NP + ... NP -> 'the' N | N PP | 'the' N PP + ... VP -> V NP | V PP | V NP PP + ... N -> 'cat' + ... N -> 'dog' + ... N -> 'rug' + ... V -> 'chased' + ... V -> 'sat' + ... P -> 'in' + ... P -> 'on' + ... """) + + >>> cmd = """import pickle + ... from nltk import Production + ... p = Production('S', ['NP', 'VP']) + ... print(pickle.dumps(p)) + ... """ + + >>> # Start a subprocess to simulate pickling in another process + >>> proc = subprocess.run([sys.executable, '-c', cmd], stdout=subprocess.PIPE) + >>> p1 = pickle.loads(eval(proc.stdout)) + >>> p2 = Production('S', ['NP', 'VP']) + >>> print(hash(p1) == hash(p2)) + True + +Unit tests for the rd (Recursive Descent Parser) class +------------------------------------------------------ + +Create and run a recursive descent parser over both a syntactically ambiguous +and unambiguous sentence. + + >>> from nltk.parse import RecursiveDescentParser + >>> rd = RecursiveDescentParser(grammar) + + >>> sentence1 = 'the cat chased the dog'.split() + >>> sentence2 = 'the cat chased the dog on the rug'.split() + + >>> for t in rd.parse(sentence1): + ... print(t) + (S (NP the (N cat)) (VP (V chased) (NP the (N dog)))) + + >>> for t in rd.parse(sentence2): + ... 
print(t) + (S + (NP the (N cat)) + (VP (V chased) (NP the (N dog) (PP (P on) (NP the (N rug)))))) + (S + (NP the (N cat)) + (VP (V chased) (NP the (N dog)) (PP (P on) (NP the (N rug))))) + + +(dolist (expr doctest-font-lock-keywords) + (add-to-list 'font-lock-keywords expr)) + + font-lock-keywords +(add-to-list 'font-lock-keywords + (car doctest-font-lock-keywords)) + + +Unit tests for the sr (Shift Reduce Parser) class +------------------------------------------------- + +Create and run a shift reduce parser over both a syntactically ambiguous +and unambiguous sentence. Note that unlike the recursive descent parser, one +and only one parse is ever returned. + + >>> from nltk.parse import ShiftReduceParser + >>> sr = ShiftReduceParser(grammar) + + >>> sentence1 = 'the cat chased the dog'.split() + >>> sentence2 = 'the cat chased the dog on the rug'.split() + + >>> for t in sr.parse(sentence1): + ... print(t) + (S (NP the (N cat)) (VP (V chased) (NP the (N dog)))) + + +The shift reduce parser uses heuristics to decide what to do when there are +multiple possible shift or reduce operations available - for the supplied +grammar clearly the wrong operation is selected. + + >>> for t in sr.parse(sentence2): + ... print(t) + + +Unit tests for the Chart Parser class +------------------------------------- + +We use the demo() function for testing. +We must turn off showing of times. + + >>> import nltk + +First we test tracing with a short sentence + + >>> nltk.parse.chart.demo(2, print_times=False, trace=1, + ... sent='I saw a dog', numparses=1) + * Sentence: + I saw a dog + ['I', 'saw', 'a', 'dog'] + + * Strategy: Bottom-up + + |. I . saw . a . dog .| + |[---------] . . .| [0:1] 'I' + |. [---------] . .| [1:2] 'saw' + |. . [---------] .| [2:3] 'a' + |. . . [---------]| [3:4] 'dog' + |> . . . .| [0:0] NP -> * 'I' + |[---------] . . .| [0:1] NP -> 'I' * + |> . . . .| [0:0] S -> * NP VP + |> . . . .| [0:0] NP -> * NP PP + |[---------> . . .| [0:1] S -> NP * VP + |[---------> . . .| [0:1] NP -> NP * PP + |. > . . .| [1:1] Verb -> * 'saw' + |. [---------] . .| [1:2] Verb -> 'saw' * + |. > . . .| [1:1] VP -> * Verb NP + |. > . . .| [1:1] VP -> * Verb + |. [---------> . .| [1:2] VP -> Verb * NP + |. [---------] . .| [1:2] VP -> Verb * + |. > . . .| [1:1] VP -> * VP PP + |[-------------------] . .| [0:2] S -> NP VP * + |. [---------> . .| [1:2] VP -> VP * PP + |. . > . .| [2:2] Det -> * 'a' + |. . [---------] .| [2:3] Det -> 'a' * + |. . > . .| [2:2] NP -> * Det Noun + |. . [---------> .| [2:3] NP -> Det * Noun + |. . . > .| [3:3] Noun -> * 'dog' + |. . . [---------]| [3:4] Noun -> 'dog' * + |. . [-------------------]| [2:4] NP -> Det Noun * + |. . > . .| [2:2] S -> * NP VP + |. . > . .| [2:2] NP -> * NP PP + |. [-----------------------------]| [1:4] VP -> Verb NP * + |. . [------------------->| [2:4] S -> NP * VP + |. . [------------------->| [2:4] NP -> NP * PP + |[=======================================]| [0:4] S -> NP VP * + |. [----------------------------->| [1:4] VP -> VP * PP + Nr edges in chart: 33 + (S (NP I) (VP (Verb saw) (NP (Det a) (Noun dog)))) + + +Then we test the different parsing Strategies. +Note that the number of edges differ between the strategies. + +Top-down + + >>> nltk.parse.chart.demo(1, print_times=False, trace=0, + ... 
sent='I saw John with a dog', numparses=2) + * Sentence: + I saw John with a dog + ['I', 'saw', 'John', 'with', 'a', 'dog'] + + * Strategy: Top-down + + Nr edges in chart: 48 + (S + (NP I) + (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog)))))) + (S + (NP I) + (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog))))) + + +Bottom-up + + >>> nltk.parse.chart.demo(2, print_times=False, trace=0, + ... sent='I saw John with a dog', numparses=2) + * Sentence: + I saw John with a dog + ['I', 'saw', 'John', 'with', 'a', 'dog'] + + * Strategy: Bottom-up + + Nr edges in chart: 53 + (S + (NP I) + (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog))))) + (S + (NP I) + (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog)))))) + + +Bottom-up Left-Corner + + >>> nltk.parse.chart.demo(3, print_times=False, trace=0, + ... sent='I saw John with a dog', numparses=2) + * Sentence: + I saw John with a dog + ['I', 'saw', 'John', 'with', 'a', 'dog'] + + * Strategy: Bottom-up left-corner + + Nr edges in chart: 36 + (S + (NP I) + (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog))))) + (S + (NP I) + (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog)))))) + + +Left-Corner with Bottom-Up Filter + + >>> nltk.parse.chart.demo(4, print_times=False, trace=0, + ... sent='I saw John with a dog', numparses=2) + * Sentence: + I saw John with a dog + ['I', 'saw', 'John', 'with', 'a', 'dog'] + + * Strategy: Filtered left-corner + + Nr edges in chart: 28 + (S + (NP I) + (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog))))) + (S + (NP I) + (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog)))))) + + +The stepping chart parser + + >>> nltk.parse.chart.demo(5, print_times=False, trace=1, + ... sent='I saw John with a dog', numparses=2) + * Sentence: + I saw John with a dog + ['I', 'saw', 'John', 'with', 'a', 'dog'] + + * Strategy: Stepping (top-down vs bottom-up) + + *** SWITCH TO TOP DOWN + |[------] . . . . .| [0:1] 'I' + |. [------] . . . .| [1:2] 'saw' + |. . [------] . . .| [2:3] 'John' + |. . . [------] . .| [3:4] 'with' + |. . . . [------] .| [4:5] 'a' + |. . . . . [------]| [5:6] 'dog' + |> . . . . . .| [0:0] S -> * NP VP + |> . . . . . .| [0:0] NP -> * NP PP + |> . . . . . .| [0:0] NP -> * Det Noun + |> . . . . . .| [0:0] NP -> * 'I' + |[------] . . . . .| [0:1] NP -> 'I' * + |[------> . . . . .| [0:1] S -> NP * VP + |[------> . . . . .| [0:1] NP -> NP * PP + |. > . . . . .| [1:1] VP -> * VP PP + |. > . . . . .| [1:1] VP -> * Verb NP + |. > . . . . .| [1:1] VP -> * Verb + |. > . . . . .| [1:1] Verb -> * 'saw' + |. [------] . . . .| [1:2] Verb -> 'saw' * + |. [------> . . . .| [1:2] VP -> Verb * NP + |. [------] . . . .| [1:2] VP -> Verb * + |[-------------] . . . .| [0:2] S -> NP VP * + |. [------> . . . .| [1:2] VP -> VP * PP + *** SWITCH TO BOTTOM UP + |. . > . . . .| [2:2] NP -> * 'John' + |. . . > . . .| [3:3] PP -> * 'with' NP + |. . . > . . .| [3:3] Prep -> * 'with' + |. . . . > . .| [4:4] Det -> * 'a' + |. . . . . > .| [5:5] Noun -> * 'dog' + |. . [------] . . .| [2:3] NP -> 'John' * + |. . . [------> . .| [3:4] PP -> 'with' * NP + |. . . [------] . .| [3:4] Prep -> 'with' * + |. . . . [------] .| [4:5] Det -> 'a' * + |. . . . . [------]| [5:6] Noun -> 'dog' * + |. [-------------] . . .| [1:3] VP -> Verb NP * + |[--------------------] . . .| [0:3] S -> NP VP * + |. [-------------> . . .| [1:3] VP -> VP * PP + |. . > . . . .| [2:2] S -> * NP VP + |. . > . . . .| [2:2] NP -> * NP PP + |. . . . > . .| [4:4] NP -> * Det Noun + |. . 
[------> . . .| [2:3] S -> NP * VP + |. . [------> . . .| [2:3] NP -> NP * PP + |. . . . [------> .| [4:5] NP -> Det * Noun + |. . . . [-------------]| [4:6] NP -> Det Noun * + |. . . [--------------------]| [3:6] PP -> 'with' NP * + |. [----------------------------------]| [1:6] VP -> VP PP * + *** SWITCH TO TOP DOWN + |. . > . . . .| [2:2] NP -> * Det Noun + |. . . . > . .| [4:4] NP -> * NP PP + |. . . > . . .| [3:3] VP -> * VP PP + |. . . > . . .| [3:3] VP -> * Verb NP + |. . . > . . .| [3:3] VP -> * Verb + |[=========================================]| [0:6] S -> NP VP * + |. [---------------------------------->| [1:6] VP -> VP * PP + |. . [---------------------------]| [2:6] NP -> NP PP * + |. . . . [------------->| [4:6] NP -> NP * PP + |. [----------------------------------]| [1:6] VP -> Verb NP * + |. . [--------------------------->| [2:6] S -> NP * VP + |. . [--------------------------->| [2:6] NP -> NP * PP + |[=========================================]| [0:6] S -> NP VP * + |. [---------------------------------->| [1:6] VP -> VP * PP + |. . . . . . >| [6:6] VP -> * VP PP + |. . . . . . >| [6:6] VP -> * Verb NP + |. . . . . . >| [6:6] VP -> * Verb + *** SWITCH TO BOTTOM UP + |. . . . > . .| [4:4] S -> * NP VP + |. . . . [------------->| [4:6] S -> NP * VP + *** SWITCH TO TOP DOWN + *** SWITCH TO BOTTOM UP + *** SWITCH TO TOP DOWN + *** SWITCH TO BOTTOM UP + *** SWITCH TO TOP DOWN + *** SWITCH TO BOTTOM UP + Nr edges in chart: 61 + (S + (NP I) + (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog))))) + (S + (NP I) + (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog)))))) + + + +Unit tests for the Incremental Chart Parser class +------------------------------------------------- + +The incremental chart parsers are defined in earleychart.py. +We use the demo() function for testing. We must turn off showing of times. + + >>> import nltk + +Earley Chart Parser + + >>> nltk.parse.earleychart.demo(print_times=False, trace=1, + ... sent='I saw John with a dog', numparses=2) + * Sentence: + I saw John with a dog + ['I', 'saw', 'John', 'with', 'a', 'dog'] + + |. I . saw . John . with . a . dog .| + |[------] . . . . .| [0:1] 'I' + |. [------] . . . .| [1:2] 'saw' + |. . [------] . . .| [2:3] 'John' + |. . . [------] . .| [3:4] 'with' + |. . . . [------] .| [4:5] 'a' + |. . . . . [------]| [5:6] 'dog' + |> . . . . . .| [0:0] S -> * NP VP + |> . . . . . .| [0:0] NP -> * NP PP + |> . . . . . .| [0:0] NP -> * Det Noun + |> . . . . . .| [0:0] NP -> * 'I' + |[------] . . . . .| [0:1] NP -> 'I' * + |[------> . . . . .| [0:1] S -> NP * VP + |[------> . . . . .| [0:1] NP -> NP * PP + |. > . . . . .| [1:1] VP -> * VP PP + |. > . . . . .| [1:1] VP -> * Verb NP + |. > . . . . .| [1:1] VP -> * Verb + |. > . . . . .| [1:1] Verb -> * 'saw' + |. [------] . . . .| [1:2] Verb -> 'saw' * + |. [------> . . . .| [1:2] VP -> Verb * NP + |. [------] . . . .| [1:2] VP -> Verb * + |[-------------] . . . .| [0:2] S -> NP VP * + |. [------> . . . .| [1:2] VP -> VP * PP + |. . > . . . .| [2:2] NP -> * NP PP + |. . > . . . .| [2:2] NP -> * Det Noun + |. . > . . . .| [2:2] NP -> * 'John' + |. . [------] . . .| [2:3] NP -> 'John' * + |. [-------------] . . .| [1:3] VP -> Verb NP * + |. . [------> . . .| [2:3] NP -> NP * PP + |. . . > . . .| [3:3] PP -> * 'with' NP + |[--------------------] . . .| [0:3] S -> NP VP * + |. [-------------> . . .| [1:3] VP -> VP * PP + |. . . [------> . .| [3:4] PP -> 'with' * NP + |. . . . > . .| [4:4] NP -> * NP PP + |. . . . > . .| [4:4] NP -> * Det Noun + |. . . . > . 
.| [4:4] Det -> * 'a' + |. . . . [------] .| [4:5] Det -> 'a' * + |. . . . [------> .| [4:5] NP -> Det * Noun + |. . . . . > .| [5:5] Noun -> * 'dog' + |. . . . . [------]| [5:6] Noun -> 'dog' * + |. . . . [-------------]| [4:6] NP -> Det Noun * + |. . . [--------------------]| [3:6] PP -> 'with' NP * + |. . . . [------------->| [4:6] NP -> NP * PP + |. . [---------------------------]| [2:6] NP -> NP PP * + |. [----------------------------------]| [1:6] VP -> VP PP * + |[=========================================]| [0:6] S -> NP VP * + |. [---------------------------------->| [1:6] VP -> VP * PP + |. [----------------------------------]| [1:6] VP -> Verb NP * + |. . [--------------------------->| [2:6] NP -> NP * PP + |[=========================================]| [0:6] S -> NP VP * + |. [---------------------------------->| [1:6] VP -> VP * PP + (S + (NP I) + (VP (VP (Verb saw) (NP John)) (PP with (NP (Det a) (Noun dog))))) + (S + (NP I) + (VP (Verb saw) (NP (NP John) (PP with (NP (Det a) (Noun dog)))))) + + +Unit tests for LARGE context-free grammars +------------------------------------------ + +Reading the ATIS grammar. + + >>> grammar = nltk.data.load('grammars/large_grammars/atis.cfg') + >>> grammar + + +Reading the test sentences. + + >>> sentences = nltk.data.load('grammars/large_grammars/atis_sentences.txt') + >>> sentences = nltk.parse.util.extract_test_sentences(sentences) + >>> len(sentences) + 98 + >>> testsentence = sentences[22] + >>> testsentence[0] + ['show', 'me', 'northwest', 'flights', 'to', 'detroit', '.'] + >>> testsentence[1] + 17 + >>> sentence = testsentence[0] + +Now we test all different parsing strategies. +Note that the number of edges differ between the strategies. + +Bottom-up parsing. + + >>> parser = nltk.parse.BottomUpChartParser(grammar) + >>> chart = parser.chart_parse(sentence) + >>> print((chart.num_edges())) + 7661 + >>> print((len(list(chart.parses(grammar.start()))))) + 17 + +Bottom-up Left-corner parsing. + + >>> parser = nltk.parse.BottomUpLeftCornerChartParser(grammar) + >>> chart = parser.chart_parse(sentence) + >>> print((chart.num_edges())) + 4986 + >>> print((len(list(chart.parses(grammar.start()))))) + 17 + +Left-corner parsing with bottom-up filter. + + >>> parser = nltk.parse.LeftCornerChartParser(grammar) + >>> chart = parser.chart_parse(sentence) + >>> print((chart.num_edges())) + 1342 + >>> print((len(list(chart.parses(grammar.start()))))) + 17 + +Top-down parsing. + + >>> parser = nltk.parse.TopDownChartParser(grammar) + >>> chart = parser.chart_parse(sentence) + >>> print((chart.num_edges())) + 28352 + >>> print((len(list(chart.parses(grammar.start()))))) + 17 + +Incremental Bottom-up parsing. + + >>> parser = nltk.parse.IncrementalBottomUpChartParser(grammar) + >>> chart = parser.chart_parse(sentence) + >>> print((chart.num_edges())) + 7661 + >>> print((len(list(chart.parses(grammar.start()))))) + 17 + +Incremental Bottom-up Left-corner parsing. + + >>> parser = nltk.parse.IncrementalBottomUpLeftCornerChartParser(grammar) + >>> chart = parser.chart_parse(sentence) + >>> print((chart.num_edges())) + 4986 + >>> print((len(list(chart.parses(grammar.start()))))) + 17 + +Incremental Left-corner parsing with bottom-up filter. + + >>> parser = nltk.parse.IncrementalLeftCornerChartParser(grammar) + >>> chart = parser.chart_parse(sentence) + >>> print((chart.num_edges())) + 1342 + >>> print((len(list(chart.parses(grammar.start()))))) + 17 + +Incremental Top-down parsing. 
+ + >>> parser = nltk.parse.IncrementalTopDownChartParser(grammar) + >>> chart = parser.chart_parse(sentence) + >>> print((chart.num_edges())) + 28352 + >>> print((len(list(chart.parses(grammar.start()))))) + 17 + +Earley parsing. This is similar to the incremental top-down algorithm. + + >>> parser = nltk.parse.EarleyChartParser(grammar) + >>> chart = parser.chart_parse(sentence) + >>> print((chart.num_edges())) + 28352 + >>> print((len(list(chart.parses(grammar.start()))))) + 17 + + +Unit tests for the Probabilistic CFG class +------------------------------------------ + + >>> from nltk.corpus import treebank + >>> from itertools import islice + >>> from nltk.grammar import PCFG, induce_pcfg + >>> toy_pcfg1 = PCFG.fromstring(""" + ... S -> NP VP [1.0] + ... NP -> Det N [0.5] | NP PP [0.25] | 'John' [0.1] | 'I' [0.15] + ... Det -> 'the' [0.8] | 'my' [0.2] + ... N -> 'man' [0.5] | 'telescope' [0.5] + ... VP -> VP PP [0.1] | V NP [0.7] | V [0.2] + ... V -> 'ate' [0.35] | 'saw' [0.65] + ... PP -> P NP [1.0] + ... P -> 'with' [0.61] | 'under' [0.39] + ... """) + + >>> toy_pcfg2 = PCFG.fromstring(""" + ... S -> NP VP [1.0] + ... VP -> V NP [.59] + ... VP -> V [.40] + ... VP -> VP PP [.01] + ... NP -> Det N [.41] + ... NP -> Name [.28] + ... NP -> NP PP [.31] + ... PP -> P NP [1.0] + ... V -> 'saw' [.21] + ... V -> 'ate' [.51] + ... V -> 'ran' [.28] + ... N -> 'boy' [.11] + ... N -> 'cookie' [.12] + ... N -> 'table' [.13] + ... N -> 'telescope' [.14] + ... N -> 'hill' [.5] + ... Name -> 'Jack' [.52] + ... Name -> 'Bob' [.48] + ... P -> 'with' [.61] + ... P -> 'under' [.39] + ... Det -> 'the' [.41] + ... Det -> 'a' [.31] + ... Det -> 'my' [.28] + ... """) + +Create a set of PCFG productions. + + >>> grammar = PCFG.fromstring(""" + ... A -> B B [.3] | C B C [.7] + ... B -> B D [.5] | C [.5] + ... C -> 'a' [.1] | 'b' [0.9] + ... D -> 'b' [1.0] + ... """) + >>> prod = grammar.productions()[0] + >>> prod + A -> B B [0.3] + + >>> prod.lhs() + A + + >>> prod.rhs() + (B, B) + + >>> print((prod.prob())) + 0.3 + + >>> grammar.start() + A + + >>> grammar.productions() + [A -> B B [0.3], A -> C B C [0.7], B -> B D [0.5], B -> C [0.5], C -> 'a' [0.1], C -> 'b' [0.9], D -> 'b' [1.0]] + +Induce some productions using parsed Treebank data. + + >>> productions = [] + >>> for fileid in treebank.fileids()[:2]: + ... for t in treebank.parsed_sents(fileid): + ... 
productions += t.productions() + + >>> grammar = induce_pcfg(S, productions) + >>> grammar + + + >>> sorted(grammar.productions(lhs=Nonterminal('PP')))[:2] + [PP -> IN NP [1.0]] + >>> sorted(grammar.productions(lhs=Nonterminal('NNP')))[:2] + [NNP -> 'Agnew' [0.0714286], NNP -> 'Consolidated' [0.0714286]] + >>> sorted(grammar.productions(lhs=Nonterminal('JJ')))[:2] + [JJ -> 'British' [0.142857], JJ -> 'former' [0.142857]] + >>> sorted(grammar.productions(lhs=Nonterminal('NP')))[:2] + [NP -> CD NNS [0.133333], NP -> DT JJ JJ NN [0.0666667]] + +Unit tests for the Probabilistic Chart Parse classes +---------------------------------------------------- + + >>> tokens = "Jack saw Bob with my cookie".split() + >>> grammar = toy_pcfg2 + >>> print(grammar) + Grammar with 23 productions (start state = S) + S -> NP VP [1.0] + VP -> V NP [0.59] + VP -> V [0.4] + VP -> VP PP [0.01] + NP -> Det N [0.41] + NP -> Name [0.28] + NP -> NP PP [0.31] + PP -> P NP [1.0] + V -> 'saw' [0.21] + V -> 'ate' [0.51] + V -> 'ran' [0.28] + N -> 'boy' [0.11] + N -> 'cookie' [0.12] + N -> 'table' [0.13] + N -> 'telescope' [0.14] + N -> 'hill' [0.5] + Name -> 'Jack' [0.52] + Name -> 'Bob' [0.48] + P -> 'with' [0.61] + P -> 'under' [0.39] + Det -> 'the' [0.41] + Det -> 'a' [0.31] + Det -> 'my' [0.28] + +Create several parsers using different queuing strategies and show the +resulting parses. + + >>> from nltk.parse import pchart + + >>> parser = pchart.InsideChartParser(grammar) + >>> for t in parser.parse(tokens): + ... print(t) + (S + (NP (Name Jack)) + (VP + (V saw) + (NP + (NP (Name Bob)) + (PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06) + (S + (NP (Name Jack)) + (VP + (VP (V saw) (NP (Name Bob))) + (PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07) + + >>> parser = pchart.RandomChartParser(grammar) + >>> for t in parser.parse(tokens): + ... print(t) + (S + (NP (Name Jack)) + (VP + (V saw) + (NP + (NP (Name Bob)) + (PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06) + (S + (NP (Name Jack)) + (VP + (VP (V saw) (NP (Name Bob))) + (PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07) + + >>> parser = pchart.UnsortedChartParser(grammar) + >>> for t in parser.parse(tokens): + ... print(t) + (S + (NP (Name Jack)) + (VP + (V saw) + (NP + (NP (Name Bob)) + (PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06) + (S + (NP (Name Jack)) + (VP + (VP (V saw) (NP (Name Bob))) + (PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07) + + >>> parser = pchart.LongestChartParser(grammar) + >>> for t in parser.parse(tokens): + ... print(t) + (S + (NP (Name Jack)) + (VP + (V saw) + (NP + (NP (Name Bob)) + (PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06) + (S + (NP (Name Jack)) + (VP + (VP (V saw) (NP (Name Bob))) + (PP (P with) (NP (Det my) (N cookie))))) (p=2.03744e-07) + + >>> parser = pchart.InsideChartParser(grammar, beam_size = len(tokens)+1) + >>> for t in parser.parse(tokens): + ... print(t) + + +Unit tests for the Viterbi Parse classes +---------------------------------------- + + >>> from nltk.parse import ViterbiParser + >>> tokens = "Jack saw Bob with my cookie".split() + >>> grammar = toy_pcfg2 + +Parse the tokenized sentence. + + >>> parser = ViterbiParser(grammar) + >>> for t in parser.parse(tokens): + ... 
print(t) + (S + (NP (Name Jack)) + (VP + (V saw) + (NP + (NP (Name Bob)) + (PP (P with) (NP (Det my) (N cookie)))))) (p=6.31607e-06) + + +Unit tests for the FeatStructNonterminal class +---------------------------------------------- + + >>> from nltk.grammar import FeatStructNonterminal + >>> FeatStructNonterminal( + ... pos='n', agr=FeatStructNonterminal(number='pl', gender='f')) + [agr=[gender='f', number='pl'], pos='n'] + + >>> FeatStructNonterminal('VP[+fin]/NP[+pl]') + VP[+fin]/NP[+pl] + + +Tracing the Feature Chart Parser +-------------------------------- + +We use the featurechart.demo() function for tracing the Feature Chart Parser. + + >>> nltk.parse.featurechart.demo(print_times=False, + ... print_grammar=True, + ... parser=nltk.parse.featurechart.FeatureChartParser, + ... sent='I saw John with a dog') + + Grammar with 18 productions (start state = S[]) + S[] -> NP[] VP[] + PP[] -> Prep[] NP[] + NP[] -> NP[] PP[] + VP[] -> VP[] PP[] + VP[] -> Verb[] NP[] + VP[] -> Verb[] + NP[] -> Det[pl=?x] Noun[pl=?x] + NP[] -> 'John' + NP[] -> 'I' + Det[] -> 'the' + Det[] -> 'my' + Det[-pl] -> 'a' + Noun[-pl] -> 'dog' + Noun[-pl] -> 'cookie' + Verb[] -> 'ate' + Verb[] -> 'saw' + Prep[] -> 'with' + Prep[] -> 'under' + + * FeatureChartParser + Sentence: I saw John with a dog + |.I.s.J.w.a.d.| + |[-] . . . . .| [0:1] 'I' + |. [-] . . . .| [1:2] 'saw' + |. . [-] . . .| [2:3] 'John' + |. . . [-] . .| [3:4] 'with' + |. . . . [-] .| [4:5] 'a' + |. . . . . [-]| [5:6] 'dog' + |[-] . . . . .| [0:1] NP[] -> 'I' * + |[-> . . . . .| [0:1] S[] -> NP[] * VP[] {} + |[-> . . . . .| [0:1] NP[] -> NP[] * PP[] {} + |. [-] . . . .| [1:2] Verb[] -> 'saw' * + |. [-> . . . .| [1:2] VP[] -> Verb[] * NP[] {} + |. [-] . . . .| [1:2] VP[] -> Verb[] * + |. [-> . . . .| [1:2] VP[] -> VP[] * PP[] {} + |[---] . . . .| [0:2] S[] -> NP[] VP[] * + |. . [-] . . .| [2:3] NP[] -> 'John' * + |. . [-> . . .| [2:3] S[] -> NP[] * VP[] {} + |. . [-> . . .| [2:3] NP[] -> NP[] * PP[] {} + |. [---] . . .| [1:3] VP[] -> Verb[] NP[] * + |. [---> . . .| [1:3] VP[] -> VP[] * PP[] {} + |[-----] . . .| [0:3] S[] -> NP[] VP[] * + |. . . [-] . .| [3:4] Prep[] -> 'with' * + |. . . [-> . .| [3:4] PP[] -> Prep[] * NP[] {} + |. . . . [-] .| [4:5] Det[-pl] -> 'a' * + |. . . . [-> .| [4:5] NP[] -> Det[pl=?x] * Noun[pl=?x] {?x: False} + |. . . . . [-]| [5:6] Noun[-pl] -> 'dog' * + |. . . . [---]| [4:6] NP[] -> Det[-pl] Noun[-pl] * + |. . . . [--->| [4:6] S[] -> NP[] * VP[] {} + |. . . . [--->| [4:6] NP[] -> NP[] * PP[] {} + |. . . [-----]| [3:6] PP[] -> Prep[] NP[] * + |. . [-------]| [2:6] NP[] -> NP[] PP[] * + |. [---------]| [1:6] VP[] -> VP[] PP[] * + |. [--------->| [1:6] VP[] -> VP[] * PP[] {} + |[===========]| [0:6] S[] -> NP[] VP[] * + |. . [------->| [2:6] S[] -> NP[] * VP[] {} + |. . [------->| [2:6] NP[] -> NP[] * PP[] {} + |. [---------]| [1:6] VP[] -> Verb[] NP[] * + |. [--------->| [1:6] VP[] -> VP[] * PP[] {} + |[===========]| [0:6] S[] -> NP[] VP[] * + (S[] + (NP[] I) + (VP[] + (VP[] (Verb[] saw) (NP[] John)) + (PP[] (Prep[] with) (NP[] (Det[-pl] a) (Noun[-pl] dog))))) + (S[] + (NP[] I) + (VP[] + (Verb[] saw) + (NP[] + (NP[] John) + (PP[] (Prep[] with) (NP[] (Det[-pl] a) (Noun[-pl] dog)))))) + + +Unit tests for the Feature Chart Parser classes +----------------------------------------------- + +The list of parsers we want to test. + + >>> parsers = [nltk.parse.featurechart.FeatureChartParser, + ... nltk.parse.featurechart.FeatureTopDownChartParser, + ... nltk.parse.featurechart.FeatureBottomUpChartParser, + ... 
nltk.parse.featurechart.FeatureBottomUpLeftCornerChartParser, + ... nltk.parse.earleychart.FeatureIncrementalChartParser, + ... nltk.parse.earleychart.FeatureEarleyChartParser, + ... nltk.parse.earleychart.FeatureIncrementalTopDownChartParser, + ... nltk.parse.earleychart.FeatureIncrementalBottomUpChartParser, + ... nltk.parse.earleychart.FeatureIncrementalBottomUpLeftCornerChartParser, + ... ] + +A helper function that tests each parser on the given grammar and sentence. +We check that the number of trees are correct, and that all parsers +return the same trees. Otherwise an error is printed. + + >>> def unittest(grammar, sentence, nr_trees): + ... sentence = sentence.split() + ... trees = None + ... for P in parsers: + ... result = P(grammar).parse(sentence) + ... result = set(tree.freeze() for tree in result) + ... if len(result) != nr_trees: + ... print("Wrong nr of trees:", len(result)) + ... elif trees is None: + ... trees = result + ... elif result != trees: + ... print("Trees differ for parser:", P.__name__) + +The demo grammar from before, with an ambiguous sentence. + + >>> isawjohn = nltk.parse.featurechart.demo_grammar() + >>> unittest(isawjohn, "I saw John with a dog with my cookie", 5) + +This grammar tests that variables in different grammar rules are renamed +before unification. (The problematic variable is in this case ?X). + + >>> whatwasthat = nltk.grammar.FeatureGrammar.fromstring(''' + ... S[] -> NP[num=?N] VP[num=?N, slash=?X] + ... NP[num=?X] -> "what" + ... NP[num=?X] -> "that" + ... VP[num=?P, slash=none] -> V[num=?P] NP[] + ... V[num=sg] -> "was" + ... ''') + >>> unittest(whatwasthat, "what was that", 1) + +This grammar tests that the same rule can be used in different places +in another rule, and that the variables are properly renamed. + + >>> thislovesthat = nltk.grammar.FeatureGrammar.fromstring(''' + ... S[] -> NP[case=nom] V[] NP[case=acc] + ... NP[case=?X] -> Pron[case=?X] + ... Pron[] -> "this" + ... Pron[] -> "that" + ... V[] -> "loves" + ... ''') + >>> unittest(thislovesthat, "this loves that", 1) + + +Tests for loading feature grammar files +--------------------------------------- + +Alternative 1: first load the grammar, then create the parser. + + >>> fcfg = nltk.data.load('grammars/book_grammars/feat0.fcfg') + >>> fcp1 = nltk.parse.FeatureChartParser(fcfg) + >>> print((type(fcp1))) + + +Alternative 2: directly load the parser. + + >>> fcp2 = nltk.parse.load_parser('grammars/book_grammars/feat0.fcfg') + >>> print((type(fcp2))) + diff --git a/lib/python3.10/site-packages/nltk/test/portuguese_en.doctest b/lib/python3.10/site-packages/nltk/test/portuguese_en.doctest new file mode 100644 index 0000000000000000000000000000000000000000..aacaf1d16d375c318ab38c961e8c1094f81a1284 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/portuguese_en.doctest @@ -0,0 +1,568 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +================================== +Examples for Portuguese Processing +================================== + +This HOWTO contains a variety of examples relating to the Portuguese language. +It is intended to be read in conjunction with the NLTK book +(``https://www.nltk.org/book/``). For instructions on running the Python +interpreter, please see the section *Getting Started with Python*, in Chapter 1. 
+ +-------------------------------------------- +Python Programming, with Portuguese Examples +-------------------------------------------- + +Chapter 1 of the NLTK book contains many elementary programming examples, all +with English texts. In this section, we'll see some corresponding examples +using Portuguese. Please refer to the chapter for full discussion. *Vamos!* + + >>> from nltk.test.portuguese_en_fixt import setup_module + >>> setup_module() + + >>> from nltk.examples.pt import * + *** Introductory Examples for the NLTK Book *** + Loading ptext1, ... and psent1, ... + Type the name of the text or sentence to view it. + Type: 'texts()' or 'sents()' to list the materials. + ptext1: Memórias Póstumas de Brás Cubas (1881) + ptext2: Dom Casmurro (1899) + ptext3: Gênesis + ptext4: Folha de Sao Paulo (1994) + + +Any time we want to find out about these texts, we just have +to enter their names at the Python prompt: + + >>> ptext2 + + +Searching Text +-------------- + +A concordance permits us to see words in context. + + >>> ptext1.concordance('olhos') + Building index... + Displaying 25 of 138 matches: + De pé , à cabeceira da cama , com os olhos estúpidos , a boca entreaberta , a t + orelhas . Pela minha parte fechei os olhos e deixei - me ir à ventura . Já agor + xões de cérebro enfermo . Como ia de olhos fechados , não via o caminho ; lembr + gelos eternos . Com efeito , abri os olhos e vi que o meu animal galopava numa + me apareceu então , fitando - me uns olhos rutilantes como o sol . Tudo nessa f + mim mesmo . Então , encarei - a com olhos súplices , e pedi mais alguns anos . + ... + +For a given word, we can find words with a similar text distribution: + + >>> ptext1.similar('chegar') + Building word-context index... + acabada acudir aludir avistar bramanismo casamento cheguei com contar + contrário corpo dali deixei desferirem dizer fazer filhos já leitor lhe + >>> ptext3.similar('chegar') + Building word-context index... + achar alumiar arrombar destruir governar guardar ir lavrar passar que + toda tomar ver vir + +We can search for the statistically significant collocations in a text: + + >>> ptext1.collocations() + Building collocations list + Quincas Borba; Lobo Neves; alguma coisa; Brás Cubas; meu pai; dia + seguinte; não sei; Meu pai; alguns instantes; outra vez; outra coisa; + por exemplo; mim mesmo; coisa nenhuma; mesma coisa; não era; dias + depois; Passeio Público; olhar para; das coisas + +We can search for words in context, with the help of *regular expressions*, e.g.: + + >>> ptext1.findall(" (<.*>)") + estúpidos; e; fechados; rutilantes; súplices; a; do; babavam; + na; moles; se; da; umas; espraiavam; chamejantes; espetados; + ... + +We can automatically generate random text based on a given text, e.g.: + + >>> ptext3.generate() # doctest: +SKIP + No princípio , criou Deus os abençoou , dizendo : Onde { estão } e até + à ave dos céus , { que } será . Disse mais Abrão : Dá - me a mulher + que tomaste ; porque daquele poço Eseque , { tinha .} E disse : Não + poderemos descer ; mas , do campo ainda não estava na casa do teu + pescoço . E viveu Serugue , depois Simeão e Levi { são } estes ? E o + varão , porque habitava na terra de Node , da mão de Esaú : Jeús , + Jalão e Corá + +Texts as List of Words +---------------------- + +A few sentences have been defined for you. 
+ + >>> psent1 + ['o', 'amor', 'da', 'gl\xf3ria', 'era', 'a', 'coisa', 'mais', + 'verdadeiramente', 'humana', 'que', 'h\xe1', 'no', 'homem', ',', + 'e', ',', 'conseq\xfcentemente', ',', 'a', 'sua', 'mais', + 'genu\xedna', 'fei\xe7\xe3o', '.'] + >>> + +Notice that the sentence has been *tokenized*. Each token is +represented as a string, represented using quotes, e.g. ``'coisa'``. +Some strings contain special characters, e.g. ``\xf3``, +the internal representation for ó. +The tokens are combined in the form of a *list*. How long is this list? + + >>> len(psent1) + 25 + >>> + +What is the vocabulary of this sentence? + + >>> sorted(set(psent1)) + [',', '.', 'a', 'amor', 'coisa', 'conseqüentemente', 'da', 'e', 'era', + 'feição', 'genuína', 'glória', 'homem', 'humana', 'há', 'mais', 'no', + 'o', 'que', 'sua', 'verdadeiramente'] + >>> + +Let's iterate over each item in ``psent2``, and print information for each: + + >>> for w in psent2: + ... print(w, len(w), w[-1]) + ... + Não 3 o + consultes 9 s + dicionários 11 s + . 1 . + +Observe how we make a human-readable version of a string, using ``decode()``. +Also notice that we accessed the last character of a string ``w`` using ``w[-1]``. + +We just saw a ``for`` loop above. Another useful control structure is a +*list comprehension*. + + >>> [w.upper() for w in psent2] + ['N\xc3O', 'CONSULTES', 'DICION\xc1RIOS', '.'] + >>> [w for w in psent1 if w.endswith('a')] + ['da', 'gl\xf3ria', 'era', 'a', 'coisa', 'humana', 'a', 'sua', 'genu\xedna'] + >>> [w for w in ptext4 if len(w) > 15] + ['norte-irlandeses', 'pan-nacionalismo', 'predominatemente', 'primeiro-ministro', + 'primeiro-ministro', 'irlandesa-americana', 'responsabilidades', 'significativamente'] + +We can examine the relative frequency of words in a text, using ``FreqDist``: + + >>> fd1 = FreqDist(ptext1) + >>> fd1 + + >>> fd1['olhos'] + 137 + >>> fd1.max() + ',' + >>> fd1.samples()[:100] + [',', '.', 'a', 'que', 'de', 'e', '-', 'o', ';', 'me', 'um', 'n\xe3o', + '\x97', 'se', 'do', 'da', 'uma', 'com', 'os', '\xe9', 'era', 'as', 'eu', + 'lhe', 'ao', 'em', 'para', 'mas', '...', '!', '\xe0', 'na', 'mais', '?', + 'no', 'como', 'por', 'N\xe3o', 'dos', 'o', 'ele', ':', 'Virg\xedlia', + 'me', 'disse', 'minha', 'das', 'O', '/', 'A', 'CAP\xcdTULO', 'muito', + 'depois', 'coisa', 'foi', 'sem', 'olhos', 'ela', 'nos', 'tinha', 'nem', + 'E', 'outro', 'vida', 'nada', 'tempo', 'menos', 'outra', 'casa', 'homem', + 'porque', 'quando', 'mim', 'mesmo', 'ser', 'pouco', 'estava', 'dia', + 't\xe3o', 'tudo', 'Mas', 'at\xe9', 'D', 'ainda', 's\xf3', 'alguma', + 'la', 'vez', 'anos', 'h\xe1', 'Era', 'pai', 'esse', 'lo', 'dizer', 'assim', + 'ent\xe3o', 'dizia', 'aos', 'Borba'] + +--------------- +Reading Corpora +--------------- + +Accessing the Machado Text Corpus +--------------------------------- + +NLTK includes the complete works of Machado de Assis. + + >>> from nltk.corpus import machado + >>> machado.fileids() + ['contos/macn001.txt', 'contos/macn002.txt', 'contos/macn003.txt', ...] + +Each file corresponds to one of the works of Machado de Assis. To see a complete +list of works, you can look at the corpus README file: ``print machado.readme()``. +Let's access the text of the *Posthumous Memories of Brás Cubas*. + +We can access the text as a list of characters, and access 200 characters starting +from position 10,000. 
+ + >>> raw_text = machado.raw('romance/marm05.txt') + >>> raw_text[10000:10200] + u', primou no\nEstado, e foi um dos amigos particulares do vice-rei Conde + da Cunha.\n\nComo este apelido de Cubas lhe\ncheirasse excessivamente a + tanoaria, alegava meu pai, bisneto de Dami\xe3o, que o\ndito ape' + +However, this is not a very useful way to work with a text. We generally think +of a text as a sequence of words and punctuation, not characters: + + >>> text1 = machado.words('romance/marm05.txt') + >>> text1 + ['Romance', ',', 'Mem\xf3rias', 'P\xf3stumas', 'de', ...] + >>> len(text1) + 77098 + >>> len(set(text1)) + 10848 + +Here's a program that finds the most common ngrams that contain a +particular target word. + + >>> from nltk import ngrams, FreqDist + >>> target_word = 'olhos' + >>> fd = FreqDist(ng + ... for ng in ngrams(text1, 5) + ... if target_word in ng) + >>> for hit in fd.samples(): + ... print(' '.join(hit)) + ... + , com os olhos no + com os olhos no ar + com os olhos no chão + e todos com os olhos + me estar com os olhos + os olhos estúpidos , a + os olhos na costura , + os olhos no ar , + , com os olhos espetados + , com os olhos estúpidos + , com os olhos fitos + , com os olhos naquele + , com os olhos para + + +Accessing the MacMorpho Tagged Corpus +------------------------------------- + +NLTK includes the MAC-MORPHO Brazilian Portuguese POS-tagged news text, +with over a million words of +journalistic texts extracted from ten sections of +the daily newspaper *Folha de Sao Paulo*, 1994. + +We can access this corpus as a sequence of words or tagged words as follows: + + >>> import nltk.corpus + >>> nltk.corpus.mac_morpho.words() + ['Jersei', 'atinge', 'm\xe9dia', 'de', 'Cr$', '1,4', ...] + >>> nltk.corpus.mac_morpho.sents() + [['Jersei', 'atinge', 'm\xe9dia', 'de', 'Cr$', '1,4', 'milh\xe3o', + 'em', 'a', 'venda', 'de', 'a', 'Pinhal', 'em', 'S\xe3o', 'Paulo'], + ['Programe', 'sua', 'viagem', 'a', 'a', 'Exposi\xe7\xe3o', 'Nacional', + 'do', 'Zeb', ',', 'que', 'come\xe7a', 'dia', '25'], ...] + >>> nltk.corpus.mac_morpho.tagged_words() + [('Jersei', 'N'), ('atinge', 'V'), ('m\xe9dia', 'N'), ...] + +We can also access it in sentence chunks. + + >>> nltk.corpus.mac_morpho.tagged_sents() + [[('Jersei', 'N'), ('atinge', 'V'), ('m\xe9dia', 'N'), ('de', 'PREP'), + ('Cr$', 'CUR'), ('1,4', 'NUM'), ('milh\xe3o', 'N'), ('em', 'PREP|+'), + ('a', 'ART'), ('venda', 'N'), ('de', 'PREP|+'), ('a', 'ART'), + ('Pinhal', 'NPROP'), ('em', 'PREP'), ('S\xe3o', 'NPROP'), + ('Paulo', 'NPROP')], + [('Programe', 'V'), ('sua', 'PROADJ'), ('viagem', 'N'), ('a', 'PREP|+'), + ('a', 'ART'), ('Exposi\xe7\xe3o', 'NPROP'), ('Nacional', 'NPROP'), + ('do', 'NPROP'), ('Zeb', 'NPROP'), (',', ','), ('que', 'PRO-KS-REL'), + ('come\xe7a', 'V'), ('dia', 'N'), ('25', 'N|AP')], ...] + +This data can be used to train taggers (examples below for the Floresta treebank). + +Accessing the Floresta Portuguese Treebank +------------------------------------------ + +The NLTK data distribution includes the +"Floresta Sinta(c)tica Corpus" version 7.4, available from +``https://www.linguateca.pt/Floresta/``. + +We can access this corpus as a sequence of words or tagged words as follows: + + >>> from nltk.corpus import floresta + >>> floresta.words() + ['Um', 'revivalismo', 'refrescante', 'O', '7_e_Meio', ...] + >>> floresta.tagged_words() + [('Um', '>N+art'), ('revivalismo', 'H+n'), ...] + +The tags consist of some syntactic information, followed by a plus sign, +followed by a conventional part-of-speech tag. 
Let's strip off the material before +the plus sign: + + >>> def simplify_tag(t): + ... if "+" in t: + ... return t[t.index("+")+1:] + ... else: + ... return t + >>> twords = floresta.tagged_words() + >>> twords = [(w.lower(), simplify_tag(t)) for (w,t) in twords] + >>> twords[:10] + [('um', 'art'), ('revivalismo', 'n'), ('refrescante', 'adj'), ('o', 'art'), ('7_e_meio', 'prop'), + ('\xe9', 'v-fin'), ('um', 'art'), ('ex-libris', 'n'), ('de', 'prp'), ('a', 'art')] + +Pretty printing the tagged words: + + >>> print(' '.join(word + '/' + tag for (word, tag) in twords[:10])) + um/art revivalismo/n refrescante/adj o/art 7_e_meio/prop é/v-fin um/art ex-libris/n de/prp a/art + +Count the word tokens and types, and determine the most common word: + + >>> words = floresta.words() + >>> len(words) + 211852 + >>> fd = nltk.FreqDist(words) + >>> len(fd) + 29421 + >>> fd.max() + 'de' + +List the 20 most frequent tags, in order of decreasing frequency: + + >>> tags = [simplify_tag(tag) for (word,tag) in floresta.tagged_words()] + >>> fd = nltk.FreqDist(tags) + >>> fd.keys()[:20] + ['n', 'prp', 'art', 'v-fin', ',', 'prop', 'adj', 'adv', '.', + 'conj-c', 'v-inf', 'pron-det', 'v-pcp', 'num', 'pron-indp', + 'pron-pers', '\xab', '\xbb', 'conj-s', '}'] + +We can also access the corpus grouped by sentence: + + >>> floresta.sents() + [['Um', 'revivalismo', 'refrescante'], + ['O', '7_e_Meio', '\xe9', 'um', 'ex-libris', 'de', 'a', 'noite', + 'algarvia', '.'], ...] + >>> floresta.tagged_sents() + [[('Um', '>N+art'), ('revivalismo', 'H+n'), ('refrescante', 'N<+adj')], + [('O', '>N+art'), ('7_e_Meio', 'H+prop'), ('\xe9', 'P+v-fin'), + ('um', '>N+art'), ('ex-libris', 'H+n'), ('de', 'H+prp'), + ('a', '>N+art'), ('noite', 'H+n'), ('algarvia', 'N<+adj'), ('.', '.')], + ...] + >>> floresta.parsed_sents() + [Tree('UTT+np', [Tree('>N+art', ['Um']), Tree('H+n', ['revivalismo']), + Tree('N<+adj', ['refrescante'])]), + Tree('STA+fcl', + [Tree('SUBJ+np', [Tree('>N+art', ['O']), + Tree('H+prop', ['7_e_Meio'])]), + Tree('P+v-fin', ['\xe9']), + Tree('SC+np', + [Tree('>N+art', ['um']), + Tree('H+n', ['ex-libris']), + Tree('N<+pp', [Tree('H+prp', ['de']), + Tree('P<+np', [Tree('>N+art', ['a']), + Tree('H+n', ['noite']), + Tree('N<+adj', ['algarvia'])])])]), + Tree('.', ['.'])]), ...] + +To view a parse tree, use the ``draw()`` method, e.g.: + + >>> psents = floresta.parsed_sents() + >>> psents[5].draw() # doctest: +SKIP + +Character Encodings +------------------- + +Python understands the common character encoding used for Portuguese, ISO 8859-1 (ISO Latin 1). + + >>> import os, nltk.test + >>> testdir = os.path.split(nltk.test.__file__)[0] + >>> text = open(os.path.join(testdir, 'floresta.txt'), 'rb').read().decode('ISO 8859-1') + >>> text[:60] + 'O 7 e Meio \xe9 um ex-libris da noite algarvia.\n\xc9 uma das mais ' + >>> print(text[:60]) + O 7 e Meio é um ex-libris da noite algarvia. + É uma das mais + +For more information about character encodings and Python, please see section 3.3 of the book. + +---------------- +Processing Tasks +---------------- + + +Simple Concordancing +-------------------- + +Here's a function that takes a word and a specified amount of context (measured +in characters), and generates a concordance for that word. + + >>> def concordance(word, context=30): + ... for sent in floresta.sents(): + ... if word in sent: + ... pos = sent.index(word) + ... left = ' '.join(sent[:pos]) + ... right = ' '.join(sent[pos+1:]) + ... print('%*s %s %-*s' % + ... 
(context, left[-context:], word, context, right[:context])) + + >>> concordance("dar") # doctest: +SKIP + anduru , foi o suficiente para dar a volta a o resultado . + 1. O P?BLICO veio dar a a imprensa di?ria portuguesa + A fartura de pensamento pode dar maus resultados e n?s n?o quer + Come?a a dar resultados a pol?tica de a Uni + ial come?ar a incorporar- lo e dar forma a um ' site ' que tem se + r com Constantino para ele lhe dar tamb?m os pap?is assinados . + va a brincar , pois n?o lhe ia dar procura??o nenhuma enquanto n? + ?rica como o ant?doto capaz de dar sentido a o seu enorme poder . + . . . + >>> concordance("vender") # doctest: +SKIP + er recebido uma encomenda para vender 4000 blindados a o Iraque . + m?rico_Amorim caso conseguisse vender o lote de ac??es de o empres?r + mpre ter jovens simp?ticos a ? vender ? chega ! } + Disse que o governo vai vender ? desde autom?vel at? particip + ndiciou ontem duas pessoas por vender carro com ?gio . + A inten??o de Fleury ? vender as a??es para equilibrar as fi + +Part-of-Speech Tagging +---------------------- + +Let's begin by getting the tagged sentence data, and simplifying the tags +as described earlier. + + >>> from nltk.corpus import floresta + >>> tsents = floresta.tagged_sents() + >>> tsents = [[(w.lower(),simplify_tag(t)) for (w,t) in sent] for sent in tsents if sent] + >>> train = tsents[100:] + >>> test = tsents[:100] + +We already know that ``n`` is the most common tag, so we can set up a +default tagger that tags every word as a noun, and see how well it does: + + >>> tagger0 = nltk.DefaultTagger('n') + >>> nltk.tag.accuracy(tagger0, test) + 0.17697228144989338 + +Evidently, about one in every six words is a noun. Let's improve on this by +training a unigram tagger: + + >>> tagger1 = nltk.UnigramTagger(train, backoff=tagger0) + >>> nltk.tag.accuracy(tagger1, test) + 0.87029140014214645 + +Next a bigram tagger: + + >>> tagger2 = nltk.BigramTagger(train, backoff=tagger1) + >>> nltk.tag.accuracy(tagger2, test) + 0.89019189765458417 + + +Sentence Segmentation +--------------------- + +Punkt is a language-neutral sentence segmentation tool. We + + >>> sent_tokenizer=nltk.data.load('tokenizers/punkt/portuguese.pickle') + >>> raw_text = machado.raw('romance/marm05.txt') + >>> sentences = sent_tokenizer.tokenize(raw_text) + >>> for sent in sentences[1000:1005]: + ... print("<<", sent, ">>") + ... + << Em verdade, parecia ainda mais mulher do que era; + seria criança nos seus folgares de moça; mas assim quieta, impassível, tinha a + compostura da mulher casada. >> + << Talvez essa circunstância lhe diminuía um pouco da + graça virginal. >> + << Depressa nos familiarizamos; a mãe fazia-lhe grandes elogios, eu + escutava-os de boa sombra, e ela sorria com os olhos fúlgidos, como se lá dentro + do cérebro lhe estivesse a voar uma borboletinha de asas de ouro e olhos de + diamante... >> + << Digo lá dentro, porque cá fora o + que esvoaçou foi uma borboleta preta, que subitamente penetrou na varanda, e + começou a bater as asas em derredor de D. Eusébia. >> + << D. Eusébia deu um grito, + levantou-se, praguejou umas palavras soltas: - T'esconjuro!... >> + +The sentence tokenizer can be trained and evaluated on other text. +The source text (from the Floresta Portuguese Treebank) contains one sentence per line. +We read the text, split it into its lines, and then join these lines together using +spaces. Now the information about sentence breaks has been discarded. 
We split this +material into training and testing data: + + >>> import os, nltk.test + >>> testdir = os.path.split(nltk.test.__file__)[0] + >>> text = open(os.path.join(testdir, 'floresta.txt'), 'rb').read().decode('ISO-8859-1') + >>> lines = text.split('\n') + >>> train = ' '.join(lines[10:]) + >>> test = ' '.join(lines[:10]) + +Now we train the sentence segmenter (or sentence tokenizer) and use it on our test sentences: + + >>> stok = nltk.PunktSentenceTokenizer(train) + >>> print(stok.tokenize(test)) + ['O 7 e Meio \xe9 um ex-libris da noite algarvia.', + '\xc9 uma das mais antigas discotecas do Algarve, situada em Albufeira, + que continua a manter os tra\xe7os decorativos e as clientelas de sempre.', + '\xc9 um pouco a vers\xe3o de uma esp\xe9cie de \xaboutro lado\xbb da noite, + a meio caminho entre os devaneios de uma fauna perif\xe9rica, seja de Lisboa, + Londres, Dublin ou Faro e Portim\xe3o, e a postura circunspecta dos fi\xe9is da casa, + que dela esperam a m\xfasica \xabgeracionista\xbb dos 60 ou dos 70.', + 'N\xe3o deixa de ser, nos tempos que correm, um certo \xabvery typical\xbb algarvio, + cabe\xe7a de cartaz para os que querem fugir a algumas movimenta\xe7\xf5es nocturnas + j\xe1 a caminho da ritualiza\xe7\xe3o de massas, do g\xe9nero \xabvamos todos ao + Calypso e encontramo-nos na Locomia\xbb.', + 'E assim, aos 2,5 milh\xf5es que o Minist\xe9rio do Planeamento e Administra\xe7\xe3o + do Territ\xf3rio j\xe1 gasta no pagamento do pessoal afecto a estes organismos, + v\xeam juntar-se os montantes das obras propriamente ditas, que os munic\xedpios, + j\xe1 com projectos na m\xe3o, v\xeam reivindicar junto do Executivo, como salienta + aquele membro do Governo.', + 'E o dinheiro \xabn\xe3o falta s\xf3 \xe0s c\xe2maras\xbb, lembra o secret\xe1rio de Estado, + que considera que a solu\xe7\xe3o para as autarquias \xe9 \xabespecializarem-se em + fundos comunit\xe1rios\xbb.', + 'Mas como, se muitas n\xe3o disp\xf5em, nos seus quadros, dos t\xe9cnicos necess\xe1rios?', + '\xabEncomendem-nos a projectistas de fora\xbb porque, se as obras vierem a ser financiadas, + eles at\xe9 saem de gra\xe7a, j\xe1 que, nesse caso, \xabos fundos comunit\xe1rios pagam + os projectos, o mesmo n\xe3o acontecendo quando eles s\xe3o feitos pelos GAT\xbb, + dado serem organismos do Estado.', + 'Essa poder\xe1 vir a ser uma hip\xf3tese, at\xe9 porque, no terreno, a capacidade dos GAT + est\xe1 cada vez mais enfraquecida.', + 'Alguns at\xe9 j\xe1 desapareceram, como o de Castro Verde, e outros t\xeam vindo a perder quadros.'] + +NLTK's data collection includes a trained model for Portuguese sentence +segmentation, which can be loaded as follows. It is faster to load a trained model than +to retrain it. + + >>> stok = nltk.data.load('tokenizers/punkt/portuguese.pickle') + +Stemming +-------- + +NLTK includes the RSLP Portuguese stemmer. Here we use it to stem some Portuguese text: + + >>> stemmer = nltk.stem.RSLPStemmer() + >>> stemmer.stem("copiar") + 'copi' + >>> stemmer.stem("paisagem") + 'pais' + + +Stopwords +--------- + +NLTK includes Portuguese stopwords: + + >>> stopwords = nltk.corpus.stopwords.words('portuguese') + >>> stopwords[:10] + ['a', 'ao', 'aos', 'aquela', 'aquelas', 'aquele', 'aqueles', 'aquilo', 'as', 'at\xe9'] + +Now we can use these to filter text. Let's find the most frequent words (other than stopwords) +and print them in descending order of frequency: + + >>> fd = nltk.FreqDist(w.lower() for w in floresta.words() if w not in stopwords) + >>> for word in list(fd.keys())[:20]: + ... 
print(word, fd[word]) + , 13444 + . 7725 + « 2369 + » 2310 + é 1305 + o 1086 + } 1047 + { 1044 + a 897 + ; 633 + em 516 + ser 466 + sobre 349 + os 313 + anos 301 + ontem 292 + ainda 279 + segundo 256 + ter 249 + dois 231 diff --git a/lib/python3.10/site-packages/nltk/test/portuguese_en_fixt.py b/lib/python3.10/site-packages/nltk/test/portuguese_en_fixt.py new file mode 100644 index 0000000000000000000000000000000000000000..1e86682b0810ef1299cf353ae606db9f9e9114d7 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/portuguese_en_fixt.py @@ -0,0 +1,4 @@ +def setup_module(): + import pytest + + pytest.skip("portuguese_en.doctest imports nltk.examples.pt which doesn't exist!") diff --git a/lib/python3.10/site-packages/nltk/test/probability_fixt.py b/lib/python3.10/site-packages/nltk/test/probability_fixt.py new file mode 100644 index 0000000000000000000000000000000000000000..a67809384d3780fa9d1b3efcf4ca51e10cd4be00 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/probability_fixt.py @@ -0,0 +1,8 @@ +# probability.doctest uses HMM which requires numpy; +# skip probability.doctest if numpy is not available + + +def setup_module(): + import pytest + + pytest.importorskip("numpy") diff --git a/lib/python3.10/site-packages/nltk/test/sentiwordnet.doctest b/lib/python3.10/site-packages/nltk/test/sentiwordnet.doctest new file mode 100644 index 0000000000000000000000000000000000000000..8cab0d9590c71b37b1319e5f21b388168777c476 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/sentiwordnet.doctest @@ -0,0 +1,41 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +====================== +SentiWordNet Interface +====================== + +SentiWordNet can be imported like this: + + >>> from nltk.corpus import sentiwordnet as swn + +------------ +SentiSynsets +------------ + + >>> breakdown = swn.senti_synset('breakdown.n.03') + >>> print(breakdown) + + >>> breakdown.pos_score() + 0.0 + >>> breakdown.neg_score() + 0.25 + >>> breakdown.obj_score() + 0.75 + + +------ +Lookup +------ + + >>> list(swn.senti_synsets('slow')) + [SentiSynset('decelerate.v.01'), SentiSynset('slow.v.02'), + SentiSynset('slow.v.03'), SentiSynset('slow.a.01'), + SentiSynset('slow.a.02'), SentiSynset('dense.s.04'), + SentiSynset('slow.a.04'), SentiSynset('boring.s.01'), + SentiSynset('dull.s.08'), SentiSynset('slowly.r.01'), + SentiSynset('behind.r.03')] + + >>> happy = swn.senti_synsets('happy', 'a') + + >>> all = swn.all_senti_synsets() diff --git a/lib/python3.10/site-packages/nltk/test/setup_fixt.py b/lib/python3.10/site-packages/nltk/test/setup_fixt.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f3a27464b1875107354eb01e1fe9467c653539 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/setup_fixt.py @@ -0,0 +1,26 @@ +from nltk.internals import find_binary, find_jar + + +def check_binary(binary: str, **args): + """Skip a test via `pytest.skip` if the `binary` executable is not found. + Keyword arguments are passed to `nltk.internals.find_binary`.""" + import pytest + + try: + find_binary(binary, **args) + except LookupError: + pytest.skip(f"Skipping test because the {binary} binary was not found.") + + +def check_jar(name_pattern: str, **args): + """Skip a test via `pytest.skip` if the `name_pattern` jar is not found. + Keyword arguments are passed to `nltk.internals.find_jar`. + + TODO: Investigate why the CoreNLP tests that rely on this check_jar failed + on the CI. 
https://github.com/nltk/nltk/pull/3060#issuecomment-1268355108 + """ + import pytest + + pytest.skip( + "Skipping test because the doctests requiring jars are inconsistent on the CI." + ) diff --git a/lib/python3.10/site-packages/nltk/test/simple.doctest b/lib/python3.10/site-packages/nltk/test/simple.doctest new file mode 100644 index 0000000000000000000000000000000000000000..5cff34f2b3aab1dcfed64ffa93a63e3ce3c40c35 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/simple.doctest @@ -0,0 +1,83 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +================= +EasyInstall Tests +================= + +This file contains some simple tests that will be run by EasyInstall in +order to test the installation when NLTK-Data is absent. + + +------------ +Tokenization +------------ + + >>> from nltk.tokenize import wordpunct_tokenize + >>> s = ("Good muffins cost $3.88\nin New York. Please buy me\n" + ... "two of them.\n\nThanks.") + >>> wordpunct_tokenize(s) + ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.', + 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.'] + +------- +Metrics +------- + + >>> from nltk.metrics import precision, recall, f_measure + >>> reference = 'DET NN VB DET JJ NN NN IN DET NN'.split() + >>> test = 'DET VB VB DET NN NN NN IN DET NN'.split() + >>> reference_set = set(reference) + >>> test_set = set(test) + >>> precision(reference_set, test_set) + 1.0 + >>> print(recall(reference_set, test_set)) + 0.8 + >>> print(f_measure(reference_set, test_set)) + 0.88888888888... + +------------------ +Feature Structures +------------------ + + >>> from nltk import FeatStruct + >>> fs1 = FeatStruct(PER=3, NUM='pl', GND='fem') + >>> fs2 = FeatStruct(POS='N', AGR=fs1) + >>> print(fs2) + [ [ GND = 'fem' ] ] + [ AGR = [ NUM = 'pl' ] ] + [ [ PER = 3 ] ] + [ ] + [ POS = 'N' ] + >>> print(fs2['AGR']) + [ GND = 'fem' ] + [ NUM = 'pl' ] + [ PER = 3 ] + >>> print(fs2['AGR']['PER']) + 3 + +------- +Parsing +------- + + >>> from nltk.parse.recursivedescent import RecursiveDescentParser + >>> from nltk.grammar import CFG + >>> grammar = CFG.fromstring(""" + ... S -> NP VP + ... PP -> P NP + ... NP -> 'the' N | N PP | 'the' N PP + ... VP -> V NP | V PP | V NP PP + ... N -> 'cat' | 'dog' | 'rug' + ... V -> 'chased' + ... P -> 'on' + ... """) + >>> rd = RecursiveDescentParser(grammar) + >>> sent = 'the cat chased the dog on the rug'.split() + >>> for t in rd.parse(sent): + ... print(t) + (S + (NP the (N cat)) + (VP (V chased) (NP the (N dog) (PP (P on) (NP the (N rug)))))) + (S + (NP the (N cat)) + (VP (V chased) (NP the (N dog)) (PP (P on) (NP the (N rug))))) diff --git a/lib/python3.10/site-packages/nltk/test/stem.doctest b/lib/python3.10/site-packages/nltk/test/stem.doctest new file mode 100644 index 0000000000000000000000000000000000000000..c2c40a66d4202e13b46eb81424b7902637c7f942 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/stem.doctest @@ -0,0 +1,105 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +========== + Stemmers +========== + +Overview +~~~~~~~~ + +Stemmers remove morphological affixes from words, leaving only the +word stem. + + >>> from nltk.stem import * + +Unit tests for the Porter stemmer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + >>> from nltk.stem.porter import * + +Create a new Porter stemmer. + + >>> stemmer = PorterStemmer() + +Test the stemmer on various pluralised words. 
+ + >>> plurals = ['caresses', 'flies', 'dies', 'mules', 'denied', + ... 'died', 'agreed', 'owned', 'humbled', 'sized', + ... 'meeting', 'stating', 'siezing', 'itemization', + ... 'sensational', 'traditional', 'reference', 'colonizer', + ... 'plotted'] + + >>> singles = [stemmer.stem(plural) for plural in plurals] + + >>> print(' '.join(singles)) + caress fli die mule deni die agre own humbl size meet + state siez item sensat tradit refer colon plot + + +Unit tests for Snowball stemmer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + >>> from nltk.stem.snowball import SnowballStemmer + +See which languages are supported. + + >>> print(" ".join(SnowballStemmer.languages)) + arabic danish dutch english finnish french german hungarian italian + norwegian porter portuguese romanian russian spanish swedish + +Create a new instance of a language specific subclass. + + >>> stemmer = SnowballStemmer("english") + +Stem a word. + + >>> print(stemmer.stem("running")) + run + +Decide not to stem stopwords. + + >>> stemmer2 = SnowballStemmer("english", ignore_stopwords=True) + >>> print(stemmer.stem("having")) + have + >>> print(stemmer2.stem("having")) + having + +The 'english' stemmer is better than the original 'porter' stemmer. + + >>> print(SnowballStemmer("english").stem("generously")) + generous + >>> print(SnowballStemmer("porter").stem("generously")) + gener + +.. note:: + + Extra stemmer tests can be found in `nltk.test.unit.test_stem`. + +Unit tests for ARLSTem Stemmer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + >>> from nltk.stem.arlstem import ARLSTem + +Create a Stemmer instance. + + >>> stemmer = ARLSTem() + +Stem a word. + + >>> stemmer.stem('يعمل') + 'عمل' + +Unit tests for ARLSTem2 Stemmer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + >>> from nltk.stem.arlstem2 import ARLSTem2 + +Create a Stemmer instance. + + >>> stemmer = ARLSTem2() + +Stem a word. + + >>> stemmer.stem('يعمل') + 'عمل' diff --git a/lib/python3.10/site-packages/nltk/test/tokenize.doctest b/lib/python3.10/site-packages/nltk/test/tokenize.doctest new file mode 100644 index 0000000000000000000000000000000000000000..c3f40c8b64820315eb3c809e31ac53517d4dfca8 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/tokenize.doctest @@ -0,0 +1,397 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + + >>> from nltk.tokenize import * + +Regression Tests: NLTKWordTokenizer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Tokenizing some test strings. + + >>> s1 = "On a $50,000 mortgage of 30 years at 8 percent, the monthly payment would be $366.88." + >>> word_tokenize(s1) + ['On', 'a', '$', '50,000', 'mortgage', 'of', '30', 'years', 'at', '8', 'percent', ',', 'the', 'monthly', 'payment', 'would', 'be', '$', '366.88', '.'] + >>> s2 = "\"We beat some pretty good teams to get here,\" Slocum said." + >>> word_tokenize(s2) + ['``', 'We', 'beat', 'some', 'pretty', 'good', 'teams', 'to', 'get', 'here', ',', "''", 'Slocum', 'said', '.'] + >>> s3 = "Well, we couldn't have this predictable, cliche-ridden, \"Touched by an Angel\" (a show creator John Masius worked on) wanna-be if she didn't." + >>> word_tokenize(s3) + ['Well', ',', 'we', 'could', "n't", 'have', 'this', 'predictable', ',', 'cliche-ridden', ',', '``', 'Touched', 'by', 'an', 'Angel', "''", '(', 'a', 'show', 'creator', 'John', 'Masius', 'worked', 'on', ')', 'wanna-be', 'if', 'she', 'did', "n't", '.'] + >>> s4 = "I cannot cannot work under these conditions!" 
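+    >>> # Descriptive note (added): the expected output below shows "cannot" being split into "can" + "not".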
+ >>> word_tokenize(s4) + ['I', 'can', 'not', 'can', 'not', 'work', 'under', 'these', 'conditions', '!'] + >>> s5 = "The company spent $30,000,000 last year." + >>> word_tokenize(s5) + ['The', 'company', 'spent', '$', '30,000,000', 'last', 'year', '.'] + >>> s6 = "The company spent 40.75% of its income last year." + >>> word_tokenize(s6) + ['The', 'company', 'spent', '40.75', '%', 'of', 'its', 'income', 'last', 'year', '.'] + >>> s7 = "He arrived at 3:00 pm." + >>> word_tokenize(s7) + ['He', 'arrived', 'at', '3:00', 'pm', '.'] + >>> s8 = "I bought these items: books, pencils, and pens." + >>> word_tokenize(s8) + ['I', 'bought', 'these', 'items', ':', 'books', ',', 'pencils', ',', 'and', 'pens', '.'] + >>> s9 = "Though there were 150, 100 of them were old." + >>> word_tokenize(s9) + ['Though', 'there', 'were', '150', ',', '100', 'of', 'them', 'were', 'old', '.'] + >>> s10 = "There were 300,000, but that wasn't enough." + >>> word_tokenize(s10) + ['There', 'were', '300,000', ',', 'but', 'that', 'was', "n't", 'enough', '.'] + >>> s11 = "It's more'n enough." + >>> word_tokenize(s11) + ['It', "'s", 'more', "'n", 'enough', '.'] + +Gathering the spans of the tokenized strings. + + >>> s = '''Good muffins cost $3.88\nin New (York). Please (buy) me\ntwo of them.\n(Thanks).''' + >>> expected = [(0, 4), (5, 12), (13, 17), (18, 19), (19, 23), + ... (24, 26), (27, 30), (31, 32), (32, 36), (36, 37), (37, 38), + ... (40, 46), (47, 48), (48, 51), (51, 52), (53, 55), (56, 59), + ... (60, 62), (63, 68), (69, 70), (70, 76), (76, 77), (77, 78)] + >>> list(NLTKWordTokenizer().span_tokenize(s)) == expected + True + >>> expected = ['Good', 'muffins', 'cost', '$', '3.88', 'in', + ... 'New', '(', 'York', ')', '.', 'Please', '(', 'buy', ')', + ... 'me', 'two', 'of', 'them.', '(', 'Thanks', ')', '.'] + >>> [s[start:end] for start, end in NLTKWordTokenizer().span_tokenize(s)] == expected + True + + >>> s = '''I said, "I'd like to buy some ''good muffins" which cost $3.88\n each in New (York)."''' + >>> expected = [(0, 1), (2, 6), (6, 7), (8, 9), (9, 10), (10, 12), + ... (13, 17), (18, 20), (21, 24), (25, 29), (30, 32), (32, 36), + ... (37, 44), (44, 45), (46, 51), (52, 56), (57, 58), (58, 62), + ... (64, 68), (69, 71), (72, 75), (76, 77), (77, 81), (81, 82), + ... (82, 83), (83, 84)] + >>> list(NLTKWordTokenizer().span_tokenize(s)) == expected + True + >>> expected = ['I', 'said', ',', '"', 'I', "'d", 'like', 'to', + ... 'buy', 'some', "''", "good", 'muffins', '"', 'which', 'cost', + ... '$', '3.88', 'each', 'in', 'New', '(', 'York', ')', '.', '"'] + >>> [s[start:end] for start, end in NLTKWordTokenizer().span_tokenize(s)] == expected + True + +Testing improvement made to the TreebankWordTokenizer + + >>> sx1 = '\xabNow that I can do.\xbb' + >>> expected = ['\xab', 'Now', 'that', 'I', 'can', 'do', '.', '\xbb'] + >>> word_tokenize(sx1) == expected + True + >>> sx2 = 'The unicode 201C and 201D \u201cLEFT(RIGHT) DOUBLE QUOTATION MARK\u201d is also OPEN_PUNCT and CLOSE_PUNCT.' + >>> expected = ['The', 'unicode', '201C', 'and', '201D', '\u201c', 'LEFT', '(', 'RIGHT', ')', 'DOUBLE', 'QUOTATION', 'MARK', '\u201d', 'is', 'also', 'OPEN_PUNCT', 'and', 'CLOSE_PUNCT', '.'] + >>> word_tokenize(sx2) == expected + True + + +Testing treebank's detokenizer + + >>> from nltk.tokenize.treebank import TreebankWordDetokenizer + >>> detokenizer = TreebankWordDetokenizer() + >>> s = "On a $50,000 mortgage of 30 years at 8 percent, the monthly payment would be $366.88." 
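+    >>> # Descriptive note (added): each example below round-trips tokenize -> detokenize and should reproduce the original string.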
+ >>> detokenizer.detokenize(word_tokenize(s)) + 'On a $50,000 mortgage of 30 years at 8 percent, the monthly payment would be $366.88.' + >>> s = "\"We beat some pretty good teams to get here,\" Slocum said." + >>> detokenizer.detokenize(word_tokenize(s)) + '"We beat some pretty good teams to get here," Slocum said.' + >>> s = "Well, we couldn't have this predictable, cliche-ridden, \"Touched by an Angel\" (a show creator John Masius worked on) wanna-be if she didn't." + >>> detokenizer.detokenize(word_tokenize(s)) + 'Well, we couldn\'t have this predictable, cliche-ridden, "Touched by an Angel" (a show creator John Masius worked on) wanna-be if she didn\'t.' + >>> s = "I cannot cannot work under these conditions!" + >>> detokenizer.detokenize(word_tokenize(s)) + 'I cannot cannot work under these conditions!' + >>> s = "The company spent $30,000,000 last year." + >>> detokenizer.detokenize(word_tokenize(s)) + 'The company spent $30,000,000 last year.' + >>> s = "The company spent 40.75% of its income last year." + >>> detokenizer.detokenize(word_tokenize(s)) + 'The company spent 40.75% of its income last year.' + >>> s = "He arrived at 3:00 pm." + >>> detokenizer.detokenize(word_tokenize(s)) + 'He arrived at 3:00 pm.' + >>> s = "I bought these items: books, pencils, and pens." + >>> detokenizer.detokenize(word_tokenize(s)) + 'I bought these items: books, pencils, and pens.' + >>> s = "Though there were 150, 100 of them were old." + >>> detokenizer.detokenize(word_tokenize(s)) + 'Though there were 150, 100 of them were old.' + >>> s = "There were 300,000, but that wasn't enough." + >>> detokenizer.detokenize(word_tokenize(s)) + "There were 300,000, but that wasn't enough." + >>> s = 'How "are" you?' + >>> detokenizer.detokenize(word_tokenize(s)) + 'How "are" you?' + >>> s = "Hello (world)" + >>> detokenizer.detokenize(word_tokenize(s)) + 'Hello (world)' + >>> s = ' with (many) [kinds] of {parentheses}. "Sometimes it\'s inside (quotes)". ("Sometimes the otherway around").' + >>> detokenizer.detokenize(word_tokenize(s)) + ' with (many) [kinds] of {parentheses}. "Sometimes it\'s inside (quotes)". ("Sometimes the otherway around").' + >>> s = "Sentence ending with (parentheses)" + >>> detokenizer.detokenize(word_tokenize(s)) + 'Sentence ending with (parentheses)' + >>> s = "(Sentence) starting with parentheses." + >>> detokenizer.detokenize(word_tokenize(s)) + '(Sentence) starting with parentheses.' + >>> s = "I've" + >>> detokenizer.detokenize(word_tokenize(s)) + "I've" + >>> s = "Don't" + >>> detokenizer.detokenize(word_tokenize(s)) + "Don't" + >>> s = "I'd" + >>> detokenizer.detokenize(word_tokenize(s)) + "I'd" + + +Sentence tokenization in word_tokenize: + + >>> s11 = "I called Dr. Jones. I called Dr. Jones." + >>> word_tokenize(s11) + ['I', 'called', 'Dr.', 'Jones', '.', 'I', 'called', 'Dr.', 'Jones', '.'] + >>> s12 = ("Ich muss unbedingt daran denken, Mehl, usw. fur einen " + ... "Kuchen einzukaufen. Ich muss.") + >>> word_tokenize(s12) + ['Ich', 'muss', 'unbedingt', 'daran', 'denken', ',', 'Mehl', ',', 'usw', + '.', 'fur', 'einen', 'Kuchen', 'einzukaufen', '.', 'Ich', 'muss', '.'] + >>> word_tokenize(s12, 'german') + ['Ich', 'muss', 'unbedingt', 'daran', 'denken', ',', 'Mehl', ',', 'usw.', + 'fur', 'einen', 'Kuchen', 'einzukaufen', '.', 'Ich', 'muss', '.'] + + +Regression Tests: Regexp Tokenizer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Some additional test strings. + + >>> s = ("Good muffins cost $3.88\nin New York. Please buy me\n" + ... 
"two of them.\n\nThanks.") + >>> s2 = ("Alas, it has not rained today. When, do you think, " + ... "will it rain again?") + >>> s3 = ("

Although this is not the case here, we must " + ... "not relax our vigilance!

") + + >>> regexp_tokenize(s2, r'[,\.\?!"]\s*', gaps=False) + [', ', '. ', ', ', ', ', '?'] + >>> regexp_tokenize(s2, r'[,\.\?!"]\s*', gaps=True) + ['Alas', 'it has not rained today', 'When', 'do you think', + 'will it rain again'] + +Take care to avoid using capturing groups: + + >>> regexp_tokenize(s3, r'', gaps=False) + ['

', '', '', '

'] + >>> regexp_tokenize(s3, r'', gaps=False) + ['

', '', '', '

'] + >>> regexp_tokenize(s3, r'', gaps=True) + ['Although this is ', 'not', + ' the case here, we must not relax our vigilance!'] + +Named groups are capturing groups, and confuse the tokenizer: + + >>> regexp_tokenize(s3, r'b|p)>', gaps=False) + ['p', 'b', 'b', 'p'] + >>> regexp_tokenize(s3, r'b|p)>', gaps=True) + ['p', 'Although this is ', 'b', 'not', 'b', + ' the case here, we must not relax our vigilance!', 'p'] + +Make sure that nested groups don't confuse the tokenizer: + + >>> regexp_tokenize(s2, r'(?:h|r|l)a(?:s|(?:i|n0))', gaps=False) + ['las', 'has', 'rai', 'rai'] + >>> regexp_tokenize(s2, r'(?:h|r|l)a(?:s|(?:i|n0))', gaps=True) + ['A', ', it ', ' not ', 'ned today. When, do you think, will it ', + 'n again?'] + +Back-references require capturing groups, and these are not supported: + + >>> regexp_tokenize("aabbbcccc", r'(.)\1') + ['a', 'b', 'c', 'c'] + +A simple sentence tokenizer '\.(\s+|$)' + + >>> regexp_tokenize(s, pattern=r'\.(?:\s+|$)', gaps=True) + ['Good muffins cost $3.88\nin New York', + 'Please buy me\ntwo of them', 'Thanks'] + + +Regression Tests: TweetTokenizer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +TweetTokenizer is a tokenizer specifically designed for micro-blogging tokenization tasks. + + >>> from nltk.tokenize import TweetTokenizer + >>> tknzr = TweetTokenizer() + >>> s0 = "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--" + >>> tknzr.tokenize(s0) + ['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--'] + >>> s1 = "@Joyster2012 @CathStaincliffe Good for you, girl!! Best wishes :-)" + >>> tknzr.tokenize(s1) + ['@Joyster2012', '@CathStaincliffe', 'Good', 'for', 'you', ',', 'girl', '!', '!', 'Best', 'wishes', ':-)'] + >>> s2 = "3Points for #DreamTeam Gooo BAILEY! :) #PBB737Gold @PBBabscbn" + >>> tknzr.tokenize(s2) + ['3Points', 'for', '#DreamTeam', 'Gooo', 'BAILEY', '!', ':)', '#PBB737Gold', '@PBBabscbn'] + >>> s3 = "@Insanomania They do... Their mentality doesn't :(" + >>> tknzr.tokenize(s3) + ['@Insanomania', 'They', 'do', '...', 'Their', 'mentality', "doesn't", ':('] + >>> s4 = "RT @facugambande: Ya por arrancar a grabar !!! #TirenTirenTiren vamoo !!" + >>> tknzr.tokenize(s4) + ['RT', '@facugambande', ':', 'Ya', 'por', 'arrancar', 'a', 'grabar', '!', '!', '!', '#TirenTirenTiren', 'vamoo', '!', '!'] + >>> tknzr = TweetTokenizer(reduce_len=True) + >>> s5 = "@crushinghes the summer holidays are great but I'm so bored already :(" + >>> tknzr.tokenize(s5) + ['@crushinghes', 'the', 'summer', 'holidays', 'are', 'great', 'but', "I'm", 'so', 'bored', 'already', ':('] + +It is possible to specify `strip_handles` and `reduce_len` parameters for a TweetTokenizer instance. Setting `strip_handles` to True, the tokenizer will remove Twitter handles (e.g. usernames). Setting `reduce_len` to True, repeated character sequences of length 3 or greater will be replaced with sequences of length 3. + + >>> tknzr = TweetTokenizer(strip_handles=True, reduce_len=True) + >>> s6 = '@remy: This is waaaaayyyy too much for you!!!!!!' + >>> tknzr.tokenize(s6) + [':', 'This', 'is', 'waaayyy', 'too', 'much', 'for', 'you', '!', '!', '!'] + >>> s7 = '@_willy65: No place for @chuck tonight. Sorry.' + >>> tknzr.tokenize(s7) + [':', 'No', 'place', 'for', 'tonight', '.', 'Sorry', '.'] + >>> s8 = '@mar_tin is a great developer. Contact him at mar_tin@email.com.' 
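+    >>> # Descriptive note (added): with strip_handles=True the @mar_tin handle is removed below, while the e-mail address is kept.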
+ >>> tknzr.tokenize(s8) + ['is', 'a', 'great', 'developer', '.', 'Contact', 'him', 'at', 'mar_tin@email.com', '.'] + +The `preserve_case` parameter (default: True) allows to convert uppercase tokens to lowercase tokens. Emoticons are not affected: + + >>> tknzr = TweetTokenizer(preserve_case=False) + >>> s9 = "@jrmy: I'm REALLY HAPPYYY about that! NICEEEE :D :P" + >>> tknzr.tokenize(s9) + ['@jrmy', ':', "i'm", 'really', 'happyyy', 'about', 'that', '!', 'niceeee', ':D', ':P'] + +It should not hang on long sequences of the same punctuation character. + + >>> tknzr = TweetTokenizer() + >>> s10 = "Photo: Aujourd'hui sur http://t.co/0gebOFDUzn Projet... http://t.co/bKfIUbydz2.............................. http://fb.me/3b6uXpz0L" + >>> tknzr.tokenize(s10) + ['Photo', ':', "Aujourd'hui", 'sur', 'http://t.co/0gebOFDUzn', 'Projet', '...', 'http://t.co/bKfIUbydz2', '...', 'http://fb.me/3b6uXpz0L'] + +Tokenizing multiple sentences at once: + + >>> tknzr = TweetTokenizer() + >>> sentences = [ + ... "This is a cooool #dummysmiley: :-) :-P <3 and some arrows < > -> <--", + ... "@jrmy: I'm REALLY HAPPYYY about that! NICEEEE :D :P", + ... "@_willy65: No place for @chuck tonight. Sorry." + ... ] + >>> tknzr.tokenize_sents(sentences) # doctest: +NORMALIZE_WHITESPACE + [['This', 'is', 'a', 'cooool', '#dummysmiley', ':', ':-)', ':-P', '<3', 'and', 'some', 'arrows', '<', '>', '->', '<--'], + ['@jrmy', ':', "I'm", 'REALLY', 'HAPPYYY', 'about', 'that', '!', 'NICEEEE', ':D', ':P'], + ['@_willy65', ':', 'No', 'place', 'for', '@chuck', 'tonight', '.', 'Sorry', '.']] + + +Regression Tests: PunktSentenceTokenizer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The sentence splitter should remove whitespace following the sentence boundary. + + >>> pst = PunktSentenceTokenizer() + >>> pst.tokenize('See Section 3). Or Section 2). ') + ['See Section 3).', 'Or Section 2).'] + >>> pst.tokenize('See Section 3.) Or Section 2.) ') + ['See Section 3.)', 'Or Section 2.)'] + >>> pst.tokenize('See Section 3.) Or Section 2.) ', realign_boundaries=False) + ['See Section 3.', ') Or Section 2.', ')'] + + +Two instances of PunktSentenceTokenizer should not share PunktParameters. + + >>> pst = PunktSentenceTokenizer() + >>> pst2 = PunktSentenceTokenizer() + >>> pst._params is pst2._params + False + +Testing mutable default arguments for https://github.com/nltk/nltk/pull/2067 + + >>> from nltk.tokenize.punkt import PunktBaseClass, PunktTrainer, PunktSentenceTokenizer + >>> from nltk.tokenize.punkt import PunktLanguageVars, PunktParameters + >>> pbc = PunktBaseClass(lang_vars=None, params=None) + >>> type(pbc._params) + + >>> type(pbc._lang_vars) + + >>> pt = PunktTrainer(lang_vars=None) + >>> type(pt._lang_vars) + + >>> pst = PunktSentenceTokenizer(lang_vars=None) + >>> type(pst._lang_vars) + + +Testing that inputs can start with dots. + + >>> pst = PunktSentenceTokenizer(lang_vars=None) + >>> pst.tokenize(". This input starts with a dot. 
This used to cause issues.") + ['.', 'This input starts with a dot.', 'This used to cause issues.'] + +Regression Tests: align_tokens +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Post-hoc alignment of tokens with a source string + + >>> from nltk.tokenize.util import align_tokens + >>> list(align_tokens([''], "")) + [(0, 0)] + >>> list(align_tokens([''], " ")) + [(0, 0)] + >>> list(align_tokens([], "")) + [] + >>> list(align_tokens([], " ")) + [] + >>> list(align_tokens(['a'], "a")) + [(0, 1)] + >>> list(align_tokens(['abc', 'def'], "abcdef")) + [(0, 3), (3, 6)] + >>> list(align_tokens(['abc', 'def'], "abc def")) + [(0, 3), (4, 7)] + >>> list(align_tokens(['ab', 'cd'], "ab cd ef")) + [(0, 2), (3, 5)] + >>> list(align_tokens(['ab', 'cd', 'ef'], "ab cd ef")) + [(0, 2), (3, 5), (6, 8)] + >>> list(align_tokens(['ab', 'cd', 'efg'], "ab cd ef")) + Traceback (most recent call last): + .... + ValueError: substring "efg" not found in "ab cd ef" + >>> list(align_tokens(['ab', 'cd', 'ef', 'gh'], "ab cd ef")) + Traceback (most recent call last): + .... + ValueError: substring "gh" not found in "ab cd ef" + >>> list(align_tokens(['The', 'plane', ',', 'bound', 'for', 'St', 'Petersburg', ',', 'crashed', 'in', 'Egypt', "'s", 'Sinai', 'desert', 'just', '23', 'minutes', 'after', 'take-off', 'from', 'Sharm', 'el-Sheikh', 'on', 'Saturday', '.'], "The plane, bound for St Petersburg, crashed in Egypt's Sinai desert just 23 minutes after take-off from Sharm el-Sheikh on Saturday.")) + [(0, 3), (4, 9), (9, 10), (11, 16), (17, 20), (21, 23), (24, 34), (34, 35), (36, 43), (44, 46), (47, 52), (52, 54), (55, 60), (61, 67), (68, 72), (73, 75), (76, 83), (84, 89), (90, 98), (99, 103), (104, 109), (110, 119), (120, 122), (123, 131), (131, 132)] + + +Regression Tests: MWETokenizer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Pickle an MWETokenizer + + >>> from nltk.tokenize import MWETokenizer + >>> import pickle + + >>> tokenizer = MWETokenizer([('hors', "d'oeuvre")], separator='+') + >>> p = pickle.dumps(tokenizer) + >>> unpickeled = pickle.loads(p) + >>> unpickeled.tokenize("An hors d'oeuvre tonight, sir?".split()) + ['An', "hors+d'oeuvre", 'tonight,', 'sir?'] + + +Regression Tests: TextTilingTokenizer +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +TextTilingTokenizer tokenizes text into coherent subtopic chunks based upon Hearst's TextTiling algorithm. 
+ + >>> from nltk.tokenize import TextTilingTokenizer + >>> from nltk.corpus import brown + >>> tt = TextTilingTokenizer() + >>> tt.tokenize(brown.raw()[0:1000]) + ["\n\n\tThe/at Fulton/np-tl County/nn-tl Grand/jj-tl Jury/nn-tl said/vbd Friday/nr an/at investigation/nn of/in Atlanta's/np$ recent/jj primary/nn election/nn produced/vbd ``/`` no/at evidence/nn ''/'' that/cs any/dti irregularities/nns took/vbd place/nn ./.\n\n\n\tThe/at jury/nn further/rbr said/vbd in/in term-end/nn presentments/nns that/cs the/at City/nn-tl Executive/jj-tl Committee/nn-tl ,/, which/wdt had/hvd over-all/jj charge/nn of/in the/at election/nn ,/, ``/`` deserves/vbz the/at praise/nn and/cc thanks/nns of/in the/at City/nn-tl of/in-tl Atlanta/np-tl ''/'' for/in the/at manner/nn in/in which/wdt the/at election/nn was/bedz conducted/vbn ./.\n\n\n\tThe/at September-October/np term/nn jury/nn had/hvd been/ben charged/vbn by/in Fulton/np-tl Superior/jj-tl Court/nn-tl Judge/nn-tl Durwood/np Pye/np to/to investigate/vb reports/nns of/in possible/jj ``/`` irregularities/nns ''/'' in/in the/at hard-fought/jj primary/nn which/wdt was/bedz won/vbn by/in Mayor-nominate/nn-tl Ivan/np Allen/np Jr./"] + +Test that `ValueError` exceptions are raised when illegal arguments are used. + + >>> TextTilingTokenizer(similarity_method='foo').tokenize(brown.raw()[0:1000]) + Traceback (most recent call last): + ... + ValueError: Similarity method foo not recognized + >>> TextTilingTokenizer(smoothing_method='bar').tokenize(brown.raw()[0:1000]) + Traceback (most recent call last): + ... + ValueError: Smoothing method bar not recognized diff --git a/lib/python3.10/site-packages/nltk/test/toolbox.doctest b/lib/python3.10/site-packages/nltk/test/toolbox.doctest new file mode 100644 index 0000000000000000000000000000000000000000..0dcf8495ad83460e081d47007ee5439aa54e097e --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/toolbox.doctest @@ -0,0 +1,306 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +=============================== +Unit test cases for ``toolbox`` +=============================== + + >>> from nltk import toolbox + +-------------------------- +``toolbox.StandardFormat`` +-------------------------- + + >>> f = toolbox.StandardFormat() + +``toolbox.StandardFormat.open()`` +--------------------------------- + >>> import os, tempfile + >>> (fd, fname) = tempfile.mkstemp() + >>> tf = os.fdopen(fd, "w") + >>> _ = tf.write('\\lx a value\n\\lx another value\n') + >>> tf.close() + >>> f = toolbox.StandardFormat() + >>> f.open(fname) + >>> list(f.fields()) + [('lx', 'a value'), ('lx', 'another value')] + >>> f.close() + >>> os.unlink(fname) + +``toolbox.StandardFormat.open_string()`` +---------------------------------------- + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\\lx another value\n') + >>> list(f.fields()) + [('lx', 'a value'), ('lx', 'another value')] + >>> f.close() + +``toolbox.StandardFormat.close()`` +---------------------------------- + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\\lx another value\n') + >>> list(f.fields()) + [('lx', 'a value'), ('lx', 'another value')] + >>> f.close() + +``toolbox.StandardFormat.line_num`` +--------------------------------------- + +``StandardFormat.line_num`` contains the line number of the last line returned: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\\lx another value\n\\lx a third value\n') + >>> line_nums = [] + >>> for l in f.raw_fields(): + ... 
line_nums.append(f.line_num) + >>> line_nums + [1, 2, 3] + +``StandardFormat.line_num`` contains the line number of the last line returned: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx two\nlines\n\\lx three\nlines\n\n\\lx two\nlines\n') + >>> line_nums = [] + >>> for l in f.raw_fields(): + ... line_nums.append(f.line_num) + >>> line_nums + [2, 5, 7] + +``StandardFormat.line_num`` doesn't exist before opening or after closing +a file or string: + + >>> f = toolbox.StandardFormat() + >>> f.line_num + Traceback (most recent call last): + ... + AttributeError: 'StandardFormat' object has no attribute 'line_num' + >>> f.open_string('\\lx two\nlines\n\\lx three\nlines\n\n\\lx two\nlines\n') + >>> line_nums = [] + >>> for l in f.raw_fields(): + ... line_nums.append(f.line_num) + >>> line_nums + [2, 5, 7] + >>> f.close() + >>> f.line_num + Traceback (most recent call last): + ... + AttributeError: 'StandardFormat' object has no attribute 'line_num' + +``toolbox.StandardFormat.raw_fields()`` +--------------------------------------- +``raw_fields()`` returns an iterator over tuples of two strings representing the +marker and its value. The marker is given without the backslash and the value +without its trailing newline: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\\lx another value\n') + >>> list(f.raw_fields()) + [('lx', 'a value'), ('lx', 'another value')] + +an empty file returns nothing: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('') + >>> list(f.raw_fields()) + [] + +file with only a newline returns WHAT SHOULD IT RETURN???: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\n') + >>> list(f.raw_fields()) + [(None, '')] + +file with only one field should be parsed ok: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx one value\n') + >>> list(f.raw_fields()) + [('lx', 'one value')] + +file without a trailing newline should be parsed ok: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\\lx another value') + >>> list(f.raw_fields()) + [('lx', 'a value'), ('lx', 'another value')] + +trailing white space is preserved except for the final newline: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx trailing space \n\\lx trailing tab\t\n\\lx extra newline\n\n') + >>> list(f.raw_fields()) + [('lx', 'trailing space '), ('lx', 'trailing tab\t'), ('lx', 'extra newline\n')] + +line wrapping is preserved: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\nmore of the value\nand still more\n\\lc another val\n') + >>> list(f.raw_fields()) + [('lx', 'a value\nmore of the value\nand still more'), ('lc', 'another val')] + +file beginning with a multiline record should be parsed ok: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\nmore of the value\nand still more\n\\lc another val\n') + >>> list(f.raw_fields()) + [('lx', 'a value\nmore of the value\nand still more'), ('lc', 'another val')] + +file ending with a multiline record should be parsed ok: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lc a value\n\\lx another value\nmore of the value\nand still more\n') + >>> list(f.raw_fields()) + [('lc', 'a value'), ('lx', 'another value\nmore of the value\nand still more')] + +file beginning with a BOM should be parsed ok: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\xef\xbb\xbf\\lx a value\n\\lx another value\n') + >>> list(f.raw_fields()) + [('lx', 'a value'), ('lx', 'another value')] + +file beginning with two BOMs should 
ignore only the first one: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\xef\xbb\xbf\xef\xbb\xbf\\lx a value\n\\lx another value\n') + >>> list(f.raw_fields()) + [(None, '\xef\xbb\xbf\\lx a value'), ('lx', 'another value')] + +should not ignore a BOM not at the beginning of the file: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\n\xef\xbb\xbf\\lx another value\n') + >>> list(f.raw_fields()) + [('lx', 'a value\n\xef\xbb\xbf\\lx another value')] + +``toolbox.StandardFormat.fields()`` +----------------------------------- +trailing white space is not preserved: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx trailing space \n\\lx trailing tab\t\n\\lx extra newline\n\n') + >>> list(f.fields()) + [('lx', 'trailing space'), ('lx', 'trailing tab'), ('lx', 'extra newline')] + +multiline fields are unwrapped: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\lx a value\nmore of the value\nand still more\n\\lc another val\n') + >>> list(f.fields()) + [('lx', 'a value more of the value and still more'), ('lc', 'another val')] + +markers +------- +A backslash in the first position on a new line indicates the start of a +marker. The backslash is not part of the marker: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\mk a value\n') + >>> list(f.fields()) + [('mk', 'a value')] + +If the backslash occurs later in the line it does not indicate the start +of a marker: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\mk a value\n \\mk another one\n') + >>> list(f.raw_fields()) + [('mk', 'a value\n \\mk another one')] + +There is no specific limit to the length of a marker: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\this_is_an_extremely_long_marker value\n') + >>> list(f.fields()) + [('this_is_an_extremely_long_marker', 'value')] + +A marker can contain any non white space character: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\`~!@#$%^&*()_-=+[{]}\\|,<.>/?;:"0123456789 value\n') + >>> list(f.fields()) + [('`~!@#$%^&*()_-=+[{]}\\|,<.>/?;:"0123456789', 'value')] + +A marker is terminated by any white space character: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\mk a value\n\\mk\tanother one\n\\mk\rthird one\n\\mk\ffourth one') + >>> list(f.fields()) + [('mk', 'a value'), ('mk', 'another one'), ('mk', 'third one'), ('mk', 'fourth one')] + +Consecutive whitespace characters (except newline) are treated the same as one: + + >>> f = toolbox.StandardFormat() + >>> f.open_string('\\mk \t\r\fa value\n') + >>> list(f.fields()) + [('mk', 'a value')] + +----------------------- +``toolbox.ToolboxData`` +----------------------- + + >>> db = toolbox.ToolboxData() + +``toolbox.ToolboxData.parse()`` +------------------------------- +check that normal parsing works: + + >>> from xml.etree import ElementTree + >>> td = toolbox.ToolboxData() + >>> s = """\\_sh v3.0 400 Rotokas Dictionary + ... \\_DateStampHasFourDigitYear + ... + ... \\lx kaa + ... \\ps V.A + ... \\ge gag + ... \\gp nek i pas + ... + ... \\lx kaa + ... \\ps V.B + ... \\ge strangle + ... \\gp pasim nek + ... """ + >>> td.open_string(s) + >>> tree = td.parse(key='lx') + >>> tree.tag + 'toolbox_data' + >>> ElementTree.tostring(list(tree)[0]).decode('utf8') + '
<header><_sh>v3.0 400 Rotokas Dictionary</_sh><_DateStampHasFourDigitYear /></header>
' + >>> ElementTree.tostring(list(tree)[1]).decode('utf8') + 'kaaV.Agagnek i pas' + >>> ElementTree.tostring(list(tree)[2]).decode('utf8') + 'kaaV.Bstranglepasim nek' + +check that guessing the key marker works: + + >>> from xml.etree import ElementTree + >>> td = toolbox.ToolboxData() + >>> s = """\\_sh v3.0 400 Rotokas Dictionary + ... \\_DateStampHasFourDigitYear + ... + ... \\lx kaa + ... \\ps V.A + ... \\ge gag + ... \\gp nek i pas + ... + ... \\lx kaa + ... \\ps V.B + ... \\ge strangle + ... \\gp pasim nek + ... """ + >>> td.open_string(s) + >>> tree = td.parse() + >>> ElementTree.tostring(list(tree)[0]).decode('utf8') + '
<header><_sh>v3.0 400 Rotokas Dictionary</_sh><_DateStampHasFourDigitYear /></header>
' + >>> ElementTree.tostring(list(tree)[1]).decode('utf8') + 'kaaV.Agagnek i pas' + >>> ElementTree.tostring(list(tree)[2]).decode('utf8') + 'kaaV.Bstranglepasim nek' + +----------------------- +``toolbox`` functions +----------------------- + +``toolbox.to_sfm_string()`` +------------------------------- diff --git a/lib/python3.10/site-packages/nltk/test/translate.doctest b/lib/python3.10/site-packages/nltk/test/translate.doctest new file mode 100644 index 0000000000000000000000000000000000000000..fd8eb4c1b50ac9f24fcc18cd12cb614f2b2feda9 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/translate.doctest @@ -0,0 +1,240 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +.. -*- coding: utf-8 -*- + +========= +Alignment +========= + +Corpus Reader +------------- + + >>> from nltk.corpus import comtrans + >>> words = comtrans.words('alignment-en-fr.txt') + >>> for word in words[:6]: + ... print(word) + Resumption + of + the + session + I + declare + >>> als = comtrans.aligned_sents('alignment-en-fr.txt')[0] + >>> als + AlignedSent(['Resumption', 'of', 'the', 'session'], + ['Reprise', 'de', 'la', 'session'], + Alignment([(0, 0), (1, 1), (2, 2), (3, 3)])) + + +Alignment Objects +----------------- + +Aligned sentences are simply a mapping between words in a sentence: + + >>> print(" ".join(als.words)) + Resumption of the session + >>> print(" ".join(als.mots)) + Reprise de la session + >>> als.alignment + Alignment([(0, 0), (1, 1), (2, 2), (3, 3)]) + + +Usually we look at them from the perspective of a source to a target language, +but they are easily inverted: + + >>> als.invert() + AlignedSent(['Reprise', 'de', 'la', 'session'], + ['Resumption', 'of', 'the', 'session'], + Alignment([(0, 0), (1, 1), (2, 2), (3, 3)])) + + +We can create new alignments, but these need to be in the correct range of +the corresponding sentences: + + >>> from nltk.translate import Alignment, AlignedSent + >>> als = AlignedSent(['Reprise', 'de', 'la', 'session'], + ... ['Resumption', 'of', 'the', 'session'], + ... Alignment([(0, 0), (1, 4), (2, 1), (3, 3)])) + Traceback (most recent call last): + ... + IndexError: Alignment is outside boundary of mots + + +You can set alignments with any sequence of tuples, so long as the first two +indexes of the tuple are the alignment indices: + + >>> als.alignment = Alignment([(0, 0), (1, 1), (2, 2, "boat"), (3, 3, False, (1,2))]) + + >>> Alignment([(0, 0), (1, 1), (2, 2, "boat"), (3, 3, False, (1,2))]) + Alignment([(0, 0), (1, 1), (2, 2, 'boat'), (3, 3, False, (1, 2))]) + + +Alignment Algorithms +-------------------- + +EM for IBM Model 1 +~~~~~~~~~~~~~~~~~~ + +Here is an example from Koehn, 2010: + + >>> from nltk.translate import IBMModel1 + >>> corpus = [AlignedSent(['the', 'house'], ['das', 'Haus']), + ... AlignedSent(['the', 'book'], ['das', 'Buch']), + ... 
AlignedSent(['a', 'book'], ['ein', 'Buch'])] + >>> em_ibm1 = IBMModel1(corpus, 20) + >>> print(round(em_ibm1.translation_table['the']['das'], 1)) + 1.0 + >>> print(round(em_ibm1.translation_table['book']['das'], 1)) + 0.0 + >>> print(round(em_ibm1.translation_table['house']['das'], 1)) + 0.0 + >>> print(round(em_ibm1.translation_table['the']['Buch'], 1)) + 0.0 + >>> print(round(em_ibm1.translation_table['book']['Buch'], 1)) + 1.0 + >>> print(round(em_ibm1.translation_table['a']['Buch'], 1)) + 0.0 + >>> print(round(em_ibm1.translation_table['book']['ein'], 1)) + 0.0 + >>> print(round(em_ibm1.translation_table['a']['ein'], 1)) + 1.0 + >>> print(round(em_ibm1.translation_table['the']['Haus'], 1)) + 0.0 + >>> print(round(em_ibm1.translation_table['house']['Haus'], 1)) + 1.0 + >>> print(round(em_ibm1.translation_table['book'][None], 1)) + 0.5 + +And using an NLTK corpus. We train on only 10 sentences, since it is so slow: + + >>> from nltk.corpus import comtrans + >>> com_ibm1 = IBMModel1(comtrans.aligned_sents()[:10], 20) + >>> print(round(com_ibm1.translation_table['bitte']['Please'], 1)) + 0.2 + >>> print(round(com_ibm1.translation_table['Sitzungsperiode']['session'], 1)) + 1.0 + + +Evaluation +---------- +The evaluation metrics for alignments are usually not interested in the +contents of alignments but more often the comparison to a "gold standard" +alignment that has been been constructed by human experts. For this reason we +often want to work just with raw set operations against the alignment points. +This then gives us a very clean form for defining our evaluation metrics. + +.. Note:: + The AlignedSent class has no distinction of "possible" or "sure" + alignments. Thus all alignments are treated as "sure". + +Consider the following aligned sentence for evaluation: + + >>> my_als = AlignedSent(['Resumption', 'of', 'the', 'session'], + ... ['Reprise', 'de', 'la', 'session'], + ... Alignment([(0, 0), (3, 3), (1, 2), (1, 1), (1, 3)])) + +Precision +~~~~~~~~~ +``precision = |A∩P| / |A|`` + +**Precision** is probably the most well known evaluation metric and it is implemented +in `nltk.metrics.scores.precision`_. Since precision is simply interested in the +proportion of correct alignments, we calculate the ratio of the number of our +test alignments (*A*) that match a possible alignment (*P*), over the number of +test alignments provided. There is no penalty for missing a possible alignment +in our test alignments. An easy way to game this metric is to provide just one +test alignment that is in *P* [OCH2000]_. + +Here are some examples: + + >>> from nltk.metrics import precision + >>> als.alignment = Alignment([(0,0), (1,1), (2,2), (3,3)]) + >>> precision(Alignment([]), als.alignment) + 0.0 + >>> precision(Alignment([(0,0), (1,1), (2,2), (3,3)]), als.alignment) + 1.0 + >>> precision(Alignment([(0,0), (3,3)]), als.alignment) + 0.5 + >>> precision(Alignment.fromstring('0-0 3-3'), als.alignment) + 0.5 + >>> precision(Alignment([(0,0), (1,1), (2,2), (3,3), (1,2), (2,1)]), als.alignment) + 1.0 + >>> precision(als.alignment, my_als.alignment) + 0.6 + + +.. _nltk.metrics.scores.precision: + https://www.nltk.org/api/nltk.metrics.html#nltk.metrics.scores.precision + + +Recall +~~~~~~ +``recall = |A∩S| / |S|`` + +**Recall** is another well known evaluation metric that has a set based +implementation in NLTK as `nltk.metrics.scores.recall`_. 
Since recall is +simply interested in the proportion of found alignments, we calculate the +ratio of the number of our test alignments (*A*) that match a sure alignment +(*S*) over the number of sure alignments. There is no penalty for producing +a lot of test alignments. An easy way to game this metric is to include every +possible alignment in our test alignments, regardless if they are correct or +not [OCH2000]_. + +Here are some examples: + + >>> from nltk.metrics import recall + >>> print(recall(Alignment([]), als.alignment)) + None + >>> recall(Alignment([(0,0), (1,1), (2,2), (3,3)]), als.alignment) + 1.0 + >>> recall(Alignment.fromstring('0-0 3-3'), als.alignment) + 1.0 + >>> recall(Alignment([(0,0), (3,3)]), als.alignment) + 1.0 + >>> recall(Alignment([(0,0), (1,1), (2,2), (3,3), (1,2), (2,1)]), als.alignment) + 0.66666... + >>> recall(als.alignment, my_als.alignment) + 0.75 + + +.. _nltk.metrics.scores.recall: + https://www.nltk.org/api/nltk.metrics.html#nltk.metrics.scores.recall + + +Alignment Error Rate (AER) +~~~~~~~~~~~~~~~~~~~~~~~~~~ +``AER = 1 - (|A∩S| + |A∩P|) / (|A| + |S|)`` + +**Alignment Error Rate** is commonly used metric for assessing sentence +alignments. It combines precision and recall metrics together such that a +perfect alignment must have all of the sure alignments and may have some +possible alignments [MIHALCEA2003]_ [KOEHN2010]_. + +.. Note:: + [KOEHN2010]_ defines the AER as ``AER = (|A∩S| + |A∩P|) / (|A| + |S|)`` + in his book, but corrects it to the above in his online errata. This is + in line with [MIHALCEA2003]_. + +Here are some examples: + + >>> from nltk.translate import alignment_error_rate + >>> alignment_error_rate(Alignment([]), als.alignment) + 1.0 + >>> alignment_error_rate(Alignment([(0,0), (1,1), (2,2), (3,3)]), als.alignment) + 0.0 + >>> alignment_error_rate(als.alignment, my_als.alignment) + 0.333333... + >>> alignment_error_rate(als.alignment, my_als.alignment, + ... als.alignment | Alignment([(1,2), (2,1)])) + 0.222222... + + +.. [OCH2000] Och, F. and Ney, H. (2000) + *Statistical Machine Translation*, EAMT Workshop + +.. [MIHALCEA2003] Mihalcea, R. and Pedersen, T. (2003) + *An evaluation exercise for word alignment*, HLT-NAACL 2003 + +.. [KOEHN2010] Koehn, P. (2010) + *Statistical Machine Translation*, Cambridge University Press diff --git a/lib/python3.10/site-packages/nltk/test/tree.doctest b/lib/python3.10/site-packages/nltk/test/tree.doctest new file mode 100644 index 0000000000000000000000000000000000000000..7b6748bd4abdde316b92b38789c777b2209c3da0 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/tree.doctest @@ -0,0 +1,1223 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +=============================== + Unit tests for nltk.tree.Tree +=============================== + + >>> from nltk.tree import * + +Some trees to run tests on: + + >>> dp1 = Tree('dp', [Tree('d', ['the']), Tree('np', ['dog'])]) + >>> dp2 = Tree('dp', [Tree('d', ['the']), Tree('np', ['cat'])]) + >>> vp = Tree('vp', [Tree('v', ['chased']), dp2]) + >>> tree = Tree('s', [dp1, vp]) + >>> print(tree) + (s (dp (d the) (np dog)) (vp (v chased) (dp (d the) (np cat)))) + +The node label is accessed using the `label()` method: + + >>> dp1.label(), dp2.label(), vp.label(), tree.label() + ('dp', 'dp', 'vp', 's') + + >>> print(tree[1,1,1,0]) + cat + +The `treepositions` method returns a list of the tree positions of +subtrees and leaves in a tree. 
By default, it gives the position of +every tree, subtree, and leaf, in prefix order: + + >>> print(tree.treepositions()) + [(), (0,), (0, 0), (0, 0, 0), (0, 1), (0, 1, 0), (1,), (1, 0), (1, 0, 0), (1, 1), (1, 1, 0), (1, 1, 0, 0), (1, 1, 1), (1, 1, 1, 0)] + +In addition to `str` and `repr`, several methods exist to convert a +tree object to one of several standard tree encodings: + + >>> print(tree.pformat_latex_qtree()) + \Tree [.s + [.dp [.d the ] [.np dog ] ] + [.vp [.v chased ] [.dp [.d the ] [.np cat ] ] ] ] + +There is also a fancy ASCII art representation: + + >>> tree.pretty_print() + s + ________|_____ + | vp + | _____|___ + dp | dp + ___|___ | ___|___ + d np v d np + | | | | | + the dog chased the cat + + >>> tree.pretty_print(unicodelines=True, nodedist=4) + s + ┌──────────────┴────────┐ + │ vp + │ ┌────────┴──────┐ + dp │ dp + ┌──────┴──────┐ │ ┌──────┴──────┐ + d np v d np + │ │ │ │ │ + the dog chased the cat + +Trees can be initialized from treebank strings: + + >>> tree2 = Tree.fromstring('(S (NP I) (VP (V enjoyed) (NP my cookie)))') + >>> print(tree2) + (S (NP I) (VP (V enjoyed) (NP my cookie))) + +Trees can be compared for equality: + + >>> tree == Tree.fromstring(str(tree)) + True + >>> tree2 == Tree.fromstring(str(tree2)) + True + >>> tree == tree2 + False + >>> tree == Tree.fromstring(str(tree2)) + False + >>> tree2 == Tree.fromstring(str(tree)) + False + + >>> tree != Tree.fromstring(str(tree)) + False + >>> tree2 != Tree.fromstring(str(tree2)) + False + >>> tree != tree2 + True + >>> tree != Tree.fromstring(str(tree2)) + True + >>> tree2 != Tree.fromstring(str(tree)) + True + + >>> tree < tree2 or tree > tree2 + True + +Tree Parsing +============ + +The class method `Tree.fromstring()` can be used to parse trees, and it +provides some additional options. + + >>> tree = Tree.fromstring('(S (NP I) (VP (V enjoyed) (NP my cookie)))') + >>> print(tree) + (S (NP I) (VP (V enjoyed) (NP my cookie))) + +When called on a subclass of `Tree`, it will create trees of that +type: + + >>> tree = ImmutableTree.fromstring('(VP (V enjoyed) (NP my cookie))') + >>> print(tree) + (VP (V enjoyed) (NP my cookie)) + >>> print(type(tree)) + + >>> tree[1] = 'x' + Traceback (most recent call last): + . . . + ValueError: ImmutableTree may not be modified + >>> del tree[0] + Traceback (most recent call last): + . . . + ValueError: ImmutableTree may not be modified + +The ``brackets`` parameter can be used to specify two characters that +should be used as brackets: + + >>> print(Tree.fromstring('[S [NP I] [VP [V enjoyed] [NP my cookie]]]', + ... brackets='[]')) + (S (NP I) (VP (V enjoyed) (NP my cookie))) + >>> print(Tree.fromstring(' >>', + ... brackets='<>')) + (S (NP I) (VP (V enjoyed) (NP my cookie))) + +If ``brackets`` is not a string, or is not exactly two characters, +then `Tree.fromstring` raises an exception: + + >>> Tree.fromstring(' >', brackets='') + Traceback (most recent call last): + . . . + TypeError: brackets must be a length-2 string + >>> Tree.fromstring(' >', brackets='<<>>') + Traceback (most recent call last): + . . . + TypeError: brackets must be a length-2 string + >>> Tree.fromstring(' >', brackets=12) + Traceback (most recent call last): + . . . + TypeError: brackets must be a length-2 string + >>> Tree.fromstring('<>', brackets=('<<','>>')) + Traceback (most recent call last): + . . . + TypeError: brackets must be a length-2 string + +(We may add support for multi-character brackets in the future, in +which case the ``brackets=('<<','>>')`` example would start working.) 
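+
+A workaround for multi-character brackets (an illustrative sketch only, not
+part of the ``Tree`` API): provided the two-character brackets never occur
+inside a node label or a leaf, the string can be rewritten to use
+single-character brackets before calling `Tree.fromstring`:
+
+    >>> s = '<<S <<NP I>> <<VP <<V enjoyed>> <<NP my cookie>>>>>>'
+    >>> print(Tree.fromstring(s.replace('<<', '(').replace('>>', ')')))
+    (S (NP I) (VP (V enjoyed) (NP my cookie)))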
+ +Whitespace brackets are not permitted: + + >>> Tree.fromstring('(NP my cookie\n', brackets='(\n') + Traceback (most recent call last): + . . . + TypeError: whitespace brackets not allowed + +If an invalid tree is given to Tree.fromstring, then it raises a +ValueError, with a description of the problem: + + >>> Tree.fromstring('(NP my cookie) (NP my milk)') + Traceback (most recent call last): + . . . + ValueError: Tree.fromstring(): expected 'end-of-string' but got '(NP' + at index 15. + "...y cookie) (NP my mil..." + ^ + >>> Tree.fromstring(')NP my cookie(') + Traceback (most recent call last): + . . . + ValueError: Tree.fromstring(): expected '(' but got ')' + at index 0. + ")NP my coo..." + ^ + >>> Tree.fromstring('(NP my cookie))') + Traceback (most recent call last): + . . . + ValueError: Tree.fromstring(): expected 'end-of-string' but got ')' + at index 14. + "...my cookie))" + ^ + >>> Tree.fromstring('my cookie)') + Traceback (most recent call last): + . . . + ValueError: Tree.fromstring(): expected '(' but got 'my' + at index 0. + "my cookie)" + ^ + >>> Tree.fromstring('(NP my cookie') + Traceback (most recent call last): + . . . + ValueError: Tree.fromstring(): expected ')' but got 'end-of-string' + at index 13. + "... my cookie" + ^ + >>> Tree.fromstring('') + Traceback (most recent call last): + . . . + ValueError: Tree.fromstring(): expected '(' but got 'end-of-string' + at index 0. + "" + ^ + +Trees with no children are supported: + + >>> print(Tree.fromstring('(S)')) + (S ) + >>> print(Tree.fromstring('(X (Y) (Z))')) + (X (Y ) (Z )) + +Trees with an empty node label and no children are supported: + + >>> print(Tree.fromstring('()')) + ( ) + >>> print(Tree.fromstring('(X () ())')) + (X ( ) ( )) + +Trees with an empty node label and children are supported, but only if the +first child is not a leaf (otherwise, it will be treated as the node label). + + >>> print(Tree.fromstring('((A) (B) (C))')) + ( (A ) (B ) (C )) + >>> print(Tree.fromstring('((A) leaf)')) + ( (A ) leaf) + >>> print(Tree.fromstring('(((())))')) + ( ( ( ( )))) + +The optional arguments `read_node` and `read_leaf` may be used to +transform the string values of nodes or leaves. + + >>> print(Tree.fromstring('(A b (C d e) (F (G h i)))', + ... read_node=lambda s: '<%s>' % s, + ... read_leaf=lambda s: '"%s"' % s)) + (
"b" ( "d" "e") ( ( "h" "i"))) + +These transformation functions are typically used when the node or +leaf labels should be parsed to a non-string value (such as a feature +structure). If node and leaf labels need to be able to include +whitespace, then you must also use the optional `node_pattern` and +`leaf_pattern` arguments. + + >>> from nltk.featstruct import FeatStruct + >>> tree = Tree.fromstring('([cat=NP] [lex=the] [lex=dog])', + ... read_node=FeatStruct, read_leaf=FeatStruct) + >>> tree.set_label(tree.label().unify(FeatStruct('[num=singular]'))) + >>> print(tree) + ([cat='NP', num='singular'] [lex='the'] [lex='dog']) + +The optional argument ``remove_empty_top_bracketing`` can be used to +remove any top-level empty bracketing that occurs. + + >>> print(Tree.fromstring('((S (NP I) (VP (V enjoyed) (NP my cookie))))', + ... remove_empty_top_bracketing=True)) + (S (NP I) (VP (V enjoyed) (NP my cookie))) + +It will not remove a top-level empty bracketing with multiple children: + + >>> print(Tree.fromstring('((A a) (B b))')) + ( (A a) (B b)) + + +Tree.fromlist() +--------------- +The class method `Tree.fromlist()` can be used to parse trees +that are expressed as nested lists, such as those produced by +the tree() function from the wordnet module. + + >>> from nltk.corpus import wordnet as wn + >>> t=Tree.fromlist(wn.synset('dog.n.01').tree(lambda s:s.hypernyms())) + >>> print(t.height()) + 14 + >>> print(t.leaves()) + ["Synset('entity.n.01')", "Synset('entity.n.01')"] + >>> t.pretty_print() + Synset('dog.n.01') + _________________|__________________ + Synset('canine.n. | + 02') | + | | + Synset('carnivor | + e.n.01') | + | | + Synset('placenta | + l.n.01') | + | | + Synset('mammal.n. | + 01') | + | | + Synset('vertebra | + te.n.01') | + | | + Synset('chordate. Synset('domestic + n.01') _animal.n.01') + | | + Synset('animal.n. Synset('animal.n. + 01') 01') + | | + Synset('organism. Synset('organism. + n.01') n.01') + | | + Synset('living_t Synset('living_t + hing.n.01') hing.n.01') + | | + Synset('whole.n. Synset('whole.n. + 02') 02') + | | + Synset('object.n. Synset('object.n. + 01') 01') + | | + Synset('physical Synset('physical + _entity.n.01') _entity.n.01') + | | + Synset('entity.n. Synset('entity.n. + 01') 01') + + + +Parented Trees +============== +`ParentedTree` is a subclass of `Tree` that automatically maintains +parent pointers for single-parented trees. Parented trees can be +created directly from a node label and a list of children: + + >>> ptree = ( + ... ParentedTree('VP', [ + ... ParentedTree('VERB', ['saw']), + ... ParentedTree('NP', [ + ... ParentedTree('DET', ['the']), + ... ParentedTree('NOUN', ['dog'])])])) + >>> print(ptree) + (VP (VERB saw) (NP (DET the) (NOUN dog))) + +Parented trees can be created from strings using the classmethod +`ParentedTree.fromstring`: + + >>> ptree = ParentedTree.fromstring('(VP (VERB saw) (NP (DET the) (NOUN dog)))') + >>> print(ptree) + (VP (VERB saw) (NP (DET the) (NOUN dog))) + >>> print(type(ptree)) + + +Parented trees can also be created by using the classmethod +`ParentedTree.convert` to convert another type of tree to a parented +tree: + + >>> tree = Tree.fromstring('(VP (VERB saw) (NP (DET the) (NOUN dog)))') + >>> ptree = ParentedTree.convert(tree) + >>> print(ptree) + (VP (VERB saw) (NP (DET the) (NOUN dog))) + >>> print(type(ptree)) + + +.. clean-up: + + >>> del tree + +`ParentedTree`\ s should never be used in the same tree as `Tree`\ s +or `MultiParentedTree`\ s. 
Mixing tree implementations may result in +incorrect parent pointers and in `TypeError` exceptions: + + >>> # Inserting a Tree in a ParentedTree gives an exception: + >>> ParentedTree('NP', [ + ... Tree('DET', ['the']), Tree('NOUN', ['dog'])]) + Traceback (most recent call last): + . . . + TypeError: Can not insert a non-ParentedTree into a ParentedTree + + >>> # inserting a ParentedTree in a Tree gives incorrect parent pointers: + >>> broken_tree = Tree('NP', [ + ... ParentedTree('DET', ['the']), ParentedTree('NOUN', ['dog'])]) + >>> print(broken_tree[0].parent()) + None + +Parented Tree Methods +------------------------ +In addition to all the methods defined by the `Tree` class, the +`ParentedTree` class adds six new methods whose values are +automatically updated whenever a parented tree is modified: `parent()`, +`parent_index()`, `left_sibling()`, `right_sibling()`, `root()`, and +`treeposition()`. + +The `parent()` method contains a `ParentedTree`\ 's parent, if it has +one; and ``None`` otherwise. `ParentedTree`\ s that do not have +parents are known as "root trees." + + >>> for subtree in ptree.subtrees(): + ... print(subtree) + ... print(' Parent = %s' % subtree.parent()) + (VP (VERB saw) (NP (DET the) (NOUN dog))) + Parent = None + (VERB saw) + Parent = (VP (VERB saw) (NP (DET the) (NOUN dog))) + (NP (DET the) (NOUN dog)) + Parent = (VP (VERB saw) (NP (DET the) (NOUN dog))) + (DET the) + Parent = (NP (DET the) (NOUN dog)) + (NOUN dog) + Parent = (NP (DET the) (NOUN dog)) + +The `parent_index()` method stores the index of a tree in its parent's +child list. If a tree does not have a parent, then its `parent_index` +is ``None``. + + >>> for subtree in ptree.subtrees(): + ... print(subtree) + ... print(' Parent Index = %s' % subtree.parent_index()) + ... assert (subtree.parent() is None or + ... subtree.parent()[subtree.parent_index()] is subtree) + (VP (VERB saw) (NP (DET the) (NOUN dog))) + Parent Index = None + (VERB saw) + Parent Index = 0 + (NP (DET the) (NOUN dog)) + Parent Index = 1 + (DET the) + Parent Index = 0 + (NOUN dog) + Parent Index = 1 + +Note that ``ptree.parent().index(ptree)`` is *not* equivalent to +``ptree.parent_index()``. In particular, ``ptree.parent().index(ptree)`` +will return the index of the first child of ``ptree.parent()`` that is +equal to ``ptree`` (using ``==``); and that child may not be +``ptree``: + + >>> on_and_on = ParentedTree('CONJP', [ + ... ParentedTree('PREP', ['on']), + ... ParentedTree('COJN', ['and']), + ... ParentedTree('PREP', ['on'])]) + >>> second_on = on_and_on[2] + >>> print(second_on.parent_index()) + 2 + >>> print(second_on.parent().index(second_on)) + 0 + +The methods `left_sibling()` and `right_sibling()` can be used to get a +parented tree's siblings. If a tree does not have a left or right +sibling, then the corresponding method's value is ``None``: + + >>> for subtree in ptree.subtrees(): + ... print(subtree) + ... print(' Left Sibling = %s' % subtree.left_sibling()) + ... print(' Right Sibling = %s' % subtree.right_sibling()) + (VP (VERB saw) (NP (DET the) (NOUN dog))) + Left Sibling = None + Right Sibling = None + (VERB saw) + Left Sibling = None + Right Sibling = (NP (DET the) (NOUN dog)) + (NP (DET the) (NOUN dog)) + Left Sibling = (VERB saw) + Right Sibling = None + (DET the) + Left Sibling = None + Right Sibling = (NOUN dog) + (NOUN dog) + Left Sibling = (DET the) + Right Sibling = None + +A parented tree's root tree can be accessed using the `root()` +method. 
This method follows the tree's parent pointers until it +finds a tree without a parent. If a tree does not have a parent, then +it is its own root: + + >>> for subtree in ptree.subtrees(): + ... print(subtree) + ... print(' Root = %s' % subtree.root()) + (VP (VERB saw) (NP (DET the) (NOUN dog))) + Root = (VP (VERB saw) (NP (DET the) (NOUN dog))) + (VERB saw) + Root = (VP (VERB saw) (NP (DET the) (NOUN dog))) + (NP (DET the) (NOUN dog)) + Root = (VP (VERB saw) (NP (DET the) (NOUN dog))) + (DET the) + Root = (VP (VERB saw) (NP (DET the) (NOUN dog))) + (NOUN dog) + Root = (VP (VERB saw) (NP (DET the) (NOUN dog))) + +The `treeposition()` method can be used to find a tree's treeposition +relative to its root: + + >>> for subtree in ptree.subtrees(): + ... print(subtree) + ... print(' Tree Position = %s' % (subtree.treeposition(),)) + ... assert subtree.root()[subtree.treeposition()] is subtree + (VP (VERB saw) (NP (DET the) (NOUN dog))) + Tree Position = () + (VERB saw) + Tree Position = (0,) + (NP (DET the) (NOUN dog)) + Tree Position = (1,) + (DET the) + Tree Position = (1, 0) + (NOUN dog) + Tree Position = (1, 1) + +Whenever a parented tree is modified, all of the methods described +above (`parent()`, `parent_index()`, `left_sibling()`, `right_sibling()`, +`root()`, and `treeposition()`) are automatically updated. For example, +if we replace ``ptree``\ 's subtree for the word "dog" with a new +subtree for "cat," the method values for both the "dog" subtree and the +"cat" subtree get automatically updated: + + >>> # Replace the dog with a cat + >>> dog = ptree[1,1] + >>> cat = ParentedTree('NOUN', ['cat']) + >>> ptree[1,1] = cat + + >>> # the noun phrase is no longer the dog's parent: + >>> print(dog.parent(), dog.parent_index(), dog.left_sibling()) + None None None + >>> # dog is now its own root. + >>> print(dog.root()) + (NOUN dog) + >>> print(dog.treeposition()) + () + + >>> # the cat's parent is now the noun phrase: + >>> print(cat.parent()) + (NP (DET the) (NOUN cat)) + >>> print(cat.parent_index()) + 1 + >>> print(cat.left_sibling()) + (DET the) + >>> print(cat.root()) + (VP (VERB saw) (NP (DET the) (NOUN cat))) + >>> print(cat.treeposition()) + (1, 1) + +ParentedTree Regression Tests +----------------------------- +Keep track of all trees that we create (including subtrees) using this +variable: + + >>> all_ptrees = [] + +Define a helper function to create new parented trees: + + >>> def make_ptree(s): + ... ptree = ParentedTree.convert(Tree.fromstring(s)) + ... all_ptrees.extend(t for t in ptree.subtrees() + ... if isinstance(t, Tree)) + ... return ptree + +Define a test function that examines every subtree in all_ptrees; and +checks that all six of its methods are defined correctly. If any +ptrees are passed as arguments, then they are printed. + + >>> def pcheck(*print_ptrees): + ... for ptree in all_ptrees: + ... # Check ptree's methods. + ... if ptree.parent() is not None: + ... i = ptree.parent_index() + ... assert ptree.parent()[i] is ptree + ... if i > 0: + ... assert ptree.left_sibling() is ptree.parent()[i-1] + ... if i < (len(ptree.parent())-1): + ... assert ptree.right_sibling() is ptree.parent()[i+1] + ... assert len(ptree.treeposition()) > 0 + ... assert (ptree.treeposition() == + ... ptree.parent().treeposition() + (ptree.parent_index(),)) + ... assert ptree.root() is not ptree + ... assert ptree.root() is not None + ... assert ptree.root() is ptree.parent().root() + ... assert ptree.root()[ptree.treeposition()] is ptree + ... else: + ... 
assert ptree.parent_index() is None + ... assert ptree.left_sibling() is None + ... assert ptree.right_sibling() is None + ... assert ptree.root() is ptree + ... assert ptree.treeposition() == () + ... # Check ptree's children's methods: + ... for i, child in enumerate(ptree): + ... if isinstance(child, Tree): + ... # pcheck parent() & parent_index() methods + ... assert child.parent() is ptree + ... assert child.parent_index() == i + ... # pcheck sibling methods + ... if i == 0: + ... assert child.left_sibling() is None + ... else: + ... assert child.left_sibling() is ptree[i-1] + ... if i == len(ptree)-1: + ... assert child.right_sibling() is None + ... else: + ... assert child.right_sibling() is ptree[i+1] + ... if print_ptrees: + ... print('ok!', end=' ') + ... for ptree in print_ptrees: print(ptree) + ... else: + ... print('ok!') + +Run our test function on a variety of newly-created trees: + + >>> pcheck(make_ptree('(A)')) + ok! (A ) + >>> pcheck(make_ptree('(A (B (C (D) (E f)) g) h)')) + ok! (A (B (C (D ) (E f)) g) h) + >>> pcheck(make_ptree('(A (B) (C c) (D d d) (E e e e))')) + ok! (A (B ) (C c) (D d d) (E e e e)) + >>> pcheck(make_ptree('(A (B) (C (c)) (D (d) (d)) (E (e) (e) (e)))')) + ok! (A (B ) (C (c )) (D (d ) (d )) (E (e ) (e ) (e ))) + +Run our test function after performing various tree-modification +operations: + +**__delitem__()** + + >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> e = ptree[0,0,1] + >>> del ptree[0,0,1]; pcheck(ptree); pcheck(e) + ok! (A (B (C (D ) (Q p)) g) h) + ok! (E f) + >>> del ptree[0,0,0]; pcheck(ptree) + ok! (A (B (C (Q p)) g) h) + >>> del ptree[0,1]; pcheck(ptree) + ok! (A (B (C (Q p))) h) + >>> del ptree[-1]; pcheck(ptree) + ok! (A (B (C (Q p)))) + >>> del ptree[-100] + Traceback (most recent call last): + . . . + IndexError: index out of range + >>> del ptree[()] + Traceback (most recent call last): + . . . + IndexError: The tree position () may not be deleted. + + >>> # With slices: + >>> ptree = make_ptree('(A (B c) (D e) f g (H i) j (K l))') + >>> b = ptree[0] + >>> del ptree[0:0]; pcheck(ptree) + ok! (A (B c) (D e) f g (H i) j (K l)) + >>> del ptree[:1]; pcheck(ptree); pcheck(b) + ok! (A (D e) f g (H i) j (K l)) + ok! (B c) + >>> del ptree[-2:]; pcheck(ptree) + ok! (A (D e) f g (H i)) + >>> del ptree[1:3]; pcheck(ptree) + ok! (A (D e) (H i)) + >>> ptree = make_ptree('(A (B c) (D e) f g (H i) j (K l))') + >>> del ptree[5:1000]; pcheck(ptree) + ok! (A (B c) (D e) f g (H i)) + >>> del ptree[-2:1000]; pcheck(ptree) + ok! (A (B c) (D e) f) + >>> del ptree[-100:1]; pcheck(ptree) + ok! (A (D e) f) + >>> ptree = make_ptree('(A (B c) (D e) f g (H i) j (K l))') + >>> del ptree[1:-2:2]; pcheck(ptree) + ok! (A (B c) f (H i) j (K l)) + +**__setitem__()** + + >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> d, e, q = ptree[0,0] + >>> ptree[0,0,0] = 'x'; pcheck(ptree); pcheck(d) + ok! (A (B (C x (E f) (Q p)) g) h) + ok! (D ) + >>> ptree[0,0,1] = make_ptree('(X (Y z))'); pcheck(ptree); pcheck(e) + ok! (A (B (C x (X (Y z)) (Q p)) g) h) + ok! (E f) + >>> ptree[1] = d; pcheck(ptree) + ok! (A (B (C x (X (Y z)) (Q p)) g) (D )) + >>> ptree[-1] = 'x'; pcheck(ptree) + ok! (A (B (C x (X (Y z)) (Q p)) g) x) + >>> ptree[-100] = 'y' + Traceback (most recent call last): + . . . + IndexError: index out of range + >>> ptree[()] = make_ptree('(X y)') + Traceback (most recent call last): + . . . + IndexError: The tree position () may not be assigned to. 
+ + >>> # With slices: + >>> ptree = make_ptree('(A (B c) (D e) f g (H i) j (K l))') + >>> b = ptree[0] + >>> ptree[0:0] = ('x', make_ptree('(Y)')); pcheck(ptree) + ok! (A x (Y ) (B c) (D e) f g (H i) j (K l)) + >>> ptree[2:6] = (); pcheck(ptree); pcheck(b) + ok! (A x (Y ) (H i) j (K l)) + ok! (B c) + >>> ptree[-2:] = ('z', 'p'); pcheck(ptree) + ok! (A x (Y ) (H i) z p) + >>> ptree[1:3] = [make_ptree('(X)') for x in range(10)]; pcheck(ptree) + ok! (A x (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) z p) + >>> ptree[5:1000] = []; pcheck(ptree) + ok! (A x (X ) (X ) (X ) (X )) + >>> ptree[-2:1000] = ['n']; pcheck(ptree) + ok! (A x (X ) (X ) n) + >>> ptree[-100:1] = [make_ptree('(U v)')]; pcheck(ptree) + ok! (A (U v) (X ) (X ) n) + >>> ptree[-1:] = (make_ptree('(X)') for x in range(3)); pcheck(ptree) + ok! (A (U v) (X ) (X ) (X ) (X ) (X )) + >>> ptree[1:-2:2] = ['x', 'y']; pcheck(ptree) + ok! (A (U v) x (X ) y (X ) (X )) + +**append()** + + >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> ptree.append('x'); pcheck(ptree) + ok! (A (B (C (D ) (E f) (Q p)) g) h x) + >>> ptree.append(make_ptree('(X (Y z))')); pcheck(ptree) + ok! (A (B (C (D ) (E f) (Q p)) g) h x (X (Y z))) + +**extend()** + + >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> ptree.extend(['x', 'y', make_ptree('(X (Y z))')]); pcheck(ptree) + ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z))) + >>> ptree.extend([]); pcheck(ptree) + ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z))) + >>> ptree.extend(make_ptree('(X)') for x in range(3)); pcheck(ptree) + ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z)) (X ) (X ) (X )) + +**insert()** + + >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> ptree.insert(0, make_ptree('(X (Y z))')); pcheck(ptree) + ok! (A (X (Y z)) (B (C (D ) (E f) (Q p)) g) h) + >>> ptree.insert(-1, make_ptree('(X (Y z))')); pcheck(ptree) + ok! (A (X (Y z)) (B (C (D ) (E f) (Q p)) g) (X (Y z)) h) + >>> ptree.insert(-4, make_ptree('(X (Y z))')); pcheck(ptree) + ok! (A (X (Y z)) (X (Y z)) (B (C (D ) (E f) (Q p)) g) (X (Y z)) h) + >>> # Note: as with ``list``, inserting at a negative index that + >>> # gives a position before the start of the list does *not* + >>> # raise an IndexError exception; it just inserts at 0. + >>> ptree.insert(-400, make_ptree('(X (Y z))')); pcheck(ptree) + ok! (A + (X (Y z)) + (X (Y z)) + (X (Y z)) + (B (C (D ) (E f) (Q p)) g) + (X (Y z)) + h) + +**pop()** + + >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> ptree[0,0].pop(1); pcheck(ptree) + ParentedTree('E', ['f']) + ok! (A (B (C (D ) (Q p)) g) h) + >>> ptree[0].pop(-1); pcheck(ptree) + 'g' + ok! (A (B (C (D ) (Q p))) h) + >>> ptree.pop(); pcheck(ptree) + 'h' + ok! (A (B (C (D ) (Q p)))) + >>> ptree.pop(-100) + Traceback (most recent call last): + . . . + IndexError: index out of range + +**remove()** + + >>> ptree = make_ptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> e = ptree[0,0,1] + >>> ptree[0,0].remove(ptree[0,0,1]); pcheck(ptree); pcheck(e) + ok! (A (B (C (D ) (Q p)) g) h) + ok! (E f) + >>> ptree[0,0].remove(make_ptree('(Q p)')); pcheck(ptree) + ok! (A (B (C (D )) g) h) + >>> ptree[0,0].remove(make_ptree('(Q p)')) + Traceback (most recent call last): + . . . + ValueError: ParentedTree('Q', ['p']) is not in list + >>> ptree.remove('h'); pcheck(ptree) + ok! (A (B (C (D )) g)) + >>> ptree.remove('h'); + Traceback (most recent call last): + . . . 
+ ValueError: 'h' is not in list + >>> # remove() removes the first subtree that is equal (==) to the + >>> # given tree, which may not be the identical tree we give it: + >>> ptree = make_ptree('(A (X x) (Y y) (X x))') + >>> x1, y, x2 = ptree + >>> ptree.remove(ptree[-1]); pcheck(ptree) + ok! (A (Y y) (X x)) + >>> print(x1.parent()); pcheck(x1) + None + ok! (X x) + >>> print(x2.parent()) + (A (Y y) (X x)) + +Test that a tree can not be given multiple parents: + + >>> ptree = make_ptree('(A (X x) (Y y) (Z z))') + >>> ptree[0] = ptree[1] + Traceback (most recent call last): + . . . + ValueError: Can not insert a subtree that already has a parent. + >>> pcheck() + ok! + +[more to be written] + +Shallow copying can be tricky for Tree and several of its subclasses. +For shallow copies of Tree, only the root node is reconstructed, while +all the children are shared between the two trees. Modify the children +of one tree - and the shallowly copied tree will also update. + + >>> from nltk.tree import Tree, ParentedTree, MultiParentedTree + >>> tree = Tree.fromstring("(TOP (S (NP (NNP Bell,)) (NP (NP (DT a) (NN company)) (SBAR (WHNP (WDT which)) (S (VP (VBZ is) (VP (VBN based) (PP (IN in) (NP (NNP LA,)))))))) (VP (VBZ makes) (CC and) (VBZ distributes) (NP (NN computer))) (. products.)))") + >>> copy_tree = tree.copy(deep=False) + >>> tree == copy_tree # Ensure identical labels and nodes + True + >>> id(copy_tree[0]) == id(tree[0]) # Ensure shallow copy - the children are the same objects in memory + True + +For ParentedTree objects, this behaviour is not possible. With a shallow +copy, the children of the root node would be reused for both the original +and the shallow copy. For this to be possible, some children would need +to have multiple parents. As this is forbidden for ParentedTree objects, +attempting to make a shallow copy will cause a warning, and a deep copy +is made instead. + + >>> ptree = ParentedTree.fromstring("(TOP (S (NP (NNP Bell,)) (NP (NP (DT a) (NN company)) (SBAR (WHNP (WDT which)) (S (VP (VBZ is) (VP (VBN based) (PP (IN in) (NP (NNP LA,)))))))) (VP (VBZ makes) (CC and) (VBZ distributes) (NP (NN computer))) (. products.)))") + >>> copy_ptree = ptree.copy(deep=False) + >>> copy_ptree == ptree # Ensure identical labels and nodes + True + >>> id(copy_ptree[0]) != id(ptree[0]) # Shallow copying isn't supported - it defaults to deep copy. + True + +For MultiParentedTree objects, the issue of only allowing one parent that +can be seen for ParentedTree objects is no more. Shallow copying a +MultiParentedTree gives the children of the root node two parents: +the original and the newly copied root. + + >>> mptree = MultiParentedTree.fromstring("(TOP (S (NP (NNP Bell,)) (NP (NP (DT a) (NN company)) (SBAR (WHNP (WDT which)) (S (VP (VBZ is) (VP (VBN based) (PP (IN in) (NP (NNP LA,)))))))) (VP (VBZ makes) (CC and) (VBZ distributes) (NP (NN computer))) (. products.)))") + >>> len(mptree[0].parents()) + 1 + >>> copy_mptree = mptree.copy(deep=False) + >>> copy_mptree == mptree # Ensure identical labels and nodes + True + >>> len(mptree[0].parents()) + 2 + >>> len(copy_mptree[0].parents()) + 2 + +Shallow copying a MultiParentedTree is similar to creating a second root +which is identically labeled as the root on which the copy method was called. + + +ImmutableParentedTree Regression Tests +-------------------------------------- + + >>> iptree = ImmutableParentedTree.convert(ptree) + >>> type(iptree) + + >>> del iptree[0] + Traceback (most recent call last): + . . . 
+ ValueError: ImmutableParentedTree may not be modified + >>> iptree.set_label('newnode') + Traceback (most recent call last): + . . . + ValueError: ImmutableParentedTree may not be modified + + +MultiParentedTree Regression Tests +---------------------------------- +Keep track of all trees that we create (including subtrees) using this +variable: + + >>> all_mptrees = [] + +Define a helper function to create new parented trees: + + >>> def make_mptree(s): + ... mptree = MultiParentedTree.convert(Tree.fromstring(s)) + ... all_mptrees.extend(t for t in mptree.subtrees() + ... if isinstance(t, Tree)) + ... return mptree + +Define a test function that examines every subtree in all_mptrees; and +checks that all six of its methods are defined correctly. If any +mptrees are passed as arguments, then they are printed. + + >>> def mpcheck(*print_mptrees): + ... def has(seq, val): # uses identity comparison + ... for item in seq: + ... if item is val: return True + ... return False + ... for mptree in all_mptrees: + ... # Check mptree's methods. + ... if len(mptree.parents()) == 0: + ... assert len(mptree.left_siblings()) == 0 + ... assert len(mptree.right_siblings()) == 0 + ... assert len(mptree.roots()) == 1 + ... assert mptree.roots()[0] is mptree + ... assert mptree.treepositions(mptree) == [()] + ... left_siblings = right_siblings = () + ... roots = {id(mptree): 1} + ... else: + ... roots = dict((id(r), 0) for r in mptree.roots()) + ... left_siblings = mptree.left_siblings() + ... right_siblings = mptree.right_siblings() + ... for parent in mptree.parents(): + ... for i in mptree.parent_indices(parent): + ... assert parent[i] is mptree + ... # check left siblings + ... if i > 0: + ... for j in range(len(left_siblings)): + ... if left_siblings[j] is parent[i-1]: + ... del left_siblings[j] + ... break + ... else: + ... assert 0, 'sibling not found!' + ... # check ight siblings + ... if i < (len(parent)-1): + ... for j in range(len(right_siblings)): + ... if right_siblings[j] is parent[i+1]: + ... del right_siblings[j] + ... break + ... else: + ... assert 0, 'sibling not found!' + ... # check roots + ... for root in parent.roots(): + ... assert id(root) in roots, 'missing root' + ... roots[id(root)] += 1 + ... # check that we don't have any unexplained values + ... assert len(left_siblings)==0, 'unexpected sibling' + ... assert len(right_siblings)==0, 'unexpected sibling' + ... for v in roots.values(): assert v>0, roots #'unexpected root' + ... # check treepositions + ... for root in mptree.roots(): + ... for treepos in mptree.treepositions(root): + ... assert root[treepos] is mptree + ... # Check mptree's children's methods: + ... for i, child in enumerate(mptree): + ... if isinstance(child, Tree): + ... # mpcheck parent() & parent_index() methods + ... assert has(child.parents(), mptree) + ... assert i in child.parent_indices(mptree) + ... # mpcheck sibling methods + ... if i > 0: + ... assert has(child.left_siblings(), mptree[i-1]) + ... if i < len(mptree)-1: + ... assert has(child.right_siblings(), mptree[i+1]) + ... if print_mptrees: + ... print('ok!', end=' ') + ... for mptree in print_mptrees: print(mptree) + ... else: + ... print('ok!') + +Run our test function on a variety of newly-created trees: + + >>> mpcheck(make_mptree('(A)')) + ok! (A ) + >>> mpcheck(make_mptree('(A (B (C (D) (E f)) g) h)')) + ok! (A (B (C (D ) (E f)) g) h) + >>> mpcheck(make_mptree('(A (B) (C c) (D d d) (E e e e))')) + ok! 
(A (B ) (C c) (D d d) (E e e e)) + >>> mpcheck(make_mptree('(A (B) (C (c)) (D (d) (d)) (E (e) (e) (e)))')) + ok! (A (B ) (C (c )) (D (d ) (d )) (E (e ) (e ) (e ))) + >>> subtree = make_mptree('(A (B (C (D) (E f)) g) h)') + +Including some trees that contain multiple parents: + + >>> mpcheck(MultiParentedTree('Z', [subtree, subtree])) + ok! (Z (A (B (C (D ) (E f)) g) h) (A (B (C (D ) (E f)) g) h)) + +Run our test function after performing various tree-modification +operations (n.b., these are the same tests that we ran for +`ParentedTree`, above; thus, none of these trees actually *uses* +multiple parents.) + +**__delitem__()** + + >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> e = mptree[0,0,1] + >>> del mptree[0,0,1]; mpcheck(mptree); mpcheck(e) + ok! (A (B (C (D ) (Q p)) g) h) + ok! (E f) + >>> del mptree[0,0,0]; mpcheck(mptree) + ok! (A (B (C (Q p)) g) h) + >>> del mptree[0,1]; mpcheck(mptree) + ok! (A (B (C (Q p))) h) + >>> del mptree[-1]; mpcheck(mptree) + ok! (A (B (C (Q p)))) + >>> del mptree[-100] + Traceback (most recent call last): + . . . + IndexError: index out of range + >>> del mptree[()] + Traceback (most recent call last): + . . . + IndexError: The tree position () may not be deleted. + + >>> # With slices: + >>> mptree = make_mptree('(A (B c) (D e) f g (H i) j (K l))') + >>> b = mptree[0] + >>> del mptree[0:0]; mpcheck(mptree) + ok! (A (B c) (D e) f g (H i) j (K l)) + >>> del mptree[:1]; mpcheck(mptree); mpcheck(b) + ok! (A (D e) f g (H i) j (K l)) + ok! (B c) + >>> del mptree[-2:]; mpcheck(mptree) + ok! (A (D e) f g (H i)) + >>> del mptree[1:3]; mpcheck(mptree) + ok! (A (D e) (H i)) + >>> mptree = make_mptree('(A (B c) (D e) f g (H i) j (K l))') + >>> del mptree[5:1000]; mpcheck(mptree) + ok! (A (B c) (D e) f g (H i)) + >>> del mptree[-2:1000]; mpcheck(mptree) + ok! (A (B c) (D e) f) + >>> del mptree[-100:1]; mpcheck(mptree) + ok! (A (D e) f) + >>> mptree = make_mptree('(A (B c) (D e) f g (H i) j (K l))') + >>> del mptree[1:-2:2]; mpcheck(mptree) + ok! (A (B c) f (H i) j (K l)) + +**__setitem__()** + + >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> d, e, q = mptree[0,0] + >>> mptree[0,0,0] = 'x'; mpcheck(mptree); mpcheck(d) + ok! (A (B (C x (E f) (Q p)) g) h) + ok! (D ) + >>> mptree[0,0,1] = make_mptree('(X (Y z))'); mpcheck(mptree); mpcheck(e) + ok! (A (B (C x (X (Y z)) (Q p)) g) h) + ok! (E f) + >>> mptree[1] = d; mpcheck(mptree) + ok! (A (B (C x (X (Y z)) (Q p)) g) (D )) + >>> mptree[-1] = 'x'; mpcheck(mptree) + ok! (A (B (C x (X (Y z)) (Q p)) g) x) + >>> mptree[-100] = 'y' + Traceback (most recent call last): + . . . + IndexError: index out of range + >>> mptree[()] = make_mptree('(X y)') + Traceback (most recent call last): + . . . + IndexError: The tree position () may not be assigned to. + + >>> # With slices: + >>> mptree = make_mptree('(A (B c) (D e) f g (H i) j (K l))') + >>> b = mptree[0] + >>> mptree[0:0] = ('x', make_mptree('(Y)')); mpcheck(mptree) + ok! (A x (Y ) (B c) (D e) f g (H i) j (K l)) + >>> mptree[2:6] = (); mpcheck(mptree); mpcheck(b) + ok! (A x (Y ) (H i) j (K l)) + ok! (B c) + >>> mptree[-2:] = ('z', 'p'); mpcheck(mptree) + ok! (A x (Y ) (H i) z p) + >>> mptree[1:3] = [make_mptree('(X)') for x in range(10)]; mpcheck(mptree) + ok! (A x (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) (X ) z p) + >>> mptree[5:1000] = []; mpcheck(mptree) + ok! (A x (X ) (X ) (X ) (X )) + >>> mptree[-2:1000] = ['n']; mpcheck(mptree) + ok! (A x (X ) (X ) n) + >>> mptree[-100:1] = [make_mptree('(U v)')]; mpcheck(mptree) + ok! 
(A (U v) (X ) (X ) n) + >>> mptree[-1:] = (make_mptree('(X)') for x in range(3)); mpcheck(mptree) + ok! (A (U v) (X ) (X ) (X ) (X ) (X )) + >>> mptree[1:-2:2] = ['x', 'y']; mpcheck(mptree) + ok! (A (U v) x (X ) y (X ) (X )) + +**append()** + + >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> mptree.append('x'); mpcheck(mptree) + ok! (A (B (C (D ) (E f) (Q p)) g) h x) + >>> mptree.append(make_mptree('(X (Y z))')); mpcheck(mptree) + ok! (A (B (C (D ) (E f) (Q p)) g) h x (X (Y z))) + +**extend()** + + >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> mptree.extend(['x', 'y', make_mptree('(X (Y z))')]); mpcheck(mptree) + ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z))) + >>> mptree.extend([]); mpcheck(mptree) + ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z))) + >>> mptree.extend(make_mptree('(X)') for x in range(3)); mpcheck(mptree) + ok! (A (B (C (D ) (E f) (Q p)) g) h x y (X (Y z)) (X ) (X ) (X )) + +**insert()** + + >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> mptree.insert(0, make_mptree('(X (Y z))')); mpcheck(mptree) + ok! (A (X (Y z)) (B (C (D ) (E f) (Q p)) g) h) + >>> mptree.insert(-1, make_mptree('(X (Y z))')); mpcheck(mptree) + ok! (A (X (Y z)) (B (C (D ) (E f) (Q p)) g) (X (Y z)) h) + >>> mptree.insert(-4, make_mptree('(X (Y z))')); mpcheck(mptree) + ok! (A (X (Y z)) (X (Y z)) (B (C (D ) (E f) (Q p)) g) (X (Y z)) h) + >>> # Note: as with ``list``, inserting at a negative index that + >>> # gives a position before the start of the list does *not* + >>> # raise an IndexError exception; it just inserts at 0. + >>> mptree.insert(-400, make_mptree('(X (Y z))')); mpcheck(mptree) + ok! (A + (X (Y z)) + (X (Y z)) + (X (Y z)) + (B (C (D ) (E f) (Q p)) g) + (X (Y z)) + h) + +**pop()** + + >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> mptree[0,0].pop(1); mpcheck(mptree) + MultiParentedTree('E', ['f']) + ok! (A (B (C (D ) (Q p)) g) h) + >>> mptree[0].pop(-1); mpcheck(mptree) + 'g' + ok! (A (B (C (D ) (Q p))) h) + >>> mptree.pop(); mpcheck(mptree) + 'h' + ok! (A (B (C (D ) (Q p)))) + >>> mptree.pop(-100) + Traceback (most recent call last): + . . . + IndexError: index out of range + +**remove()** + + >>> mptree = make_mptree('(A (B (C (D) (E f) (Q p)) g) h)') + >>> e = mptree[0,0,1] + >>> mptree[0,0].remove(mptree[0,0,1]); mpcheck(mptree); mpcheck(e) + ok! (A (B (C (D ) (Q p)) g) h) + ok! (E f) + >>> mptree[0,0].remove(make_mptree('(Q p)')); mpcheck(mptree) + ok! (A (B (C (D )) g) h) + >>> mptree[0,0].remove(make_mptree('(Q p)')) + Traceback (most recent call last): + . . . + ValueError: MultiParentedTree('Q', ['p']) is not in list + >>> mptree.remove('h'); mpcheck(mptree) + ok! (A (B (C (D )) g)) + >>> mptree.remove('h'); + Traceback (most recent call last): + . . . + ValueError: 'h' is not in list + >>> # remove() removes the first subtree that is equal (==) to the + >>> # given tree, which may not be the identical tree we give it: + >>> mptree = make_mptree('(A (X x) (Y y) (X x))') + >>> x1, y, x2 = mptree + >>> mptree.remove(mptree[-1]); mpcheck(mptree) + ok! (A (Y y) (X x)) + >>> print([str(p) for p in x1.parents()]) + [] + >>> print([str(p) for p in x2.parents()]) + ['(A (Y y) (X x))'] + + +ImmutableMultiParentedTree Regression Tests +------------------------------------------- + + >>> imptree = ImmutableMultiParentedTree.convert(mptree) + >>> type(imptree) + + >>> del imptree[0] + Traceback (most recent call last): + . . . 
+ ValueError: ImmutableMultiParentedTree may not be modified + >>> imptree.set_label('newnode') + Traceback (most recent call last): + . . . + ValueError: ImmutableMultiParentedTree may not be modified + + +ProbabilisticTree Regression Tests +---------------------------------- + + >>> prtree = ProbabilisticTree("S", [ProbabilisticTree("NP", ["N"], prob=0.3)], prob=0.6) + >>> print(prtree) + (S (NP N)) (p=0.6) + >>> import copy + >>> prtree == copy.deepcopy(prtree) == prtree.copy(deep=True) == prtree.copy() + True + >>> prtree[0] is prtree.copy()[0] + True + >>> prtree[0] is prtree.copy(deep=True)[0] + False + + >>> imprtree = ImmutableProbabilisticTree.convert(prtree) + >>> type(imprtree) + + >>> del imprtree[0] + Traceback (most recent call last): + . . . + ValueError: ImmutableProbabilisticTree may not be modified + >>> imprtree.set_label('newnode') + Traceback (most recent call last): + . . . + ValueError: ImmutableProbabilisticTree may not be modified + + +Squashed Bugs +============= + +This used to discard the ``(B b)`` subtree (fixed in svn 6270): + + >>> print(Tree.fromstring('((A a) (B b))')) + ( (A a) (B b)) + +Pickling ParentedTree instances didn't work for Python 3.7 onwards (See #2478) + + >>> import pickle + >>> tree = ParentedTree.fromstring('(S (NN x) (NP x) (NN x))') + >>> print(tree) + (S (NN x) (NP x) (NN x)) + + >>> pickled = pickle.dumps(tree) + >>> tree_loaded = pickle.loads(pickled) + >>> print(tree_loaded) + (S (NN x) (NP x) (NN x)) + +ParentedTree used to be impossible to (deep)copy. (See #1324) + + >>> from nltk.tree import ParentedTree + >>> import copy + >>> tree = ParentedTree.fromstring("(TOP (S (NP (NNP Bell,)) (NP (NP (DT a) (NN company)) (SBAR (WHNP (WDT which)) (S (VP (VBZ is) (VP (VBN based) (PP (IN in) (NP (NNP LA,)))))))) (VP (VBZ makes) (CC and) (VBZ distributes) (NP (NN computer))) (. products.)))") + >>> tree == copy.deepcopy(tree) == copy.copy(tree) == tree.copy(deep=True) == tree.copy() + True diff --git a/lib/python3.10/site-packages/nltk/test/treeprettyprinter.doctest b/lib/python3.10/site-packages/nltk/test/treeprettyprinter.doctest new file mode 100644 index 0000000000000000000000000000000000000000..b85c6d1e251d7e6e95a68fbeaf88eb8dd20fed00 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/treeprettyprinter.doctest @@ -0,0 +1,177 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +========================================================= + Unit tests for nltk.tree.prettyprinter.TreePrettyPrinter +========================================================= + + >>> from nltk.tree import Tree, TreePrettyPrinter + +Tree nr 2170 from nltk.corpus.treebank: + + >>> tree = Tree.fromstring( + ... '(S (NP-SBJ (PRP I)) (VP (VBP feel) (ADJP-PRD (RB pretty) ' + ... '(JJ good)) (PP-CLR (IN about) (NP (PRP it)))) (. .))') + >>> tpp = TreePrettyPrinter(tree) + >>> print(tpp.text()) + S + __________________________|_____________________ + | VP | + | ____________________|___________ | + | | | PP-CLR | + | | | _____|_____ | + NP-SBJ | ADJP-PRD | NP | + | | _______|______ | | | + PRP VBP RB JJ IN PRP . + | | | | | | | + I feel pretty good about it . + + >>> print(tpp.text(unicodelines=True)) + S + ┌──────────────────────────┼─────────────────────┐ + │ VP │ + │ ┌─────────────┬──────┴───────────┐ │ + │ │ │ PP-CLR │ + │ │ │ ┌─────┴─────┐ │ + NP-SBJ │ ADJP-PRD │ NP │ + │ │ ┌───────┴──────┐ │ │ │ + PRP VBP RB JJ IN PRP . + │ │ │ │ │ │ │ + I feel pretty good about it . 
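+
+The same layout can also be exported as SVG markup via `TreePrettyPrinter.svg()`.
+The markup is long and version-dependent, so only a minimal, hedged sketch is
+shown here; ``svg()`` returns the drawing as a string that can be written to a
+file:
+
+    >>> svg_markup = tpp.svg()
+    >>> isinstance(svg_markup, str)
+    True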
+ +A tree with long labels: + + >>> tree = Tree.fromstring( + ... '(sentence (plural-noun-phrase (plural-noun Superconductors)) ' + ... '(verb-phrase (plural-verb conduct) ' + ... '(noun-phrase (singular-noun electricity))))') + >>> tpp = TreePrettyPrinter(tree) + >>> print(tpp.text(abbreviate=8, nodedist=2)) + sentence + __________|__________ + | verb-phr. + | __________|__________ + plural-n. | noun-phr. + | | | + plural-n. plural-v. singular. + | | | + Supercon. conduct electric. + + >>> print(tpp.text(maxwidth=8, nodedist=2)) + sentence + _________|________ + | verb- + | phrase + | ________|_________ + plural- | noun- + noun- | phrase + phrase | | + | | | + plural- plural- singular- + noun verb noun + | | | + Supercon conduct electric + ductors ity + +A discontinuous tree: + + >>> tree = Tree.fromstring( + ... '(top (punct 8) (smain (noun 0) (verb 1) (inf (verb 5) (inf (verb 6) ' + ... '(conj (inf (pp (prep 2) (np (det 3) (noun 4))) (verb 7)) (inf (verb 9)) ' + ... '(vg 10) (inf (verb 11)))))) (punct 12))', read_leaf=int) + >>> sentence = ('Ze had met haar moeder kunnen gaan winkelen ,' + ... ' zwemmen of terrassen .'.split()) + >>> tpp = TreePrettyPrinter(tree, sentence) + >>> print(tpp.text()) + top + _____|______________________________________________ + smain | | + _______________________________|_____ | | + | | inf | | + | | _____|____ | | + | | | inf | | + | | | ____|_____ | | + | | | | conj | | + | | _____ | ___ | _________|______ | __________________ | + | | inf | | | | | | | + | | _________|_____ | ___ | _________ | | | | | + | | pp | | | | | | | | + | | ____|____ | | | | | | | | + | | | np | | | | inf | inf | + | | | ____|____ | | | | | | | | + noun verb prep det noun verb verb verb punct verb vg verb punct + | | | | | | | | | | | | | + Ze had met haar moeder kunnen gaan winkelen , zwemmen of terrassen . + + >>> print(tpp.text(unicodelines=True)) + top + ┌─────┴──────────────────┬───────────────────────────┐ + smain │ │ + ┌────┬──────────────────────────┴─────┐ │ │ + │ │ inf │ │ + │ │ ┌─────┴────┐ │ │ + │ │ │ inf │ │ + │ │ │ ┌────┴─────┐ │ │ + │ │ │ │ conj │ │ + │ │ ┌───── │ ─── │ ─────────┴────── │ ─────┬─────┬──────┐ │ + │ │ inf │ │ │ │ │ │ │ + │ │ ┌─────────┴───── │ ─── │ ─────────┐ │ │ │ │ │ + │ │ pp │ │ │ │ │ │ │ │ + │ │ ┌────┴────┐ │ │ │ │ │ │ │ │ + │ │ │ np │ │ │ │ inf │ inf │ + │ │ │ ┌────┴────┐ │ │ │ │ │ │ │ │ + noun verb prep det noun verb verb verb punct verb vg verb punct + │ │ │ │ │ │ │ │ │ │ │ │ │ + Ze had met haar moeder kunnen gaan winkelen , zwemmen of terrassen . 
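+
+The printer works on any ``nltk.tree.Tree``, not just hand-built examples.
+A sketch for batch use (it assumes the ``treebank`` corpus has been downloaded;
+the output is long, so the loop is skipped when the doctest runs):
+
+    >>> from nltk.corpus import treebank
+    >>> for parsed in treebank.parsed_sents()[:2]:  # doctest: +SKIP
+    ...     print(TreePrettyPrinter(parsed).text(maxwidth=16))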
+ +Importing TreePrettyPrinter +--------------------------- + +First of all, a simple tree will be constructed:: + + >>> from nltk.tree import Tree + >>> tree = Tree.fromstring('(S (NP Mary) (VP walks))') + +We'll use this sample tree to show that the method of importing `TreePrettyPrinter` work correctly: + +- Recommended:: + + >>> from nltk.tree import TreePrettyPrinter + >>> print(TreePrettyPrinter(tree).text()) + S + ____|____ + NP VP + | | + Mary walks + +- Alternative but valid options:: + + >>> from nltk import TreePrettyPrinter + >>> print(TreePrettyPrinter(tree).text()) + S + ____|____ + NP VP + | | + Mary walks + + >>> from nltk.tree.prettyprinter import TreePrettyPrinter + >>> print(TreePrettyPrinter(tree).text()) + S + ____|____ + NP VP + | | + Mary walks + +- Deprecated, do not use:: + + >>> from nltk.treeprettyprinter import TreePrettyPrinter + >>> print(TreePrettyPrinter(tree).text()) + S + ____|____ + NP VP + | | + Mary walks + + This method will throw a DeprecationWarning:: + + Import `TreePrettyPrinter` using `from nltk.tree import TreePrettyPrinter` instead. diff --git a/lib/python3.10/site-packages/nltk/test/wsd.doctest b/lib/python3.10/site-packages/nltk/test/wsd.doctest new file mode 100644 index 0000000000000000000000000000000000000000..e4445c51d038f01e73214f370c220471530f859d --- /dev/null +++ b/lib/python3.10/site-packages/nltk/test/wsd.doctest @@ -0,0 +1,68 @@ +.. Copyright (C) 2001-2023 NLTK Project +.. For license information, see LICENSE.TXT + +.. -*- coding: utf-8 -*- + +========================= +Word Sense Disambiguation +========================= + + +Lesk Algorithm +-------------- + + +Performs the classic Lesk algorithm for Word Sense Disambiguation (WSD) using +a the definitions of the ambiguous word. + +Given an ambiguous word and the context in which the word occurs, Lesk returns +a Synset with the highest number of overlapping words between the context +sentence and different definitions from each Synset. + + >>> from nltk.wsd import lesk + >>> sent = ['I', 'went', 'to', 'the', 'bank', 'to', 'deposit', 'money', '.'] + + >>> print(lesk(sent, 'bank', 'n')) + Synset('savings_bank.n.02') + + >>> print(lesk(sent, 'bank')) + Synset('savings_bank.n.02') + +The definitions for "bank" are: + + >>> from nltk.corpus import wordnet as wn + >>> for ss in wn.synsets('bank'): + ... print(ss, ss.definition()) + ... 
+ Synset('bank.n.01') sloping land (especially the slope beside a body of water) + Synset('depository_financial_institution.n.01') a financial institution that accepts deposits and channels the money into lending activities + Synset('bank.n.03') a long ridge or pile + Synset('bank.n.04') an arrangement of similar objects in a row or in tiers + Synset('bank.n.05') a supply or stock held in reserve for future use (especially in emergencies) + Synset('bank.n.06') the funds held by a gambling house or the dealer in some gambling games + Synset('bank.n.07') a slope in the turn of a road or track; the outside is higher than the inside in order to reduce the effects of centrifugal force + Synset('savings_bank.n.02') a container (usually with a slot in the top) for keeping money at home + Synset('bank.n.09') a building in which the business of banking transacted + Synset('bank.n.10') a flight maneuver; aircraft tips laterally about its longitudinal axis (especially in turning) + Synset('bank.v.01') tip laterally + Synset('bank.v.02') enclose with a bank + Synset('bank.v.03') do business with a bank or keep an account at a bank + Synset('bank.v.04') act as the banker in a game or in gambling + Synset('bank.v.05') be in the banking business + Synset('deposit.v.02') put into a bank account + Synset('bank.v.07') cover with ashes so to control the rate of burning + Synset('trust.v.01') have confidence or faith in + +Test disambiguation of POS tagged `able`. + + >>> [(s, s.pos()) for s in wn.synsets('able')] + [(Synset('able.a.01'), 'a'), (Synset('able.s.02'), 's'), (Synset('able.s.03'), 's'), (Synset('able.s.04'), 's')] + >>> sent = 'people should be able to marry a person of their choice'.split() + >>> lesk(sent, 'able') + Synset('able.s.04') + >>> lesk(sent, 'able', pos='a') + Synset('able.a.01') + +Test behavior if there is are no matching senses. + + >>> lesk('John loves Mary'.split(), 'loves', synsets=[]) diff --git a/lib/python3.10/site-packages/nltk/text.py b/lib/python3.10/site-packages/nltk/text.py new file mode 100644 index 0000000000000000000000000000000000000000..85614dbd22f6a4afd388cf96cb5f8e3520883c16 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/text.py @@ -0,0 +1,779 @@ +# Natural Language Toolkit: Texts +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Edward Loper +# URL: +# For license information, see LICENSE.TXT + +""" +This module brings together a variety of NLTK functionality for +text analysis, and provides simple, interactive interfaces. +Functionality includes: concordancing, collocation discovery, +regular expression search over tokenized strings, and +distributional similarity. +""" + +import re +import sys +from collections import Counter, defaultdict, namedtuple +from functools import reduce +from math import log + +from nltk.collocations import BigramCollocationFinder +from nltk.lm import MLE +from nltk.lm.preprocessing import padded_everygram_pipeline +from nltk.metrics import BigramAssocMeasures, f_measure +from nltk.probability import ConditionalFreqDist as CFD +from nltk.probability import FreqDist +from nltk.tokenize import sent_tokenize +from nltk.util import LazyConcatenation, tokenwrap + +ConcordanceLine = namedtuple( + "ConcordanceLine", + ["left", "query", "right", "offset", "left_print", "right_print", "line"], +) + + +class ContextIndex: + """ + A bidirectional index between words and their 'contexts' in a text. 
+ The context of a word is usually defined to be the words that occur + in a fixed window around the word; but other definitions may also + be used by providing a custom context function. + """ + + @staticmethod + def _default_context(tokens, i): + """One left token and one right token, normalized to lowercase""" + left = tokens[i - 1].lower() if i != 0 else "*START*" + right = tokens[i + 1].lower() if i != len(tokens) - 1 else "*END*" + return (left, right) + + def __init__(self, tokens, context_func=None, filter=None, key=lambda x: x): + self._key = key + self._tokens = tokens + if context_func: + self._context_func = context_func + else: + self._context_func = self._default_context + if filter: + tokens = [t for t in tokens if filter(t)] + self._word_to_contexts = CFD( + (self._key(w), self._context_func(tokens, i)) for i, w in enumerate(tokens) + ) + self._context_to_words = CFD( + (self._context_func(tokens, i), self._key(w)) for i, w in enumerate(tokens) + ) + + def tokens(self): + """ + :rtype: list(str) + :return: The document that this context index was + created from. + """ + return self._tokens + + def word_similarity_dict(self, word): + """ + Return a dictionary mapping from words to 'similarity scores,' + indicating how often these two words occur in the same + context. + """ + word = self._key(word) + word_contexts = set(self._word_to_contexts[word]) + + scores = {} + for w, w_contexts in self._word_to_contexts.items(): + scores[w] = f_measure(word_contexts, set(w_contexts)) + + return scores + + def similar_words(self, word, n=20): + scores = defaultdict(int) + for c in self._word_to_contexts[self._key(word)]: + for w in self._context_to_words[c]: + if w != word: + scores[w] += ( + self._context_to_words[c][word] * self._context_to_words[c][w] + ) + return sorted(scores, key=scores.get, reverse=True)[:n] + + def common_contexts(self, words, fail_on_unknown=False): + """ + Find contexts where the specified words can all appear; and + return a frequency distribution mapping each context to the + number of times that context was used. + + :param words: The words used to seed the similarity search + :type words: str + :param fail_on_unknown: If true, then raise a value error if + any of the given words do not occur at all in the index. + """ + words = [self._key(w) for w in words] + contexts = [set(self._word_to_contexts[w]) for w in words] + empty = [words[i] for i in range(len(words)) if not contexts[i]] + common = reduce(set.intersection, contexts) + if empty and fail_on_unknown: + raise ValueError("The following word(s) were not found:", " ".join(words)) + elif not common: + # nothing in common -- just return an empty freqdist. + return FreqDist() + else: + fd = FreqDist( + c for w in words for c in self._word_to_contexts[w] if c in common + ) + return fd + + +class ConcordanceIndex: + """ + An index that can be used to look up the offset locations at which + a given word occurs in a document. + """ + + def __init__(self, tokens, key=lambda x: x): + """ + Construct a new concordance index. + + :param tokens: The document (list of tokens) that this + concordance index was created from. This list can be used + to access the context of a given word occurrence. + :param key: A function that maps each token to a normalized + version that will be used as a key in the index. E.g., if + you use ``key=lambda s:s.lower()``, then the index will be + case-insensitive. 
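+
+        A small illustrative sketch (not a doctest; the token list is made
+        up for illustration)::
+
+            tokens = ['The', 'dog', 'saw', 'the', 'cat']
+            index = ConcordanceIndex(tokens, key=lambda s: s.lower())
+            index.offsets('the')   # -> [0, 3]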
+ """ + self._tokens = tokens + """The document (list of tokens) that this concordance index + was created from.""" + + self._key = key + """Function mapping each token to an index key (or None).""" + + self._offsets = defaultdict(list) + """Dictionary mapping words (or keys) to lists of offset indices.""" + # Initialize the index (self._offsets) + for index, word in enumerate(tokens): + word = self._key(word) + self._offsets[word].append(index) + + def tokens(self): + """ + :rtype: list(str) + :return: The document that this concordance index was + created from. + """ + return self._tokens + + def offsets(self, word): + """ + :rtype: list(int) + :return: A list of the offset positions at which the given + word occurs. If a key function was specified for the + index, then given word's key will be looked up. + """ + word = self._key(word) + return self._offsets[word] + + def __repr__(self): + return "" % ( + len(self._tokens), + len(self._offsets), + ) + + def find_concordance(self, word, width=80): + """ + Find all concordance lines given the query word. + + Provided with a list of words, these will be found as a phrase. + """ + if isinstance(word, list): + phrase = word + else: + phrase = [word] + + half_width = (width - len(" ".join(phrase)) - 2) // 2 + context = width // 4 # approx number of words of context + + # Find the instances of the word to create the ConcordanceLine + concordance_list = [] + offsets = self.offsets(phrase[0]) + for i, word in enumerate(phrase[1:]): + word_offsets = {offset - i - 1 for offset in self.offsets(word)} + offsets = sorted(word_offsets.intersection(offsets)) + if offsets: + for i in offsets: + query_word = " ".join(self._tokens[i : i + len(phrase)]) + # Find the context of query word. + left_context = self._tokens[max(0, i - context) : i] + right_context = self._tokens[i + len(phrase) : i + context] + # Create the pretty lines with the query_word in the middle. + left_print = " ".join(left_context)[-half_width:] + right_print = " ".join(right_context)[:half_width] + # The WYSIWYG line of the concordance. + line_print = " ".join([left_print, query_word, right_print]) + # Create the ConcordanceLine + concordance_line = ConcordanceLine( + left_context, + query_word, + right_context, + i, + left_print, + right_print, + line_print, + ) + concordance_list.append(concordance_line) + return concordance_list + + def print_concordance(self, word, width=80, lines=25): + """ + Print concordance lines given the query word. + :param word: The target word or phrase (a list of strings) + :type word: str or list + :param lines: The number of lines to display (default=25) + :type lines: int + :param width: The width of each line, in characters (default=80) + :type width: int + :param save: The option to save the concordance. + :type save: bool + """ + concordance_list = self.find_concordance(word, width=width) + + if not concordance_list: + print("no matches") + else: + lines = min(lines, len(concordance_list)) + print(f"Displaying {lines} of {len(concordance_list)} matches:") + for i, concordance_line in enumerate(concordance_list[:lines]): + print(concordance_line.line) + + +class TokenSearcher: + """ + A class that makes it easier to use regular expressions to search + over tokenized strings. The tokenized string is converted to a + string where tokens are marked with angle brackets -- e.g., + ``''``. 
The regular expression + passed to the ``findall()`` method is modified to treat angle + brackets as non-capturing parentheses, in addition to matching the + token boundaries; and to have ``'.'`` not match the angle brackets. + """ + + def __init__(self, tokens): + self._raw = "".join("<" + w + ">" for w in tokens) + + def findall(self, regexp): + """ + Find instances of the regular expression in the text. + The text is a list of tokens, and a regexp pattern to match + a single token must be surrounded by angle brackets. E.g. + + >>> from nltk.text import TokenSearcher + >>> from nltk.book import text1, text5, text9 + >>> text5.findall("<.*><.*>") + you rule bro; telling you bro; u twizted bro + >>> text1.findall("(<.*>)") + monied; nervous; dangerous; white; white; white; pious; queer; good; + mature; white; Cape; great; wise; wise; butterless; white; fiendish; + pale; furious; better; certain; complete; dismasted; younger; brave; + brave; brave; brave + >>> text9.findall("{3,}") + thread through those; the thought that; that the thing; the thing + that; that that thing; through these than through; them that the; + through the thick; them that they; thought that the + + :param regexp: A regular expression + :type regexp: str + """ + # preprocess the regular expression + regexp = re.sub(r"\s", "", regexp) + regexp = re.sub(r"<", "(?:<(?:", regexp) + regexp = re.sub(r">", ")>)", regexp) + regexp = re.sub(r"(?]", regexp) + + # perform the search + hits = re.findall(regexp, self._raw) + + # Sanity check + for h in hits: + if not h.startswith("<") and h.endswith(">"): + raise ValueError("Bad regexp for TokenSearcher.findall") + + # postprocess the output + hits = [h[1:-1].split("><") for h in hits] + return hits + + +class Text: + """ + A wrapper around a sequence of simple (string) tokens, which is + intended to support initial exploration of texts (via the + interactive console). Its methods perform a variety of analyses + on the text's contexts (e.g., counting, concordancing, collocation + discovery), and display the results. If you wish to write a + program which makes use of these analyses, then you should bypass + the ``Text`` class, and use the appropriate analysis function or + class directly instead. + + A ``Text`` is typically initialized from a given document or + corpus. E.g.: + + >>> import nltk.corpus + >>> from nltk.text import Text + >>> moby = Text(nltk.corpus.gutenberg.words('melville-moby_dick.txt')) + + """ + + # This defeats lazy loading, but makes things faster. This + # *shouldn't* be necessary because the corpus view *should* be + # doing intelligent caching, but without this it's running slow. + # Look into whether the caching is working correctly. + _COPY_TOKENS = True + + def __init__(self, tokens, name=None): + """ + Create a Text object. + + :param tokens: The source text. + :type tokens: sequence of str + """ + if self._COPY_TOKENS: + tokens = list(tokens) + self.tokens = tokens + + if name: + self.name = name + elif "]" in tokens[:20]: + end = tokens[:20].index("]") + self.name = " ".join(str(tok) for tok in tokens[1:end]) + else: + self.name = " ".join(str(tok) for tok in tokens[:8]) + "..." 
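+        # The branches above derive a display name for the Text: an explicit
+        # `name` argument wins; otherwise, if the token stream opens with a
+        # bracketed title (as the nltk.book corpora do, e.g.
+        # "[ Moby Dick by Herman Melville 1851 ]"), the words inside the
+        # brackets are used; failing that, the first eight tokens plus "..."
+        # serve as the name.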
+ + # //////////////////////////////////////////////////////////// + # Support item & slice access + # //////////////////////////////////////////////////////////// + + def __getitem__(self, i): + return self.tokens[i] + + def __len__(self): + return len(self.tokens) + + # //////////////////////////////////////////////////////////// + # Interactive console methods + # //////////////////////////////////////////////////////////// + + def concordance(self, word, width=79, lines=25): + """ + Prints a concordance for ``word`` with the specified context window. + Word matching is not case-sensitive. + + :param word: The target word or phrase (a list of strings) + :type word: str or list + :param width: The width of each line, in characters (default=80) + :type width: int + :param lines: The number of lines to display (default=25) + :type lines: int + + :seealso: ``ConcordanceIndex`` + """ + if "_concordance_index" not in self.__dict__: + self._concordance_index = ConcordanceIndex( + self.tokens, key=lambda s: s.lower() + ) + + return self._concordance_index.print_concordance(word, width, lines) + + def concordance_list(self, word, width=79, lines=25): + """ + Generate a concordance for ``word`` with the specified context window. + Word matching is not case-sensitive. + + :param word: The target word or phrase (a list of strings) + :type word: str or list + :param width: The width of each line, in characters (default=80) + :type width: int + :param lines: The number of lines to display (default=25) + :type lines: int + + :seealso: ``ConcordanceIndex`` + """ + if "_concordance_index" not in self.__dict__: + self._concordance_index = ConcordanceIndex( + self.tokens, key=lambda s: s.lower() + ) + return self._concordance_index.find_concordance(word, width)[:lines] + + def collocation_list(self, num=20, window_size=2): + """ + Return collocations derived from the text, ignoring stopwords. + + >>> from nltk.book import text4 + >>> text4.collocation_list()[:2] + [('United', 'States'), ('fellow', 'citizens')] + + :param num: The maximum number of collocations to return. + :type num: int + :param window_size: The number of tokens spanned by a collocation (default=2) + :type window_size: int + :rtype: list(tuple(str, str)) + """ + if not ( + "_collocations" in self.__dict__ + and self._num == num + and self._window_size == window_size + ): + self._num = num + self._window_size = window_size + + # print("Building collocations list") + from nltk.corpus import stopwords + + ignored_words = stopwords.words("english") + finder = BigramCollocationFinder.from_words(self.tokens, window_size) + finder.apply_freq_filter(2) + finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words) + bigram_measures = BigramAssocMeasures() + self._collocations = list( + finder.nbest(bigram_measures.likelihood_ratio, num) + ) + return self._collocations + + def collocations(self, num=20, window_size=2): + """ + Print collocations derived from the text, ignoring stopwords. + + >>> from nltk.book import text4 + >>> text4.collocations() # doctest: +NORMALIZE_WHITESPACE + United States; fellow citizens; years ago; four years; Federal + Government; General Government; American people; Vice President; God + bless; Chief Justice; one another; fellow Americans; Old World; + Almighty God; Fellow citizens; Chief Magistrate; every citizen; Indian + tribes; public debt; foreign nations + + + :param num: The maximum number of collocations to print. 
+ :type num: int + :param window_size: The number of tokens spanned by a collocation (default=2) + :type window_size: int + """ + + collocation_strings = [ + w1 + " " + w2 for w1, w2 in self.collocation_list(num, window_size) + ] + print(tokenwrap(collocation_strings, separator="; ")) + + def count(self, word): + """ + Count the number of times this word appears in the text. + """ + return self.tokens.count(word) + + def index(self, word): + """ + Find the index of the first occurrence of the word in the text. + """ + return self.tokens.index(word) + + def readability(self, method): + # code from nltk_contrib.readability + raise NotImplementedError + + def similar(self, word, num=20): + """ + Distributional similarity: find other words which appear in the + same contexts as the specified word; list most similar words first. + + :param word: The word used to seed the similarity search + :type word: str + :param num: The number of words to generate (default=20) + :type num: int + :seealso: ContextIndex.similar_words() + """ + if "_word_context_index" not in self.__dict__: + # print('Building word-context index...') + self._word_context_index = ContextIndex( + self.tokens, filter=lambda x: x.isalpha(), key=lambda s: s.lower() + ) + + # words = self._word_context_index.similar_words(word, num) + + word = word.lower() + wci = self._word_context_index._word_to_contexts + if word in wci.conditions(): + contexts = set(wci[word]) + fd = Counter( + w + for w in wci.conditions() + for c in wci[w] + if c in contexts and not w == word + ) + words = [w for w, _ in fd.most_common(num)] + print(tokenwrap(words)) + else: + print("No matches") + + def common_contexts(self, words, num=20): + """ + Find contexts where the specified words appear; list + most frequent common contexts first. + + :param words: The words used to seed the similarity search + :type words: str + :param num: The number of words to generate (default=20) + :type num: int + :seealso: ContextIndex.common_contexts() + """ + if "_word_context_index" not in self.__dict__: + # print('Building word-context index...') + self._word_context_index = ContextIndex( + self.tokens, key=lambda s: s.lower() + ) + + try: + fd = self._word_context_index.common_contexts(words, True) + if not fd: + print("No common contexts were found") + else: + ranked_contexts = [w for w, _ in fd.most_common(num)] + print(tokenwrap(w1 + "_" + w2 for w1, w2 in ranked_contexts)) + + except ValueError as e: + print(e) + + def dispersion_plot(self, words): + """ + Produce a plot showing the distribution of the words through the text. + Requires pylab to be installed. + + :param words: The words to be plotted + :type words: list(str) + :seealso: nltk.draw.dispersion_plot() + """ + from nltk.draw import dispersion_plot + + dispersion_plot(self, words) + + def _train_default_ngram_lm(self, tokenized_sents, n=3): + train_data, padded_sents = padded_everygram_pipeline(n, tokenized_sents) + model = MLE(order=n) + model.fit(train_data, padded_sents) + return model + + def generate(self, length=100, text_seed=None, random_seed=42): + """ + Print random text, generated using a trigram language model. + See also `help(nltk.lm)`. + + :param length: The length of text to generate (default=100) + :type length: int + + :param text_seed: Generation can be conditioned on preceding context. + :type text_seed: list(str) + + :param random_seed: A random seed or an instance of `random.Random`. If provided, + makes the random sampling part of generation reproducible. 
(default=42) + :type random_seed: int + """ + # Create the model when using it the first time. + self._tokenized_sents = [ + sent.split(" ") for sent in sent_tokenize(" ".join(self.tokens)) + ] + if not hasattr(self, "_trigram_model"): + print("Building ngram index...", file=sys.stderr) + self._trigram_model = self._train_default_ngram_lm( + self._tokenized_sents, n=3 + ) + + generated_tokens = [] + + assert length > 0, "The `length` must be more than 0." + while len(generated_tokens) < length: + for idx, token in enumerate( + self._trigram_model.generate( + length, text_seed=text_seed, random_seed=random_seed + ) + ): + if token == "": + continue + if token == "": + break + generated_tokens.append(token) + random_seed += 1 + + prefix = " ".join(text_seed) + " " if text_seed else "" + output_str = prefix + tokenwrap(generated_tokens[:length]) + print(output_str) + return output_str + + def plot(self, *args): + """ + See documentation for FreqDist.plot() + :seealso: nltk.prob.FreqDist.plot() + """ + return self.vocab().plot(*args) + + def vocab(self): + """ + :seealso: nltk.prob.FreqDist + """ + if "_vocab" not in self.__dict__: + # print("Building vocabulary index...") + self._vocab = FreqDist(self) + return self._vocab + + def findall(self, regexp): + """ + Find instances of the regular expression in the text. + The text is a list of tokens, and a regexp pattern to match + a single token must be surrounded by angle brackets. E.g. + + >>> from nltk.book import text1, text5, text9 + >>> text5.findall("<.*><.*>") + you rule bro; telling you bro; u twizted bro + >>> text1.findall("(<.*>)") + monied; nervous; dangerous; white; white; white; pious; queer; good; + mature; white; Cape; great; wise; wise; butterless; white; fiendish; + pale; furious; better; certain; complete; dismasted; younger; brave; + brave; brave; brave + >>> text9.findall("{3,}") + thread through those; the thought that; that the thing; the thing + that; that that thing; through these than through; them that the; + through the thick; them that they; thought that the + + :param regexp: A regular expression + :type regexp: str + """ + + if "_token_searcher" not in self.__dict__: + self._token_searcher = TokenSearcher(self) + + hits = self._token_searcher.findall(regexp) + hits = [" ".join(h) for h in hits] + print(tokenwrap(hits, "; ")) + + # //////////////////////////////////////////////////////////// + # Helper Methods + # //////////////////////////////////////////////////////////// + + _CONTEXT_RE = re.compile(r"\w+|[\.\!\?]") + + def _context(self, tokens, i): + """ + One left & one right token, both case-normalized. Skip over + non-sentence-final punctuation. Used by the ``ContextIndex`` + that is created for ``similar()`` and ``common_contexts()``. 
+ """ + # Left context + j = i - 1 + while j >= 0 and not self._CONTEXT_RE.match(tokens[j]): + j -= 1 + left = tokens[j] if j != 0 else "*START*" + + # Right context + j = i + 1 + while j < len(tokens) and not self._CONTEXT_RE.match(tokens[j]): + j += 1 + right = tokens[j] if j != len(tokens) else "*END*" + + return (left, right) + + # //////////////////////////////////////////////////////////// + # String Display + # //////////////////////////////////////////////////////////// + + def __str__(self): + return "" % self.name + + def __repr__(self): + return "" % self.name + + +# Prototype only; this approach will be slow to load +class TextCollection(Text): + """A collection of texts, which can be loaded with list of texts, or + with a corpus consisting of one or more texts, and which supports + counting, concordancing, collocation discovery, etc. Initialize a + TextCollection as follows: + + >>> import nltk.corpus + >>> from nltk.text import TextCollection + >>> from nltk.book import text1, text2, text3 + >>> gutenberg = TextCollection(nltk.corpus.gutenberg) + >>> mytexts = TextCollection([text1, text2, text3]) + + Iterating over a TextCollection produces all the tokens of all the + texts in order. + """ + + def __init__(self, source): + if hasattr(source, "words"): # bridge to the text corpus reader + source = [source.words(f) for f in source.fileids()] + + self._texts = source + Text.__init__(self, LazyConcatenation(source)) + self._idf_cache = {} + + def tf(self, term, text): + """The frequency of the term in text.""" + return text.count(term) / len(text) + + def idf(self, term): + """The number of texts in the corpus divided by the + number of texts that the term appears in. + If a term does not appear in the corpus, 0.0 is returned.""" + # idf values are cached for performance. 
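+        # The value computed below is log(N / df), where N is the number of
+        # texts in the collection and df is the number of texts containing
+        # `term`; a term that occurs in no text gets an idf of 0.0 rather
+        # than causing a division by zero.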
+ idf = self._idf_cache.get(term) + if idf is None: + matches = len([True for text in self._texts if term in text]) + if len(self._texts) == 0: + raise ValueError("IDF undefined for empty document collection") + idf = log(len(self._texts) / matches) if matches else 0.0 + self._idf_cache[term] = idf + return idf + + def tf_idf(self, term, text): + return self.tf(term, text) * self.idf(term) + + +def demo(): + from nltk.corpus import brown + + text = Text(brown.words(categories="news")) + print(text) + print() + print("Concordance:") + text.concordance("news") + print() + print("Distributionally similar words:") + text.similar("news") + print() + print("Collocations:") + text.collocations() + print() + # print("Automatically generated text:") + # text.generate() + # print() + print("Dispersion plot:") + text.dispersion_plot(["news", "report", "said", "announced"]) + print() + print("Vocabulary plot:") + text.plot(50) + print() + print("Indexing:") + print("text[3]:", text[3]) + print("text[3:5]:", text[3:5]) + print("text.vocab()['news']:", text.vocab()["news"]) + + +if __name__ == "__main__": + demo() + +__all__ = [ + "ContextIndex", + "ConcordanceIndex", + "TokenSearcher", + "Text", + "TextCollection", +] diff --git a/lib/python3.10/site-packages/nltk/tgrep.py b/lib/python3.10/site-packages/nltk/tgrep.py new file mode 100644 index 0000000000000000000000000000000000000000..45ce2ab92629296fc52931ff12720d62aab939cf --- /dev/null +++ b/lib/python3.10/site-packages/nltk/tgrep.py @@ -0,0 +1,1039 @@ +#!/usr/bin/env python +# +# Natural Language Toolkit: TGrep search +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Will Roberts +# URL: +# For license information, see LICENSE.TXT + +""" +============================================ + TGrep search implementation for NLTK trees +============================================ + +This module supports TGrep2 syntax for matching parts of NLTK Trees. +Note that many tgrep operators require the tree passed to be a +``ParentedTree``. + +External links: + +- `Tgrep tutorial `_ +- `Tgrep2 manual `_ +- `Tgrep2 source `_ + +Usage +===== + +>>> from nltk.tree import ParentedTree +>>> from nltk.tgrep import tgrep_nodes, tgrep_positions +>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))') +>>> list(tgrep_nodes('NN', [tree])) +[[ParentedTree('NN', ['dog']), ParentedTree('NN', ['cat'])]] +>>> list(tgrep_positions('NN', [tree])) +[[(0, 2), (2, 1)]] +>>> list(tgrep_nodes('DT', [tree])) +[[ParentedTree('DT', ['the']), ParentedTree('DT', ['a'])]] +>>> list(tgrep_nodes('DT $ JJ', [tree])) +[[ParentedTree('DT', ['the'])]] + +This implementation adds syntax to select nodes based on their NLTK +tree position. This syntax is ``N`` plus a Python tuple representing +the tree position. For instance, ``N()``, ``N(0,)``, ``N(0,0)`` are +valid node selectors. Example: + +>>> tree = ParentedTree.fromstring('(S (NP (DT the) (JJ big) (NN dog)) (VP bit) (NP (DT a) (NN cat)))') +>>> tree[0,0] +ParentedTree('DT', ['the']) +>>> tree[0,0].treeposition() +(0, 0) +>>> list(tgrep_nodes('N(0,0)', [tree])) +[[ParentedTree('DT', ['the'])]] + +Caveats: +======== + +- Link modifiers: "?" and "=" are not implemented. +- Tgrep compatibility: Using "@" for "!", "{" for "<", "}" for ">" are + not implemented. +- The "=" and "~" links are not implemented. + +Known Issues: +============= + +- There are some issues with link relations involving leaf nodes + (which are represented as bare strings in NLTK trees). 
For + instance, consider the tree:: + + (S (A x)) + + The search string ``* !>> S`` should select all nodes which are not + dominated in some way by an ``S`` node (i.e., all nodes which are + not descendants of an ``S``). Clearly, in this tree, the only node + which fulfills this criterion is the top node (since it is not + dominated by anything). However, the code here will find both the + top node and the leaf node ``x``. This is because we cannot recover + the parent of the leaf, since it is stored as a bare string. + + A possible workaround, when performing this kind of search, would be + to filter out all leaf nodes. + +Implementation notes +==================== + +This implementation is (somewhat awkwardly) based on lambda functions +which are predicates on a node. A predicate is a function which is +either True or False; using a predicate function, we can identify sets +of nodes with particular properties. A predicate function, could, for +instance, return True only if a particular node has a label matching a +particular regular expression, and has a daughter node which has no +sisters. Because tgrep2 search strings can do things statefully (such +as substituting in macros, and binding nodes with node labels), the +actual predicate function is declared with three arguments:: + + pred = lambda n, m, l: return True # some logic here + +``n`` + is a node in a tree; this argument must always be given + +``m`` + contains a dictionary, mapping macro names onto predicate functions + +``l`` + is a dictionary to map node labels onto nodes in the tree + +``m`` and ``l`` are declared to default to ``None``, and so need not be +specified in a call to a predicate. Predicates which call other +predicates must always pass the value of these arguments on. The +top-level predicate (constructed by ``_tgrep_exprs_action``) binds the +macro definitions to ``m`` and initialises ``l`` to an empty dictionary. +""" + +import functools +import re + +try: + import pyparsing +except ImportError: + print("Warning: nltk.tgrep will not work without the `pyparsing` package") + print("installed.") + +import nltk.tree + + +class TgrepException(Exception): + """Tgrep exception type.""" + + pass + + +def ancestors(node): + """ + Returns the list of all nodes dominating the given tree node. + This method will not work with leaf nodes, since there is no way + to recover the parent. + """ + results = [] + try: + current = node.parent() + except AttributeError: + # if node is a leaf, we cannot retrieve its parent + return results + while current: + results.append(current) + current = current.parent() + return results + + +def unique_ancestors(node): + """ + Returns the list of all nodes dominating the given node, where + there is only a single path of descent. + """ + results = [] + try: + current = node.parent() + except AttributeError: + # if node is a leaf, we cannot retrieve its parent + return results + while current and len(current) == 1: + results.append(current) + current = current.parent() + return results + + +def _descendants(node): + """ + Returns the list of all nodes which are descended from the given + tree node in some way. + """ + try: + treepos = node.treepositions() + except AttributeError: + return [] + return [node[x] for x in treepos[1:]] + + +def _leftmost_descendants(node): + """ + Returns the set of all nodes descended in some way through + left branches from this node. 
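+
+    For example, in ``(S (NP (DT the)) (VP runs))`` the left-most descendants
+    of ``S`` are the ``NP`` subtree, the ``DT`` subtree and the leaf ``the``,
+    i.e. every node whose tree position consists only of zeros.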
+ """ + try: + treepos = node.treepositions() + except AttributeError: + return [] + return [node[x] for x in treepos[1:] if all(y == 0 for y in x)] + + +def _rightmost_descendants(node): + """ + Returns the set of all nodes descended in some way through + right branches from this node. + """ + try: + rightmost_leaf = max(node.treepositions()) + except AttributeError: + return [] + return [node[rightmost_leaf[:i]] for i in range(1, len(rightmost_leaf) + 1)] + + +def _istree(obj): + """Predicate to check whether `obj` is a nltk.tree.Tree.""" + return isinstance(obj, nltk.tree.Tree) + + +def _unique_descendants(node): + """ + Returns the list of all nodes descended from the given node, where + there is only a single path of descent. + """ + results = [] + current = node + while current and _istree(current) and len(current) == 1: + current = current[0] + results.append(current) + return results + + +def _before(node): + """ + Returns the set of all nodes that are before the given node. + """ + try: + pos = node.treeposition() + tree = node.root() + except AttributeError: + return [] + return [tree[x] for x in tree.treepositions() if x[: len(pos)] < pos[: len(x)]] + + +def _immediately_before(node): + """ + Returns the set of all nodes that are immediately before the given + node. + + Tree node A immediately precedes node B if the last terminal + symbol (word) produced by A immediately precedes the first + terminal symbol produced by B. + """ + try: + pos = node.treeposition() + tree = node.root() + except AttributeError: + return [] + # go "upwards" from pos until there is a place we can go to the left + idx = len(pos) - 1 + while 0 <= idx and pos[idx] == 0: + idx -= 1 + if idx < 0: + return [] + pos = list(pos[: idx + 1]) + pos[-1] -= 1 + before = tree[pos] + return [before] + _rightmost_descendants(before) + + +def _after(node): + """ + Returns the set of all nodes that are after the given node. + """ + try: + pos = node.treeposition() + tree = node.root() + except AttributeError: + return [] + return [tree[x] for x in tree.treepositions() if x[: len(pos)] > pos[: len(x)]] + + +def _immediately_after(node): + """ + Returns the set of all nodes that are immediately after the given + node. + + Tree node A immediately follows node B if the first terminal + symbol (word) produced by A immediately follows the last + terminal symbol produced by B. + """ + try: + pos = node.treeposition() + tree = node.root() + current = node.parent() + except AttributeError: + return [] + # go "upwards" from pos until there is a place we can go to the + # right + idx = len(pos) - 1 + while 0 <= idx and pos[idx] == len(current) - 1: + idx -= 1 + current = current.parent() + if idx < 0: + return [] + pos = list(pos[: idx + 1]) + pos[-1] += 1 + after = tree[pos] + return [after] + _leftmost_descendants(after) + + +def _tgrep_node_literal_value(node): + """ + Gets the string value of a given parse tree node, for comparison + using the tgrep node literal predicates. + """ + return node.label() if _istree(node) else str(node) + + +def _tgrep_macro_use_action(_s, _l, tokens): + """ + Builds a lambda function which looks up the macro name used. 
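+
+    For example, ``@NP`` in a search string yields a predicate that, at match
+    time, looks up the macro named ``NP`` in the macro dictionary ``m`` (built
+    by ``_tgrep_exprs_action``) and delegates to it.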
+ """ + assert len(tokens) == 1 + assert tokens[0][0] == "@" + macro_name = tokens[0][1:] + + def macro_use(n, m=None, l=None): + if m is None or macro_name not in m: + raise TgrepException(f"macro {macro_name} not defined") + return m[macro_name](n, m, l) + + return macro_use + + +def _tgrep_node_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + depending on the name of its node. + """ + if tokens[0] == "'": + # strip initial apostrophe (tgrep2 print command) + tokens = tokens[1:] + if len(tokens) > 1: + # disjunctive definition of a node name + assert list(set(tokens[1::2])) == ["|"] + # recursively call self to interpret each node name definition + tokens = [_tgrep_node_action(None, None, [node]) for node in tokens[::2]] + # capture tokens and return the disjunction + return (lambda t: lambda n, m=None, l=None: any(f(n, m, l) for f in t))(tokens) + else: + if hasattr(tokens[0], "__call__"): + # this is a previously interpreted parenthetical node + # definition (lambda function) + return tokens[0] + elif tokens[0] == "*" or tokens[0] == "__": + return lambda n, m=None, l=None: True + elif tokens[0].startswith('"'): + assert tokens[0].endswith('"') + node_lit = tokens[0][1:-1].replace('\\"', '"').replace("\\\\", "\\") + return ( + lambda s: lambda n, m=None, l=None: _tgrep_node_literal_value(n) == s + )(node_lit) + elif tokens[0].startswith("/"): + assert tokens[0].endswith("/") + node_lit = tokens[0][1:-1] + return ( + lambda r: lambda n, m=None, l=None: r.search( + _tgrep_node_literal_value(n) + ) + )(re.compile(node_lit)) + elif tokens[0].startswith("i@"): + node_func = _tgrep_node_action(_s, _l, [tokens[0][2:].lower()]) + return ( + lambda f: lambda n, m=None, l=None: f( + _tgrep_node_literal_value(n).lower() + ) + )(node_func) + else: + return ( + lambda s: lambda n, m=None, l=None: _tgrep_node_literal_value(n) == s + )(tokens[0]) + + +def _tgrep_parens_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + from a parenthetical notation. + """ + assert len(tokens) == 3 + assert tokens[0] == "(" + assert tokens[2] == ")" + return tokens[1] + + +def _tgrep_nltk_tree_pos_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + which returns true if the node is located at a specific tree + position. + """ + # recover the tuple from the parsed string + node_tree_position = tuple(int(x) for x in tokens if x.isdigit()) + # capture the node's tree position + return ( + lambda i: lambda n, m=None, l=None: ( + hasattr(n, "treeposition") and n.treeposition() == i + ) + )(node_tree_position) + + +def _tgrep_relation_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + depending on its relation to other nodes in the tree. + """ + # process negation first if needed + negated = False + if tokens[0] == "!": + negated = True + tokens = tokens[1:] + if tokens[0] == "[": + # process square-bracketed relation expressions + assert len(tokens) == 3 + assert tokens[2] == "]" + retval = tokens[1] + else: + # process operator-node relation expressions + assert len(tokens) == 2 + operator, predicate = tokens + # A < B A is the parent of (immediately dominates) B. + if operator == "<": + retval = lambda n, m=None, l=None: ( + _istree(n) and any(predicate(x, m, l) for x in n) + ) + # A > B A is the child of B. 
+ elif operator == ">": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and predicate(n.parent(), m, l) + ) + # A <, B Synonymous with A <1 B. + elif operator == "<," or operator == "<1": + retval = lambda n, m=None, l=None: ( + _istree(n) and bool(list(n)) and predicate(n[0], m, l) + ) + # A >, B Synonymous with A >1 B. + elif operator == ">," or operator == ">1": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and (n is n.parent()[0]) + and predicate(n.parent(), m, l) + ) + # A N B A is the Nth child of B (the first child is >1). + elif operator[0] == ">" and operator[1:].isdigit(): + idx = int(operator[1:]) + # capture the index parameter + retval = ( + lambda i: lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and 0 <= i < len(n.parent()) + and (n is n.parent()[i]) + and predicate(n.parent(), m, l) + ) + )(idx - 1) + # A <' B B is the last child of A (also synonymous with A <-1 B). + # A <- B B is the last child of A (synonymous with A <-1 B). + elif operator == "<'" or operator == "<-" or operator == "<-1": + retval = lambda n, m=None, l=None: ( + _istree(n) and bool(list(n)) and predicate(n[-1], m, l) + ) + # A >' B A is the last child of B (also synonymous with A >-1 B). + # A >- B A is the last child of B (synonymous with A >-1 B). + elif operator == ">'" or operator == ">-" or operator == ">-1": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and (n is n.parent()[-1]) + and predicate(n.parent(), m, l) + ) + # A <-N B B is the N th-to-last child of A (the last child is <-1). + elif operator[:2] == "<-" and operator[2:].isdigit(): + idx = -int(operator[2:]) + # capture the index parameter + retval = ( + lambda i: lambda n, m=None, l=None: ( + _istree(n) + and bool(list(n)) + and 0 <= (i + len(n)) < len(n) + and predicate(n[i + len(n)], m, l) + ) + )(idx) + # A >-N B A is the N th-to-last child of B (the last child is >-1). + elif operator[:2] == ">-" and operator[2:].isdigit(): + idx = -int(operator[2:]) + # capture the index parameter + retval = ( + lambda i: lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and 0 <= (i + len(n.parent())) < len(n.parent()) + and (n is n.parent()[i + len(n.parent())]) + and predicate(n.parent(), m, l) + ) + )(idx) + # A <: B B is the only child of A + elif operator == "<:": + retval = lambda n, m=None, l=None: ( + _istree(n) and len(n) == 1 and predicate(n[0], m, l) + ) + # A >: B A is the only child of B. + elif operator == ">:": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and len(n.parent()) == 1 + and predicate(n.parent(), m, l) + ) + # A << B A dominates B (A is an ancestor of B). + elif operator == "<<": + retval = lambda n, m=None, l=None: ( + _istree(n) and any(predicate(x, m, l) for x in _descendants(n)) + ) + # A >> B A is dominated by B (A is a descendant of B). + elif operator == ">>": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in ancestors(n) + ) + # A <<, B B is a left-most descendant of A. + elif operator == "<<," or operator == "<<1": + retval = lambda n, m=None, l=None: ( + _istree(n) and any(predicate(x, m, l) for x in _leftmost_descendants(n)) + ) + # A >>, B A is a left-most descendant of B. + elif operator == ">>,": + retval = lambda n, m=None, l=None: any( + (predicate(x, m, l) and n in _leftmost_descendants(x)) + for x in ancestors(n) + ) + # A <<' B B is a right-most descendant of A. 
+ elif operator == "<<'": + retval = lambda n, m=None, l=None: ( + _istree(n) + and any(predicate(x, m, l) for x in _rightmost_descendants(n)) + ) + # A >>' B A is a right-most descendant of B. + elif operator == ">>'": + retval = lambda n, m=None, l=None: any( + (predicate(x, m, l) and n in _rightmost_descendants(x)) + for x in ancestors(n) + ) + # A <<: B There is a single path of descent from A and B is on it. + elif operator == "<<:": + retval = lambda n, m=None, l=None: ( + _istree(n) and any(predicate(x, m, l) for x in _unique_descendants(n)) + ) + # A >>: B There is a single path of descent from B and A is on it. + elif operator == ">>:": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in unique_ancestors(n) + ) + # A . B A immediately precedes B. + elif operator == ".": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in _immediately_after(n) + ) + # A , B A immediately follows B. + elif operator == ",": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in _immediately_before(n) + ) + # A .. B A precedes B. + elif operator == "..": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in _after(n) + ) + # A ,, B A follows B. + elif operator == ",,": + retval = lambda n, m=None, l=None: any( + predicate(x, m, l) for x in _before(n) + ) + # A $ B A is a sister of B (and A != B). + elif operator == "$" or operator == "%": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and bool(n.parent()) + and any(predicate(x, m, l) for x in n.parent() if x is not n) + ) + # A $. B A is a sister of and immediately precedes B. + elif operator == "$." or operator == "%.": + retval = lambda n, m=None, l=None: ( + hasattr(n, "right_sibling") + and bool(n.right_sibling()) + and predicate(n.right_sibling(), m, l) + ) + # A $, B A is a sister of and immediately follows B. + elif operator == "$," or operator == "%,": + retval = lambda n, m=None, l=None: ( + hasattr(n, "left_sibling") + and bool(n.left_sibling()) + and predicate(n.left_sibling(), m, l) + ) + # A $.. B A is a sister of and precedes B. + elif operator == "$.." or operator == "%..": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and hasattr(n, "parent_index") + and bool(n.parent()) + and any(predicate(x, m, l) for x in n.parent()[n.parent_index() + 1 :]) + ) + # A $,, B A is a sister of and follows B. + elif operator == "$,," or operator == "%,,": + retval = lambda n, m=None, l=None: ( + hasattr(n, "parent") + and hasattr(n, "parent_index") + and bool(n.parent()) + and any(predicate(x, m, l) for x in n.parent()[: n.parent_index()]) + ) + else: + raise TgrepException(f'cannot interpret tgrep operator "{operator}"') + # now return the built function + if negated: + return (lambda r: (lambda n, m=None, l=None: not r(n, m, l)))(retval) + else: + return retval + + +def _tgrep_conjunction_action(_s, _l, tokens, join_char="&"): + """ + Builds a lambda function representing a predicate on a tree node + from the conjunction of several other such lambda functions. + + This is prototypically called for expressions like + (`tgrep_rel_conjunction`):: + + < NP & < AP < VP + + where tokens is a list of predicates representing the relations + (`< NP`, `< AP`, and `< VP`), possibly with the character `&` + included (as in the example here). + + This is also called for expressions like (`tgrep_node_expr2`):: + + NP < NN + S=s < /NP/=n : s < /VP/=v : n .. 
v + + tokens[0] is a tgrep_expr predicate; tokens[1:] are an (optional) + list of segmented patterns (`tgrep_expr_labeled`, processed by + `_tgrep_segmented_pattern_action`). + """ + # filter out the ampersand + tokens = [x for x in tokens if x != join_char] + if len(tokens) == 1: + return tokens[0] + else: + return ( + lambda ts: lambda n, m=None, l=None: all( + predicate(n, m, l) for predicate in ts + ) + )(tokens) + + +def _tgrep_segmented_pattern_action(_s, _l, tokens): + """ + Builds a lambda function representing a segmented pattern. + + Called for expressions like (`tgrep_expr_labeled`):: + + =s .. =v < =n + + This is a segmented pattern, a tgrep2 expression which begins with + a node label. + + The problem is that for segemented_pattern_action (': =v < =s'), + the first element (in this case, =v) is specifically selected by + virtue of matching a particular node in the tree; to retrieve + the node, we need the label, not a lambda function. For node + labels inside a tgrep_node_expr, we need a lambda function which + returns true if the node visited is the same as =v. + + We solve this by creating two copies of a node_label_use in the + grammar; the label use inside a tgrep_expr_labeled has a separate + parse action to the pred use inside a node_expr. See + `_tgrep_node_label_use_action` and + `_tgrep_node_label_pred_use_action`. + """ + # tokens[0] is a string containing the node label + node_label = tokens[0] + # tokens[1:] is an (optional) list of predicates which must all + # hold of the bound node + reln_preds = tokens[1:] + + def pattern_segment_pred(n, m=None, l=None): + """This predicate function ignores its node argument.""" + # look up the bound node using its label + if l is None or node_label not in l: + raise TgrepException(f"node_label ={node_label} not bound in pattern") + node = l[node_label] + # match the relation predicates against the node + return all(pred(node, m, l) for pred in reln_preds) + + return pattern_segment_pred + + +def _tgrep_node_label_use_action(_s, _l, tokens): + """ + Returns the node label used to begin a tgrep_expr_labeled. See + `_tgrep_segmented_pattern_action`. + + Called for expressions like (`tgrep_node_label_use`):: + + =s + + when they appear as the first element of a `tgrep_expr_labeled` + expression (see `_tgrep_segmented_pattern_action`). + + It returns the node label. + """ + assert len(tokens) == 1 + assert tokens[0].startswith("=") + return tokens[0][1:] + + +def _tgrep_node_label_pred_use_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + which describes the use of a previously bound node label. + + Called for expressions like (`tgrep_node_label_use_pred`):: + + =s + + when they appear inside a tgrep_node_expr (for example, inside a + relation). The predicate returns true if and only if its node + argument is identical the the node looked up in the node label + dictionary using the node's label. 
+ """ + assert len(tokens) == 1 + assert tokens[0].startswith("=") + node_label = tokens[0][1:] + + def node_label_use_pred(n, m=None, l=None): + # look up the bound node using its label + if l is None or node_label not in l: + raise TgrepException(f"node_label ={node_label} not bound in pattern") + node = l[node_label] + # truth means the given node is this node + return n is node + + return node_label_use_pred + + +def _tgrep_bind_node_label_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + which can optionally bind a matching node into the tgrep2 string's + label_dict. + + Called for expressions like (`tgrep_node_expr2`):: + + /NP/ + @NP=n + """ + # tokens[0] is a tgrep_node_expr + if len(tokens) == 1: + return tokens[0] + else: + # if present, tokens[1] is the character '=', and tokens[2] is + # a tgrep_node_label, a string value containing the node label + assert len(tokens) == 3 + assert tokens[1] == "=" + node_pred = tokens[0] + node_label = tokens[2] + + def node_label_bind_pred(n, m=None, l=None): + if node_pred(n, m, l): + # bind `n` into the dictionary `l` + if l is None: + raise TgrepException( + "cannot bind node_label {}: label_dict is None".format( + node_label + ) + ) + l[node_label] = n + return True + else: + return False + + return node_label_bind_pred + + +def _tgrep_rel_disjunction_action(_s, _l, tokens): + """ + Builds a lambda function representing a predicate on a tree node + from the disjunction of several other such lambda functions. + """ + # filter out the pipe + tokens = [x for x in tokens if x != "|"] + if len(tokens) == 1: + return tokens[0] + elif len(tokens) == 2: + return (lambda a, b: lambda n, m=None, l=None: a(n, m, l) or b(n, m, l))( + tokens[0], tokens[1] + ) + + +def _macro_defn_action(_s, _l, tokens): + """ + Builds a dictionary structure which defines the given macro. + """ + assert len(tokens) == 3 + assert tokens[0] == "@" + return {tokens[1]: tokens[2]} + + +def _tgrep_exprs_action(_s, _l, tokens): + """ + This is the top-lebel node in a tgrep2 search string; the + predicate function it returns binds together all the state of a + tgrep2 search string. + + Builds a lambda function representing a predicate on a tree node + from the disjunction of several tgrep expressions. Also handles + macro definitions and macro name binding, and node label + definitions and node label binding. + """ + if len(tokens) == 1: + return lambda n, m=None, l=None: tokens[0](n, None, {}) + # filter out all the semicolons + tokens = [x for x in tokens if x != ";"] + # collect all macro definitions + macro_dict = {} + macro_defs = [tok for tok in tokens if isinstance(tok, dict)] + for macro_def in macro_defs: + macro_dict.update(macro_def) + # collect all tgrep expressions + tgrep_exprs = [tok for tok in tokens if not isinstance(tok, dict)] + # create a new scope for the node label dictionary + def top_level_pred(n, m=macro_dict, l=None): + label_dict = {} + # bind macro definitions and OR together all tgrep_exprs + return any(predicate(n, m, label_dict) for predicate in tgrep_exprs) + + return top_level_pred + + +def _build_tgrep_parser(set_parse_actions=True): + """ + Builds a pyparsing-based parser object for tokenizing and + interpreting tgrep search strings. 
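+
+    With ``set_parse_actions=False`` the returned parser merely tokenizes the
+    search string (as used by ``tgrep_tokenize``); with parse actions enabled
+    it produces the predicate functions consumed by ``tgrep_compile``.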
+ """ + tgrep_op = pyparsing.Optional("!") + pyparsing.Regex("[$%,.<>][%,.<>0-9-':]*") + tgrep_qstring = pyparsing.QuotedString( + quoteChar='"', escChar="\\", unquoteResults=False + ) + tgrep_node_regex = pyparsing.QuotedString( + quoteChar="/", escChar="\\", unquoteResults=False + ) + tgrep_qstring_icase = pyparsing.Regex('i@\\"(?:[^"\\n\\r\\\\]|(?:\\\\.))*\\"') + tgrep_node_regex_icase = pyparsing.Regex("i@\\/(?:[^/\\n\\r\\\\]|(?:\\\\.))*\\/") + tgrep_node_literal = pyparsing.Regex("[^][ \r\t\n;:.,&|<>()$!@%'^=]+") + tgrep_expr = pyparsing.Forward() + tgrep_relations = pyparsing.Forward() + tgrep_parens = pyparsing.Literal("(") + tgrep_expr + ")" + tgrep_nltk_tree_pos = ( + pyparsing.Literal("N(") + + pyparsing.Optional( + pyparsing.Word(pyparsing.nums) + + "," + + pyparsing.Optional( + pyparsing.delimitedList(pyparsing.Word(pyparsing.nums), delim=",") + + pyparsing.Optional(",") + ) + ) + + ")" + ) + tgrep_node_label = pyparsing.Regex("[A-Za-z0-9]+") + tgrep_node_label_use = pyparsing.Combine("=" + tgrep_node_label) + # see _tgrep_segmented_pattern_action + tgrep_node_label_use_pred = tgrep_node_label_use.copy() + macro_name = pyparsing.Regex("[^];:.,&|<>()[$!@%'^=\r\t\n ]+") + macro_name.setWhitespaceChars("") + macro_use = pyparsing.Combine("@" + macro_name) + tgrep_node_expr = ( + tgrep_node_label_use_pred + | macro_use + | tgrep_nltk_tree_pos + | tgrep_qstring_icase + | tgrep_node_regex_icase + | tgrep_qstring + | tgrep_node_regex + | "*" + | tgrep_node_literal + ) + tgrep_node_expr2 = ( + tgrep_node_expr + + pyparsing.Literal("=").setWhitespaceChars("") + + tgrep_node_label.copy().setWhitespaceChars("") + ) | tgrep_node_expr + tgrep_node = tgrep_parens | ( + pyparsing.Optional("'") + + tgrep_node_expr2 + + pyparsing.ZeroOrMore("|" + tgrep_node_expr) + ) + tgrep_brackets = pyparsing.Optional("!") + "[" + tgrep_relations + "]" + tgrep_relation = tgrep_brackets | (tgrep_op + tgrep_node) + tgrep_rel_conjunction = pyparsing.Forward() + tgrep_rel_conjunction << ( + tgrep_relation + + pyparsing.ZeroOrMore(pyparsing.Optional("&") + tgrep_rel_conjunction) + ) + tgrep_relations << tgrep_rel_conjunction + pyparsing.ZeroOrMore( + "|" + tgrep_relations + ) + tgrep_expr << tgrep_node + pyparsing.Optional(tgrep_relations) + tgrep_expr_labeled = tgrep_node_label_use + pyparsing.Optional(tgrep_relations) + tgrep_expr2 = tgrep_expr + pyparsing.ZeroOrMore(":" + tgrep_expr_labeled) + macro_defn = ( + pyparsing.Literal("@") + pyparsing.White().suppress() + macro_name + tgrep_expr2 + ) + tgrep_exprs = ( + pyparsing.Optional(macro_defn + pyparsing.ZeroOrMore(";" + macro_defn) + ";") + + tgrep_expr2 + + pyparsing.ZeroOrMore(";" + (macro_defn | tgrep_expr2)) + + pyparsing.ZeroOrMore(";").suppress() + ) + if set_parse_actions: + tgrep_node_label_use.setParseAction(_tgrep_node_label_use_action) + tgrep_node_label_use_pred.setParseAction(_tgrep_node_label_pred_use_action) + macro_use.setParseAction(_tgrep_macro_use_action) + tgrep_node.setParseAction(_tgrep_node_action) + tgrep_node_expr2.setParseAction(_tgrep_bind_node_label_action) + tgrep_parens.setParseAction(_tgrep_parens_action) + tgrep_nltk_tree_pos.setParseAction(_tgrep_nltk_tree_pos_action) + tgrep_relation.setParseAction(_tgrep_relation_action) + tgrep_rel_conjunction.setParseAction(_tgrep_conjunction_action) + tgrep_relations.setParseAction(_tgrep_rel_disjunction_action) + macro_defn.setParseAction(_macro_defn_action) + # the whole expression is also the conjunction of two + # predicates: the first node predicate, and the remaining + # relation 
predicates + tgrep_expr.setParseAction(_tgrep_conjunction_action) + tgrep_expr_labeled.setParseAction(_tgrep_segmented_pattern_action) + tgrep_expr2.setParseAction( + functools.partial(_tgrep_conjunction_action, join_char=":") + ) + tgrep_exprs.setParseAction(_tgrep_exprs_action) + return tgrep_exprs.ignore("#" + pyparsing.restOfLine) + + +def tgrep_tokenize(tgrep_string): + """ + Tokenizes a TGrep search string into separate tokens. + """ + parser = _build_tgrep_parser(False) + if isinstance(tgrep_string, bytes): + tgrep_string = tgrep_string.decode() + return list(parser.parseString(tgrep_string)) + + +def tgrep_compile(tgrep_string): + """ + Parses (and tokenizes, if necessary) a TGrep search string into a + lambda function. + """ + parser = _build_tgrep_parser(True) + if isinstance(tgrep_string, bytes): + tgrep_string = tgrep_string.decode() + return list(parser.parseString(tgrep_string, parseAll=True))[0] + + +def treepositions_no_leaves(tree): + """ + Returns all the tree positions in the given tree which are not + leaf nodes. + """ + treepositions = tree.treepositions() + # leaves are treeposition tuples that are not prefixes of any + # other treeposition + prefixes = set() + for pos in treepositions: + for length in range(len(pos)): + prefixes.add(pos[:length]) + return [pos for pos in treepositions if pos in prefixes] + + +def tgrep_positions(pattern, trees, search_leaves=True): + """ + Return the tree positions in the trees which match the given pattern. + + :param pattern: a tgrep search pattern + :type pattern: str or output of tgrep_compile() + :param trees: a sequence of NLTK trees (usually ParentedTrees) + :type trees: iter(ParentedTree) or iter(Tree) + :param search_leaves: whether to return matching leaf nodes + :type search_leaves: bool + :rtype: iter(tree positions) + """ + + if isinstance(pattern, (bytes, str)): + pattern = tgrep_compile(pattern) + + for tree in trees: + try: + if search_leaves: + positions = tree.treepositions() + else: + positions = treepositions_no_leaves(tree) + yield [position for position in positions if pattern(tree[position])] + except AttributeError: + yield [] + + +def tgrep_nodes(pattern, trees, search_leaves=True): + """ + Return the tree nodes in the trees which match the given pattern. + + :param pattern: a tgrep search pattern + :type pattern: str or output of tgrep_compile() + :param trees: a sequence of NLTK trees (usually ParentedTrees) + :type trees: iter(ParentedTree) or iter(Tree) + :param search_leaves: whether to return matching leaf nodes + :type search_leaves: bool + :rtype: iter(tree nodes) + """ + + if isinstance(pattern, (bytes, str)): + pattern = tgrep_compile(pattern) + + for tree in trees: + try: + if search_leaves: + positions = tree.treepositions() + else: + positions = treepositions_no_leaves(tree) + yield [tree[position] for position in positions if pattern(tree[position])] + except AttributeError: + yield [] diff --git a/lib/python3.10/site-packages/nltk/toolbox.py b/lib/python3.10/site-packages/nltk/toolbox.py new file mode 100644 index 0000000000000000000000000000000000000000..40155cbaec4f2554a26e1762f7b86bd7eeefb5b9 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/toolbox.py @@ -0,0 +1,524 @@ +# Natural Language Toolkit: Toolbox Reader +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Greg Aumann +# URL: +# For license information, see LICENSE.TXT + +""" +Module for reading, writing and manipulating +Toolbox databases and settings files. 
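+
+A minimal usage sketch (illustrative only, assuming a small standard format
+marker fragment held in memory; ``StandardFormat`` and its methods are
+defined below)::
+
+    sf = StandardFormat()
+    sf.open_string('\\lx kaa\n\\ps V.A\n\\ge gag\n')
+    print(list(sf.fields()))
+    # expected: [('lx', 'kaa'), ('ps', 'V.A'), ('ge', 'gag')]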
+""" + +import codecs +import re +from io import StringIO +from xml.etree.ElementTree import Element, ElementTree, SubElement, TreeBuilder + +from nltk.data import PathPointer, find + + +class StandardFormat: + """ + Class for reading and processing standard format marker files and strings. + """ + + def __init__(self, filename=None, encoding=None): + self._encoding = encoding + if filename is not None: + self.open(filename) + + def open(self, sfm_file): + """ + Open a standard format marker file for sequential reading. + + :param sfm_file: name of the standard format marker input file + :type sfm_file: str + """ + if isinstance(sfm_file, PathPointer): + self._file = sfm_file.open(self._encoding) + else: + self._file = codecs.open(sfm_file, "r", self._encoding) + + def open_string(self, s): + """ + Open a standard format marker string for sequential reading. + + :param s: string to parse as a standard format marker input file + :type s: str + """ + self._file = StringIO(s) + + def raw_fields(self): + """ + Return an iterator that returns the next field in a (marker, value) + tuple. Linebreaks and trailing white space are preserved except + for the final newline in each field. + + :rtype: iter(tuple(str, str)) + """ + join_string = "\n" + line_regexp = r"^%s(?:\\(\S+)\s*)?(.*)$" + # discard a BOM in the first line + first_line_pat = re.compile(line_regexp % "(?:\xef\xbb\xbf)?") + line_pat = re.compile(line_regexp % "") + # need to get first line outside the loop for correct handling + # of the first marker if it spans multiple lines + file_iter = iter(self._file) + # PEP 479, prevent RuntimeError when StopIteration is raised inside generator + try: + line = next(file_iter) + except StopIteration: + # no more data is available, terminate the generator + return + mobj = re.match(first_line_pat, line) + mkr, line_value = mobj.groups() + value_lines = [line_value] + self.line_num = 0 + for line in file_iter: + self.line_num += 1 + mobj = re.match(line_pat, line) + line_mkr, line_value = mobj.groups() + if line_mkr: + yield (mkr, join_string.join(value_lines)) + mkr = line_mkr + value_lines = [line_value] + else: + value_lines.append(line_value) + self.line_num += 1 + yield (mkr, join_string.join(value_lines)) + + def fields( + self, + strip=True, + unwrap=True, + encoding=None, + errors="strict", + unicode_fields=None, + ): + """ + Return an iterator that returns the next field in a ``(marker, value)`` + tuple, where ``marker`` and ``value`` are unicode strings if an ``encoding`` + was specified in the ``fields()`` method. Otherwise they are non-unicode strings. + + :param strip: strip trailing whitespace from the last line of each field + :type strip: bool + :param unwrap: Convert newlines in a field to spaces. + :type unwrap: bool + :param encoding: Name of an encoding to use. If it is specified then + the ``fields()`` method returns unicode strings rather than non + unicode strings. + :type encoding: str or None + :param errors: Error handling scheme for codec. Same as the ``decode()`` + builtin string method. + :type errors: str + :param unicode_fields: Set of marker names whose values are UTF-8 encoded. + Ignored if encoding is None. If the whole file is UTF-8 encoded set + ``encoding='utf8'`` and leave ``unicode_fields`` with its default + value of None. 
+ :type unicode_fields: sequence + :rtype: iter(tuple(str, str)) + """ + if encoding is None and unicode_fields is not None: + raise ValueError("unicode_fields is set but not encoding.") + unwrap_pat = re.compile(r"\n+") + for mkr, val in self.raw_fields(): + if unwrap: + val = unwrap_pat.sub(" ", val) + if strip: + val = val.rstrip() + yield (mkr, val) + + def close(self): + """Close a previously opened standard format marker file or string.""" + self._file.close() + try: + del self.line_num + except AttributeError: + pass + + +class ToolboxData(StandardFormat): + def parse(self, grammar=None, **kwargs): + if grammar: + return self._chunk_parse(grammar=grammar, **kwargs) + else: + return self._record_parse(**kwargs) + + def _record_parse(self, key=None, **kwargs): + r""" + Returns an element tree structure corresponding to a toolbox data file with + all markers at the same level. + + Thus the following Toolbox database:: + \_sh v3.0 400 Rotokas Dictionary + \_DateStampHasFourDigitYear + + \lx kaa + \ps V.A + \ge gag + \gp nek i pas + + \lx kaa + \ps V.B + \ge strangle + \gp pasim nek + + after parsing will end up with the same structure (ignoring the extra + whitespace) as the following XML fragment after being parsed by + ElementTree:: + +
+            <toolbox_data>
+                <header>
+                    <_sh>v3.0 400 Rotokas Dictionary</_sh>
+                    <_DateStampHasFourDigitYear/>
+                </header>
+
+                <record>
+                    <lx>kaa</lx>
+                    <ps>V.A</ps>
+                    <ge>gag</ge>
+                    <gp>nek i pas</gp>
+                </record>
+
+                <record>
+                    <lx>kaa</lx>
+                    <ps>V.B</ps>
+                    <ge>strangle</ge>
+                    <gp>pasim nek</gp>
+                </record>
+            </toolbox_data>
+ + :param key: Name of key marker at the start of each record. If set to + None (the default value) the first marker that doesn't begin with + an underscore is assumed to be the key. + :type key: str + :param kwargs: Keyword arguments passed to ``StandardFormat.fields()`` + :type kwargs: dict + :rtype: ElementTree._ElementInterface + :return: contents of toolbox data divided into header and records + """ + builder = TreeBuilder() + builder.start("toolbox_data", {}) + builder.start("header", {}) + in_records = False + for mkr, value in self.fields(**kwargs): + if key is None and not in_records and mkr[0] != "_": + key = mkr + if mkr == key: + if in_records: + builder.end("record") + else: + builder.end("header") + in_records = True + builder.start("record", {}) + builder.start(mkr, {}) + builder.data(value) + builder.end(mkr) + if in_records: + builder.end("record") + else: + builder.end("header") + builder.end("toolbox_data") + return builder.close() + + def _tree2etree(self, parent): + from nltk.tree import Tree + + root = Element(parent.label()) + for child in parent: + if isinstance(child, Tree): + root.append(self._tree2etree(child)) + else: + text, tag = child + e = SubElement(root, tag) + e.text = text + return root + + def _chunk_parse(self, grammar=None, root_label="record", trace=0, **kwargs): + """ + Returns an element tree structure corresponding to a toolbox data file + parsed according to the chunk grammar. + + :type grammar: str + :param grammar: Contains the chunking rules used to parse the + database. See ``chunk.RegExp`` for documentation. + :type root_label: str + :param root_label: The node value that should be used for the + top node of the chunk structure. + :type trace: int + :param trace: The level of tracing that should be used when + parsing a text. ``0`` will generate no tracing output; + ``1`` will generate normal tracing output; and ``2`` or + higher will generate verbose tracing output. + :type kwargs: dict + :param kwargs: Keyword arguments passed to ``toolbox.StandardFormat.fields()`` + :rtype: ElementTree._ElementInterface + """ + from nltk import chunk + from nltk.tree import Tree + + cp = chunk.RegexpParser(grammar, root_label=root_label, trace=trace) + db = self.parse(**kwargs) + tb_etree = Element("toolbox_data") + header = db.find("header") + tb_etree.append(header) + for record in db.findall("record"): + parsed = cp.parse([(elem.text, elem.tag) for elem in record]) + tb_etree.append(self._tree2etree(parsed)) + return tb_etree + + +_is_value = re.compile(r"\S") + + +def to_sfm_string(tree, encoding=None, errors="strict", unicode_fields=None): + """ + Return a string with a standard format representation of the toolbox + data in tree (tree can be a toolbox database or a single record). + + :param tree: flat representation of toolbox data (whole database or single record) + :type tree: ElementTree._ElementInterface + :param encoding: Name of an encoding to use. + :type encoding: str + :param errors: Error handling scheme for codec. Same as the ``encode()`` + builtin string method. 
+ :type errors: str + :param unicode_fields: + :type unicode_fields: dict(str) or set(str) + :rtype: str + """ + if tree.tag == "record": + root = Element("toolbox_data") + root.append(tree) + tree = root + + if tree.tag != "toolbox_data": + raise ValueError("not a toolbox_data element structure") + if encoding is None and unicode_fields is not None: + raise ValueError( + "if encoding is not specified then neither should unicode_fields" + ) + l = [] + for rec in tree: + l.append("\n") + for field in rec: + mkr = field.tag + value = field.text + if encoding is not None: + if unicode_fields is not None and mkr in unicode_fields: + cur_encoding = "utf8" + else: + cur_encoding = encoding + if re.search(_is_value, value): + l.append((f"\\{mkr} {value}\n").encode(cur_encoding, errors)) + else: + l.append((f"\\{mkr}{value}\n").encode(cur_encoding, errors)) + else: + if re.search(_is_value, value): + l.append(f"\\{mkr} {value}\n") + else: + l.append(f"\\{mkr}{value}\n") + return "".join(l[1:]) + + +class ToolboxSettings(StandardFormat): + """This class is the base class for settings files.""" + + def __init__(self): + super().__init__() + + def parse(self, encoding=None, errors="strict", **kwargs): + """ + Return the contents of toolbox settings file with a nested structure. + + :param encoding: encoding used by settings file + :type encoding: str + :param errors: Error handling scheme for codec. Same as ``decode()`` builtin method. + :type errors: str + :param kwargs: Keyword arguments passed to ``StandardFormat.fields()`` + :type kwargs: dict + :rtype: ElementTree._ElementInterface + """ + builder = TreeBuilder() + for mkr, value in self.fields(encoding=encoding, errors=errors, **kwargs): + # Check whether the first char of the field marker + # indicates a block start (+) or end (-) + block = mkr[0] + if block in ("+", "-"): + mkr = mkr[1:] + else: + block = None + # Build tree on the basis of block char + if block == "+": + builder.start(mkr, {}) + builder.data(value) + elif block == "-": + builder.end(mkr) + else: + builder.start(mkr, {}) + builder.data(value) + builder.end(mkr) + return builder.close() + + +def to_settings_string(tree, encoding=None, errors="strict", unicode_fields=None): + # write XML to file + l = list() + _to_settings_string( + tree.getroot(), + l, + encoding=encoding, + errors=errors, + unicode_fields=unicode_fields, + ) + return "".join(l) + + +def _to_settings_string(node, l, **kwargs): + # write XML to file + tag = node.tag + text = node.text + if len(node) == 0: + if text: + l.append(f"\\{tag} {text}\n") + else: + l.append("\\%s\n" % tag) + else: + if text: + l.append(f"\\+{tag} {text}\n") + else: + l.append("\\+%s\n" % tag) + for n in node: + _to_settings_string(n, l, **kwargs) + l.append("\\-%s\n" % tag) + return + + +def remove_blanks(elem): + """ + Remove all elements and subelements with no text and no child elements. + + :param elem: toolbox data in an elementtree structure + :type elem: ElementTree._ElementInterface + """ + out = list() + for child in elem: + remove_blanks(child) + if child.text or len(child) > 0: + out.append(child) + elem[:] = out + + +def add_default_fields(elem, default_fields): + """ + Add blank elements and subelements specified in default_fields. 
+ + :param elem: toolbox data in an elementtree structure + :type elem: ElementTree._ElementInterface + :param default_fields: fields to add to each type of element and subelement + :type default_fields: dict(tuple) + """ + for field in default_fields.get(elem.tag, []): + if elem.find(field) is None: + SubElement(elem, field) + for child in elem: + add_default_fields(child, default_fields) + + +def sort_fields(elem, field_orders): + """ + Sort the elements and subelements in order specified in field_orders. + + :param elem: toolbox data in an elementtree structure + :type elem: ElementTree._ElementInterface + :param field_orders: order of fields for each type of element and subelement + :type field_orders: dict(tuple) + """ + order_dicts = dict() + for field, order in field_orders.items(): + order_dicts[field] = order_key = dict() + for i, subfield in enumerate(order): + order_key[subfield] = i + _sort_fields(elem, order_dicts) + + +def _sort_fields(elem, orders_dicts): + """sort the children of elem""" + try: + order = orders_dicts[elem.tag] + except KeyError: + pass + else: + tmp = sorted( + ((order.get(child.tag, 1e9), i), child) for i, child in enumerate(elem) + ) + elem[:] = [child for key, child in tmp] + for child in elem: + if len(child): + _sort_fields(child, orders_dicts) + + +def add_blank_lines(tree, blanks_before, blanks_between): + """ + Add blank lines before all elements and subelements specified in blank_before. + + :param elem: toolbox data in an elementtree structure + :type elem: ElementTree._ElementInterface + :param blank_before: elements and subelements to add blank lines before + :type blank_before: dict(tuple) + """ + try: + before = blanks_before[tree.tag] + between = blanks_between[tree.tag] + except KeyError: + for elem in tree: + if len(elem): + add_blank_lines(elem, blanks_before, blanks_between) + else: + last_elem = None + for elem in tree: + tag = elem.tag + if last_elem is not None and last_elem.tag != tag: + if tag in before and last_elem is not None: + e = last_elem.getiterator()[-1] + e.text = (e.text or "") + "\n" + else: + if tag in between: + e = last_elem.getiterator()[-1] + e.text = (e.text or "") + "\n" + if len(elem): + add_blank_lines(elem, blanks_before, blanks_between) + last_elem = elem + + +def demo(): + from itertools import islice + + # zip_path = find('corpora/toolbox.zip') + # lexicon = ToolboxData(ZipFilePathPointer(zip_path, 'toolbox/rotokas.dic')).parse() + file_path = find("corpora/toolbox/rotokas.dic") + lexicon = ToolboxData(file_path).parse() + print("first field in fourth record:") + print(lexicon[3][0].tag) + print(lexicon[3][0].text) + + print("\nfields in sequential order:") + for field in islice(lexicon.find("record"), 10): + print(field.tag, field.text) + + print("\nlx fields:") + for field in islice(lexicon.findall("record/lx"), 10): + print(field.text) + + settings = ToolboxSettings() + file_path = find("corpora/toolbox/MDF/MDF_AltH.typ") + settings.open(file_path) + # settings.open(ZipFilePathPointer(zip_path, entry='toolbox/MDF/MDF_AltH.typ')) + tree = settings.parse(unwrap=False, encoding="cp1252") + print(tree.find("expset/expMDF/rtfPageSetup/paperSize").text) + settings_tree = ElementTree(tree) + print(to_settings_string(settings_tree).encode("utf8")) + + +if __name__ == "__main__": + demo() diff --git a/lib/python3.10/site-packages/nltk/treeprettyprinter.py b/lib/python3.10/site-packages/nltk/treeprettyprinter.py new file mode 100644 index 
0000000000000000000000000000000000000000..ed4e766b47c123d66b7df326cbfde26a2db99363 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/treeprettyprinter.py @@ -0,0 +1,28 @@ +# Natural Language Toolkit: ASCII visualization of NLTK trees +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Andreas van Cranenburgh +# Peter Ljunglöf +# URL: +# For license information, see LICENSE.TXT + +""" +Pretty-printing of discontinuous trees. +Adapted from the disco-dop project, by Andreas van Cranenburgh. +https://github.com/andreasvc/disco-dop + +Interesting reference (not used for this code): +T. Eschbach et al., Orth. Hypergraph Drawing, Journal of +Graph Algorithms and Applications, 10(2) 141--157 (2006)149. +https://jgaa.info/accepted/2006/EschbachGuentherBecker2006.10.2.pdf +""" + +from nltk.internals import Deprecated +from nltk.tree.prettyprinter import TreePrettyPrinter as TPP + + +class TreePrettyPrinter(Deprecated, TPP): + """Import `TreePrettyPrinter` using `from nltk.tree import TreePrettyPrinter` instead.""" + + +__all__ = ["TreePrettyPrinter"] diff --git a/lib/python3.10/site-packages/nltk/treetransforms.py b/lib/python3.10/site-packages/nltk/treetransforms.py new file mode 100644 index 0000000000000000000000000000000000000000..6ebc061f321c701c7851370cd00cacb4499a256c --- /dev/null +++ b/lib/python3.10/site-packages/nltk/treetransforms.py @@ -0,0 +1,126 @@ +# Natural Language Toolkit: Tree Transformations +# +# Copyright (C) 2005-2007 Oregon Graduate Institute +# Author: Nathan Bodenstab +# URL: +# For license information, see LICENSE.TXT + +r""" +A collection of methods for tree (grammar) transformations used +in parsing natural language. + +Although many of these methods are technically grammar transformations +(ie. Chomsky Norm Form), when working with treebanks it is much more +natural to visualize these modifications in a tree structure. Hence, +we will do all transformation directly to the tree itself. +Transforming the tree directly also allows us to do parent annotation. +A grammar can then be simply induced from the modified tree. + +The following is a short tutorial on the available transformations. + + 1. Chomsky Normal Form (binarization) + + It is well known that any grammar has a Chomsky Normal Form (CNF) + equivalent grammar where CNF is defined by every production having + either two non-terminals or one terminal on its right hand side. + When we have hierarchically structured data (ie. a treebank), it is + natural to view this in terms of productions where the root of every + subtree is the head (left hand side) of the production and all of + its children are the right hand side constituents. In order to + convert a tree into CNF, we simply need to ensure that every subtree + has either two subtrees as children (binarization), or one leaf node + (non-terminal). In order to binarize a subtree with more than two + children, we must introduce artificial nodes. + + There are two popular methods to convert a tree into CNF: left + factoring and right factoring. The following example demonstrates + the difference between them. Example:: + + Original Right-Factored Left-Factored + + A A A + / | \ / \ / \ + B C D ==> B A| OR A| D + / \ / \ + C D B C + + 2. Parent Annotation + + In addition to binarizing the tree, there are two standard + modifications to node labels we can do in the same traversal: parent + annotation and Markov order-N smoothing (or sibling smoothing). 
+ + The purpose of parent annotation is to refine the probabilities of + productions by adding a small amount of context. With this simple + addition, a CYK (inside-outside, dynamic programming chart parse) + can improve from 74% to 79% accuracy. A natural generalization from + parent annotation is to grandparent annotation and beyond. The + tradeoff becomes accuracy gain vs. computational complexity. We + must also keep in mind data sparcity issues. Example:: + + Original Parent Annotation + + A A^ + / | \ / \ + B C D ==> B^
A|^ where ? is the + / \ parent of A + C^ D^ + + + 3. Markov order-N smoothing + + Markov smoothing combats data sparcity issues as well as decreasing + computational requirements by limiting the number of children + included in artificial nodes. In practice, most people use an order + 2 grammar. Example:: + + Original No Smoothing Markov order 1 Markov order 2 etc. + + __A__ A A A + / /|\ \ / \ / \ / \ + B C D E F ==> B A| ==> B A| ==> B A| + / \ / \ / \ + C ... C ... C ... + + + + Annotation decisions can be thought about in the vertical direction + (parent, grandparent, etc) and the horizontal direction (number of + siblings to keep). Parameters to the following functions specify + these values. For more information see: + + Dan Klein and Chris Manning (2003) "Accurate Unlexicalized + Parsing", ACL-03. https://www.aclweb.org/anthology/P03-1054 + + 4. Unary Collapsing + + Collapse unary productions (ie. subtrees with a single child) into a + new non-terminal (Tree node). This is useful when working with + algorithms that do not allow unary productions, yet you do not wish + to lose the parent information. Example:: + + A + | + B ==> A+B + / \ / \ + C D C D + +""" + +from nltk.internals import deprecated +from nltk.tree.transforms import chomsky_normal_form as cnf +from nltk.tree.transforms import collapse_unary as cu +from nltk.tree.transforms import un_chomsky_normal_form as ucnf + +chomsky_normal_form = deprecated( + "Import using `from nltk.tree import chomsky_normal_form` instead." +)(cnf) +un_chomsky_normal_form = deprecated( + "Import using `from nltk.tree import un_chomsky_normal_form` instead." +)(ucnf) +collapse_unary = deprecated( + "Import using `from nltk.tree import collapse_unary` instead." +)(cu) + + +__all__ = ["chomsky_normal_form", "un_chomsky_normal_form", "collapse_unary"] diff --git a/lib/python3.10/site-packages/nltk/util.py b/lib/python3.10/site-packages/nltk/util.py new file mode 100644 index 0000000000000000000000000000000000000000..4d2d96fb74f2ec375596ae8761f565351cbedf31 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/util.py @@ -0,0 +1,1216 @@ +# Natural Language Toolkit: Utility functions +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Steven Bird +# Eric Kafe (acyclic closures) +# URL: +# For license information, see LICENSE.TXT + +import inspect +import locale +import os +import pydoc +import re +import textwrap +import warnings +from collections import defaultdict, deque +from itertools import chain, combinations, islice, tee +from pprint import pprint +from urllib.request import ( + HTTPPasswordMgrWithDefaultRealm, + ProxyBasicAuthHandler, + ProxyDigestAuthHandler, + ProxyHandler, + build_opener, + getproxies, + install_opener, +) + +from nltk.collections import * +from nltk.internals import deprecated, raise_unorderable_types, slice_bounds + +###################################################################### +# Short usage message +###################################################################### + + +@deprecated("Use help(obj) instead.") +def usage(obj): + str(obj) # In case it's lazy, this will load it. 
+ + if not isinstance(obj, type): + obj = obj.__class__ + + print(f"{obj.__name__} supports the following operations:") + for (name, method) in sorted(pydoc.allmethods(obj).items()): + if name.startswith("_"): + continue + if getattr(method, "__deprecated__", False): + continue + + try: + sig = str(inspect.signature(method)) + except ValueError as e: + # builtins sometimes don't support introspection + if "builtin" in str(e): + continue + else: + raise + + args = sig.lstrip("(").rstrip(")").split(", ") + meth = inspect.getattr_static(obj, name) + if isinstance(meth, (classmethod, staticmethod)): + name = f"cls.{name}" + elif args and args[0] == "self": + name = f"self.{name}" + args.pop(0) + print( + textwrap.fill( + f"{name}({', '.join(args)})", + initial_indent=" - ", + subsequent_indent=" " * (len(name) + 5), + ) + ) + + +########################################################################## +# IDLE +########################################################################## + + +def in_idle(): + """ + Return True if this function is run within idle. Tkinter + programs that are run in idle should never call ``Tk.mainloop``; so + this function should be used to gate all calls to ``Tk.mainloop``. + + :warning: This function works by checking ``sys.stdin``. If the + user has modified ``sys.stdin``, then it may return incorrect + results. + :rtype: bool + """ + import sys + + return sys.stdin.__class__.__name__ in ("PyShell", "RPCProxy") + + +########################################################################## +# PRETTY PRINTING +########################################################################## + + +def pr(data, start=0, end=None): + """ + Pretty print a sequence of data items + + :param data: the data stream to print + :type data: sequence or iter + :param start: the start position + :type start: int + :param end: the end position + :type end: int + """ + pprint(list(islice(data, start, end))) + + +def print_string(s, width=70): + """ + Pretty print a string, breaking lines on whitespace + + :param s: the string to print, consisting of words and spaces + :type s: str + :param width: the display width + :type width: int + """ + print("\n".join(textwrap.wrap(s, width=width))) + + +def tokenwrap(tokens, separator=" ", width=70): + """ + Pretty print a list of text tokens, breaking lines on whitespace + + :param tokens: the tokens to print + :type tokens: list + :param separator: the string to use to separate tokens + :type separator: str + :param width: the display width (default=70) + :type width: int + """ + return "\n".join(textwrap.wrap(separator.join(tokens), width=width)) + + +########################################################################## +# Indexing +########################################################################## + + +class Index(defaultdict): + def __init__(self, pairs): + defaultdict.__init__(self, list) + for key, value in pairs: + self[key].append(value) + + +###################################################################### +## Regexp display (thanks to David Mertz) +###################################################################### + + +def re_show(regexp, string, left="{", right="}"): + """ + Return a string with markers surrounding the matched substrings. + Search str for substrings matching ``regexp`` and wrap the matches + with braces. This is convenient for learning about regular expressions. + + :param regexp: The regular expression. + :type regexp: str + :param string: The string being matched. 
+ :type string: str + :param left: The left delimiter (printed before the matched substring) + :type left: str + :param right: The right delimiter (printed after the matched substring) + :type right: str + :rtype: str + """ + print(re.compile(regexp, re.M).sub(left + r"\g<0>" + right, string.rstrip())) + + +########################################################################## +# READ FROM FILE OR STRING +########################################################################## + +# recipe from David Mertz +def filestring(f): + if hasattr(f, "read"): + return f.read() + elif isinstance(f, str): + with open(f) as infile: + return infile.read() + else: + raise ValueError("Must be called with a filename or file-like object") + + +########################################################################## +# Breadth-First Search +########################################################################## + + +def breadth_first(tree, children=iter, maxdepth=-1): + """Traverse the nodes of a tree in breadth-first order. + (No check for cycles.) + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. + """ + queue = deque([(tree, 0)]) + + while queue: + node, depth = queue.popleft() + yield node + + if depth != maxdepth: + try: + queue.extend((c, depth + 1) for c in children(node)) + except TypeError: + pass + + +########################################################################## +# Graph Drawing +########################################################################## + + +def edge_closure(tree, children=iter, maxdepth=-1, verbose=False): + """Yield the edges of a graph in breadth-first order, + discarding eventual cycles. + The first argument should be the start node; + children should be a function taking as argument a graph node + and returning an iterator of the node's children. + + >>> from nltk.util import edge_closure + >>> print(list(edge_closure('A', lambda node:{'A':['B','C'], 'B':'C', 'C':'B'}[node]))) + [('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')] + """ + traversed = set() + edges = set() + queue = deque([(tree, 0)]) + while queue: + node, depth = queue.popleft() + traversed.add(node) + if depth != maxdepth: + try: + for child in children(node): + if child not in traversed: + queue.append((child, depth + 1)) + else: + if verbose: + warnings.warn( + f"Discarded redundant search for {child} at depth {depth + 1}", + stacklevel=2, + ) + edge = (node, child) + if edge not in edges: + yield edge + edges.add(edge) + except TypeError: + pass + + +def edges2dot(edges, shapes=None, attr=None): + """ + :param edges: the set (or list) of edges of a directed graph. + + :return dot_string: a representation of 'edges' as a string in the DOT + graph language, which can be converted to an image by the 'dot' program + from the Graphviz package, or nltk.parse.dependencygraph.dot2img(dot_string). + + :param shapes: dictionary of strings that trigger a specified shape. 
+ :param attr: dictionary with global graph attributes + + >>> import nltk + >>> from nltk.util import edges2dot + >>> print(edges2dot([('A', 'B'), ('A', 'C'), ('B', 'C'), ('C', 'B')])) + digraph G { + "A" -> "B"; + "A" -> "C"; + "B" -> "C"; + "C" -> "B"; + } + + """ + if not shapes: + shapes = dict() + if not attr: + attr = dict() + + dot_string = "digraph G {\n" + + for pair in attr.items(): + dot_string += f"{pair[0]} = {pair[1]};\n" + + for edge in edges: + for shape in shapes.items(): + for node in range(2): + if shape[0] in repr(edge[node]): + dot_string += f'"{edge[node]}" [shape = {shape[1]}];\n' + dot_string += f'"{edge[0]}" -> "{edge[1]}";\n' + + dot_string += "}\n" + return dot_string + + +def unweighted_minimum_spanning_digraph(tree, children=iter, shapes=None, attr=None): + """ + + Build a Minimum Spanning Tree (MST) of an unweighted graph, + by traversing the nodes of a tree in breadth-first order, + discarding eventual cycles. + + Return a representation of this MST as a string in the DOT graph language, + which can be converted to an image by the 'dot' program from the Graphviz + package, or nltk.parse.dependencygraph.dot2img(dot_string). + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. + + >>> import nltk + >>> wn=nltk.corpus.wordnet + >>> from nltk.util import unweighted_minimum_spanning_digraph as umsd + >>> print(umsd(wn.synset('bound.a.01'), lambda s:s.also_sees())) + digraph G { + "Synset('bound.a.01')" -> "Synset('unfree.a.02')"; + "Synset('unfree.a.02')" -> "Synset('confined.a.02')"; + "Synset('unfree.a.02')" -> "Synset('dependent.a.01')"; + "Synset('unfree.a.02')" -> "Synset('restricted.a.01')"; + "Synset('restricted.a.01')" -> "Synset('classified.a.02')"; + } + + """ + return edges2dot( + edge_closure( + tree, lambda node: unweighted_minimum_spanning_dict(tree, children)[node] + ), + shapes, + attr, + ) + + +########################################################################## +# Breadth-First / Depth-first Searches with Cycle Detection +########################################################################## + + +def acyclic_breadth_first(tree, children=iter, maxdepth=-1): + """Traverse the nodes of a tree in breadth-first order, + discarding eventual cycles. + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. + """ + traversed = set() + queue = deque([(tree, 0)]) + while queue: + node, depth = queue.popleft() + yield node + traversed.add(node) + if depth != maxdepth: + try: + for child in children(node): + if child not in traversed: + queue.append((child, depth + 1)) + else: + warnings.warn( + "Discarded redundant search for {} at depth {}".format( + child, depth + 1 + ), + stacklevel=2, + ) + except TypeError: + pass + + +def acyclic_depth_first(tree, children=iter, depth=-1, cut_mark=None, traversed=None): + """Traverse the nodes of a tree in depth-first order, + discarding eventual cycles within any branch, + adding cut_mark (when specified) if cycles were truncated. + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. 
+ + Catches all cycles: + + >>> import nltk + >>> from nltk.util import acyclic_depth_first as acyclic_tree + >>> wn=nltk.corpus.wordnet + >>> from pprint import pprint + >>> pprint(acyclic_tree(wn.synset('dog.n.01'), lambda s:s.hypernyms(),cut_mark='...')) + [Synset('dog.n.01'), + [Synset('canine.n.02'), + [Synset('carnivore.n.01'), + [Synset('placental.n.01'), + [Synset('mammal.n.01'), + [Synset('vertebrate.n.01'), + [Synset('chordate.n.01'), + [Synset('animal.n.01'), + [Synset('organism.n.01'), + [Synset('living_thing.n.01'), + [Synset('whole.n.02'), + [Synset('object.n.01'), + [Synset('physical_entity.n.01'), + [Synset('entity.n.01')]]]]]]]]]]]]], + [Synset('domestic_animal.n.01'), "Cycle(Synset('animal.n.01'),-3,...)"]] + """ + if traversed is None: + traversed = {tree} + out_tree = [tree] + if depth != 0: + try: + for child in children(tree): + if child not in traversed: + # Recurse with a common "traversed" set for all children: + traversed.add(child) + out_tree += [ + acyclic_depth_first( + child, children, depth - 1, cut_mark, traversed + ) + ] + else: + warnings.warn( + "Discarded redundant search for {} at depth {}".format( + child, depth - 1 + ), + stacklevel=3, + ) + if cut_mark: + out_tree += [f"Cycle({child},{depth - 1},{cut_mark})"] + except TypeError: + pass + elif cut_mark: + out_tree += [cut_mark] + return out_tree + + +def acyclic_branches_depth_first( + tree, children=iter, depth=-1, cut_mark=None, traversed=None +): + """Traverse the nodes of a tree in depth-first order, + discarding eventual cycles within the same branch, + but keep duplicate paths in different branches. + Add cut_mark (when defined) if cycles were truncated. + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. 
+ + Catches only only cycles within the same branch, + but keeping cycles from different branches: + + >>> import nltk + >>> from nltk.util import acyclic_branches_depth_first as tree + >>> wn=nltk.corpus.wordnet + >>> from pprint import pprint + >>> pprint(tree(wn.synset('certified.a.01'), lambda s:s.also_sees(), cut_mark='...', depth=4)) + [Synset('certified.a.01'), + [Synset('authorized.a.01'), + [Synset('lawful.a.01'), + [Synset('legal.a.01'), + "Cycle(Synset('lawful.a.01'),0,...)", + [Synset('legitimate.a.01'), '...']], + [Synset('straight.a.06'), + [Synset('honest.a.01'), '...'], + "Cycle(Synset('lawful.a.01'),0,...)"]], + [Synset('legitimate.a.01'), + "Cycle(Synset('authorized.a.01'),1,...)", + [Synset('legal.a.01'), + [Synset('lawful.a.01'), '...'], + "Cycle(Synset('legitimate.a.01'),0,...)"], + [Synset('valid.a.01'), + "Cycle(Synset('legitimate.a.01'),0,...)", + [Synset('reasonable.a.01'), '...']]], + [Synset('official.a.01'), "Cycle(Synset('authorized.a.01'),1,...)"]], + [Synset('documented.a.01')]] + """ + if traversed is None: + traversed = {tree} + out_tree = [tree] + if depth != 0: + try: + for child in children(tree): + if child not in traversed: + # Recurse with a different "traversed" set for each child: + out_tree += [ + acyclic_branches_depth_first( + child, + children, + depth - 1, + cut_mark, + traversed.union({child}), + ) + ] + else: + warnings.warn( + "Discarded redundant search for {} at depth {}".format( + child, depth - 1 + ), + stacklevel=3, + ) + if cut_mark: + out_tree += [f"Cycle({child},{depth - 1},{cut_mark})"] + except TypeError: + pass + elif cut_mark: + out_tree += [cut_mark] + return out_tree + + +def acyclic_dic2tree(node, dic): + """Convert acyclic dictionary 'dic', where the keys are nodes, and the + values are lists of children, to output tree suitable for pprint(), + starting at root 'node', with subtrees as nested lists.""" + return [node] + [acyclic_dic2tree(child, dic) for child in dic[node]] + + +def unweighted_minimum_spanning_dict(tree, children=iter): + """ + Output a dictionary representing a Minimum Spanning Tree (MST) + of an unweighted graph, by traversing the nodes of a tree in + breadth-first order, discarding eventual cycles. + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. 
+ + >>> import nltk + >>> from nltk.corpus import wordnet as wn + >>> from nltk.util import unweighted_minimum_spanning_dict as umsd + >>> from pprint import pprint + >>> pprint(umsd(wn.synset('bound.a.01'), lambda s:s.also_sees())) + {Synset('bound.a.01'): [Synset('unfree.a.02')], + Synset('classified.a.02'): [], + Synset('confined.a.02'): [], + Synset('dependent.a.01'): [], + Synset('restricted.a.01'): [Synset('classified.a.02')], + Synset('unfree.a.02'): [Synset('confined.a.02'), + Synset('dependent.a.01'), + Synset('restricted.a.01')]} + + """ + traversed = set() # Empty set of traversed nodes + queue = deque([tree]) # Initialize queue + agenda = {tree} # Set of all nodes ever queued + mstdic = {} # Empty MST dictionary + while queue: + node = queue.popleft() # Node is not yet in the MST dictionary, + mstdic[node] = [] # so add it with an empty list of children + if node not in traversed: # Avoid cycles + traversed.add(node) + for child in children(node): + if child not in agenda: # Queue nodes only once + mstdic[node].append(child) # Add child to the MST + queue.append(child) # Add child to queue + agenda.add(child) + return mstdic + + +def unweighted_minimum_spanning_tree(tree, children=iter): + """ + Output a Minimum Spanning Tree (MST) of an unweighted graph, + by traversing the nodes of a tree in breadth-first order, + discarding eventual cycles. + + The first argument should be the tree root; + children should be a function taking as argument a tree node + and returning an iterator of the node's children. + + >>> import nltk + >>> from nltk.util import unweighted_minimum_spanning_tree as mst + >>> wn=nltk.corpus.wordnet + >>> from pprint import pprint + >>> pprint(mst(wn.synset('bound.a.01'), lambda s:s.also_sees())) + [Synset('bound.a.01'), + [Synset('unfree.a.02'), + [Synset('confined.a.02')], + [Synset('dependent.a.01')], + [Synset('restricted.a.01'), [Synset('classified.a.02')]]]] + """ + return acyclic_dic2tree(tree, unweighted_minimum_spanning_dict(tree, children)) + + +########################################################################## +# Guess Character Encoding +########################################################################## + +# adapted from io.py in the docutils extension module (https://docutils.sourceforge.io/) +# http://www.pyzine.com/Issue008/Section_Articles/article_Encodings.html + + +def guess_encoding(data): + """ + Given a byte string, attempt to decode it. + Tries the standard 'UTF8' and 'latin-1' encodings, + Plus several gathered from locale information. + + The calling program *must* first call:: + + locale.setlocale(locale.LC_ALL, '') + + If successful it returns ``(decoded_unicode, successful_encoding)``. + If unsuccessful it raises a ``UnicodeError``. 
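+
+    A small sketch of the expected behaviour (illustrative only; the exact
+    list of candidate encodings depends on the active locale)::
+
+        import locale
+
+        locale.setlocale(locale.LC_ALL, '')
+        text, enc = guess_encoding('café'.encode('utf-8'))
+        # text == 'café' and enc == 'utf-8', since UTF-8 is tried first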
+ """ + successful_encoding = None + # we make 'utf-8' the first encoding + encodings = ["utf-8"] + # + # next we add anything we can learn from the locale + try: + encodings.append(locale.nl_langinfo(locale.CODESET)) + except AttributeError: + pass + try: + encodings.append(locale.getlocale()[1]) + except (AttributeError, IndexError): + pass + try: + encodings.append(locale.getdefaultlocale()[1]) + except (AttributeError, IndexError): + pass + # + # we try 'latin-1' last + encodings.append("latin-1") + for enc in encodings: + # some of the locale calls + # may have returned None + if not enc: + continue + try: + decoded = str(data, enc) + successful_encoding = enc + + except (UnicodeError, LookupError): + pass + else: + break + if not successful_encoding: + raise UnicodeError( + "Unable to decode input data. " + "Tried the following encodings: %s." + % ", ".join([repr(enc) for enc in encodings if enc]) + ) + else: + return (decoded, successful_encoding) + + +########################################################################## +# Remove repeated elements from a list deterministcally +########################################################################## + + +def unique_list(xs): + seen = set() + # not seen.add(x) here acts to make the code shorter without using if statements, seen.add(x) always returns None. + return [x for x in xs if x not in seen and not seen.add(x)] + + +########################################################################## +# Invert a dictionary +########################################################################## + + +def invert_dict(d): + inverted_dict = defaultdict(list) + for key in d: + if hasattr(d[key], "__iter__"): + for term in d[key]: + inverted_dict[term].append(key) + else: + inverted_dict[d[key]] = key + return inverted_dict + + +########################################################################## +# Utilities for directed graphs: transitive closure, and inversion +# The graph is represented as a dictionary of sets +########################################################################## + + +def transitive_closure(graph, reflexive=False): + """ + Calculate the transitive closure of a directed graph, + optionally the reflexive transitive closure. + + The algorithm is a slight modification of the "Marking Algorithm" of + Ioannidis & Ramakrishnan (1998) "Efficient Transitive Closure Algorithms". + + :param graph: the initial graph, represented as a dictionary of sets + :type graph: dict(set) + :param reflexive: if set, also make the closure reflexive + :type reflexive: bool + :rtype: dict(set) + """ + if reflexive: + base_set = lambda k: {k} + else: + base_set = lambda k: set() + # The graph U_i in the article: + agenda_graph = {k: graph[k].copy() for k in graph} + # The graph M_i in the article: + closure_graph = {k: base_set(k) for k in graph} + for i in graph: + agenda = agenda_graph[i] + closure = closure_graph[i] + while agenda: + j = agenda.pop() + closure.add(j) + closure |= closure_graph.setdefault(j, base_set(j)) + agenda |= agenda_graph.get(j, base_set(j)) + agenda -= closure + return closure_graph + + +def invert_graph(graph): + """ + Inverts a directed graph. 
+ + :param graph: the graph, represented as a dictionary of sets + :type graph: dict(set) + :return: the inverted graph + :rtype: dict(set) + """ + inverted = {} + for key in graph: + for value in graph[key]: + inverted.setdefault(value, set()).add(key) + return inverted + + +########################################################################## +# HTML Cleaning +########################################################################## + + +def clean_html(html): + raise NotImplementedError( + "To remove HTML markup, use BeautifulSoup's get_text() function" + ) + + +def clean_url(url): + raise NotImplementedError( + "To remove HTML markup, use BeautifulSoup's get_text() function" + ) + + +########################################################################## +# FLATTEN LISTS +########################################################################## + + +def flatten(*args): + """ + Flatten a list. + + >>> from nltk.util import flatten + >>> flatten(1, 2, ['b', 'a' , ['c', 'd']], 3) + [1, 2, 'b', 'a', 'c', 'd', 3] + + :param args: items and lists to be combined into a single list + :rtype: list + """ + + x = [] + for l in args: + if not isinstance(l, (list, tuple)): + l = [l] + for item in l: + if isinstance(item, (list, tuple)): + x.extend(flatten(item)) + else: + x.append(item) + return x + + +########################################################################## +# Ngram iteration +########################################################################## + + +def pad_sequence( + sequence, + n, + pad_left=False, + pad_right=False, + left_pad_symbol=None, + right_pad_symbol=None, +): + """ + Returns a padded sequence of items before ngram extraction. + + >>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='', right_pad_symbol='')) + ['', 1, 2, 3, 4, 5, ''] + >>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='')) + ['', 1, 2, 3, 4, 5] + >>> list(pad_sequence([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='')) + [1, 2, 3, 4, 5, ''] + + :param sequence: the source data to be padded + :type sequence: sequence or iter + :param n: the degree of the ngrams + :type n: int + :param pad_left: whether the ngrams should be left-padded + :type pad_left: bool + :param pad_right: whether the ngrams should be right-padded + :type pad_right: bool + :param left_pad_symbol: the symbol to use for left padding (default is None) + :type left_pad_symbol: any + :param right_pad_symbol: the symbol to use for right padding (default is None) + :type right_pad_symbol: any + :rtype: sequence or iter + """ + sequence = iter(sequence) + if pad_left: + sequence = chain((left_pad_symbol,) * (n - 1), sequence) + if pad_right: + sequence = chain(sequence, (right_pad_symbol,) * (n - 1)) + return sequence + + +# add a flag to pad the sequence so we get peripheral ngrams? + + +def ngrams(sequence, n, **kwargs): + """ + Return the ngrams generated from a sequence of items, as an iterator. + For example: + + >>> from nltk.util import ngrams + >>> list(ngrams([1,2,3,4,5], 3)) + [(1, 2, 3), (2, 3, 4), (3, 4, 5)] + + Wrap with list for a list version of this function. Set pad_left + or pad_right to true in order to get additional ngrams: + + >>> list(ngrams([1,2,3,4,5], 2, pad_right=True)) + [(1, 2), (2, 3), (3, 4), (4, 5), (5, None)] + >>> list(ngrams([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='
</s>')) + [(1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>
')] + >>> list(ngrams([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='')) + [('', 1), (1, 2), (2, 3), (3, 4), (4, 5)] + >>> list(ngrams([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='', right_pad_symbol='')) + [('', 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, '')] + + + :param sequence: the source data to be converted into ngrams + :type sequence: sequence or iter + :param n: the degree of the ngrams + :type n: int + :param pad_left: whether the ngrams should be left-padded + :type pad_left: bool + :param pad_right: whether the ngrams should be right-padded + :type pad_right: bool + :param left_pad_symbol: the symbol to use for left padding (default is None) + :type left_pad_symbol: any + :param right_pad_symbol: the symbol to use for right padding (default is None) + :type right_pad_symbol: any + :rtype: sequence or iter + """ + sequence = pad_sequence(sequence, n, **kwargs) + + # Creates the sliding window, of n no. of items. + # `iterables` is a tuple of iterables where each iterable is a window of n items. + iterables = tee(sequence, n) + + for i, sub_iterable in enumerate(iterables): # For each window, + for _ in range(i): # iterate through every order of ngrams + next(sub_iterable, None) # generate the ngrams within the window. + return zip(*iterables) # Unpack and flattens the iterables. + + +def bigrams(sequence, **kwargs): + """ + Return the bigrams generated from a sequence of items, as an iterator. + For example: + + >>> from nltk.util import bigrams + >>> list(bigrams([1,2,3,4,5])) + [(1, 2), (2, 3), (3, 4), (4, 5)] + + Use bigrams for a list version of this function. + + :param sequence: the source data to be converted into bigrams + :type sequence: sequence or iter + :rtype: iter(tuple) + """ + + yield from ngrams(sequence, 2, **kwargs) + + +def trigrams(sequence, **kwargs): + """ + Return the trigrams generated from a sequence of items, as an iterator. + For example: + + >>> from nltk.util import trigrams + >>> list(trigrams([1,2,3,4,5])) + [(1, 2, 3), (2, 3, 4), (3, 4, 5)] + + Use trigrams for a list version of this function. + + :param sequence: the source data to be converted into trigrams + :type sequence: sequence or iter + :rtype: iter(tuple) + """ + + yield from ngrams(sequence, 3, **kwargs) + + +def everygrams( + sequence, min_len=1, max_len=-1, pad_left=False, pad_right=False, **kwargs +): + """ + Returns all possible ngrams generated from a sequence of items, as an iterator. + + >>> sent = 'a b c'.split() + + New version outputs for everygrams. + >>> list(everygrams(sent)) + [('a',), ('a', 'b'), ('a', 'b', 'c'), ('b',), ('b', 'c'), ('c',)] + + Old version outputs for everygrams. + >>> sorted(everygrams(sent), key=len) + [('a',), ('b',), ('c',), ('a', 'b'), ('b', 'c'), ('a', 'b', 'c')] + + >>> list(everygrams(sent, max_len=2)) + [('a',), ('a', 'b'), ('b',), ('b', 'c'), ('c',)] + + :param sequence: the source data to be converted into ngrams. If max_len is + not provided, this sequence will be loaded into memory + :type sequence: sequence or iter + :param min_len: minimum length of the ngrams, aka. n-gram order/degree of ngram + :type min_len: int + :param max_len: maximum length of the ngrams (set to length of sequence by default) + :type max_len: int + :param pad_left: whether the ngrams should be left-padded + :type pad_left: bool + :param pad_right: whether the ngrams should be right-padded + :type pad_right: bool + :rtype: iter(tuple) + """ + + # Get max_len for padding. 
+ if max_len == -1: + try: + max_len = len(sequence) + except TypeError: + sequence = list(sequence) + max_len = len(sequence) + + # Pad if indicated using max_len. + sequence = pad_sequence(sequence, max_len, pad_left, pad_right, **kwargs) + + # Sliding window to store grams. + history = list(islice(sequence, max_len)) + + # Yield ngrams from sequence. + while history: + for ngram_len in range(min_len, len(history) + 1): + yield tuple(history[:ngram_len]) + + # Append element to history if sequence has more items. + try: + history.append(next(sequence)) + except StopIteration: + pass + + del history[0] + + +def skipgrams(sequence, n, k, **kwargs): + """ + Returns all possible skipgrams generated from a sequence of items, as an iterator. + Skipgrams are ngrams that allows tokens to be skipped. + Refer to http://homepages.inf.ed.ac.uk/ballison/pdf/lrec_skipgrams.pdf + + >>> sent = "Insurgents killed in ongoing fighting".split() + >>> list(skipgrams(sent, 2, 2)) + [('Insurgents', 'killed'), ('Insurgents', 'in'), ('Insurgents', 'ongoing'), ('killed', 'in'), ('killed', 'ongoing'), ('killed', 'fighting'), ('in', 'ongoing'), ('in', 'fighting'), ('ongoing', 'fighting')] + >>> list(skipgrams(sent, 3, 2)) + [('Insurgents', 'killed', 'in'), ('Insurgents', 'killed', 'ongoing'), ('Insurgents', 'killed', 'fighting'), ('Insurgents', 'in', 'ongoing'), ('Insurgents', 'in', 'fighting'), ('Insurgents', 'ongoing', 'fighting'), ('killed', 'in', 'ongoing'), ('killed', 'in', 'fighting'), ('killed', 'ongoing', 'fighting'), ('in', 'ongoing', 'fighting')] + + :param sequence: the source data to be converted into trigrams + :type sequence: sequence or iter + :param n: the degree of the ngrams + :type n: int + :param k: the skip distance + :type k: int + :rtype: iter(tuple) + """ + + # Pads the sequence as desired by **kwargs. + if "pad_left" in kwargs or "pad_right" in kwargs: + sequence = pad_sequence(sequence, n, **kwargs) + + # Note when iterating through the ngrams, the pad_right here is not + # the **kwargs padding, it's for the algorithm to detect the SENTINEL + # object on the right pad to stop inner loop. + SENTINEL = object() + for ngram in ngrams(sequence, n + k, pad_right=True, right_pad_symbol=SENTINEL): + head = ngram[:1] + tail = ngram[1:] + for skip_tail in combinations(tail, n - 1): + if skip_tail[-1] is SENTINEL: + continue + yield head + skip_tail + + +###################################################################### +# Binary Search in a File +###################################################################### + +# inherited from pywordnet, by Oliver Steele +def binary_search_file(file, key, cache=None, cacheDepth=-1): + """ + Return the line from the file with first word key. + Searches through a sorted file using the binary search algorithm. + + :type file: file + :param file: the file to be searched through. + :type key: str + :param key: the identifier we are searching for. 
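+
+    A rough sketch of the interface this function expects (illustrative
+    only): the file object must be sorted by the first word of each line and
+    must support ``seek``/``tell``/``readline`` plus a ``discard_line()``
+    helper that skips the remainder of the current line.  The
+    ``_SearchableString`` class below is a hypothetical stand-in, not part of
+    NLTK::
+
+        import io
+
+        class _SearchableString(io.StringIO):
+            # hypothetical stand-in for a sorted on-disk data file
+            def discard_line(self):
+                self.readline()
+
+        data = _SearchableString("ant 0\nbee 1\ncat 2\ndog 3\n")
+        binary_search_file(data, "cat")
+        # -> "cat 2\n" (the line whose first word equals the key)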
+ """ + + key = key + " " + keylen = len(key) + start = 0 + currentDepth = 0 + + if hasattr(file, "name"): + end = os.stat(file.name).st_size - 1 + else: + file.seek(0, 2) + end = file.tell() - 1 + file.seek(0) + + if cache is None: + cache = {} + + while start < end: + lastState = start, end + middle = (start + end) // 2 + + if cache.get(middle): + offset, line = cache[middle] + + else: + line = "" + while True: + file.seek(max(0, middle - 1)) + if middle > 0: + file.discard_line() + offset = file.tell() + line = file.readline() + if line != "": + break + # at EOF; try to find start of the last line + middle = (start + middle) // 2 + if middle == end - 1: + return None + if currentDepth < cacheDepth: + cache[middle] = (offset, line) + + if offset > end: + assert end != middle - 1, "infinite loop" + end = middle - 1 + elif line[:keylen] == key: + return line + elif line > key: + assert end != middle - 1, "infinite loop" + end = middle - 1 + elif line < key: + start = offset + len(line) - 1 + + currentDepth += 1 + thisState = start, end + + if lastState == thisState: + # Detects the condition where we're searching past the end + # of the file, which is otherwise difficult to detect + return None + + return None + + +###################################################################### +# Proxy configuration +###################################################################### + + +def set_proxy(proxy, user=None, password=""): + """ + Set the HTTP proxy for Python to download through. + + If ``proxy`` is None then tries to set proxy from environment or system + settings. + + :param proxy: The HTTP proxy server to use. For example: + 'http://proxy.example.com:3128/' + :param user: The username to authenticate with. Use None to disable + authentication. + :param password: The password to authenticate with. + """ + if proxy is None: + # Try and find the system proxy settings + try: + proxy = getproxies()["http"] + except KeyError as e: + raise ValueError("Could not detect default proxy settings") from e + + # Set up the proxy handler + proxy_handler = ProxyHandler({"https": proxy, "http": proxy}) + opener = build_opener(proxy_handler) + + if user is not None: + # Set up basic proxy authentication if provided + password_manager = HTTPPasswordMgrWithDefaultRealm() + password_manager.add_password(realm=None, uri=proxy, user=user, passwd=password) + opener.add_handler(ProxyBasicAuthHandler(password_manager)) + opener.add_handler(ProxyDigestAuthHandler(password_manager)) + + # Override the existing url opener + install_opener(opener) + + +###################################################################### +# ElementTree pretty printing from https://www.effbot.org/zone/element-lib.htm +###################################################################### + + +def elementtree_indent(elem, level=0): + """ + Recursive function to indent an ElementTree._ElementInterface + used for pretty printing. Run indent on elem and then output + in the normal way. + + :param elem: element to be indented. will be modified. 
+ :type elem: ElementTree._ElementInterface + :param level: level of indentation for this element + :type level: nonnegative integer + :rtype: ElementTree._ElementInterface + :return: Contents of elem indented to reflect its structure + """ + + i = "\n" + level * " " + if len(elem): + if not elem.text or not elem.text.strip(): + elem.text = i + " " + for elem in elem: + elementtree_indent(elem, level + 1) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + else: + if level and (not elem.tail or not elem.tail.strip()): + elem.tail = i + + +###################################################################### +# Mathematical approximations +###################################################################### + + +def choose(n, k): + """ + This function is a fast way to calculate binomial coefficients, commonly + known as nCk, i.e. the number of combinations of n things taken k at a time. + (https://en.wikipedia.org/wiki/Binomial_coefficient). + + This is the *scipy.special.comb()* with long integer computation but this + approximation is faster, see https://github.com/nltk/nltk/issues/1181 + + >>> choose(4, 2) + 6 + >>> choose(6, 2) + 15 + + :param n: The number of things. + :type n: int + :param r: The number of times a thing is taken. + :type r: int + """ + if 0 <= k <= n: + ntok, ktok = 1, 1 + for t in range(1, min(k, n - k) + 1): + ntok *= n + ktok *= t + n -= 1 + return ntok // ktok + else: + return 0 + + +###################################################################### +# Iteration utilities +###################################################################### + + +def pairwise(iterable): + """s -> (s0,s1), (s1,s2), (s2, s3), ...""" + a, b = tee(iterable) + next(b, None) + return zip(a, b) + + +###################################################################### +# Parallelization. +###################################################################### + + +def parallelize_preprocess(func, iterator, processes, progress_bar=False): + from joblib import Parallel, delayed + from tqdm import tqdm + + iterator = tqdm(iterator) if progress_bar else iterator + if processes <= 1: + return map(func, iterator) + return Parallel(n_jobs=processes)(delayed(func)(line) for line in iterator) diff --git a/lib/python3.10/site-packages/nltk/wsd.py b/lib/python3.10/site-packages/nltk/wsd.py new file mode 100644 index 0000000000000000000000000000000000000000..8e29ce1e44b302d751a55d9512363f364a7c3f47 --- /dev/null +++ b/lib/python3.10/site-packages/nltk/wsd.py @@ -0,0 +1,51 @@ +# Natural Language Toolkit: Word Sense Disambiguation Algorithms +# +# Authors: Liling Tan , +# Dmitrijs Milajevs +# +# Copyright (C) 2001-2023 NLTK Project +# URL: +# For license information, see LICENSE.TXT + +from nltk.corpus import wordnet + + +def lesk(context_sentence, ambiguous_word, pos=None, synsets=None): + """Return a synset for an ambiguous word in a context. + + :param iter context_sentence: The context sentence where the ambiguous word + occurs, passed as an iterable of words. + :param str ambiguous_word: The ambiguous word that requires WSD. + :param str pos: A specified Part-of-Speech (POS). + :param iter synsets: Possible synsets of the ambiguous word. + :return: ``lesk_sense`` The Synset() object with the highest signature overlaps. + + This function is an implementation of the original Lesk algorithm (1986) [1]. + + Usage example:: + + >>> lesk(['I', 'went', 'to', 'the', 'bank', 'to', 'deposit', 'money', '.'], 'bank', 'n') + Synset('savings_bank.n.02') + + [1] Lesk, Michael. 
"Automatic sense disambiguation using machine + readable dictionaries: how to tell a pine cone from an ice cream + cone." Proceedings of the 5th Annual International Conference on + Systems Documentation. ACM, 1986. + https://dl.acm.org/citation.cfm?id=318728 + """ + + context = set(context_sentence) + if synsets is None: + synsets = wordnet.synsets(ambiguous_word) + + if pos: + synsets = [ss for ss in synsets if str(ss.pos()) == pos] + + if not synsets: + return None + + _, sense = max( + (len(context.intersection(ss.definition().split())), ss) for ss in synsets + ) + + return sense diff --git a/lib/python3.10/site-packages/rpds/__init__.py b/lib/python3.10/site-packages/rpds/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..257da6a7bd439c46cb2409e77531dc4a4dc6295c --- /dev/null +++ b/lib/python3.10/site-packages/rpds/__init__.py @@ -0,0 +1,5 @@ +from .rpds import * + +__doc__ = rpds.__doc__ +if hasattr(rpds, "__all__"): + __all__ = rpds.__all__ \ No newline at end of file diff --git a/lib/python3.10/site-packages/rpds/__init__.pyi b/lib/python3.10/site-packages/rpds/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5af0e323c1d6d4182d0d373e216a86e520affc87 --- /dev/null +++ b/lib/python3.10/site-packages/rpds/__init__.pyi @@ -0,0 +1,77 @@ +from typing import ( + ItemsView, + Iterable, + Iterator, + KeysView, + Mapping, + TypeVar, + ValuesView, +) + +_T = TypeVar("_T") +_KT_co = TypeVar("_KT_co", covariant=True) +_VT_co = TypeVar("_VT_co", covariant=True) +_KU_co = TypeVar("_KU_co", covariant=True) +_VU_co = TypeVar("_VU_co", covariant=True) + +class HashTrieMap(Mapping[_KT_co, _VT_co]): + def __init__( + self, + value: Mapping[_KT_co, _VT_co] | Iterable[tuple[_KT_co, _VT_co]] = {}, + **kwds: Mapping[_KT_co, _VT_co], + ): ... + def __getitem__(self, key: _KT_co) -> _VT_co: ... + def __iter__(self) -> Iterator[_KT_co]: ... + def __len__(self) -> int: ... + def discard(self, key: _KT_co) -> HashTrieMap[_KT_co, _VT_co]: ... + def items(self) -> ItemsView[_KT_co, _VT_co]: ... + def keys(self) -> KeysView[_KT_co]: ... + def values(self) -> ValuesView[_VT_co]: ... + def remove(self, key: _KT_co) -> HashTrieMap[_KT_co, _VT_co]: ... + def insert( + self, + key: _KT_co, + val: _VT_co, + ) -> HashTrieMap[_KT_co, _VT_co]: ... + def update( + self, + *args: Mapping[_KU_co, _VU_co] | Iterable[tuple[_KU_co, _VU_co]], + ) -> HashTrieMap[_KT_co | _KU_co, _VT_co | _VU_co]: ... + @classmethod + def convert( + cls, + value: Mapping[_KT_co, _VT_co] | Iterable[tuple[_KT_co, _VT_co]], + ) -> HashTrieMap[_KT_co, _VT_co]: ... + @classmethod + def fromkeys( + cls, + keys: Iterable[_KT_co], + value: _VT_co = None, + ) -> HashTrieMap[_KT_co, _VT_co]: ... + +class HashTrieSet(frozenset[_T]): + def __init__(self, value: Iterable[_T] = ()): ... + def __iter__(self) -> Iterator[_T]: ... + def __len__(self) -> int: ... + def discard(self, value: _T) -> HashTrieSet[_T]: ... + def remove(self, value: _T) -> HashTrieSet[_T]: ... + def insert(self, value: _T) -> HashTrieSet[_T]: ... + def update(self, *args: Iterable[_T]) -> HashTrieSet[_T]: ... + +class List(Iterable[_T]): + def __init__(self, value: Iterable[_T] = (), *more: _T): ... + def __iter__(self) -> Iterator[_T]: ... + def __len__(self) -> int: ... + def push_front(self, value: _T) -> List[_T]: ... + def drop_first(self) -> List[_T]: ... + +class Queue(Iterable[_T]): + def __init__(self, value: Iterable[_T] = (), *more: _T): ... + def __iter__(self) -> Iterator[_T]: ... + def __len__(self) -> int: ... 
+ def enqueue(self, value: _T) -> Queue[_T]: ... + def dequeue(self, value: _T) -> Queue[_T]: ... + @property + def is_empty(self) -> _T: ... + @property + def peek(self) -> _T: ... diff --git a/lib/python3.10/site-packages/rpds/py.typed b/lib/python3.10/site-packages/rpds/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391