Columns: query (string, lengths 9-60), language (string, 1 class), code (string, lengths 105-25.7k), url (string, lengths 91-217)
reading element from html - <td>
python
def _force_read_simple(self, element, text_before, text_after, data_of):
    """
    Force the screen reader to display information about the element.

    :param element: The reference element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param text_before: The text content to show before the element.
    :type text_before: str
    :param text_after: The text content to show after the element.
    :type text_after: str
    :param data_of: The name of the attribute that links the content with
                    the element.
    :type data_of: str
    """

    self.id_generator.generate_id(element)
    identifier = element.get_attribute('id')
    selector = '[' + data_of + '="' + identifier + '"]'

    reference_before = self.parser.find(
        '.'
        + AccessibleDisplayImplementation.CLASS_FORCE_READ_BEFORE
        + selector
    ).first_result()
    reference_after = self.parser.find(
        '.'
        + AccessibleDisplayImplementation.CLASS_FORCE_READ_AFTER
        + selector
    ).first_result()
    references = self.parser.find(selector).list_results()
    if reference_before in references:
        references.remove(reference_before)
    if reference_after in references:
        references.remove(reference_after)

    if not references:
        if text_before:
            if reference_before is not None:
                reference_before.remove_node()

            span = self.parser.create_element('span')
            span.set_attribute(
                'class',
                AccessibleDisplayImplementation.CLASS_FORCE_READ_BEFORE
            )
            span.set_attribute(data_of, identifier)
            span.append_text(text_before)
            self._insert(element, span, True)
        if text_after:
            if reference_after is not None:
                reference_after.remove_node()

            span = self.parser.create_element('span')
            span.set_attribute(
                'class',
                AccessibleDisplayImplementation.CLASS_FORCE_READ_AFTER
            )
            span.set_attribute(data_of, identifier)
            span.append_text(text_after)
            self._insert(element, span, False)
https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/display.py#L720-L779
reading element from html - <td>
python
def _force_read(
    self,
    element,
    value,
    text_prefix_before,
    text_suffix_before,
    text_prefix_after,
    text_suffix_after,
    data_of
):
    """
    Force the screen reader to display information about the element, with
    prefixes or suffixes.

    :param element: The reference element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param value: The value to be shown.
    :type value: str
    :param text_prefix_before: The prefix of the value to show before the
                               element.
    :type text_prefix_before: str
    :param text_suffix_before: The suffix of the value to show before the
                               element.
    :type text_suffix_before: str
    :param text_prefix_after: The prefix of the value to show after the
                              element.
    :type text_prefix_after: str
    :param text_suffix_after: The suffix of the value to show after the
                              element.
    :type text_suffix_after: str
    :param data_of: The name of the attribute that links the content with
                    the element.
    :type data_of: str
    """

    if (text_prefix_before) or (text_suffix_before):
        text_before = text_prefix_before + value + text_suffix_before
    else:
        text_before = ''
    if (text_prefix_after) or (text_suffix_after):
        text_after = text_prefix_after + value + text_suffix_after
    else:
        text_after = ''
    self._force_read_simple(element, text_before, text_after, data_of)
https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/display.py#L781-L824
reading element from html - <td>
python
def _get_td_or_none(details, ID):
    """
    Get the <tr> tag with the given `ID` and return the content of the last
    <td> tag from the <tr> root.

    Args:
        details (obj): :class:`dhtmlparser.HTMLElement` instance.
        ID (str): id property of the <tr> tag.

    Returns:
        str: Content of the last <td> as a string.
    """
    content = details.find("tr", {"id": ID})
    content = _get_last_td(content)

    # if content is None, return it
    if not content:
        return None

    content = content.getContent().strip()

    # if content is a blank string, return None
    if not content:
        return None

    return content
https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/ben_cz.py#L56-L81
reading element from html - <td>
python
def read_html(io, match='.+', flavor=None, header=None, index_col=None,
              skiprows=None, attrs=None, parse_dates=False,
              tupleize_cols=None, thousands=',', encoding=None,
              decimal='.', converters=None, na_values=None,
              keep_default_na=True, displayed_only=True):
    r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.

    Parameters
    ----------
    io : str or file-like
        A URL, a file-like object, or a raw string containing HTML. Note that
        lxml only accepts the http, ftp and file url protocols. If you have a
        URL that starts with ``'https'`` you might try removing the ``'s'``.

    match : str or compiled regular expression, optional
        The set of tables containing text matching this regex or string will
        be returned. Unless the HTML is extremely simple you will probably
        need to pass a non-empty string here. Defaults to '.+' (match any
        non-empty string). The default value will return all tables contained
        on a page. This value is converted to a regular expression so that
        there is consistent behavior between Beautiful Soup and lxml.

    flavor : str or None, container of strings
        The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
        each other, they are both there for backwards compatibility. The
        default of ``None`` tries to use ``lxml`` to parse and if that fails
        it falls back on ``bs4`` + ``html5lib``.

    header : int or list-like or None, optional
        The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
        make the columns headers.

    index_col : int or list-like or None, optional
        The column (or list of columns) to use to create the index.

    skiprows : int or list-like or slice or None, optional
        0-based. Number of rows to skip after parsing the column integer. If
        a sequence of integers or a slice is given, will skip the rows
        indexed by that sequence. Note that a single element sequence means
        'skip the nth row' whereas an integer means 'skip n rows'.

    attrs : dict or None, optional
        This is a dictionary of attributes that you can pass to use to
        identify the table in the HTML. These are not checked for validity
        before being passed to lxml or Beautiful Soup. However, these
        attributes must be valid HTML table attributes to work correctly. For
        example, ::

            attrs = {'id': 'table'}

        is a valid attribute dictionary because the 'id' HTML tag attribute
        is a valid HTML attribute for *any* HTML tag as per `this document
        <http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::

            attrs = {'asdf': 'table'}

        is *not* a valid attribute dictionary because 'asdf' is not a valid
        HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
        table attributes can be found `here
        <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
        working draft of the HTML 5 spec can be found `here
        <http://www.w3.org/TR/html-markup/table.html>`__. It contains the
        latest information on table attributes for the modern web.

    parse_dates : bool, optional
        See :func:`~read_csv` for more details.

    tupleize_cols : bool, optional
        If ``False`` try to parse multiple header rows into a
        :class:`~pandas.MultiIndex`, otherwise return raw tuples. Defaults to
        ``False``.

        .. deprecated:: 0.21.0
           This argument will be removed and will always convert to MultiIndex

    thousands : str, optional
        Separator to use to parse thousands. Defaults to ``','``.

    encoding : str or None, optional
        The encoding used to decode the web page. Defaults to ``None``.
        ``None`` preserves the previous encoding behavior, which depends on
        the underlying parser library (e.g., the parser library will try to
        use the encoding provided by the document).

    decimal : str, default '.'
        Character to recognize as decimal point (e.g. use ',' for European
        data).

        .. versionadded:: 0.19.0

    converters : dict, default None
        Dict of functions for converting values in certain columns. Keys can
        either be integers or column labels, values are functions that take
        one input argument, the cell (not column) content, and return the
        transformed content.

        .. versionadded:: 0.19.0

    na_values : iterable, default None
        Custom NA values

        .. versionadded:: 0.19.0

    keep_default_na : bool, default True
        If na_values are specified and keep_default_na is False the default
        NaN values are overridden, otherwise they're appended to

        .. versionadded:: 0.19.0

    displayed_only : bool, default True
        Whether elements with "display: none" should be parsed

        .. versionadded:: 0.23.0

    Returns
    -------
    dfs : list of DataFrames

    See Also
    --------
    read_csv

    Notes
    -----
    Before using this function you should read the :ref:`gotchas about the
    HTML parsing libraries <io.html.gotchas>`.

    Expect to do some cleanup after you call this function. For example, you
    might need to manually assign column names if the column names are
    converted to NaN when you pass the `header=0` argument. We try to assume
    as little as possible about the structure of the table and push the
    idiosyncrasies of the HTML contained in the table to the user.

    This function searches for ``<table>`` elements and only for ``<tr>``
    and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
    element in the table. ``<td>`` stands for "table data". This function
    attempts to properly handle ``colspan`` and ``rowspan`` attributes.
    If the function has a ``<thead>`` argument, it is used to construct
    the header, otherwise the function attempts to find the header within
    the body (by putting rows with only ``<th>`` elements into the header).

    .. versionadded:: 0.21.0

    Similar to :func:`~read_csv` the `header` argument is applied **after**
    `skiprows` is applied.

    This function will *always* return a list of :class:`DataFrame` *or*
    it will fail, e.g., it will *not* return an empty list.

    Examples
    --------
    See the :ref:`read_html documentation in the IO section of the docs
    <io.read_html>` for some examples of reading in HTML tables.
    """
    _importers()

    # Type check here. We don't want to parse only to fail because of an
    # invalid value of an integer skiprows.
    if isinstance(skiprows, numbers.Integral) and skiprows < 0:
        raise ValueError('cannot skip rows starting from the end of the '
                         'data (you passed a negative value)')
    _validate_header_arg(header)
    return _parse(flavor=flavor, io=io, match=match, header=header,
                  index_col=index_col, skiprows=skiprows,
                  parse_dates=parse_dates, tupleize_cols=tupleize_cols,
                  thousands=thousands, attrs=attrs, encoding=encoding,
                  decimal=decimal, converters=converters, na_values=na_values,
                  keep_default_na=keep_default_na,
                  displayed_only=displayed_only)
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L921-L1088
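A minimal usage sketch for the function above (the table markup and id here are invented): read_html returns one DataFrame per matching <table>, and thousands=',' lets it parse formatted numbers.

import pandas as pd

html = '''
<table id="prices">
  <tr><th>Item</th><th>Cost</th></tr>
  <tr><td>Widget</td><td>1,200</td></tr>
</table>
'''
dfs = pd.read_html(html, attrs={'id': 'prices'}, thousands=',')
print(dfs[0])  # one DataFrame per matching table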
reading element from html - <td>
python
def to_html_string(self):
    """
    Returns an etree HTML node with a document describing the process. This
    is only supported if the editor provided an SVG representation.
    """
    html = ET.Element('html')
    head = ET.SubElement(html, 'head')
    title = ET.SubElement(head, 'title')
    title.text = self.description
    body = ET.SubElement(html, 'body')
    h1 = ET.SubElement(body, 'h1')
    h1.text = self.description
    span = ET.SubElement(body, 'span')
    span.text = '___CONTENT___'
    html_text = ET.tostring(html)
    svg_content = ''
    svg_done = set()
    for spec in self.get_specs_depth_first():
        if spec.svg and spec.svg not in svg_done:
            svg_content += '<p>' + spec.svg + "</p>"
            svg_done.add(spec.svg)
    return html_text.replace('___CONTENT___', svg_content)
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/bpmn/specs/BpmnProcessSpec.py#L141-L164
reading element from html - <td>
python
def get_element_with_text(self, locator, text, params=None, timeout=None, visible=False):
    """
    Get element that contains <text> either by text or by attribute value.

    Note: if timeout is 0, this function will not wait for the element(s) to become present.

    :param locator: locator tuple or list of WebElements
    :param text: text that the element should contain
    :param params: (optional) locator parameters
    :param timeout: (optional) time to wait for text (default: self._explicit_wait)
    :param visible: (optional) if the element should also be visible (default: False)
    :return: WebElement instance
    """
    if timeout is None:
        timeout = self._explicit_wait

    @wait(exceptions=ElementNotVisibleException, timeout=timeout)
    def _wait_for_text():
        return self.is_element_with_text_present(locator, text, params, visible)

    msg = "Element with type <{}>, locator <{}> and text <{text}> was never located!".format(
        *locator, text=text) if not isinstance(locator, list) else \
        "None of the elements had the text: {}".format(text)

    if timeout == 0:
        return self.is_element_with_text_present(locator, text, params, visible)

    try:
        return _wait_for_text()
    except RuntimeError as e:
        LOGGER.debug(e)
        raise NoSuchElementException(msg)
https://github.com/Projectplace/basepage/blob/735476877eb100db0981590a6d12140e68652167/basepage/base_page.py#L367-L398
reading element from html - <td>
python
def raw_html(self) -> _RawHTML:
    """Bytes representation of the HTML content.
    (`learn more <http://www.diveintopython3.net/strings.html>`_).
    """
    if self._html:
        return self._html
    else:
        return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L90-L97
reading element from html - <td>
python
def _parse_redirect(self, element):
    """
    Parse a redirect statement

    :param element: The XML Element object
    :type element: etree._Element
    """
    self._log.info('Parsing response as a redirect')
    self.redirect = True

    return self._parse_template(element)
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/parser/trigger/response/__init__.py#L153-L161
reading element from html - <td>
python
def get_body_content(self):
    """
    Returns content of BODY element for this HTML document. Content will be
    of type 'str' (Python 2) or 'bytes' (Python 3).

    :Returns:
      Returns content of this document.
    """
    try:
        html_tree = parse_html_string(self.content)
    except:
        return ''

    html_root = html_tree.getroottree()

    if len(html_root.find('body')) != 0:
        body = html_tree.find('body')

        tree_str = etree.tostring(body, pretty_print=True, encoding='utf-8',
                                  xml_declaration=False)

        # this is so stupid
        if tree_str.startswith(six.b('<body>')):
            n = tree_str.rindex(six.b('</body>'))

            return tree_str[6:n]

        return tree_str

    return ''
https://github.com/aerkalov/ebooklib/blob/305f2dd7f02923ffabf9586a5d16266113d00c4a/ebooklib/epub.py#L341-L370
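A hedged alternative to the string-slicing workaround above: serialize only the children of <body> with lxml directly, which avoids the startswith/rindex dance (the markup is invented).

import lxml.html
from lxml import etree

doc = lxml.html.document_fromstring(
    '<html><body><p>hi</p><table><tr><td>x</td></tr></table></body></html>')
body = doc.find('body')
# join the serialized children instead of slicing '<body>...</body>'
inner = b''.join(etree.tostring(child) for child in body)
print(inner)  # b'<p>hi</p><table><tr><td>x</td></tr></table>'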
reading element from html - <td>
python
def get_html_content(self):
    """
    Parses the element and subelements and parses any HTML enabled text
    to its original HTML form for rendering.

    :returns: Parsed HTML enabled text content.
    :rtype: str
    """
    # Extract full element node content (including subelements)
    html_content = ''
    if hasattr(self, 'xml_element'):
        xml = self.xml_element
        content_list = ["" if xml.text is None else xml.text]

        def to_string(xml):
            if isinstance(xml, _Comment):
                return str(xml)
            else:
                return ElementTree.tostring(xml).decode('utf-8')

        content_list += [to_string(e) for e in xml.getchildren()]
        full_xml_content = "".join(content_list)

        # Parse tags to generate HTML valid content
        first_regex = r'html:'
        second_regex = r' xmlns:html=(["\'])(?:(?=(\\?))\2.)*?\1'
        html_content = re.sub(first_regex, '',
                              re.sub(second_regex, '', full_xml_content))

    return html_content
https://github.com/rodynnz/xccdf/blob/1b9dc2f06b5cce8db2a54c5f95a8f6bcf5cb6981/src/xccdf/models/html_element.py#L68-L99
reading element from html - <td>
python
def parse_text(document, container, element):
    "Parse text element."

    txt = None

    alternate = element.find(_name('{{{mc}}}AlternateContent'))
    if alternate is not None:
        parse_alternate(document, container, alternate)

    br = element.find(_name('{{{w}}}br'))
    if br is not None:
        if _name('{{{w}}}type') in br.attrib:
            _type = br.attrib[_name('{{{w}}}type')]
            brk = doc.Break(_type)
        else:
            brk = doc.Break()
        container.elements.append(brk)

    t = element.find(_name('{{{w}}}t'))
    if t is not None:
        txt = doc.Text(t.text)
        txt.parent = container
        container.elements.append(txt)

    rpr = element.find(_name('{{{w}}}rPr'))
    if rpr is not None:
        # Notice it is using txt as container
        parse_previous_properties(document, txt, rpr)

    for r in element.findall(_name('{{{w}}}r')):
        parse_text(document, container, r)

    foot = element.find(_name('{{{w}}}footnoteReference'))
    if foot is not None:
        parse_footnote(document, container, foot)

    end = element.find(_name('{{{w}}}endnoteReference'))
    if end is not None:
        parse_endnote(document, container, end)

    sym = element.find(_name('{{{w}}}sym'))
    if sym is not None:
        _font = sym.attrib[_name('{{{w}}}font')]
        _char = sym.attrib[_name('{{{w}}}char')]
        container.elements.append(doc.Symbol(font=_font, character=_char))

    image = element.find(_name('{{{w}}}drawing'))
    if image is not None:
        parse_drawing(document, container, image)

    refe = element.find(_name('{{{w}}}commentReference'))
    if refe is not None:
        _m = doc.Comment(refe.attrib[_name('{{{w}}}id')], 'reference')
        container.elements.append(_m)

    return
https://github.com/booktype/python-ooxml/blob/b56990a5bee2e1bc46839cec5161ff3726dc4d87/ooxml/parse.py#L224-L291
reading element from html - <td>
python
def print_element(element):
    """
    Pretty-print an lxml.etree element.

    Parameters
    ------------
    element : etree element
    """
    pretty = etree.tostring(
        element, pretty_print=True).decode('utf-8')
    print(pretty)
    return pretty
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/exchange/xml_based.py#L425-L436
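A standalone demonstration of the same pretty-printing idea, with the element built from invented markup:

from lxml import etree

row = etree.fromstring('<tr><td>a</td><td>b</td></tr>')
print(etree.tostring(row, pretty_print=True).decode('utf-8'))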
reading element from html - <td>
python
def get_visible_element(self, locator, params=None, timeout=None):
    """
    Get element both present AND visible in the DOM.

    If timeout is 0 (zero) return WebElement instance or None, else we wait
    and retry for timeout and raise TimeoutException should the element not
    be found.

    :param locator: locator tuple
    :param params: (optional) locator params
    :param timeout: (optional) time to wait for element (default: self._explicit_wait)
    :return: WebElement instance
    """
    return self.get_present_element(locator, params, timeout, True)
https://github.com/Projectplace/basepage/blob/735476877eb100db0981590a6d12140e68652167/basepage/base_page.py#L418-L430
reading element from html - <td>
python
def _process_element(self, pos, e):
    """
    Parses an incoming HTML element/node for data.

    pos -- the part of the element being parsed (start/end)
    e   -- the element being parsed
    """
    tag, class_attr = _tag_and_class_attr(e)
    start_of_message = tag == 'div' and class_attr == 'message' and pos == 'start'
    end_of_thread = tag == 'div' and 'thread' in class_attr and pos == 'end'

    if start_of_message and not self.messages_started:
        self.messages_started = True
    elif tag == "span" and pos == "end":
        if "user" in class_attr:
            self.current_sender = self.name_resolver.resolve(e.text)
        elif "meta" in class_attr:
            self.current_timestamp = \
                parse_timestamp(e.text, self.use_utc, self.timezone_hints)
    elif tag == 'p' and pos == 'end':
        # This is only necessary because of accidental double <p> nesting on
        # Facebook's end. Clearly, QA and testing is one of Facebook's
        # strengths ;)
        if not self.current_text:
            self.current_text = e.text.strip() if e.text else ''
    elif tag == 'img' and pos == 'start':
        self.current_text = '(image reference: {})'.format(e.attrib['src'])
    elif (start_of_message or end_of_thread) and self.messages_started:
        if not self.current_timestamp:
            # This is the typical error when the new Facebook format is
            # used with the legacy parser.
            raise UnsuitableParserError
        if not self.current_sender:
            if not self.no_sender_warning_status:
                sys.stderr.write(
                    "\rWARNING: The sender was missing in one or more parsed messages. "
                    "This is an error on Facebook's end that unfortunately cannot be "
                    "recovered from. Some or all messages in the output may show the "
                    "sender as 'Unknown' within each thread.\n")
                self.no_sender_warning_status = True
            self.current_sender = "Unknown"
        cm = ChatMessage(timestamp=self.current_timestamp,
                         sender=self.current_sender,
                         content=self.current_text or '',
                         seq_num=self.seq_num)
        self.messages += [cm]
        self.seq_num -= 1
        self.current_sender, self.current_timestamp, self.current_text = None, None, None

    return end_of_thread
https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/parser.py#L140-L192
reading element from html - <td>
python
def html(self) -> _BaseHTML:
    """Unicode representation of the HTML content
    (`learn more <http://www.diveintopython3.net/strings.html>`_).
    """
    if self._html:
        return self.raw_html.decode(self.encoding, errors='replace')
    else:
        return etree.tostring(self.element, encoding='unicode').strip()
https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L100-L107
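A hedged usage sketch covering this property and the raw_html property shown earlier (the markup is made up): html yields the decoded unicode string, raw_html yields bytes.

from requests_html import HTML

doc = HTML(html='<table><tr><td>cell</td></tr></table>')
print(type(doc.html))      # <class 'str'>
print(type(doc.raw_html))  # <class 'bytes'>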
reading element from html - <td>
python
def innerHTML(self, html: str) -> None:  # type: ignore
    """Set innerHTML both on this node and related browser node."""
    df = self._parse_html(html)
    if self.connected:
        self._set_inner_html_web(df.html)
    self._empty()
    self._append_child(df)
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/web_node.py#L305-L311
reading element from html - <td>
python
def _safe_get_element_text(self, path, root=None):
    """Safe get element text.

    Get element as string or None,
    :param root: Lxml element.
    :param path: String path (i.e. 'Items.Item.Offers.Offer').
    :return: String or None.
    """
    element = self._safe_get_element(path, root)
    if element:
        return element.text
    else:
        return None
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/amazon/api.py#L395-L410
reading element from html - <td>
python
def _get_element_text_or_none(document, selector):
    """
    Using a CSS selector, get the element and return the text, or None if no
    element.

    :arg document: ``HTMLElement`` document
    :arg selector: CSS selector
    :returns: str or None
    """
    element = document.cssselect(selector)
    if element:
        return element[0].text
    return None
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/utils/diaspora.py#L111-L122
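The same guard pattern, shown standalone with lxml.html (cssselect requires the cssselect package; the markup is invented). cssselect returns a list, so check it before reading .text:

import lxml.html

document = lxml.html.fromstring(
    '<table><tr><td class="name">Alice</td></tr></table>')
cells = document.cssselect('td.name')
print(cells[0].text if cells else None)  # Alice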
reading element from html - <td>
python
def parseStr(self, html):
    '''
    parseStr - Parses a string and creates the DOM tree and indexes.

        @param html <str> - valid HTML
    '''
    self.reset()
    if isinstance(html, bytes):
        self.feed(html.decode(self.encoding))
    else:
        self.feed(html)
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L878-L889
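A hedged usage sketch for AdvancedHTMLParser (the markup is invented, and the query methods are assumed from the library's DOM-like API): parseStr builds the tree so elements can then be looked up.

import AdvancedHTMLParser

parser = AdvancedHTMLParser.AdvancedHTMLParser()
parser.parseStr('<table><tr><td id="cell">value</td></tr></table>')
print(parser.getElementById('cell').innerHTML)  # value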
reading element from html - <td>
python
def get_element_content(
        p,
        meta_data,
        is_td=False,
        remove_italics=False,
        remove_bold=False,
):
    """
    P tags are made up of several runs (r tags) of text. This function takes
    a p tag and constructs the text that should be part of the p tag.

    image_handler should be a callable that returns the desired ``src``
    attribute for a given image.
    """
    # Only remove bold or italics if this tag is an h tag.
    # Td elements have the same look and feel as p/h elements. Right now we
    # are never putting h tags in td elements, as such if we are in a td we
    # will never be stripping bold/italics since that is only done on h tags
    if not is_td and is_header(p, meta_data):
        # Check to see if the whole line is bold or italics.
        remove_bold, remove_italics = whole_line_styled(p)

    p_text = ''
    w_namespace = get_namespace(p, 'w')

    if len(p) == 0:
        return ''

    # Only these tags contain text that we care about (eg. We don't care
    # about delete tags)
    content_tags = (
        '%sr' % w_namespace,
        '%shyperlink' % w_namespace,
        '%sins' % w_namespace,
        '%ssmartTag' % w_namespace,
    )
    elements_with_content = []
    for child in p:
        if child is None:
            break
        if child.tag in content_tags:
            elements_with_content.append(child)

    # Gather the content from all of the children
    for el in elements_with_content:
        # Hyperlinks and insert tags need to be handled differently than
        # r and smart tags.
        if el.tag in ('%sins' % w_namespace, '%ssmartTag' % w_namespace):
            p_text += get_element_content(
                el,
                meta_data,
                remove_bold=remove_bold,
                remove_italics=remove_italics,
            )
        elif el.tag == '%shyperlink' % w_namespace:
            p_text += build_hyperlink(el, meta_data)
        elif el.tag == '%sr' % w_namespace:
            p_text += get_text_run_content(
                el,
                meta_data,
                remove_bold=remove_bold,
                remove_italics=remove_italics,
            )
        else:
            raise SyntaxNotSupported(
                'Content element "%s" not handled.' % el.tag
            )

    # This function does not return a p tag since other tag types need this
    # as well (td, li).
    return p_text
https://github.com/PolicyStat/docx2html/blob/2dc4afd1e3a3f2f0b357d0bff903eb58bcc94429/docx2html/core.py#L1272-L1341
reading element from html - <td>
python
def read_element_tag(fd, endian):
    """Read data element tag: type and number of bytes.

    If tag is of the Small Data Element (SDE) type the element data
    is also returned.
    """
    data = fd.read(8)
    mtpn = unpack(endian, 'I', data[:4])
    # The most significant two bytes of mtpn will always be 0,
    # if they are not, this must be SDE format
    num_bytes = mtpn >> 16
    if num_bytes > 0:
        # small data element format
        mtpn = mtpn & 0xFFFF
        if num_bytes > 4:
            raise ParseError('Error parsing Small Data Element (SDE) '
                             'formatted data')
        data = data[4:4 + num_bytes]
    else:
        # regular element
        num_bytes = unpack(endian, 'I', data[4:])
        data = None
    return (mtpn, num_bytes, data)
https://github.com/nephics/mat4py/blob/6c1a2ad903937437cc5f24f3c3f5aa2c5a77a1c1/mat4py/loadmat.py#L146-L167
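A standalone sketch of the Small Data Element bit layout the reader above decodes: when the high 16 bits of the first uint32 are nonzero they carry the byte count, the low 16 bits carry the type, and the data sits inline in the same 8-byte tag. The example tag bytes are invented.

import struct

raw = struct.pack('<I4s', (4 << 16) | 6, b'\x2a\x00\x00\x00')  # type 6, 4 inline bytes
mtpn, = struct.unpack('<I', raw[:4])
num_bytes = mtpn >> 16
if num_bytes > 0:              # SDE: data lives in the tag itself
    mtpn &= 0xFFFF
    data = raw[4:4 + num_bytes]
print(mtpn, num_bytes, data)   # 6 4 b'*\x00\x00\x00'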
reading element from html - <td>
python
def htmlListToTR(l, trClass=None, tdClass=None, td1Class=None):
    """
    Turns a list into a <tr><td>something</td></tr>.
    Call this when generating HTML tables dynamically.
    """
    html = "<tr>"
    for item in l:
        if 'array' in str(type(item)):
            item = item[0]  # TODO: why is this needed
        html += "<td>%s</td>" % item
    html += "</tr>"
    if trClass:
        html = html.replace("<tr>", '<tr class="%s">' % trClass)
    if td1Class:
        html = html.replace("<td>", '<td class="%s">' % td1Class, 1)
    if tdClass:
        html = html.replace("<td>", '<td class="%s">' % tdClass)
    return html
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/doc/oldcode/swhlab/core/common.py#L192-L211
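An example call, assuming htmlListToTR above is in scope:

row = htmlListToTR(['alpha', 3.14, 42], trClass='data')
print(row)  # <tr class="data"><td>alpha</td><td>3.14</td><td>42</td></tr>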
reading element from html - <td>
python
def read(self):
    ''' Read tagged doc from multiple files (sents, tokens, concepts, links, tags) '''
    warnings.warn("Document.read() is deprecated and will be removed in near future.",
                  DeprecationWarning)
    with TxtReader.from_doc(self) as reader:
        reader.read(self)
    return self
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/texttaglib.py#L639-L644
reading element from html - <td>
python
def iter_links_element_text(cls, element):
    '''Get the element text as a link.'''
    if element.text:
        link_type = identify_link_type(element.text)

        yield LinkInfo(
            element=element, tag=element.tag, attrib=None,
            link=element.text,
            inline=False, linked=True,
            base_link=None,
            value_type='plain',
            link_type=link_type
        )
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/scraper/html.py#L380-L391
reading element from html - <td>
python
def element_content_as_string(element):
    """Serialize etree.Element.

    Note: element may contain more than one child or only text (i.e. no
    child at all). Therefore the resulting string may raise an exception,
    when passed back to etree.XML().
    """
    if len(element) == 0:
        return element.text or ""  # Make sure, None is returned as ''
    stream = compat.StringIO()
    for childnode in element:
        stream.write(xml_to_bytes(childnode, pretty_print=False) + "\n")
        # print(xml_to_bytes(childnode, pretty_print=False), file=stream)
    s = stream.getvalue()
    stream.close()
    return s
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/xml_tools.py#L114-L129
reading element from html - <td>
python
def extract_text(html,
                 guess_punct_space=True,
                 guess_layout=True,
                 newline_tags=NEWLINE_TAGS,
                 double_newline_tags=DOUBLE_NEWLINE_TAGS):
    """
    Convert html to text, cleaning invisible content such as styles.

    Almost the same as normalize-space xpath, but this also adds spaces
    between inline elements (like <span>) which are often used as block
    elements in html markup, and adds appropriate newlines to make output
    better formatted.

    html should be a unicode string or an already parsed lxml.html element.

    ``html_text.etree_to_text`` is a lower-level function which only accepts
    an already parsed lxml.html Element, and is not doing html cleaning
    itself.

    When guess_punct_space is True (default), no extra whitespace is added
    for punctuation. This has a slight (around 10%) performance overhead
    and is just a heuristic.

    When guess_layout is True (default), a newline is added before and after
    ``newline_tags`` and two newlines are added before and after
    ``double_newline_tags``. This heuristic makes the extracted text more
    similar to how it is rendered in the browser. Default newline and double
    newline tags can be found in `html_text.NEWLINE_TAGS` and
    `html_text.DOUBLE_NEWLINE_TAGS`.
    """
    if html is None:
        return ''
    cleaned = _cleaned_html_tree(html)
    return etree_to_text(
        cleaned,
        guess_punct_space=guess_punct_space,
        guess_layout=guess_layout,
        newline_tags=newline_tags,
        double_newline_tags=double_newline_tags,
    )
https://github.com/TeamHG-Memex/html-text/blob/871d4dbe9f4f99e5f041110c60458adcaae6fab4/html_text/html_text.py#L180-L219
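A hedged usage sketch for the function above (the markup is invented); with guess_layout enabled, block-level tags such as table rows get newlines around their text:

import html_text

print(html_text.extract_text(
    '<table><tr><td>one</td><td>two</td></tr></table>'))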
reading element from html - <td>
python
def read_xml(filename):
    """
    Use et to read in a xml file, or string, into a Element object.

    :param filename: File to parse.
    :return: lxml._elementTree object or None
    """
    parser = et.XMLParser(remove_blank_text=True)
    isfile = False
    try:
        isfile = os.path.exists(filename)
    except ValueError as e:
        if 'path too long for Windows' in str(e):
            pass
        else:
            raise
    try:
        if isfile:
            return et.parse(filename, parser)
        else:
            r = et.fromstring(filename, parser)
            return r.getroottree()
    except IOError:
        log.exception('unable to open file [{}]'.format(filename))
    except et.XMLSyntaxError:
        log.exception('unable to parse XML [{}]'.format(filename))
        return None
    return None
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/utils/xmlutils.py#L30-L57
reading element from html - <td>
python
def _parse_thead_tbody_tfoot(self, table_html):
    """
    Given a table, return parsed header, body, and foot.

    Parameters
    ----------
    table_html : node-like

    Returns
    -------
    tuple of (header, body, footer), each a list of list-of-text rows.

    Notes
    -----
    Header and body are lists-of-lists. Top level list is a list of rows.
    Each row is a list of str text.

    Logic: Use <thead>, <tbody>, <tfoot> elements to identify header, body,
    and footer, otherwise:
      - Put all rows into body
      - Move rows from top of body to header only if all elements inside
        row are <th>
      - Move rows from bottom of body to footer only if all elements inside
        row are <th>
    """
    header_rows = self._parse_thead_tr(table_html)
    body_rows = self._parse_tbody_tr(table_html)
    footer_rows = self._parse_tfoot_tr(table_html)

    def row_is_all_th(row):
        return all(self._equals_tag(t, 'th') for t in self._parse_td(row))

    if not header_rows:
        # The table has no <thead>. Move the top all-<th> rows from
        # body_rows to header_rows. (This is a common case because many
        # tables in the wild have no <thead> or <tfoot>.)
        while body_rows and row_is_all_th(body_rows[0]):
            header_rows.append(body_rows.pop(0))

    header = self._expand_colspan_rowspan(header_rows)
    body = self._expand_colspan_rowspan(body_rows)
    footer = self._expand_colspan_rowspan(footer_rows)

    return header, body, footer
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L375-L420
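A standalone sketch of the header-promotion heuristic described above, on a simplified row model (each row is a list of (tag, text) cells; the data is invented): rows made up entirely of <th> cells migrate from the body to the header.

def promote_header_rows(rows):
    header = []
    while rows and all(tag == 'th' for tag, _ in rows[0]):
        header.append([text for _, text in rows.pop(0)])
    return header, rows

rows = [[('th', 'Name'), ('th', 'Age')], [('td', 'Bo'), ('td', '7')]]
header, body = promote_header_rows(rows)
print(header)  # [['Name', 'Age']]
print(body)    # [[('td', 'Bo'), ('td', '7')]]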
reading element from html - <td>
python
def ReadItem(self, document_link, options=None):
    """Reads a document.

    :param str document_link:
        The link to the document.
    :param dict options:
        The request options for the request.

    :return:
        The read Document.
    :rtype:
        dict
    """
    if options is None:
        options = {}

    path = base.GetPathFromLink(document_link)
    document_id = base.GetResourceIdOrFullNameFromLink(document_link)
    return self.Read(path, 'docs', document_id, None, options)
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/cosmos_client.py#L1094-L1117
reading element from html - <td>
python
def read_ttl(path):
    '''
    Helper function to read Document in TTL-TXT format (i.e. ${docname}_*.txt)

    E.g. Document.read_ttl('~/data/myfile') is the same as
    Document('myfile', '~/data/').read()
    '''
    warnings.warn("Document.read_ttl() is deprecated and will be removed in near future. "
                  "Use read() instead", DeprecationWarning)
    doc_path = os.path.dirname(path)
    doc_name = os.path.basename(path)
    return Document(doc_name, doc_path).read()
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/texttaglib.py#L647-L654
reading element from html - <td>
python
def _print_html(self):
    """
    Internal method to call the javascript/html table.
    """
    cur_path = os.path.dirname(os.path.realpath(sys.argv[0]))
    shutil.copytree(cur_path + 'webpage/template/', '')
https://github.com/samghelms/mathviz/blob/30fe89537379faea4de8c8b568ac6e52e4d15353/mathviz_hopper/src/table.py#L172-L178
reading element from html - <td>
python
def parse_impl(self):
    """
    Parses the HTML content as a stream. This is far less memory intensive
    than loading the entire HTML file into memory, like BeautifulSoup does.
    """
    # Cast to str to ensure not unicode under Python 2, as the parser
    # doesn't like that.
    parser = XMLParser(encoding=str('UTF-8'))
    element_iter = ET.iterparse(self.handle, events=("start", "end"), parser=parser)
    for pos, element in element_iter:
        tag, class_attr = _tag_and_class_attr(element)
        if tag == "h1" and pos == "end":
            if not self.user:
                self.user = element.text.strip()
        elif tag == "div" and "thread" in class_attr and pos == "start":
            participants = self.parse_participants(element)
            thread = self.parse_thread(participants, element_iter, True)
            self.save_thread(thread)
https://github.com/ownaginatious/fbchat-archive-parser/blob/f1e66cea864f1c07b825fc036071f443693231d5/fbchat_archive_parser/parser.py#L385-L404
reading element from html - <td>
python
def html(self, data=None, template=None):
    """
    Send html document to user.

    Args:
        - data: Dict to render template, or string with rendered HTML.
        - template: Name of template to render HTML document with passed data.
    """
    if data is None:
        data = {}
    if template:
        return render(self.request, template, data)
    return HttpResponse(data)
https://github.com/djangomini/djangomini/blob/cfbe2d59acf0e89e5fd442df8952f9a117a63875/djangomini/controllers.py#L39-L51
reading element from html - <td>
python
def get_visible_elements(self, locator, params=None, timeout=None):
    """
    Get elements both present AND visible in the DOM.

    If timeout is 0 (zero) return WebElement instance or None, else we wait
    and retry for timeout and raise TimeoutException should the element not
    be found.

    :param locator: locator tuple
    :param params: (optional) locator params
    :param timeout: (optional) time to wait for element (default: self._explicit_wait)
    :return: WebElement instance
    """
    return self.get_present_elements(locator, params, timeout, True)
https://github.com/Projectplace/basepage/blob/735476877eb100db0981590a6d12140e68652167/basepage/base_page.py#L450-L462
reading element from html - <td>
python
def read(ctx, input, output):
    """Output processed document elements."""
    log.info('chemdataextractor.read')
    log.info('Reading %s' % input.name)
    doc = Document.from_file(input)
    for element in doc.elements:
        output.write(u'%s : %s\n=====\n' % (element.__class__.__name__,
                                            six.text_type(element)))
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/__init__.py#L60-L66
reading element from html - <td>
python
def is_element_with_text_present(self, locator, text, params=None, visible=False):
    """
    Is element that contains <text> either by text or by attribute value present.

    Note: Will return the element if the outcome is positive.

    :param locator: locator tuple or list of WebElements
    :param text: text that the element should contain
    :param params: (optional) locator parameters
    :param visible: (optional) if the element should also be visible (default: False)
    :return: WebElement instance or False
    """
    elements = self.get_present_elements(locator, params, 0, visible) if \
        not isinstance(locator, list) else locator

    if not elements:  # Can't iterate over bool
        return False

    for element in elements:
        element_text = self.get_text(element)
        if element_text is not None and text in element_text.strip():
            return element

    return False
https://github.com/Projectplace/basepage/blob/735476877eb100db0981590a6d12140e68652167/basepage/base_page.py#L344-L365
reading element from html - <td>
python
def _get_element_text(self, element):
    """Return the textual content of the element and its children"""
    text = ''
    if element.text is not None:
        text += element.text
    for child in element.getchildren():
        text += self._get_element_text(child)
    if element.tail is not None:
        text += element.tail
    return text
https://github.com/jbarlow83/OCRmyPDF/blob/79c84eefa353632a3d7ccddbd398c6678c1c1777/src/ocrmypdf/hocrtransform.py#L103-L114
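A standalone equivalent of the recursive walk above using only the standard library (the markup is invented): .text, the children's text, and .tail are concatenated in document order.

import xml.etree.ElementTree as ET

def element_text(element):
    text = element.text or ''
    for child in element:
        text += element_text(child)
    return text + (element.tail or '')

row = ET.fromstring('<tr><td>first</td><td>second</td></tr>')
print(element_text(row))  # firstsecond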
reading element from html - <td>
python
def _parse_element(self, elt, parent_is_keep=False):
    """
    Parses an Element recursively

    :param elt: HtmlElement to parse
    :type elt: lxml.html.HtmlElement
    :param parent_is_keep: Whether the element is inside a keep element or not
    :type parent_is_keep: bool
    """
    for e in elt.iterchildren():

        is_discard_element = self._is_discard(e)
        is_keep_element = self._is_keep(e)

        # Element is an explicit one to discard, flag it and continue
        if is_discard_element and not is_keep_element:
            self.elts_to_remove.append(e)
            continue

        if not parent_is_keep:
            # Parent element is not an explicit keep, normal process

            # Element is an explicit one to keep, inspect it
            if is_keep_element:
                self._parse_element(e, parent_is_keep=True)
                continue

            # Has a descendant to keep, inspect it
            if self._has_keep_elt_in_descendants(e):
                self._parse_element(e)
                continue

            # Element did not match anything, remove it
            self.elts_to_remove.append(e)

        else:
            # Element is a child of a keep element, only check explicit discards
            self._parse_element(e, parent_is_keep=True)
https://github.com/jurismarches/chopper/blob/53c5489a53e3a5d205a5cb207df751c09633e7ce/chopper/html/extractor.py#L135-L171
reading element from html - <td>
python
def iter_links_meta_element(cls, element):
    '''Iterate the ``meta`` element for links.

    This function handles refresh URLs.
    '''
    if element.attrib.get('http-equiv', '').lower() == 'refresh':
        content_value = element.attrib.get('content')

        if content_value:
            link = parse_refresh(content_value)

            if link:
                yield LinkInfo(
                    element=element, tag=element.tag,
                    attrib='http-equiv',
                    link=link,
                    inline=False, linked=True,
                    base_link=None,
                    value_type='refresh',
                    link_type=None  # treat it as a redirect
                )
    else:
        for link_info in cls.iter_links_open_graph_meta(element):
            yield link_info
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/scraper/html.py#L422-L444
reading element from html - <td>
python
def get_partial_contenthandler(element):
    """Build a `PartialLIGOLWContentHandler` to read only this element

    Parameters
    ----------
    element : `type`, subclass of :class:`~ligo.lw.ligolw.Element`
        the element class to be read

    Returns
    -------
    contenthandler : `type`
        a subclass of :class:`~ligo.lw.ligolw.PartialLIGOLWContentHandler`
        to read only the given `element`
    """
    from ligo.lw.ligolw import PartialLIGOLWContentHandler
    from ligo.lw.table import Table

    if issubclass(element, Table):
        def _element_filter(name, attrs):
            return element.CheckProperties(name, attrs)
    else:
        def _element_filter(name, _):
            return name == element.tagName

    return build_content_handler(PartialLIGOLWContentHandler, _element_filter)
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/ligolw.py#L135-L159
reading element from html - <td>
python
def parse_at_element(
        self,
        element,  # type: ET.Element
        state  # type: _ProcessorState
):
    # type: (...) -> Any
    """Parse the provided element as a dictionary."""
    parsed_dict = {}

    for child in self._child_processors:
        state.push_location(child.element_path)
        parsed_dict[child.alias] = child.parse_from_parent(element, state)
        state.pop_location()

    return parsed_dict
https://github.com/gatkin/declxml/blob/3a2324b43aee943e82a04587fbb68932c6f392ba/declxml.py#L945-L959
reading element from html - <td>
python
def wait_until_element_contains_text(self, element, text, timeout=None):
    """Search element and wait until it contains the expected text

    :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
    :param text: text expected to be contained into the element
    :param timeout: max time to wait
    :returns: the web element if it contains the expected text
    :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
    :raises TimeoutException: If the element does not contain the expected text after the timeout
    """
    return self._wait_until(self._expected_condition_find_element_containing_text,
                            (element, text), timeout)
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/utils.py#L381-L391
reading element from html - <td>
python
def fromstring(html, guess_charset=True, parser=None):
    """Parse the html, returning a single element/document.

    This tries to minimally parse the chunk of text, without knowing if it
    is a fragment or a document.

    base_url will set the document's base_url attribute (and the tree's
    docinfo.URL)
    """
    if not isinstance(html, _strings):
        raise TypeError('string required')
    doc = document_fromstring(html, parser=parser,
                              guess_charset=guess_charset)

    # document starts with doctype or <html>, full document!
    start = html[:50].lstrip().lower()
    if start.startswith('<html') or start.startswith('<!doctype'):
        return doc

    head = _find_tag(doc, 'head')

    # if the head is not empty we have a full document
    if len(head):
        return doc

    body = _find_tag(doc, 'body')

    # The body has just one element, so it was probably a single
    # element passed in
    if (len(body) == 1 and (not body.text or not body.text.strip())
            and (not body[-1].tail or not body[-1].tail.strip())):
        return body[0]

    # Now we have a body which represents a bunch of tags which have the
    # content that was passed in. We will create a fake container, which
    # is the body tag, except <body> implies too much structure.
    if _contains_block_level_tag(body):
        body.tag = 'div'
    else:
        body.tag = 'span'
    return body
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/html5parser.py#L136-L175
reading element from html - <td>
python
def _parse_element_r(self, el, specials, refs, id=None, element_cls=Paragraph):
    """Recursively parse HTML/XML element and its children into a list of Document elements."""
    elements = []
    if el.tag in {etree.Comment, etree.ProcessingInstruction}:
        return []
    # if el in refs:
    #     return [element_cls('', references=refs[el])]
    if el in specials:
        return specials[el]
    id = el.get('id', id)
    references = refs.get(el, [])
    if el.text is not None:
        elements.append(element_cls(six.text_type(el.text), id=id, references=references))
    elif references:
        elements.append(element_cls('', id=id, references=references))
    for child in el:
        # br is a special case - technically inline, but we want to split
        if child.tag not in {etree.Comment, etree.ProcessingInstruction} and child.tag.lower() == 'br':
            elements.append(element_cls(''))

        child_elements = self._parse_element_r(child, specials=specials, refs=refs,
                                               id=id, element_cls=element_cls)
        if (self._is_inline(child) and len(elements) > 0 and len(child_elements) > 0
                and isinstance(elements[-1], (Text, Sentence))
                and isinstance(child_elements[0], (Text, Sentence))
                and type(elements[-1]) == type(child_elements[0])):
            elements[-1] += child_elements.pop(0)
        elements.extend(child_elements)
        if child.tail is not None:
            if self._is_inline(child) and len(elements) > 0 and isinstance(elements[-1], element_cls):
                elements[-1] += element_cls(six.text_type(child.tail), id=id)
            else:
                elements.append(element_cls(six.text_type(child.tail), id=id))
    return elements
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/reader/markup.py#L62-L93
reading element from html - <td>
python
def _read_elem_elements(self, fid):
    """Read all FE elements from the file stream. Elements are stored in
    the self.element_data dict. The keys refer to the element types:

     *  3: Triangular grid (three nodes)
     *  8: Quadrangular grid (four nodes)
     * 11: Mixed boundary element
     * 12: Neumann (no-flow) boundary element
    """
    elements = {}

    # read elements
    for element_type in range(0, self.header['nr_element_types']):
        element_list = []
        for element_coordinates in range(
                0, self.header['element_infos'][element_type, 1]):
            element_coordinates_line = fid.readline().lstrip()
            tmp_element = self.element()
            tmp_element.nodes = np.fromstring(element_coordinates_line,
                                              dtype=int, sep=' ')
            tmp_element.xcoords = self.nodes['presort'][tmp_element.nodes - 1, 1]
            tmp_element.zcoords = self.nodes['presort'][tmp_element.nodes - 1, 2]
            element_list.append(tmp_element)
        element_type_number = self.header['element_infos'][element_type, 0]
        elements[element_type_number] = element_list
    self.element_data = elements
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/grid.py#L231-L259
reading element from html - <td>
python
def render(self, text, add_header=False):
    """Render the HTML.

    Parameters
    ----------
    add_header: boolean (default: False)
        If True, add HTML5 header and footer.

    Returns
    -------
    str
        The rendered HTML.
    """
    html = mark_text(text, self.aesthetics, self.rules)
    html = html.replace('\n', '<br/>')
    if add_header:
        html = '\n'.join([HEADER, self.css, MIDDLE, html, FOOTER])
    #print('\n'.join((HEADER, self.css, MIDDLE, html, FOOTER)))
    return html
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/prettyprinter.py#L121-L140
reading element from html - <td>
python
def read_content(self):
    """Return URL target content, or in case of directories a dummy HTML
    file with links to the files."""
    if self.is_directory():
        self.url_connection.cwd(self.filename)
        self.files = self.get_files()
        # XXX limit number of files?
        data = get_index_html(self.files)
    else:
        # download file in BINARY mode
        ftpcmd = "RETR %s" % self.filename
        buf = StringIO()

        def stor_data(s):
            """Helper method storing given data"""
            # limit the download size
            if (buf.tell() + len(s)) > self.max_size:
                raise LinkCheckerError(_("FTP file size too large"))
            buf.write(s)

        self.url_connection.retrbinary(ftpcmd, stor_data)
        data = buf.getvalue()
        buf.close()
    return data
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/ftpurl.py#L188-L209
reading element from html - <td>
python
def iter_links_element(self, element):
    '''Iterate a HTML element.'''
    # reference: lxml.html.HtmlMixin.iterlinks()
    attrib = element.attrib
    tag = element.tag

    if tag == 'link':
        iterable = self.iter_links_link_element(element)
    elif tag == 'meta':
        iterable = self.iter_links_meta_element(element)
    elif tag in ('object', 'applet'):
        iterable = self.iter_links_object_element(element)
    elif tag == 'param':
        iterable = self.iter_links_param_element(element)
    elif tag == 'style':
        iterable = self.iter_links_style_element(element)
    elif tag == 'script':
        iterable = self.iter_links_script_element(element)
    else:
        iterable = self.iter_links_plain_element(element)

    # RSS/Atom
    if tag in ('link', 'url', 'icon'):
        iterable = itertools.chain(
            iterable,
            self.iter_links_element_text(element)
        )

    for link_info in iterable:
        yield link_info

    if 'style' in attrib and self.css_scraper:
        for link in self.css_scraper.scrape_links(attrib['style']):
            yield LinkInfo(
                element=element, tag=element.tag, attrib='style',
                link=link,
                inline=True, linked=False,
                base_link=None,
                value_type='css',
                link_type=LinkType.media,
            )
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/scraper/html.py#L338-L377
reading element from html - <td>
python
def _iter_texts(self, tree):
    """Iterates over texts in given HTML tree."""
    skip = (
        not isinstance(tree, lxml.html.HtmlElement)  # comments, etc.
        or tree.tag in self.skipped_tags
    )
    if not skip:
        if tree.text:
            yield Text(tree.text, tree, 'text')
        for child in tree:
            for text in self._iter_texts(child):
                yield text
    if tree.tail:
        yield Text(tree.tail, tree, 'tail')
https://github.com/honzajavorek/tipi/blob/cbe51192725608b6fba1244a48610ae231b13e08/tipi/html.py#L151-L164
reading element from html - <td>
python
def html_to_text(html_string):
    """
    Returns a plain text string when given an HTML string.
    Handles a, p, h1 to h6 and br; inserts newline chars to create space
    in the string.

    @todo handle images
    """
    # create a valid html document from string
    # beware that it inserts <html> <body> and <p> tags
    # where needed
    html_tree = html.document_fromstring(html_string)

    # handle header tags
    for h in html_tree.cssselect("h1, h2, h3, h4, h5, h6"):
        # add two newlines after a header tag
        h.text = h.text + '\n\n'

    # handle links
    # find all a tags starting from the root of the document //
    # and replace the link with (link)
    for a in html_tree.xpath("//a"):
        href = a.attrib['href']
        a.text = a.text + " (" + href + ")"

    # handle paragraphs
    for p in html_tree.xpath("//p"):
        # keep the tail if there is one
        # or add two newlines after the text if there is no tail
        p.tail = p.tail if p.tail else "\n\n"

    # handle breaks
    for br in html_tree.xpath("//br"):
        # add a newline and then the tail (remaining text after the <br/> tag)
        # or add a newline only if there is no tail
        # http://stackoverflow.com/questions/18660382/how-can-i-preserve-br-as-newlines-with-lxml-html-text-content-or-equivalent?rq=1
        br.tail = "\n" + br.tail if br.tail else "\n"

    return html_tree.text_content()
https://github.com/solocompt/plugs-core/blob/19fd23101fcfdabe657485f0a22e6b63e2b44f9d/plugs_core/utils.py#L82-L119
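An example call, assuming html_to_text above is importable (the markup is invented); headers gain blank lines and links are rewritten as "text (href)":

print(html_to_text(
    '<h1>Title</h1><p>See the <a href="http://example.com">docs</a>.</p>'))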
reading element from html - <td>
python
def publish_string(self, rest, outfile, styles=''):
    """Render a reST string as HTML.
    """
    html = self.convert_string(rest)
    html = self.strip_xml_header(html)
    html = self.apply_styles(html, styles)
    self.write_file(html, outfile)
    return outfile
https://github.com/Jarn/jarn.viewdoc/blob/59ae82fd1658889c41096c1d8c08dcb1047dc349/jarn/viewdoc/viewdoc.py#L341-L348
reading element from html - <td>
python
def htmlReadDoc(cur, URL, encoding, options):
    """parse an XML in-memory document and build a tree. """
    ret = libxml2mod.htmlReadDoc(cur, URL, encoding, options)
    if ret is None:
        raise treeError('htmlReadDoc() failed')
    return xmlDoc(_obj=ret)
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L828-L832
reading element from html - <td>
python
def htmlReadFd(fd, URL, encoding, options):
    """parse an XML from a file descriptor and build a tree. """
    ret = libxml2mod.htmlReadFd(fd, URL, encoding, options)
    if ret is None:
        raise treeError('htmlReadFd() failed')
    return xmlDoc(_obj=ret)
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L834-L838
reading element from html - <td>
python
def html(self) -> str:
    """Return string representation of this.

    Used in start tag of HTML representation of the Element node.
    """
    if self._owner and self.name in self._owner._special_attr_boolean:
        return self.name
    else:
        value = self.value
        if isinstance(value, str):
            value = html_.escape(value)
        return '{name}="{value}"'.format(name=self.name, value=value)
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/element.py#L175-L186
reading element from html - <td>
python
def read(path, mode='tsv'):
    '''
    Helper function to read Document in TTL-TXT format (i.e. ${docname}_*.txt)

    E.g. read('~/data/myfile') is the same as
    Document('myfile', '~/data/').read()
    '''
    if mode == 'tsv':
        return TxtReader.from_path(path).read()
    elif mode == 'json':
        return read_json(path)
    else:
        raise Exception("Invalid mode - [{}] was provided".format(mode))
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/texttaglib.py#L936-L945
reading element from html - <td>
python
def read(self, doc=None):
    ''' Read tagged doc from multiple files (sents, tokens, concepts, links, tags) '''
    if not self.sent_stream:
        raise Exception("There is no sentence data stream available")
    if doc is None:
        doc = Document(name=self.doc_name, path=self.doc_path)
    for row in self.sent_reader():
        if len(row) == 2:
            sid, text = row
            doc.new_sent(text.strip(), ID=sid)
        elif len(row) == 4:
            sid, text, flag, comment = row
            sent = doc.new_sent(text.strip(), ID=sid)
            sent.flag = flag
            sent.comment = comment
    # Read tokens if available
    if self.token_stream:
        # read all tokens first
        sent_tokens_map = dd(list)
        for token_row in self.token_reader():
            if len(token_row) == 6:
                sid, wid, token, lemma, pos, comment = token_row
            else:
                sid, wid, token, lemma, pos = token_row
                comment = ''
            sid = int(sid)
            sent_tokens_map[sid].append((token, lemma, pos.strip(), wid, comment))
            # TODO: verify wid?
        # import tokens
        for sent in doc:
            sent_tokens = sent_tokens_map[sent.ID]
            sent.import_tokens([x[0] for x in sent_tokens])
            for ((tk, lemma, pos, wid, comment), token) in zip(sent_tokens, sent.tokens):
                token.pos = pos
                token.lemma = lemma
                token.comment = comment
        # only read concepts if tokens are available
        if self.concept_stream:
            # read concepts
            for concept_row in self.concept_reader():
                if len(concept_row) == 5:
                    sid, cid, clemma, tag, comment = concept_row
                else:
                    sid, cid, clemma, tag = concept_row
                    comment = ''
                cid = int(cid)
                doc.get(sid).new_concept(tag.strip(), clemma=clemma, cidx=cid, comment=comment)
            # only read concept-token links if tokens and concepts are available
            for sid, cid, wid in self.link_reader():
                sent = doc.get(sid)
                cid = int(cid)
                wid = int(wid.strip())
                sent.concept(cid).add_token(sent[wid])
    # read sentence level tags
    if self.tag_stream:
        for row in self.tag_reader():
            if len(row) == 5:
                sid, cfrom, cto, label, tagtype = row
                wid = None
            if len(row) == 6:
                sid, cfrom, cto, label, tagtype, wid = row
            if cfrom:
                cfrom = int(cfrom)
            if cto:
                cto = int(cto)
            if wid is None or wid == '':
                doc.get(sid).new_tag(label, cfrom, cto, tagtype=tagtype)
            else:
                doc.get(sid)[int(wid)].new_tag(label, cfrom, cto, tagtype=tagtype)
    return doc
https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/texttaglib.py#L723-L792
reading element from html - <td>
python
def dtdElementDesc(self, name):
    """Search the DTD for the description of this element """
    ret = libxml2mod.xmlGetDtdElementDesc(self._o, name)
    if ret is None:
        raise treeError('xmlGetDtdElementDesc() failed')
    __tmp = xmlElement(_obj=ret)
    return __tmp
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L5751-L5756
reading element from html - <td>
python
def get_content_html(request):
    """Retrieve content as HTML using the ident-hash (uuid@version)."""
    result = _get_content_json()

    media_type = result['mediaType']
    if media_type == COLLECTION_MIMETYPE:
        content = tree_to_html(result['tree'])
    else:
        content = result['content']

    resp = request.response
    resp.body = content
    resp.status = "200 OK"
    resp.content_type = 'application/xhtml+xml'
    return result, resp
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/content.py#L151-L165
reading element from html - <td>
python
def element_for_url(cls, url):
    """Return the resource at the given URL, as a
    (`http_client.HTTPResponse`, `xml.etree.ElementTree.Element`) tuple
    resulting from a ``GET`` request to that URL."""
    response = cls.http_request(url)
    if response.status != 200:
        cls.raise_http_error(response)

    assert response.getheader('Content-Type').startswith('application/xml')

    response_xml = response.read()
    logging.getLogger('recurly.http.response').debug(response_xml)
    response_doc = ElementTree.fromstring(response_xml)

    return response, response_doc
https://github.com/recurly/recurly-client-python/blob/682217c4e85ec5c8d4e41519ee0620d2dc4d84d7/recurly/resource.py#L372-L386
reading element from html - <td>
python
def text(self, force_get=False):
    """
    Get the text of the element

    @rtype: str
    @return: Text of the element
    """
    def text_element():
        """Wrapper to get text of element"""
        return self.element.text

    def force_text_element():
        """Get text by javascript"""
        return self.driver_wrapper.js_executor.execute_template_and_return_result(
            'getElementText.js', {}, self.element
        )

    if force_get:
        return self.execute_and_handle_webelement_exceptions(
            force_text_element, 'get text by javascript')
    else:
        return self.execute_and_handle_webelement_exceptions(
            text_element, 'get text')
https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/webdriver/webdriverwrapper/WebElementWrapper.py#L490-L512
reading element from html - <td>
python
def readElement(self): """ Reads an AMF3 element from the data stream. @raise DecodeError: The ActionScript type is unsupported. @raise EOStream: No more data left to decode. """ pos = self.stream.tell() try: t = self.stream.read(1) except IOError: raise pyamf.EOStream try: func = self._func_cache[t] except KeyError: func = self.getTypeFunc(t) if not func: raise pyamf.DecodeError("Unsupported ActionScript type %s" % ( hex(ord(t)),)) self._func_cache[t] = func try: return func() except IOError: self.stream.seek(pos) raise
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/codec.py#L312-L342
reading element from html - <td>
python
def wait_until_element_not_contain_text(self, element, text, timeout=None): """Search element and wait until it does not contain the expected text :param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found :param text: text expected to be contained into the element :param timeout: max time to wait :returns: the web element if it does not contain the given text :rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement :raises TimeoutException: If the element contains the expected text after the timeout """ return self._wait_until(self._expected_condition_find_element_not_containing_text, (element, text), timeout)
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/utils.py#L393-L403
reading element from html - <td>
python
def to_html(self, css_class=''): """ Returns the parameter as a dt/dd pair. """ if self.name and self.type: header_text = '%s (%s)' % (self.name, self.type) elif self.type: header_text = self.type else: header_text = self.name return '<dt>%s</dt><dd>%s</dd>' % (header_text, self.doc)
https://github.com/mdiener/grace/blob/2dab13a2cf636da5da989904c5885166fc94d36d/grace/py27/pyjsdoc.py#L1324-L1334
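A minimal standalone sketch of the same dt/dd logic; param_to_html is a hypothetical helper, not part of pyjsdoc:

def param_to_html(name, type_, doc):
    # Mirror the header rules above: "name (type)" when both exist, else whichever is set.
    if name and type_:
        header = '%s (%s)' % (name, type_)
    else:
        header = type_ or name
    return '<dt>%s</dt><dd>%s</dd>' % (header, doc)

print(param_to_html('count', 'Number', 'How many rows to render'))
# <dt>count (Number)</dt><dd>How many rows to render</dd>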
reading element from html - <td>
python
def read_var_header(fd, endian): """Read full header tag. Return a dict with the parsed header, the file position of next tag, a file like object for reading the uncompressed element data. """ mtpn, num_bytes = unpack(endian, 'II', fd.read(8)) next_pos = fd.tell() + num_bytes if mtpn == etypes['miCOMPRESSED']['n']: # read compressed data data = fd.read(num_bytes) dcor = zlib.decompressobj() # from here, read of the decompressed data fd_var = BytesIO(dcor.decompress(data)) del data fd = fd_var # Check the stream is not so broken as to leave cruft behind if dcor.flush() != b'': raise ParseError('Error in compressed data.') # read full tag from the uncompressed data mtpn, num_bytes = unpack(endian, 'II', fd.read(8)) if mtpn != etypes['miMATRIX']['n']: raise ParseError('Expecting miMATRIX type number {}, ' 'got {}'.format(etypes['miMATRIX']['n'], mtpn)) # read the header header = read_header(fd, endian) return header, next_pos, fd
https://github.com/nephics/mat4py/blob/6c1a2ad903937437cc5f24f3c3f5aa2c5a77a1c1/mat4py/loadmat.py#L225-L253
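This tag reader is internal to mat4py; the public entry point that drives it is loadmat. A usage sketch (the .mat path is a placeholder):

from mat4py import loadmat

data = loadmat('measurements.mat')  # placeholder path; returns plain dicts/lists
print(list(data.keys()))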
reading element from html - <td>
python
def content_to_html(content, article_id):
    """Returns article/page content as HTML"""

    def render_node(html, node, index):
        """Renders node as HTML"""
        if node['type'] == 'paragraph':
            return html + '<p>%s</p>' % node['data']
        else:
            if node['type'] == 'ad':
                id = 'div-gpt-ad-1443288719995-' + str(10 + index) + '-' + str(article_id)
                dfp_type = 'Intra_Article_' + str(index + 1)
                size = 'banner'
                if node['data'] == 'mobile':
                    size = 'box'
                newString = '<div class="o-article-embed__advertisement"><div class="o-advertisement o-advertisment--banner o-advertisement--center"><div class="adslot" id="' + id + '" data-size="' + size + '" data-dfp="' + dfp_type + '"></div></div></div>'
                return html + '<div class="o-article-embed o-article-embed--advertisement">%s</div>\n' % newString
            try:
                if node['type'] == 'poll':
                    node['type'] = 'widget'
                    node['data']['data'] = node['data']
                return html + embeds.render(node['type'], node['data'])
            except EmbedException:
                return html

    html = ''
    index = 0
    for node in content:
        html = render_node(html, node, index)
        if node['type'] == 'ad':
            index += 1
    return mark_safe(html)
https://github.com/ubyssey/dispatch/blob/8da6084fe61726f20e9cf675190480cfc45ee764/dispatch/modules/content/render.py#L5-L38
reading element from html - <td>
python
def content(self, name, attrs=None, characters=None): """Writes an element, some content for the element, and then closes the element, all without indentation. :name: the name of the element :attrs: a dict of attributes :characters: the characters to write """ with self.no_inner_space(outer=True): with self.element(name, attrs): if characters: self.characters(characters)
https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/xml/stream_writer.py#L170-L181
reading element from html - <td>
python
def div( txt, *args, **kwargs ): """ Create & return an HTML <div> element by wrapping the passed text buffer. @param txt (basestring): the text buffer to use @param *args (list): if present, \c txt is considered a Python format string, and the arguments are formatted into it @param kwargs (dict): the \c css field can contain the CSS class for the <div> element """ if args: txt = txt.format( *args ) css = kwargs.get('css',HTML_DIV_CLASS) return u'<div class="{}">{!s}</div>'.format( css, txt )
https://github.com/paulovn/sparql-kernel/blob/1d2d155ff5da72070cb2a98fae33ea8113fac782/sparqlkernel/utils.py#L52-L64
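A usage sketch, assuming the module is importable under the path shown in the URL:

from sparqlkernel.utils import div  # assumed import path

print(div('Found {} results', 10))        # args present -> formatted, default CSS class
print(div('No results', css='krn-warn'))  # no args -> used verbatim; class name is hypothetical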
reading element from html - <td>
python
def getHTML(self):
    '''
        getHTML - Get the full HTML as contained within this tree.
            If parsed from a document, this will contain the original whitespacing.

            @returns - <str> of html

            @see getFormattedHTML
            @see getMiniHTML
    '''
    root = self.getRoot()
    if root is None:
        raise ValueError('Did not parse anything. Use parseFile or parseStr')

    if self.doctype:
        doctypeStr = '<!%s>\n' % (self.doctype)
    else:
        doctypeStr = ''

    # 6.6.0: If we have a real root tag, print the outerHTML. If we have a fake root tag
    #   (for the multiple-root condition), print the innerHTML (skipping the fake root tag);
    #   otherwise we would miss untagged text between the multiple root nodes.
    if root.tagName == INVISIBLE_ROOT_TAG:
        return doctypeStr + root.innerHTML
    else:
        return doctypeStr + root.outerHTML
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L768-L796
reading element from html - <td>
python
def _write_html(self, filename): """Read the html site with the given filename from the data directory and write it to :data:`RedirectHandler.wfile`. :param filename: the filename to read :type filename: :class:`str` :returns: None :rtype: None :raises: None """ datapath = os.path.join('html', filename) sitepath = pkg_resources.resource_filename('pytwitcherapi', datapath) with open(sitepath, 'r') as f: html = f.read() self.wfile.write(html.encode('utf-8'))
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/oauth.py#L72-L86
reading element from html - <td>
python
def render(self, **kwargs): """Renders the HTML representation of the element.""" figure = self.get_root() assert isinstance(figure, Figure), ('You cannot render this Element ' 'if it is not in a Figure.') header = self._template.module.__dict__.get('header', None) if header is not None: figure.header.add_child(Element(header(self, kwargs)), name=self.get_name()) html = self._template.module.__dict__.get('html', None) if html is not None: figure.html.add_child(Element(html(self, kwargs)), name=self.get_name()) script = self._template.module.__dict__.get('script', None) if script is not None: figure.script.add_child(Element(script(self, kwargs)), name=self.get_name()) for name, element in self._children.items(): element.render(**kwargs)
https://github.com/python-visualization/branca/blob/4e89e88a5a7ff3586f0852249c2c125f72316da8/branca/element.py#L611-L633
reading element from html - <td>
python
def _parse(self, html): """Parse given string as HTML and return it's etree representation.""" if self._has_body_re.search(html): tree = lxml.html.document_fromstring(html).find('.//body') self.has_body = True else: tree = lxml.html.fragment_fromstring(html, create_parent=self._root_tag) if tree.tag != self._root_tag: # ensure the root element exists even if not really needed, # so the tree has always the same structure root = lxml.html.HtmlElement() root.tag = self._root_tag root.append(tree) return root return tree
https://github.com/honzajavorek/tipi/blob/cbe51192725608b6fba1244a48610ae231b13e08/tipi/html.py#L133-L149
reading element from html - <td>
python
def html(self, unicode=False): """ Return HTML of element """ html = lxml.html.tostring(self.element, encoding=self.encoding) if unicode: html = html.decode(self.encoding) return html
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L36-L41
reading element from html - <td>
python
def to_html(self): """Render as html Args: None Returns: Str the html representation Raises: Errors are propagated """ text = self.text if text is None: text = self.uri return '<a href="%s"%s>%s</a>' % ( self.uri, self.html_attributes(), text)
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/messaging/item/link.py#L50-L67
reading element from html - <td>
python
def dehtml(text):
    '''Remove HTML tags from the input text and format the text accordingly.
    '''
    # added by BoPeng to handle html output from kernel
    #
    # Do not understand why, but I cannot define the class outside of the
    # function.
    try:
        # python 2
        from HTMLParser import HTMLParser
    except ImportError:
        # python 3
        from html.parser import HTMLParser

    class _DeHTMLParser(HTMLParser):
        '''This parser analyzes input text, removes HTML tags such as
        <p>, <br>, <ul>, <li> etc. and returns properly formatted text.
        '''

        def __init__(self):
            HTMLParser.__init__(self)
            self.__text = []

        def handle_data(self, data):
            text = data.strip()
            if len(text) > 0:
                text = re.sub('[ \t\r\n]+', ' ', text)
                self.__text.append(text + ' ')

        def handle_starttag(self, tag, attrs):
            if tag == 'p':
                self.__text.append('\n\n\n\n')
            elif tag == 'br':
                self.__text.append('\n\n')
            elif tag == 'ul':
                self.__text.append('')
            elif tag == 'li':
                self.__text.append('\n\n * ')

        def handle_endtag(self, tag):
            if tag == 'ul':
                self.__text.append('\n\n')
            if tag == 'li':
                self.__text.append('\n\n')

        def handle_startendtag(self, tag, attrs):
            if tag == 'br':
                self.__text.append('\n\n')

        def text(self):
            return ''.join(self.__text).strip()

    try:
        parser = _DeHTMLParser()
        parser.feed(text)
        parser.close()
        return parser.text()
    except Exception:
        return text
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/misc/vim-ipython/vim_ipython.py#L356-L413
reading element from html - <td>
python
def parse(self, text, maxwidth=None, maxheight=None, template_dir=None, context=None, urlize_all_links=CONSUMER_URLIZE_ALL): """ Scans a block of text, replacing anything matching a provider pattern with an OEmbed html snippet, if possible. Templates should be stored at oembed/{format}.html, so for example: oembed/video.html An optional template_dir can be provided, allowing for oembed/[template_dir]/video.html These templates are passed a context variable, ``response``, which is an OEmbedResource, as well as the ``original_url`` """ context = context or Context() context['maxwidth'] = maxwidth context['maxheight'] = maxheight try: text = unicode(text) except UnicodeDecodeError: text = unicode(text.decode('utf-8')) return self.parse_data(text, maxwidth, maxheight, template_dir, context, urlize_all_links)
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/parsers/base.py#L47-L74
reading element from html - <td>
python
def to_html(self): """Render as html. """ uri = resource_url( resources_path('img', 'logos', 'inasafe-logo-white.png')) snippet = ( '<div class="branding">' '<img src="%s" title="%s" alt="%s" %s/></div>') % ( uri, 'InaSAFE', 'InaSAFE', self.html_attributes()) return snippet
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/messaging/item/brand.py#L52-L64
reading element from html - <td>
python
def stripHtml(html, joiner=''):
    """
    Strips the HTML tags from the input text, returning the basic text.
    This algorithm was found on
    [http://stackoverflow.com/questions/753052/strip-html-from-strings-in-python StackOverflow].

    :param      html | <str>

    :return     <str>
    """
    stripper = HTMLStripper()
    stripper.feed(html.replace('<br>', '\n').replace('<br/>', '\n'))
    return stripper.text(joiner)
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/text.py#L692-L704
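A usage sketch (the import path is assumed from the URL above; exact whitespace depends on HTMLStripper):

from projex.text import stripHtml  # assumed import path

print(stripHtml('<td>first cell</td><br><td>second cell</td>'))
# roughly 'first cell\nsecond cell' -- <br> is normalized to a newline first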
reading element from html - <td>
python
def html_table(data, header=True, limit=None, withtype=False):
    """
    Return a double iterable as an HTML table
      @param data (iterable): the data to format
      @param header (bool): if the first row is a header row
      @param limit (int): maximum number of rows to render (excluding header)
      @param withtype (bool): if columns are to have an alternating CSS class
        (even/odd) or not.
      @return (int,string): a pair <number-of-rendered-rows>, <html-table>
    """
    if header and limit:
        limit += 1
    ct = 'th' if header else 'td'
    rc = 'hdr' if header else 'odd'

    html = u'<table>'
    rn = -1
    for rn, row in enumerate(data):
        html += u'<tr class={}>'.format(rc)
        html += '\n'.join((html_elem(c, ct, withtype) for c in row))
        html += u'</tr>'
        rc = 'even' if rc == 'odd' else 'odd'
        ct = 'td'
        if limit:
            limit -= 1
            if not limit:
                break
    return (0, '') if rn < 0 else (rn+1-header, html+u'</table>')
https://github.com/paulovn/sparql-kernel/blob/1d2d155ff5da72070cb2a98fae33ea8113fac782/sparqlkernel/connection.py#L105-L141
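A runnable sketch, assuming html_table above is in scope; html_elem is defined elsewhere in sparqlkernel, so a trivial stand-in is used here just to exercise the row/class logic:

def html_elem(content, ct, withtype=False):
    """Trivial stand-in for the module's html_elem helper."""
    return u'<{0}>{1}</{0}>'.format(ct, content)

rows = [('tag', 'count'), ('td', 3), ('th', 5)]
n, table = html_table(rows, header=True)
print(n)      # 2 -> data rows rendered, header excluded
print(table)  # <table><tr class=hdr><th>tag</th>... with alternating odd/even <td> rows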
reading element from html - <td>
python
def htmlNewDocNoDtD(URI, ExternalID):
    """Creates a new HTML document without a DTD node if @URI and
       @ExternalID are None """
    ret = libxml2mod.htmlNewDocNoDtD(URI, ExternalID)
    if ret is None:
        raise treeError('htmlNewDocNoDtD() failed')
    return xmlDoc(_obj=ret)
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L867-L872
reading element from html - <td>
python
def html(self): """ str: HTML representation of the page Note: Not settable Warning: This can be slow for very large pages """ if self._html is False: self._html = None query_params = { "prop": "revisions", "rvprop": "content", "rvlimit": 1, "rvparse": "", "titles": self.title, } request = self.mediawiki.wiki_request(query_params) page = request["query"]["pages"][self.pageid] self._html = page["revisions"][0]["*"] return self._html
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawikipage.py#L181-L200
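A usage sketch with the pymediawiki client this property belongs to (needs network access; the page title is just an example):

from mediawiki import MediaWiki

wiki = MediaWiki()
page = wiki.page('HTML element')  # example title
print(page.html[:200])            # fetched lazily on first access, then cached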
reading element from html - <td>
python
def get_as_html(self) -> str: """ Returns the table object as an HTML string. :return: HTML representation of the table. """ table_string = self._get_pretty_table().get_html_string() title = ('{:^' + str(len(table_string.splitlines()[0])) + '}').format(self.title) return f'<center><h1>{title}</h1></center>{table_string}'
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/table.py#L131-L139
reading element from html - <td>
python
def htmldiff(old_html, new_html): ## FIXME: this should take parsed documents too, and use their body ## or other content. """ Do a diff of the old and new document. The documents are HTML *fragments* (str/UTF8 or unicode), they are not complete documents (i.e., no <html> tag). Returns HTML with <ins> and <del> tags added around the appropriate text. Markup is generally ignored, with the markup from new_html preserved, and possibly some markup from old_html (though it is considered acceptable to lose some of the old markup). Only the words in the HTML are diffed. The exception is <img> tags, which are treated like words, and the href attribute of <a> tags, which are noted inside the tag itself when there are changes. """ old_html_tokens = tokenize(old_html) new_html_tokens = tokenize(new_html) result = htmldiff_tokens(old_html_tokens, new_html_tokens) result = ''.join(result).strip() return fixup_ins_del_tags(result)
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/diff.py#L154-L175
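htmldiff ships with lxml, so a usage sketch runs as-is (exact ins/del placement can vary between lxml versions):

from lxml.html.diff import htmldiff

print(htmldiff('<p>old value</p>', '<p>new value</p>'))
# e.g. '<p><del>old</del> <ins>new</ins> value</p>'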
reading element from html - <td>
python
def html(self) -> str: """Get whole html representation of this node.""" if self._inner_element: return self.start_tag + self._inner_element.html + self.end_tag return super().html
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/tag.py#L148-L152
reading element from html - <td>
python
def textFromHTML(html): """ Cleans and parses text from the given HTML. """ cleaner = lxml.html.clean.Cleaner(scripts=True) cleaned = cleaner.clean_html(html) return lxml.html.fromstring(cleaned).text_content()
https://github.com/lvh/txeasymail/blob/7b845a5238b1371824854468646d54653a426f09/txeasymail/html.py#L27-L33
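A self-contained sketch with the imports the snippet relies on; note that recent lxml releases split the cleaner into the separate lxml_html_clean package:

import lxml.html
import lxml.html.clean  # on recent lxml, provided by the lxml_html_clean package

def text_from_html(html):
    # Strip scripts, then flatten the remaining markup to plain text.
    cleaner = lxml.html.clean.Cleaner(scripts=True)
    return lxml.html.fromstring(cleaner.clean_html(html)).text_content()

print(text_from_html('<table><tr><td>cell A</td><td>cell B</td></tr></table>'))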
reading element from html - <td>
python
def _read_etc(etc_file): """Return information about table of content for each erd. """ etc_type = dtype([('offset', '<i'), ('samplestamp', '<i'), ('sample_num', '<i'), ('sample_span', '<h'), ('unknown', '<h')]) with etc_file.open('rb') as f: f.seek(352) # end of header etc = fromfile(f, dtype=etc_type) return etc
https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/ioeeg/ktlx.py#L610-L623
reading element from html - <td>
python
def parse_html(html): """ Create an lxml.html.HtmlElement from a string with html. XXX: mostly copy-pasted from parsel.selector.create_root_node """ body = html.strip().replace('\x00', '').encode('utf8') or b'<html/>' parser = lxml.html.HTMLParser(recover=True, encoding='utf8') root = lxml.etree.fromstring(body, parser=parser) if root is None: root = lxml.etree.fromstring(b'<html/>', parser=parser) return root
https://github.com/TeamHG-Memex/html-text/blob/871d4dbe9f4f99e5f041110c60458adcaae6fab4/html_text/html_text.py#L45-L54
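A usage sketch, assuming html_text exposes parse_html at package level:

from html_text import parse_html  # assumed public export

root = parse_html('<table><tr><td>cell</td></tr></table>')
print(root.xpath('//td/text()'))  # ['cell']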
reading element from html - <td>
python
def _get_page_content(self, response): """Given a :class:`requests.Response`, return the :class:`xml.etree.Element` of the content `div`. :param response: a :class:`requests.Response` to parse :returns: the :class:`Element` of the first content `div` or `None` """ document = html5lib.parse( response.content, encoding=response.encoding, treebuilder='etree', namespaceHTMLElements=False ) # etree doesn't fully support XPath, so we can't just search # the attribute values for "content" divs = document.findall( ".//body//div[@class]") content_div = None for div in divs: if "content" in div.attrib['class'].split(' '): content_div = div break # The `Element` object is False-y when there are no subelements, # so compare to `None` if content_div is None: return None return content_div
https://github.com/kennydo/nyaalib/blob/ab787b7ba141ed53d2ad978bf13eb7b8bcdd4b0d/nyaalib/__init__.py#L38-L65
reading element from html - <td>
python
def render(self, doc): """Render all elements using specified document. @param doc: the writable document to render to. @type doc: document.IWritableDocument @return: a deferred fired with the specified document when the rendering is done. @rtype: defer.Deferred """ d = defer.succeed(doc) for element in self._elements: d.addCallback(element.render) return d
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/web/markup/base.py#L436-L447
reading element from html - <td>
python
def url_read_text(url, verbose=True): r""" Directly reads text data from url """ data = url_read(url, verbose) text = data.decode('utf8') return text
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L420-L426
reading element from html - <td>
python
def element(self, tag_path, test=None, **attributes): """given a tag in xpath form and optional attributes, find the element in self.root or return a new one.""" xpath = tag_path tests = ["@%s='%s'" % (k, attributes[k]) for k in attributes] if test is not None: tests.insert(0, test) if len(tests) > 0: xpath += "[%s]" % ' and '.join(tests) e = self.find(self.root, xpath) if e is None: tag = tag_path.split('/')[-1].split('[')[0] tagname = tag.split(':')[-1] if ':' in tag: nstag = tag.split(':')[0] tag = "{%s}%s" % (self.NS[nstag], tagname) e = etree.Element(tag, **attributes) return e
https://github.com/BlackEarth/bxml/blob/8fbea5dad7fadc7b854ddbeff6ecfb55aaceeb77/bxml/xml.py#L243-L259
reading element from html - <td>
python
def text(self): """Renders the contents inside this element, without html tags.""" texts = [] for child in self.childs: if isinstance(child, Tag): texts.append(child.text()) elif isinstance(child, Content): texts.append(child.render()) else: texts.append(child) return " ".join(texts)
https://github.com/Hrabal/TemPy/blob/7d229b73e2ce3ccbb8254deae05c1f758f626ed6/tempy/elements.py#L195-L205
reading element from html - <td>
python
def _visible(self, element): """Used to filter text elements that have invisible text on the page. """ if element.name in self._disallowed_names: return False elif re.match(u'<!--.*-->', six.text_type(element.extract())): return False return True
https://github.com/deanmalmgren/textract/blob/117ea191d93d80321e4bf01f23cc1ac54d69a075/textract/parsers/html_parser.py#L27-L34
reading element from html - <td>
python
def _read_template(template):
    """
    Read XSLT template.

    Args:
        template (str): Filename or XML string. A filename must not contain
            ``\\n``.

    Returns:
        obj: Required XML parsed with ``lxml.etree``.
    """
    template = _read_content_or_path(template)
    file_obj = StringIO.StringIO(template)

    return ET.parse(file_obj)
https://github.com/edeposit/marcxml2mods/blob/7b44157e859b4d2a372f79598ddbf77e43d39812/src/marcxml2mods/xslt_transformer.py#L130-L144
reading element from html - <td>
python
def meta_redirect(content):
    """
    Returns the redirect URL if there is an HTML refresh meta tag,
    returns None otherwise

    :param content: HTML content
    """
    decoded = content.decode("utf-8", errors="replace")

    try:
        soup = BeautifulSoup.BeautifulSoup(decoded)
    except Exception:
        return None
    result = soup.find("meta", attrs={"http-equiv": re.compile("^refresh$", re.I)})
    if result:
        try:
            wait, text = result["content"].split(";")
            text = text.strip()
            if text.lower().startswith("url="):
                url = text[4:]
                return url
        except Exception:
            # normal meta refresh tags that are not redirects
            # and carry no URL land here
            pass
    return None
https://github.com/iclab/centinel/blob/9a25dcf30c6a1db3c046f7ccb8ab8873e455c1a4/centinel/primitives/http.py#L17-L43
reading element from html - <td>
python
def convert_to_html(self, file, filename=None, file_content_type=None, model=None, **kwargs): """ Convert document to HTML. Converts a document to HTML. :param file file: The document to convert. :param str filename: The filename for file. :param str file_content_type: The content type of file. :param str model: The analysis model to be used by the service. For the **Element classification** and **Compare two documents** methods, the default is `contracts`. For the **Extract tables** method, the default is `tables`. These defaults apply to the standalone methods as well as to the methods' use in batch-processing requests. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if file is None: raise ValueError('file must be provided') headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('compare-comply', 'V1', 'convert_to_html') headers.update(sdk_headers) params = {'version': self.version, 'model': model} form_data = {} if not filename and hasattr(file, 'name'): filename = basename(file.name) if not filename: raise ValueError('filename must be provided') form_data['file'] = (filename, file, file_content_type or 'application/octet-stream') url = '/v1/html_conversion' response = self.request( method='POST', url=url, headers=headers, params=params, files=form_data, accept_json=True) return response
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L93-L144
reading element from html - <td>
python
def dehtml(text): '''Remove HTML tag in input text and format the texts accordingly. ''' try: parser = _DeHTMLParser() parser.feed(text) parser.close() return parser.text() except Exception as e: env.logger.warning(f'Failed to dehtml text: {e}') return text
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/utils.py#L598-L608
reading element from html - <td>
python
def read_csv_to_html_table(csvFile, hasHeader='N'):
    """ reads a CSV file and converts it to HTML """
    txt = '<table class="as-table as-table-zebra as-table-horizontal">'
    with open(csvFile, "r") as f:
        numRows = 1
        for row in f:
            # header cells only for the first row, and only when requested
            if hasHeader == 'Y' and numRows == 1:
                td_begin = '<TH>'
                td_end = '</TH>'
            else:
                td_begin = '<TD>'
                td_end = '</TD>'
            cols = row.split(',')
            numRows += 1
            txt += "<TR>"
            for col in cols:
                txt += td_begin
                try:
                    colString = col
                except Exception:
                    colString = '<font color=red>Error decoding column data</font>'
                txt += colString.strip('"')
                txt += td_end
            txt += "</TR>\n"
    txt += "</TABLE>\n\n"
    return txt
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/web_app/web_utils.py#L170-L202
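A usage sketch (the import path is assumed from the URL above):

import tempfile
from aikif.web_app.web_utils import read_csv_to_html_table  # assumed import path

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write('tag,count\ntd,3\n')
    path = f.name

print(read_csv_to_html_table(path, hasHeader='Y'))
# emits <TH> cells for the first row, <TD> cells for the rest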
reading element from html - <td>
python
def get_html(self) -> str: """Return complete report as a HTML string.""" data = self.getdoc() num_checks = 0 body_elements = [] # Order by section first... for section in data["sections"]: section_name = html.escape(section["key"][0]) section_stati_of_note = ( e for e in section["result"].elements() if e != "PASS" ) section_stati = "".join( EMOTICON[s] for s in sorted(section_stati_of_note, key=LOGLEVELS.index) ) body_elements.append(f"<h2>{section_name} {section_stati}</h2>") checks_by_id: Dict[str, List[Dict[str, str]]] = collections.defaultdict( list ) # ...and check second. for cluster in section["checks"]: if not isinstance(cluster, list): cluster = [cluster] num_checks += len(cluster) for check in cluster: checks_by_id[check["key"][1]].append(check) for check, results in checks_by_id.items(): check_name = html.escape(check) body_elements.append(f"<h3>{results[0]['description']}</h3>") body_elements.append(f"<div>Check ID: {check_name}</div>") for result in results: if "filename" in result: body_elements.append( html5_collapsible( f"{EMOTICON[result['result']]} <strong>{result['filename']}</strong>", self.html_for_check(result), ) ) else: body_elements.append( html5_collapsible( f"{EMOTICON[result['result']]} <strong>Family check</strong>", self.html_for_check(result), ) ) body_top = [ "<h1>Fontbakery Technical Report</h1>", "<div>If you think a check is flawed or have an idea for a check, please " f" file an issue at <a href='{ISSUE_URL}'>{ISSUE_URL}</a> and remember " "to include a pointer to the repo and branch you're checking.</div>", ] if num_checks: results_summary = [data["result"][k] for k in LOGLEVELS] body_top.append(summary_table(*results_summary, num_checks)) omitted = [l for l in LOGLEVELS if self.omit_loglevel(l)] if omitted: body_top.append( "<p><strong>Note:</strong>" " The following loglevels were omitted in this report:" f" {', '.join(omitted)}</p>" ) body_elements[0:0] = body_top return html5_document(body_elements)
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/reporters/html.py#L29-L96
reading element from html - <td>
python
def render_text(self, request, instance, context): """ Custom rendering function for HTML output """ render_template = self.get_render_template(request, instance, email_format='text') if not render_template: # If there is no TEXT variation, create it by removing the HTML tags. base_url = request.build_absolute_uri('/') html = self.render_html(request, instance, context) return html_to_text(html, base_url) instance_context = self.get_context(request, instance, email_format='text', parent_context=context) instance_context['email_format'] = 'text' text = self.render_to_string(request, render_template, instance_context) text = text + "" # Avoid being a safestring if self.render_replace_context_fields: text = replace_fields(text, instance_context, autoescape=False) return text
https://github.com/django-fluent/fluentcms-emailtemplates/blob/29f032dab9f60d05db852d2a1adcbd16e18017d1/fluentcms_emailtemplates/extensions.py#L87-L105
reading element from html - <td>
python
def read_links(self, file, encoding=None): '''Return an iterator of links found in the document. Args: file: A file object containing the document. encoding (str): The encoding of the document. Returns: iterable: str ''' return [item[0] for item in self.iter_text(file, encoding) if item[1]]
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/document/javascript.py#L68-L78