file_name int64 0..72.3k | vulnerable_line_numbers stringlengths 1..1.06k | dataset_type stringclasses 1 value | commit_hash stringlengths 40..44 | unique_id int64 0..271k | project stringclasses 10 values | target int64 0..1 | repo_url stringclasses 10 values | date stringlengths 25 | code stringlengths 0..20.4M | CVE stringlengths 13..43 | CWE stringclasses 50 values | commit_link stringlengths 73..97 | severity stringclasses 4 values | __index_level_0__ int64 0..124k |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
35,593 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 35,593 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/editing/markers/active_suggestion_marker_list_impl.h"
#include "third_party/blink/renderer/core/editing/markers/active_suggestion_marker.h"
#include "third_party/blink/renderer/core/editing/testing/editing_test_base.h"
namespace blink {
class ActiveSuggestionMarkerListImplTest : public EditingTestBase {
protected:
ActiveSuggestionMarkerListImplTest()
: marker_list_(new ActiveSuggestionMarkerListImpl()) {}
DocumentMarker* CreateMarker(unsigned start_offset, unsigned end_offset) {
return new ActiveSuggestionMarker(
start_offset, end_offset, Color::kTransparent,
ui::mojom::ImeTextSpanThickness::kThin, Color::kBlack);
}
Persistent<ActiveSuggestionMarkerListImpl> marker_list_;
};
// ActiveSuggestionMarkerListImpl shouldn't merge markers with touching
// endpoints
TEST_F(ActiveSuggestionMarkerListImplTest, Add) {
EXPECT_EQ(0u, marker_list_->GetMarkers().size());
marker_list_->Add(CreateMarker(0, 1));
marker_list_->Add(CreateMarker(1, 2));
EXPECT_EQ(2u, marker_list_->GetMarkers().size());
EXPECT_EQ(0u, marker_list_->GetMarkers()[0]->StartOffset());
EXPECT_EQ(1u, marker_list_->GetMarkers()[0]->EndOffset());
EXPECT_EQ(1u, marker_list_->GetMarkers()[1]->StartOffset());
EXPECT_EQ(2u, marker_list_->GetMarkers()[1]->EndOffset());
}
} // namespace blink
| null | null | null | null | 32,456 |
36,363 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 201,358 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 | /*
* RackMac vu-meter driver
*
* (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Released under the term of the GNU GPL v2.
*
* Support the CPU-meter LEDs of the Xserve G5
*
* TODO: Implement PWM to do variable intensity and provide userland
* interface for fun. Also, the CPU-meter could be made nicer by being
* a bit less "immediate", instead showing a more averaged load over
* time. Patches welcome :-)
*
*/
#undef DEBUG
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel_stat.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/dbdma.h>
#include <asm/macio.h>
#include <asm/keylargo.h>
/* Number of samples in a sample buffer */
#define SAMPLE_COUNT 256
/* CPU meter sampling rate in ms */
#define CPU_SAMPLING_RATE 250
struct rackmeter_dma {
struct dbdma_cmd cmd[4] ____cacheline_aligned;
u32 mark ____cacheline_aligned;
u32 buf1[SAMPLE_COUNT] ____cacheline_aligned;
u32 buf2[SAMPLE_COUNT] ____cacheline_aligned;
} ____cacheline_aligned;
struct rackmeter_cpu {
struct delayed_work sniffer;
struct rackmeter *rm;
u64 prev_wall;
u64 prev_idle;
int zero;
} ____cacheline_aligned;
struct rackmeter {
struct macio_dev *mdev;
unsigned int irq;
struct device_node *i2s;
u8 *ubuf;
struct dbdma_regs __iomem *dma_regs;
void __iomem *i2s_regs;
dma_addr_t dma_buf_p;
struct rackmeter_dma *dma_buf_v;
int stale_irq;
struct rackmeter_cpu cpu[2];
int paused;
struct mutex sem;
};
/* To be set as a tunable */
static int rackmeter_ignore_nice;
/* This GPIO is whacked by the OS X driver when initializing */
#define RACKMETER_MAGIC_GPIO 0x78
/* This is copied from cpufreq_ondemand, maybe we should put it in
* a common header somewhere
*/
static inline u64 get_cpu_idle_time(unsigned int cpu)
{
u64 retval;
retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
if (rackmeter_ignore_nice)
retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
return retval;
}
static void rackmeter_setup_i2s(struct rackmeter *rm)
{
struct macio_chip *macio = rm->mdev->bus->chip;
/* First whack magic GPIO */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, RACKMETER_MAGIC_GPIO, 5);
/* Call feature code to enable the sound channel and the proper
* clock sources
*/
pmac_call_feature(PMAC_FTR_SOUND_CHIP_ENABLE, rm->i2s, 0, 1);
/* Power i2s and stop i2s clock. We whack MacIO FCRs directly for now.
* This is a bit racy, thus we should add new platform functions to
* handle that. snd-aoa needs that too
*/
MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_ENABLE);
MACIO_BIC(KEYLARGO_FCR1, KL1_I2S0_CLK_ENABLE_BIT);
(void)MACIO_IN32(KEYLARGO_FCR1);
udelay(10);
/* Then setup i2s. For now, we use the same magic value that
* the OS X driver seems to use. We might want to play around
* with the clock divisors later
*/
out_le32(rm->i2s_regs + 0x10, 0x01fa0000);
(void)in_le32(rm->i2s_regs + 0x10);
udelay(10);
/* Fully restart i2s*/
MACIO_BIS(KEYLARGO_FCR1, KL1_I2S0_CELL_ENABLE |
KL1_I2S0_CLK_ENABLE_BIT);
(void)MACIO_IN32(KEYLARGO_FCR1);
udelay(10);
}
static void rackmeter_set_default_pattern(struct rackmeter *rm)
{
int i;
for (i = 0; i < 16; i++) {
if (i < 8)
rm->ubuf[i] = (i & 1) * 255;
else
rm->ubuf[i] = ((~i) & 1) * 255;
}
}
static void rackmeter_do_pause(struct rackmeter *rm, int pause)
{
struct rackmeter_dma *rdma = rm->dma_buf_v;
pr_debug("rackmeter: %s\n", pause ? "paused" : "started");
rm->paused = pause;
if (pause) {
DBDMA_DO_STOP(rm->dma_regs);
return;
}
memset(rdma->buf1, 0, sizeof(rdma->buf1));
memset(rdma->buf2, 0, sizeof(rdma->buf2));
rm->dma_buf_v->mark = 0;
mb();
out_le32(&rm->dma_regs->cmdptr_hi, 0);
out_le32(&rm->dma_regs->cmdptr, rm->dma_buf_p);
out_le32(&rm->dma_regs->control, (RUN << 16) | RUN);
}
static void rackmeter_setup_dbdma(struct rackmeter *rm)
{
struct rackmeter_dma *db = rm->dma_buf_v;
struct dbdma_cmd *cmd = db->cmd;
/* Make sure dbdma is reset */
DBDMA_DO_RESET(rm->dma_regs);
pr_debug("rackmeter: mark offset=0x%zx\n",
offsetof(struct rackmeter_dma, mark));
pr_debug("rackmeter: buf1 offset=0x%zx\n",
offsetof(struct rackmeter_dma, buf1));
pr_debug("rackmeter: buf2 offset=0x%zx\n",
offsetof(struct rackmeter_dma, buf2));
/* Prepare 4 dbdma commands for the 2 buffers */
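/*
 * Reader's sketch of the resulting command ring, inferred from the
 * code below:
 *   cmd[0] STORE_WORD  - writes a mark into dma_buf->mark, raises IRQ
 *   cmd[1] OUTPUT_MORE - streams buf1 (SAMPLE_COUNT words) to i2s
 *   cmd[2] STORE_WORD  - writes the other mark, raises IRQ
 *   cmd[3] OUTPUT_MORE - streams buf2, then BR_ALWAYS branches back
 *          to cmd[0], so the ring loops forever until stopped.
 * The 0x02000000 / 0x01000000 constants appear to be the values 2 and
 * 1 as seen through the engine's little-endian view, matching the
 * mark == 1 / mark == 2 test in rackmeter_irq().
 */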
memset(cmd, 0, 4 * sizeof(struct dbdma_cmd));
cmd->req_count = cpu_to_le16(4);
cmd->command = cpu_to_le16(STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
offsetof(struct rackmeter_dma, mark));
cmd->cmd_dep = cpu_to_le32(0x02000000);
cmd++;
cmd->req_count = cpu_to_le16(SAMPLE_COUNT * 4);
cmd->command = cpu_to_le16(OUTPUT_MORE);
cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
offsetof(struct rackmeter_dma, buf1));
cmd++;
cmd->req_count = cpu_to_le16(4);
cmd->command = cpu_to_le16(STORE_WORD | INTR_ALWAYS | KEY_SYSTEM);
cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
offsetof(struct rackmeter_dma, mark));
cmd->cmd_dep = cpu_to_le32(0x01000000);
cmd++;
cmd->req_count = cpu_to_le16(SAMPLE_COUNT * 4);
cmd->command = cpu_to_le16(OUTPUT_MORE | BR_ALWAYS);
cmd->phy_addr = cpu_to_le32(rm->dma_buf_p +
offsetof(struct rackmeter_dma, buf2));
cmd->cmd_dep = cpu_to_le32(rm->dma_buf_p);
rackmeter_do_pause(rm, 0);
}
static void rackmeter_do_timer(struct work_struct *work)
{
struct rackmeter_cpu *rcpu =
container_of(work, struct rackmeter_cpu, sniffer.work);
struct rackmeter *rm = rcpu->rm;
unsigned int cpu = smp_processor_id();
u64 cur_nsecs, total_idle_nsecs;
u64 total_nsecs, idle_nsecs;
int i, offset, load, cumm, pause;
cur_nsecs = jiffies64_to_nsecs(get_jiffies_64());
total_nsecs = cur_nsecs - rcpu->prev_wall;
rcpu->prev_wall = cur_nsecs;
total_idle_nsecs = get_cpu_idle_time(cpu);
idle_nsecs = total_idle_nsecs - rcpu->prev_idle;
idle_nsecs = min(idle_nsecs, total_nsecs);
rcpu->prev_idle = total_idle_nsecs;
/* We do a very dumb calculation to update the LEDs for now,
* we'll do better once we have actual PWM implemented
*/
load = div64_u64(9 * (total_nsecs - idle_nsecs), total_nsecs);
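/* Reader's note: with the 9x scaling above, load is 0..9, so a fully
 * loaded CPU lights all 8 LEDs in the loop below ((load > i) holds
 * for i = 0..7).
 */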
offset = cpu << 3;
cumm = 0;
for (i = 0; i < 8; i++) {
u8 ub = (load > i) ? 0xff : 0;
rm->ubuf[i + offset] = ub;
cumm |= ub;
}
rcpu->zero = (cumm == 0);
/* Now check if LEDs are all 0, we can stop DMA */
pause = (rm->cpu[0].zero && rm->cpu[1].zero);
if (pause != rm->paused) {
mutex_lock(&rm->sem);
pause = (rm->cpu[0].zero && rm->cpu[1].zero);
rackmeter_do_pause(rm, pause);
mutex_unlock(&rm->sem);
}
schedule_delayed_work_on(cpu, &rcpu->sniffer,
msecs_to_jiffies(CPU_SAMPLING_RATE));
}
static void rackmeter_init_cpu_sniffer(struct rackmeter *rm)
{
unsigned int cpu;
/* This driver works only with 1 or 2 CPUs numbered 0 and 1,
* but that's really all we have on Apple Xserve. It doesn't
* play very nicely with CPU hotplug either, but we don't do that
* on those machines yet
*/
rm->cpu[0].rm = rm;
INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer);
rm->cpu[1].rm = rm;
INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer);
for_each_online_cpu(cpu) {
struct rackmeter_cpu *rcpu;
if (cpu > 1)
continue;
rcpu = &rm->cpu[cpu];
rcpu->prev_idle = get_cpu_idle_time(cpu);
rcpu->prev_wall = jiffies64_to_nsecs(get_jiffies_64());
schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
msecs_to_jiffies(CPU_SAMPLING_RATE));
}
}
static void rackmeter_stop_cpu_sniffer(struct rackmeter *rm)
{
cancel_delayed_work_sync(&rm->cpu[0].sniffer);
cancel_delayed_work_sync(&rm->cpu[1].sniffer);
}
static int rackmeter_setup(struct rackmeter *rm)
{
pr_debug("rackmeter: setting up i2s..\n");
rackmeter_setup_i2s(rm);
pr_debug("rackmeter: setting up default pattern..\n");
rackmeter_set_default_pattern(rm);
pr_debug("rackmeter: setting up dbdma..\n");
rackmeter_setup_dbdma(rm);
pr_debug("rackmeter: start CPU measurements..\n");
rackmeter_init_cpu_sniffer(rm);
printk(KERN_INFO "RackMeter initialized\n");
return 0;
}
/* XXX FIXME: No PWM yet, this is 0/1 */
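/*
 * Reader's note: this builds a 16-bit on/off mask (LED 0 ends up in
 * bit 0) by thresholding each 8-bit sample at 0x80, then spreads the
 * mask across the 32-bit i2s word as (sample << 17) | (sample >> 15);
 * the exact bit placement presumably matches where the LED latch
 * samples the serial frame.
 */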
static u32 rackmeter_calc_sample(struct rackmeter *rm, unsigned int index)
{
int led;
u32 sample = 0;
for (led = 0; led < 16; led++) {
sample >>= 1;
sample |= ((rm->ubuf[led] >= 0x80) << 15);
}
return (sample << 17) | (sample >> 15);
}
static irqreturn_t rackmeter_irq(int irq, void *arg)
{
struct rackmeter *rm = arg;
struct rackmeter_dma *db = rm->dma_buf_v;
unsigned int mark, i;
u32 *buf;
/* Flush PCI buffers with an MMIO read. Maybe we could actually
* check the status one day ... in case things go wrong, though
* this never happened to me
*/
(void)in_le32(&rm->dma_regs->status);
/* Make sure the CPU gets us in order */
rmb();
/* Read mark */
mark = db->mark;
if (mark != 1 && mark != 2) {
printk(KERN_WARNING "rackmeter: Incorrect DMA mark 0x%08x\n",
mark);
/* We allow for 3 errors like that (stale DBDMA irqs) */
if (++rm->stale_irq > 3) {
printk(KERN_ERR "rackmeter: Too many errors,"
" stopping DMA\n");
DBDMA_DO_RESET(rm->dma_regs);
}
return IRQ_HANDLED;
}
/* Next buffer we need to fill is mark value */
buf = mark == 1 ? db->buf1 : db->buf2;
/* Fill it now. This routine converts the 8-bit-depth sample array
* into the PWM bitmap for each LED.
*/
for (i = 0; i < SAMPLE_COUNT; i++)
buf[i] = rackmeter_calc_sample(rm, i);
return IRQ_HANDLED;
}
static int rackmeter_probe(struct macio_dev* mdev,
const struct of_device_id *match)
{
struct device_node *i2s = NULL, *np = NULL;
struct rackmeter *rm = NULL;
struct resource ri2s, rdma;
int rc = -ENODEV;
pr_debug("rackmeter_probe()\n");
/* Get i2s-a node */
while ((i2s = of_get_next_child(mdev->ofdev.dev.of_node, i2s)) != NULL)
if (strcmp(i2s->name, "i2s-a") == 0)
break;
if (i2s == NULL) {
pr_debug(" i2s-a child not found\n");
goto bail;
}
/* Get lightshow or virtual sound */
while ((np = of_get_next_child(i2s, np)) != NULL) {
if (strcmp(np->name, "lightshow") == 0)
break;
if ((strcmp(np->name, "sound") == 0) &&
of_get_property(np, "virtual", NULL) != NULL)
break;
}
if (np == NULL) {
pr_debug(" lightshow or sound+virtual child not found\n");
goto bail;
}
/* Create and initialize our instance data */
rm = kzalloc(sizeof(struct rackmeter), GFP_KERNEL);
if (rm == NULL) {
printk(KERN_ERR "rackmeter: failed to allocate memory !\n");
rc = -ENOMEM;
goto bail_release;
}
rm->mdev = mdev;
rm->i2s = i2s;
mutex_init(&rm->sem);
dev_set_drvdata(&mdev->ofdev.dev, rm);
/* Check resources availability. We need at least resource 0 and 1 */
#if 0 /* Use that when i2s-a is finally an mdev per-se */
if (macio_resource_count(mdev) < 2 || macio_irq_count(mdev) < 2) {
printk(KERN_ERR
"rackmeter: found match but lacks resources: %s"
" (%d resources, %d interrupts)\n",
mdev->ofdev.node->full_name);
rc = -ENXIO;
goto bail_free;
}
if (macio_request_resources(mdev, "rackmeter")) {
printk(KERN_ERR
"rackmeter: failed to request resources: %s\n",
mdev->ofdev.node->full_name);
rc = -EBUSY;
goto bail_free;
}
rm->irq = macio_irq(mdev, 1);
#else
rm->irq = irq_of_parse_and_map(i2s, 1);
if (!rm->irq ||
of_address_to_resource(i2s, 0, &ri2s) ||
of_address_to_resource(i2s, 1, &rdma)) {
printk(KERN_ERR
"rackmeter: found match but lacks resources: %s",
mdev->ofdev.dev.of_node->full_name);
rc = -ENXIO;
goto bail_free;
}
#endif
pr_debug(" i2s @0x%08x\n", (unsigned int)ri2s.start);
pr_debug(" dma @0x%08x\n", (unsigned int)rdma.start);
pr_debug(" irq %d\n", rm->irq);
rm->ubuf = (u8 *)__get_free_page(GFP_KERNEL);
if (rm->ubuf == NULL) {
printk(KERN_ERR
"rackmeter: failed to allocate samples page !\n");
rc = -ENOMEM;
goto bail_release;
}
rm->dma_buf_v = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
sizeof(struct rackmeter_dma),
&rm->dma_buf_p, GFP_KERNEL);
if (rm->dma_buf_v == NULL) {
printk(KERN_ERR
"rackmeter: failed to allocate dma buffer !\n");
rc = -ENOMEM;
goto bail_free_samples;
}
#if 0
rm->i2s_regs = ioremap(macio_resource_start(mdev, 0), 0x1000);
#else
rm->i2s_regs = ioremap(ri2s.start, 0x1000);
#endif
if (rm->i2s_regs == NULL) {
printk(KERN_ERR
"rackmeter: failed to map i2s registers !\n");
rc = -ENXIO;
goto bail_free_dma;
}
#if 0
rm->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x100);
#else
rm->dma_regs = ioremap(rdma.start, 0x100);
#endif
if (rm->dma_regs == NULL) {
printk(KERN_ERR
"rackmeter: failed to map dma registers !\n");
rc = -ENXIO;
goto bail_unmap_i2s;
}
rc = rackmeter_setup(rm);
if (rc) {
printk(KERN_ERR
"rackmeter: failed to initialize !\n");
rc = -ENXIO;
goto bail_unmap_dma;
}
rc = request_irq(rm->irq, rackmeter_irq, 0, "rackmeter", rm);
if (rc != 0) {
printk(KERN_ERR
"rackmeter: failed to request interrupt !\n");
goto bail_stop_dma;
}
of_node_put(np);
return 0;
bail_stop_dma:
DBDMA_DO_RESET(rm->dma_regs);
bail_unmap_dma:
iounmap(rm->dma_regs);
bail_unmap_i2s:
iounmap(rm->i2s_regs);
bail_free_dma:
dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
sizeof(struct rackmeter_dma),
rm->dma_buf_v, rm->dma_buf_p);
bail_free_samples:
free_page((unsigned long)rm->ubuf);
bail_release:
#if 0
macio_release_resources(mdev);
#endif
bail_free:
kfree(rm);
bail:
of_node_put(i2s);
of_node_put(np);
dev_set_drvdata(&mdev->ofdev.dev, NULL);
return rc;
}
static int rackmeter_remove(struct macio_dev* mdev)
{
struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
/* Stop CPU sniffer timer & work queues */
rackmeter_stop_cpu_sniffer(rm);
/* Clear reference to private data */
dev_set_drvdata(&mdev->ofdev.dev, NULL);
/* Stop/reset dbdma */
DBDMA_DO_RESET(rm->dma_regs);
/* Release the IRQ */
free_irq(rm->irq, rm);
/* Unmap registers */
iounmap(rm->dma_regs);
iounmap(rm->i2s_regs);
/* Free DMA */
dma_free_coherent(&macio_get_pci_dev(mdev)->dev,
sizeof(struct rackmeter_dma),
rm->dma_buf_v, rm->dma_buf_p);
/* Free samples */
free_page((unsigned long)rm->ubuf);
#if 0
/* Release resources */
macio_release_resources(mdev);
#endif
/* Get rid of me */
kfree(rm);
return 0;
}
static int rackmeter_shutdown(struct macio_dev* mdev)
{
struct rackmeter *rm = dev_get_drvdata(&mdev->ofdev.dev);
if (rm == NULL)
return -ENODEV;
/* Stop CPU sniffer timer & work queues */
rackmeter_stop_cpu_sniffer(rm);
/* Stop/reset dbdma */
DBDMA_DO_RESET(rm->dma_regs);
return 0;
}
static struct of_device_id rackmeter_match[] = {
{ .name = "i2s" },
{ }
};
MODULE_DEVICE_TABLE(of, rackmeter_match);
static struct macio_driver rackmeter_driver = {
.driver = {
.name = "rackmeter",
.owner = THIS_MODULE,
.of_match_table = rackmeter_match,
},
.probe = rackmeter_probe,
.remove = rackmeter_remove,
.shutdown = rackmeter_shutdown,
};
static int __init rackmeter_init(void)
{
pr_debug("rackmeter_init()\n");
return macio_register_driver(&rackmeter_driver);
}
static void __exit rackmeter_exit(void)
{
pr_debug("rackmeter_exit()\n");
macio_unregister_driver(&rackmeter_driver);
}
module_init(rackmeter_init);
module_exit(rackmeter_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("RackMeter: Support vu-meter on XServe front panel");
| null | null | null | null | 109,705 |
54,464 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 54,464 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 | // Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_TEST_BASE_TESTING_BROWSER_PROCESS_PLATFORM_PART_H_
#define CHROME_TEST_BASE_TESTING_BROWSER_PROCESS_PLATFORM_PART_H_
#include "base/macros.h"
#include "chrome/browser/browser_process_platform_part.h"
// A TestingBrowserProcessPlatformPart is essentially a
// BrowserProcessPlatformPart except it doesn't have an OomPriorityManager on
// Chrome OS.
class TestingBrowserProcessPlatformPart : public BrowserProcessPlatformPart {
public:
TestingBrowserProcessPlatformPart();
~TestingBrowserProcessPlatformPart() override;
private:
DISALLOW_COPY_AND_ASSIGN(TestingBrowserProcessPlatformPart);
};
#endif // CHROME_TEST_BASE_TESTING_BROWSER_PROCESS_PLATFORM_PART_H_
| null | null | null | null | 51,327 |
858 | 13,28,29,30,31,32,33,34,35,36,37,98,99,100 | train_val | ea3d7209ca01da209cda6f0dea8be9cc4b7a933b | 165,853 | linux | 1 | https://github.com/torvalds/linux | 2015-12-07 14:28:03-05:00 | static long ext4_zero_range(struct file *file, loff_t offset,
loff_t len, int mode)
{
struct inode *inode = file_inode(file);
handle_t *handle = NULL;
unsigned int max_blocks;
loff_t new_size = 0;
int ret = 0;
int flags;
int credits;
int partial_begin, partial_end;
loff_t start, end;
ext4_lblk_t lblk;
struct address_space *mapping = inode->i_mapping;
unsigned int blkbits = inode->i_blkbits;
trace_ext4_zero_range(inode, offset, len, mode);
if (!S_ISREG(inode->i_mode))
return -EINVAL;
/* Call ext4_force_commit to flush all data in case of data=journal. */
if (ext4_should_journal_data(inode)) {
ret = ext4_force_commit(inode->i_sb);
if (ret)
return ret;
}
/*
* Write out all dirty pages to avoid race conditions
* Then release them.
*/
if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + len - 1);
if (ret)
return ret;
}
/*
* Round up offset. This is not fallocate, we need to zero out
* blocks, so convert interior block aligned part of the range to
* unwritten and possibly manually zero out unaligned parts of the
* range.
*/
start = round_up(offset, 1 << blkbits);
end = round_down((offset + len), 1 << blkbits);
if (start < offset || end > offset + len)
return -EINVAL;
partial_begin = offset & ((1 << blkbits) - 1);
partial_end = (offset + len) & ((1 << blkbits) - 1);
lblk = start >> blkbits;
max_blocks = (end >> blkbits);
if (max_blocks < lblk)
max_blocks = 0;
else
max_blocks -= lblk;
mutex_lock(&inode->i_mutex);
/*
* Indirect files do not support unwritten extents
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
ret = -EOPNOTSUPP;
goto out_mutex;
}
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
ret = inode_newsize_ok(inode, new_size);
if (ret)
goto out_mutex;
}
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
/* Preallocate the range including the unaligned edges */
if (partial_begin || partial_end) {
ret = ext4_alloc_file_blocks(file,
round_down(offset, 1 << blkbits) >> blkbits,
(round_up((offset + len), 1 << blkbits) -
round_down(offset, 1 << blkbits)) >> blkbits,
new_size, flags, mode);
if (ret)
goto out_mutex;
}
/* Zero range excluding the unaligned edges */
if (max_blocks > 0) {
flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
EXT4_EX_NOCACHE);
/* Now release the pages and zero block aligned part of pages */
truncate_pagecache_range(inode, start, end - 1);
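/*
 * Reader's note, per this row's CVE-2015-8839 / CWE-362 metadata:
 * in this pre-fix revision the truncation above is not serialized
 * against concurrent page faults on the same range; the fix commit
 * referenced by this row (ea3d7209ca01...) closes that race.
 */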
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
/* Wait all existing dio workers, newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags, mode);
if (ret)
goto out_dio;
}
if (!partial_begin && !partial_end)
goto out_dio;
/*
* In the worst case we have to write out two nonadjacent unwritten
* blocks and update the inode
*/
credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1;
if (ext4_should_journal_data(inode))
credits += 2;
handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(inode->i_sb, ret);
goto out_dio;
}
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
if (new_size) {
ext4_update_inode_size(inode, new_size);
} else {
/*
* Mark that we allocate beyond EOF so the subsequent truncate
* can proceed even if the new size is the same as i_size.
*/
if ((offset + len) > i_size_read(inode))
ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
}
ext4_mark_inode_dirty(handle, inode);
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
ext4_journal_stop(handle);
out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
}
| CVE-2015-8839 | CWE-362 | https://github.com/torvalds/linux/commit/ea3d7209ca01da209cda6f0dea8be9cc4b7a933b | Medium | 3,747 |
13,365 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 13,365 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 | // Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/ntp_tiles/most_visited_sites.h"
#include <algorithm>
#include <iterator>
#include <utility>
#include "base/bind.h"
#include "base/callback.h"
#include "base/feature_list.h"
#include "base/metrics/user_metrics.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "components/history/core/browser/top_sites.h"
#include "components/ntp_tiles/constants.h"
#include "components/ntp_tiles/icon_cacher.h"
#include "components/ntp_tiles/pref_names.h"
#include "components/ntp_tiles/switches.h"
#include "components/pref_registry/pref_registry_syncable.h"
#include "components/prefs/pref_service.h"
using history::TopSites;
using suggestions::ChromeSuggestion;
using suggestions::SuggestionsProfile;
using suggestions::SuggestionsService;
namespace ntp_tiles {
namespace {
const base::Feature kDisplaySuggestionsServiceTiles{
"DisplaySuggestionsServiceTiles", base::FEATURE_ENABLED_BY_DEFAULT};
// URL host prefixes. Hosts with these prefixes often redirect to each other, or
// have the same content.
// Popular sites are excluded if the user has visited a page whose host only
// differs by one of these prefixes. Even if the URL does not point to the exact
// same page, the user will have a personalized suggestion that is more likely
// to be of use for them.
// A cleaner way could be checking the history for redirects but this requires
// the page to be visited on the device.
const char* kKnownGenericPagePrefixes[] = {
"m.", "mobile.", // Common prefixes among popular sites.
"edition.", // Used among news papers (CNN, Independent, ...)
"www.", // Usually no-www domains redirect to www or vice-versa.
// The following entry MUST REMAIN LAST as it is a prefix of every string!
""}; // The no-www domain matches domains on the same level.
// Determine whether we need any tiles from PopularSites to fill up a grid of
// |num_tiles| tiles. If exploration sections are used, we need popular sites
// regardless of how many tiles we already have.
bool NeedPopularSites(const PrefService* prefs, int num_tiles) {
return base::FeatureList::IsEnabled(kSiteExplorationUiFeature) ||
prefs->GetInteger(prefs::kNumPersonalTiles) < num_tiles;
}
bool AreURLsEquivalent(const GURL& url1, const GURL& url2) {
return url1.host_piece() == url2.host_piece() &&
url1.path_piece() == url2.path_piece();
}
bool HasHomeTile(const NTPTilesVector& tiles) {
for (const auto& tile : tiles) {
if (tile.source == TileSource::HOMEPAGE) {
return true;
}
}
return false;
}
std::string StripFirstGenericPrefix(const std::string& host) {
for (const char* prefix : kKnownGenericPagePrefixes) {
if (base::StartsWith(host, prefix, base::CompareCase::INSENSITIVE_ASCII)) {
return std::string(
base::TrimString(host, prefix, base::TrimPositions::TRIM_LEADING));
}
}
return host;
}
bool ShouldShowPopularSites() {
return base::FeatureList::IsEnabled(kUsePopularSitesSuggestions);
}
} // namespace
MostVisitedSites::MostVisitedSites(
PrefService* prefs,
scoped_refptr<history::TopSites> top_sites,
SuggestionsService* suggestions,
std::unique_ptr<PopularSites> popular_sites,
std::unique_ptr<IconCacher> icon_cacher,
std::unique_ptr<MostVisitedSitesSupervisor> supervisor)
: prefs_(prefs),
top_sites_(top_sites),
suggestions_service_(suggestions),
popular_sites_(std::move(popular_sites)),
icon_cacher_(std::move(icon_cacher)),
supervisor_(std::move(supervisor)),
observer_(nullptr),
num_sites_(0u),
top_sites_observer_(this),
mv_source_(TileSource::TOP_SITES),
top_sites_weak_ptr_factory_(this) {
DCHECK(prefs_);
// top_sites_ can be null in tests.
// TODO(sfiera): have iOS use a dummy TopSites in its tests.
DCHECK(suggestions_service_);
if (supervisor_)
supervisor_->SetObserver(this);
}
MostVisitedSites::~MostVisitedSites() {
if (supervisor_)
supervisor_->SetObserver(nullptr);
}
// static
bool MostVisitedSites::IsHostOrMobilePageKnown(
const std::set<std::string>& hosts_to_skip,
const std::string& host) {
std::string no_prefix_host = StripFirstGenericPrefix(host);
for (const char* prefix : kKnownGenericPagePrefixes) {
if (hosts_to_skip.count(prefix + no_prefix_host) ||
hosts_to_skip.count(prefix + host)) {
return true;
}
}
return false;
}
bool MostVisitedSites::DoesSourceExist(TileSource source) const {
switch (source) {
case TileSource::TOP_SITES:
return top_sites_ != nullptr;
case TileSource::SUGGESTIONS_SERVICE:
return suggestions_service_ != nullptr;
case TileSource::POPULAR_BAKED_IN:
case TileSource::POPULAR:
return popular_sites_ != nullptr;
case TileSource::WHITELIST:
return supervisor_ != nullptr;
case TileSource::HOMEPAGE:
return home_page_client_ != nullptr;
}
NOTREACHED();
return false;
}
void MostVisitedSites::SetHomePageClient(
std::unique_ptr<HomePageClient> client) {
DCHECK(client);
home_page_client_ = std::move(client);
}
void MostVisitedSites::SetMostVisitedURLsObserver(Observer* observer,
size_t num_sites) {
DCHECK(observer);
observer_ = observer;
num_sites_ = num_sites;
// The order of this condition is important: ShouldShowPopularSites() should
// always be called last to keep metrics as relevant as possible.
if (popular_sites_ && NeedPopularSites(prefs_, num_sites_) &&
ShouldShowPopularSites()) {
popular_sites_->MaybeStartFetch(
false, base::Bind(&MostVisitedSites::OnPopularSitesDownloaded,
base::Unretained(this)));
}
if (top_sites_) {
// Register as TopSitesObserver so that we can update ourselves when the
// TopSites changes.
top_sites_observer_.Add(top_sites_.get());
}
suggestions_subscription_ = suggestions_service_->AddCallback(base::Bind(
&MostVisitedSites::OnSuggestionsProfileChanged, base::Unretained(this)));
// Immediately build the current set of tiles, getting suggestions from the
// SuggestionsService's cache or, if that is empty, sites from TopSites.
BuildCurrentTiles();
// Also start a request for fresh suggestions.
Refresh();
}
void MostVisitedSites::Refresh() {
if (top_sites_) {
// TopSites updates itself after a delay. To ensure up-to-date results,
// force an update now.
// TODO(mastiz): Is seems unnecessary to refresh TopSites if we will end up
// using server-side suggestions.
top_sites_->SyncWithHistory();
}
suggestions_service_->FetchSuggestionsData();
}
void MostVisitedSites::OnHomePageStateChanged() {
BuildCurrentTiles();
}
void MostVisitedSites::AddOrRemoveBlacklistedUrl(const GURL& url,
bool add_url) {
if (add_url) {
base::RecordAction(base::UserMetricsAction("Suggestions.Site.Removed"));
} else {
base::RecordAction(
base::UserMetricsAction("Suggestions.Site.RemovalUndone"));
}
if (top_sites_) {
// Always blacklist in the local TopSites.
if (add_url)
top_sites_->AddBlacklistedURL(url);
else
top_sites_->RemoveBlacklistedURL(url);
}
// Only blacklist in the server-side suggestions service if it's active.
if (mv_source_ == TileSource::SUGGESTIONS_SERVICE) {
if (add_url)
suggestions_service_->BlacklistURL(url);
else
suggestions_service_->UndoBlacklistURL(url);
}
}
void MostVisitedSites::ClearBlacklistedUrls() {
if (top_sites_) {
// Always update the blacklist in the local TopSites.
top_sites_->ClearBlacklistedURLs();
}
// Only update the server-side blacklist if it's active.
if (mv_source_ == TileSource::SUGGESTIONS_SERVICE) {
suggestions_service_->ClearBlacklist();
}
}
void MostVisitedSites::OnBlockedSitesChanged() {
BuildCurrentTiles();
}
// static
void MostVisitedSites::RegisterProfilePrefs(
user_prefs::PrefRegistrySyncable* registry) {
registry->RegisterIntegerPref(prefs::kNumPersonalTiles, 0);
}
void MostVisitedSites::InitiateTopSitesQuery() {
if (!top_sites_)
return;
if (top_sites_weak_ptr_factory_.HasWeakPtrs())
return; // Ongoing query.
top_sites_->GetMostVisitedURLs(
base::Bind(&MostVisitedSites::OnMostVisitedURLsAvailable,
top_sites_weak_ptr_factory_.GetWeakPtr()),
false);
}
base::FilePath MostVisitedSites::GetWhitelistLargeIconPath(const GURL& url) {
if (supervisor_) {
for (const auto& whitelist : supervisor_->GetWhitelists()) {
if (AreURLsEquivalent(whitelist.entry_point, url))
return whitelist.large_icon_path;
}
}
return base::FilePath();
}
void MostVisitedSites::OnMostVisitedURLsAvailable(
const history::MostVisitedURLList& visited_list) {
// Ignore the event if tiles are provided by the Suggestions Service, which
// takes precedence.
if (mv_source_ == TileSource::SUGGESTIONS_SERVICE) {
return;
}
NTPTilesVector tiles;
size_t num_tiles = std::min(visited_list.size(), num_sites_);
for (size_t i = 0; i < num_tiles; ++i) {
const history::MostVisitedURL& visited = visited_list[i];
if (visited.url.is_empty())
break; // This is the signal that there are no more real visited sites.
if (supervisor_ && supervisor_->IsBlocked(visited.url))
continue;
NTPTile tile;
tile.title = visited.title;
tile.url = visited.url;
tile.source = TileSource::TOP_SITES;
tile.whitelist_icon_path = GetWhitelistLargeIconPath(visited.url);
// MostVisitedURL.title is either the title or the URL which is treated
// exactly as the title. Differentiating here is not worth the overhead.
tile.title_source = TileTitleSource::TITLE_TAG;
// TODO(crbug.com/773278): Populate |data_generation_time| here in order to
// log UMA metrics of age.
tiles.push_back(std::move(tile));
}
mv_source_ = TileSource::TOP_SITES;
InitiateNotificationForNewTiles(std::move(tiles));
}
void MostVisitedSites::OnSuggestionsProfileChanged(
const SuggestionsProfile& suggestions_profile) {
if (suggestions_profile.suggestions_size() == 0 &&
mv_source_ != TileSource::SUGGESTIONS_SERVICE) {
return;
}
BuildCurrentTilesGivenSuggestionsProfile(suggestions_profile);
}
void MostVisitedSites::BuildCurrentTiles() {
BuildCurrentTilesGivenSuggestionsProfile(
suggestions_service_->GetSuggestionsDataFromCache().value_or(
SuggestionsProfile()));
}
void MostVisitedSites::BuildCurrentTilesGivenSuggestionsProfile(
const suggestions::SuggestionsProfile& suggestions_profile) {
size_t num_tiles = suggestions_profile.suggestions_size();
// With no server suggestions, fall back to local TopSites.
if (num_tiles == 0 ||
!base::FeatureList::IsEnabled(kDisplaySuggestionsServiceTiles)) {
mv_source_ = TileSource::TOP_SITES;
InitiateTopSitesQuery();
return;
}
if (num_sites_ < num_tiles)
num_tiles = num_sites_;
const base::Time profile_timestamp =
base::Time::UnixEpoch() +
base::TimeDelta::FromMicroseconds(suggestions_profile.timestamp());
NTPTilesVector tiles;
for (size_t i = 0; i < num_tiles; ++i) {
const ChromeSuggestion& suggestion_pb = suggestions_profile.suggestions(i);
GURL url(suggestion_pb.url());
if (supervisor_ && supervisor_->IsBlocked(url))
continue;
NTPTile tile;
tile.title = base::UTF8ToUTF16(suggestion_pb.title());
tile.url = url;
tile.source = TileSource::SUGGESTIONS_SERVICE;
// The title is an aggregation of multiple history entries of one site.
tile.title_source = TileTitleSource::INFERRED;
tile.whitelist_icon_path = GetWhitelistLargeIconPath(url);
tile.thumbnail_url = GURL(suggestion_pb.thumbnail());
tile.favicon_url = GURL(suggestion_pb.favicon_url());
tile.data_generation_time = profile_timestamp;
icon_cacher_->StartFetchMostLikely(
url, base::Bind(&MostVisitedSites::OnIconMadeAvailable,
base::Unretained(this), url));
tiles.push_back(std::move(tile));
}
mv_source_ = TileSource::SUGGESTIONS_SERVICE;
InitiateNotificationForNewTiles(std::move(tiles));
}
NTPTilesVector MostVisitedSites::CreateWhitelistEntryPointTiles(
const std::set<std::string>& used_hosts,
size_t num_actual_tiles) {
if (!supervisor_) {
return NTPTilesVector();
}
NTPTilesVector whitelist_tiles;
for (const auto& whitelist : supervisor_->GetWhitelists()) {
if (whitelist_tiles.size() + num_actual_tiles >= num_sites_)
break;
// Skip blacklisted sites.
if (top_sites_ && top_sites_->IsBlacklisted(whitelist.entry_point))
continue;
// Skip tiles already present.
if (used_hosts.find(whitelist.entry_point.host()) != used_hosts.end())
continue;
// Skip whitelist entry points that are manually blocked.
if (supervisor_->IsBlocked(whitelist.entry_point))
continue;
NTPTile tile;
tile.title = whitelist.title;
tile.url = whitelist.entry_point;
tile.source = TileSource::WHITELIST;
// User-set. Might be the title but we cannot be sure.
tile.title_source = TileTitleSource::UNKNOWN;
tile.whitelist_icon_path = whitelist.large_icon_path;
whitelist_tiles.push_back(std::move(tile));
}
return whitelist_tiles;
}
std::map<SectionType, NTPTilesVector>
MostVisitedSites::CreatePopularSitesSections(
const std::set<std::string>& used_hosts,
size_t num_actual_tiles) {
std::map<SectionType, NTPTilesVector> sections = {
std::make_pair(SectionType::PERSONALIZED, NTPTilesVector())};
// For child accounts popular sites tiles will not be added.
if (supervisor_ && supervisor_->IsChildProfile()) {
return sections;
}
if (!popular_sites_ || !ShouldShowPopularSites()) {
return sections;
}
const std::set<std::string> no_hosts;
for (const auto& section_type_and_sites : popular_sites()->sections()) {
SectionType type = section_type_and_sites.first;
const PopularSites::SitesVector& sites = section_type_and_sites.second;
if (type == SectionType::PERSONALIZED) {
size_t num_required_tiles = num_sites_ - num_actual_tiles;
sections[type] =
CreatePopularSitesTiles(/*popular_sites=*/sites,
/*hosts_to_skip=*/used_hosts,
/*num_max_tiles=*/num_required_tiles);
} else {
sections[type] = CreatePopularSitesTiles(/*popular_sites=*/sites,
/*hosts_to_skip=*/no_hosts,
/*num_max_tiles=*/num_sites_);
}
}
return sections;
}
NTPTilesVector MostVisitedSites::CreatePopularSitesTiles(
const PopularSites::SitesVector& sites_vector,
const std::set<std::string>& hosts_to_skip,
size_t num_max_tiles) {
// Collect non-blacklisted popular suggestions, skipping those already present
// in the personal suggestions.
NTPTilesVector popular_sites_tiles;
for (const PopularSites::Site& popular_site : sites_vector) {
if (popular_sites_tiles.size() >= num_max_tiles) {
break;
}
// Skip blacklisted sites.
if (top_sites_ && top_sites_->IsBlacklisted(popular_site.url))
continue;
const std::string& host = popular_site.url.host();
if (IsHostOrMobilePageKnown(hosts_to_skip, host)) {
continue;
}
NTPTile tile;
tile.title = popular_site.title;
tile.url = GURL(popular_site.url);
tile.title_source = popular_site.title_source;
tile.source = popular_site.baked_in ? TileSource::POPULAR_BAKED_IN
: TileSource::POPULAR;
popular_sites_tiles.push_back(std::move(tile));
base::Closure icon_available =
base::Bind(&MostVisitedSites::OnIconMadeAvailable,
base::Unretained(this), popular_site.url);
icon_cacher_->StartFetchPopularSites(popular_site, icon_available,
icon_available);
}
return popular_sites_tiles;
}
void MostVisitedSites::OnHomePageTitleDetermined(
NTPTilesVector tiles,
const base::Optional<base::string16>& title) {
if (!title.has_value()) {
return; // If there is no title, the most recent tile was already sent out.
}
SaveTilesAndNotify(InsertHomeTile(std::move(tiles), title.value()));
}
NTPTilesVector MostVisitedSites::InsertHomeTile(
NTPTilesVector tiles,
const base::string16& title) const {
DCHECK(home_page_client_);
DCHECK_GT(num_sites_, 0u);
const GURL& home_page_url = home_page_client_->GetHomePageUrl();
NTPTilesVector new_tiles;
bool home_tile_added = false;
for (auto& tile : tiles) {
if (new_tiles.size() >= num_sites_) {
break;
}
// TODO(fhorschig): Introduce a more sophisticated deduplication.
if (tile.url.host() == home_page_url.host()) {
tile.source = TileSource::HOMEPAGE;
home_tile_added = true;
}
new_tiles.push_back(std::move(tile));
}
// Add the home page tile if it is not already among the tiles,
// making room for it if the list is already full.
if (!home_tile_added) {
// Make room for the home page tile.
if (new_tiles.size() >= num_sites_) {
new_tiles.pop_back();
}
NTPTile home_tile;
home_tile.url = home_page_url;
home_tile.title = title;
home_tile.source = TileSource::HOMEPAGE;
home_tile.title_source = TileTitleSource::TITLE_TAG; // From history.
new_tiles.push_back(std::move(home_tile));
}
return new_tiles;
}
void MostVisitedSites::InitiateNotificationForNewTiles(
NTPTilesVector new_tiles) {
if (ShouldAddHomeTile() && !HasHomeTile(new_tiles)) {
home_page_client_->QueryHomePageTitle(
base::BindOnce(&MostVisitedSites::OnHomePageTitleDetermined,
base::Unretained(this), new_tiles));
// Don't wait for the homepage title from history but immediately serve a
// copy of new tiles.
new_tiles = InsertHomeTile(std::move(new_tiles), base::string16());
}
SaveTilesAndNotify(std::move(new_tiles));
}
void MostVisitedSites::SaveTilesAndNotify(NTPTilesVector personal_tiles) {
std::set<std::string> used_hosts;
size_t num_actual_tiles = 0u;
AddToHostsAndTotalCount(personal_tiles, &used_hosts, &num_actual_tiles);
NTPTilesVector whitelist_tiles =
CreateWhitelistEntryPointTiles(used_hosts, num_actual_tiles);
AddToHostsAndTotalCount(whitelist_tiles, &used_hosts, &num_actual_tiles);
std::map<SectionType, NTPTilesVector> sections =
CreatePopularSitesSections(used_hosts, num_actual_tiles);
AddToHostsAndTotalCount(sections[SectionType::PERSONALIZED], &used_hosts,
&num_actual_tiles);
NTPTilesVector new_tiles =
MergeTiles(std::move(personal_tiles), std::move(whitelist_tiles),
std::move(sections[SectionType::PERSONALIZED]));
if (current_tiles_.has_value() && (*current_tiles_ == new_tiles)) {
return;
}
current_tiles_.emplace(std::move(new_tiles));
DCHECK_EQ(num_actual_tiles, current_tiles_->size());
int num_personal_tiles = 0;
for (const auto& tile : *current_tiles_) {
if (tile.source != TileSource::POPULAR &&
tile.source != TileSource::POPULAR_BAKED_IN) {
num_personal_tiles++;
}
}
prefs_->SetInteger(prefs::kNumPersonalTiles, num_personal_tiles);
if (!observer_)
return;
sections[SectionType::PERSONALIZED] = *current_tiles_;
observer_->OnURLsAvailable(sections);
}
// static
NTPTilesVector MostVisitedSites::MergeTiles(NTPTilesVector personal_tiles,
NTPTilesVector whitelist_tiles,
NTPTilesVector popular_tiles) {
NTPTilesVector merged_tiles;
std::move(personal_tiles.begin(), personal_tiles.end(),
std::back_inserter(merged_tiles));
std::move(whitelist_tiles.begin(), whitelist_tiles.end(),
std::back_inserter(merged_tiles));
std::move(popular_tiles.begin(), popular_tiles.end(),
std::back_inserter(merged_tiles));
return merged_tiles;
}
void MostVisitedSites::OnPopularSitesDownloaded(bool success) {
if (!success) {
LOG(WARNING) << "Download of popular sites failed";
return;
}
for (const auto& section : popular_sites_->sections()) {
for (const PopularSites::Site& site : section.second) {
// Ignore callback; these icons will be seen on the *next* NTP.
icon_cacher_->StartFetchPopularSites(site, base::Closure(),
base::Closure());
}
}
}
void MostVisitedSites::OnIconMadeAvailable(const GURL& site_url) {
observer_->OnIconMadeAvailable(site_url);
}
void MostVisitedSites::TopSitesLoaded(TopSites* top_sites) {}
void MostVisitedSites::TopSitesChanged(TopSites* top_sites,
ChangeReason change_reason) {
if (mv_source_ == TileSource::TOP_SITES) {
// The displayed tiles are invalidated.
InitiateTopSitesQuery();
}
}
bool MostVisitedSites::ShouldAddHomeTile() const {
return num_sites_ > 0u &&
home_page_client_ && // No platform-specific implementation - no tile.
home_page_client_->IsHomePageEnabled() &&
!home_page_client_->IsNewTabPageUsedAsHomePage() &&
!home_page_client_->GetHomePageUrl().is_empty() &&
!(top_sites_ &&
top_sites_->IsBlacklisted(home_page_client_->GetHomePageUrl()));
}
void MostVisitedSites::AddToHostsAndTotalCount(const NTPTilesVector& new_tiles,
std::set<std::string>* hosts,
size_t* total_tile_count) const {
for (const auto& tile : new_tiles) {
hosts->insert(tile.url.host());
}
*total_tile_count += new_tiles.size();
DCHECK_LE(*total_tile_count, num_sites_);
}
} // namespace ntp_tiles
| null | null | null | null | 10,228 |
15,791 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 180,786 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2014, Imagination Technologies Ltd.
*
* EVA functions for generic code
*/
#ifndef _ASM_EVA_H
#define _ASM_EVA_H
#include <kernel-entry-init.h>
#ifdef __ASSEMBLY__
#ifdef CONFIG_EVA
/*
* EVA early init code
*
* Platforms must define their own 'platform_eva_init' macro in
* their kernel-entry-init.h header. This macro usually does the
* platform specific configuration of the segmentation registers,
* and it is normally called from assembly code.
*
*/
.macro eva_init
platform_eva_init
.endm
#else
.macro eva_init
.endm
#endif /* CONFIG_EVA */
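/*
 * Illustrative sketch only (not part of this header): a platform's
 * <kernel-entry-init.h> is expected to provide something along the
 * lines of
 *
 *	.macro	platform_eva_init
 *	 (program the platform's segmentation/EVA registers here)
 *	.endm
 *
 * which the generic eva_init wrapper above then expands.
 */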
#endif /* __ASSEMBLY__ */
#endif
| null | null | null | null | 89,133 |
8,397 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 8,397 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 | // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// See net/disk_cache/disk_cache.h for the public interface of the cache.
#ifndef NET_DISK_CACHE_BLOCKFILE_FILE_BLOCK_H_
#define NET_DISK_CACHE_BLOCKFILE_FILE_BLOCK_H_
#include <stddef.h>
namespace disk_cache {
// This interface exposes common functionality for a single block of data
// stored on a file-block, regardless of the real type or size of the block.
// Used to simplify loading / storing the block from disk.
class FileBlock {
public:
virtual ~FileBlock() {}
// Returns a pointer to the actual data.
virtual void* buffer() const = 0;
// Returns the size of the block.
virtual size_t size() const = 0;
// Returns the file offset of this block.
virtual int offset() const = 0;
};
} // namespace disk_cache
#endif // NET_DISK_CACHE_BLOCKFILE_FILE_BLOCK_H_
| null | null | null | null | 5,260 |
46,330 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 46,330 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ash/tray_action/tray_action.h"
#include <utility>
#include "ash/lock_screen_action/lock_screen_note_display_state_handler.h"
#include "ash/tray_action/tray_action_observer.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/logging.h"
#include "ui/events/devices/input_device_manager.h"
#include "ui/events/devices/stylus_state.h"
namespace ash {
TrayAction::TrayAction(BacklightsForcedOffSetter* backlights_forced_off_setter)
: backlights_forced_off_setter_(backlights_forced_off_setter),
stylus_observer_(this) {
stylus_observer_.Add(ui::InputDeviceManager::GetInstance());
}
TrayAction::~TrayAction() = default;
void TrayAction::AddObserver(TrayActionObserver* observer) {
observers_.AddObserver(observer);
}
void TrayAction::RemoveObserver(TrayActionObserver* observer) {
observers_.RemoveObserver(observer);
}
void TrayAction::BindRequest(mojom::TrayActionRequest request) {
bindings_.AddBinding(this, std::move(request));
}
mojom::TrayActionState TrayAction::GetLockScreenNoteState() const {
if (!tray_action_client_)
return mojom::TrayActionState::kNotAvailable;
return lock_screen_note_state_;
}
bool TrayAction::IsLockScreenNoteActive() const {
return GetLockScreenNoteState() == mojom::TrayActionState::kActive;
}
void TrayAction::SetClient(mojom::TrayActionClientPtr tray_action_client,
mojom::TrayActionState lock_screen_note_state) {
mojom::TrayActionState old_lock_screen_note_state = GetLockScreenNoteState();
tray_action_client_ = std::move(tray_action_client);
if (tray_action_client_) {
// Makes sure the state is updated in case the connection is lost.
tray_action_client_.set_connection_error_handler(
base::Bind(&TrayAction::SetClient, base::Unretained(this), nullptr,
mojom::TrayActionState::kNotAvailable));
lock_screen_note_state_ = lock_screen_note_state;
lock_screen_note_display_state_handler_ =
std::make_unique<LockScreenNoteDisplayStateHandler>(
backlights_forced_off_setter_);
} else {
lock_screen_note_display_state_handler_.reset();
}
// Setting the action handler can change the effective state; notify
// observers if that was the case.
if (GetLockScreenNoteState() != old_lock_screen_note_state)
NotifyLockScreenNoteStateChanged();
}
void TrayAction::UpdateLockScreenNoteState(mojom::TrayActionState state) {
if (state == lock_screen_note_state_)
return;
lock_screen_note_state_ = state;
if (lock_screen_note_state_ == mojom::TrayActionState::kNotAvailable)
lock_screen_note_display_state_handler_->Reset();
// If the client is not set, the effective state has not changed, so no need
// to notify observers of a state change.
if (tray_action_client_)
NotifyLockScreenNoteStateChanged();
}
void TrayAction::RequestNewLockScreenNote(mojom::LockScreenNoteOrigin origin) {
if (GetLockScreenNoteState() != mojom::TrayActionState::kAvailable)
return;
// An action state can be kAvailable only if |tray_action_client_| is set.
DCHECK(tray_action_client_);
tray_action_client_->RequestNewLockScreenNote(origin);
}
void TrayAction::CloseLockScreenNote(mojom::CloseLockScreenNoteReason reason) {
if (tray_action_client_)
tray_action_client_->CloseLockScreenNote(reason);
}
void TrayAction::OnStylusStateChanged(ui::StylusState state) {
if (state == ui::StylusState::REMOVED)
lock_screen_note_display_state_handler_->AttemptNoteLaunchForStylusEject();
}
void TrayAction::FlushMojoForTesting() {
if (tray_action_client_)
tray_action_client_.FlushForTesting();
}
void TrayAction::NotifyLockScreenNoteStateChanged() {
for (auto& observer : observers_)
observer.OnLockScreenNoteStateChanged(GetLockScreenNoteState());
}
} // namespace ash
| null | null | null | null | 43,193 |
66,713 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 66,713 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/download/notification/download_notification_manager.h"
#include "base/callback.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
#include "base/threading/thread_task_runner_handle.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/download/download_item_model.h"
#include "chrome/browser/download/notification/download_item_notification.h"
#include "chrome/common/chrome_switches.h"
#include "components/download/public/common/download_item.h"
#include "content/public/browser/download_item_utils.h"
#include "ui/base/resource/resource_bundle.h"
///////////////////////////////////////////////////////////////////////////////
// DownloadNotificationManager implementation:
///////////////////////////////////////////////////////////////////////////////
DownloadNotificationManager::DownloadNotificationManager(Profile* profile)
: main_profile_(profile) {}
DownloadNotificationManager::~DownloadNotificationManager() = default;
void DownloadNotificationManager::OnAllDownloadsRemoving(Profile* profile) {
manager_for_profile_.erase(profile);
}
void DownloadNotificationManager::OnNewDownloadReady(
download::DownloadItem* download) {
Profile* profile = Profile::FromBrowserContext(
content::DownloadItemUtils::GetBrowserContext(download));
if (manager_for_profile_.find(profile) == manager_for_profile_.end()) {
manager_for_profile_[profile] =
std::make_unique<DownloadNotificationManagerForProfile>(profile, this);
}
manager_for_profile_[profile]->OnNewDownloadReady(download);
}
DownloadNotificationManagerForProfile*
DownloadNotificationManager::GetForProfile(Profile* profile) const {
return manager_for_profile_.at(profile).get();
}
///////////////////////////////////////////////////////////////////////////////
// DownloadNotificationManagerForProfile implementation:
///////////////////////////////////////////////////////////////////////////////
DownloadNotificationManagerForProfile::DownloadNotificationManagerForProfile(
Profile* profile,
DownloadNotificationManager* parent_manager)
: profile_(profile), parent_manager_(parent_manager) {}
DownloadNotificationManagerForProfile::
~DownloadNotificationManagerForProfile() {
for (const auto& download : items_) {
download.first->RemoveObserver(this);
}
}
void DownloadNotificationManagerForProfile::OnDownloadUpdated(
download::DownloadItem* changed_download) {
DCHECK(items_.find(changed_download) != items_.end());
items_[changed_download]->OnDownloadUpdated(changed_download);
}
void DownloadNotificationManagerForProfile::OnDownloadOpened(
download::DownloadItem* changed_download) {
items_[changed_download]->OnDownloadUpdated(changed_download);
}
void DownloadNotificationManagerForProfile::OnDownloadRemoved(
download::DownloadItem* download) {
DCHECK(items_.find(download) != items_.end());
std::unique_ptr<DownloadItemNotification> item = std::move(items_[download]);
items_.erase(download);
download->RemoveObserver(this);
// Notify the notification object of the removal.
item->OnDownloadRemoved(download);
// This removal might have been initiated from the DownloadNotificationItem
// itself, so delay its deletion to let it finish any remaining cleanup.
base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, item.release());
if (items_.size() == 0 && parent_manager_)
parent_manager_->OnAllDownloadsRemoving(profile_);
// |this| is deleted.
}
void DownloadNotificationManagerForProfile::OnDownloadDestroyed(
download::DownloadItem* download) {
// The download is going away without having been removed, so perform the
// same cleanup as OnDownloadRemoved().
std::unique_ptr<DownloadItemNotification> item = std::move(items_[download]);
items_.erase(download);
item->OnDownloadRemoved(download);
// This removal might have been initiated from the DownloadNotificationItem
// itself, so delay its deletion to let it finish any remaining cleanup.
base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, item.release());
if (items_.size() == 0 && parent_manager_)
parent_manager_->OnAllDownloadsRemoving(profile_);
// |this| is deleted.
}
void DownloadNotificationManagerForProfile::OnNewDownloadReady(
download::DownloadItem* download) {
DCHECK_EQ(profile_,
Profile::FromBrowserContext(
content::DownloadItemUtils::GetBrowserContext(download)));
download->AddObserver(this);
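// Reader's note: a new download's notification will pop up, so quiet
// the popups of notifications whose downloads are still in progress
// before creating the new one.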
for (auto& item : items_) {
download::DownloadItem* download_item = item.first;
DownloadItemNotification* download_notification = item.second.get();
if (download_item->GetState() == download::DownloadItem::IN_PROGRESS)
download_notification->DisablePopup();
}
items_[download] = std::make_unique<DownloadItemNotification>(download);
}
DownloadItemNotification*
DownloadNotificationManagerForProfile::GetNotificationItemByGuid(
const std::string& guid) {
for (auto& item : items_) {
if (item.first->GetGuid() == guid)
return item.second.get();
}
NOTREACHED();
return nullptr;
}
| null | null | null | null | 63,576 |
41,779 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 41,779 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef TRACE_AFTER_DISPATCH_IMPL_H_
#define TRACE_AFTER_DISPATCH_IMPL_H_
#include "heap/stubs.h"
namespace blink {
class X : public GarbageCollected<X> {
public:
void Trace(Visitor*) {}
};
enum ClassTag {
BASE, DERIVED
};
class TraceAfterDispatchInlinedBase
: public GarbageCollected<TraceAfterDispatchInlinedBase> {
public:
explicit TraceAfterDispatchInlinedBase(ClassTag tag) : tag_(tag) {}
void Trace(Visitor*);
void TraceAfterDispatch(Visitor* visitor) { visitor->Trace(x_base_); }
private:
ClassTag tag_;
Member<X> x_base_;
};
class TraceAfterDispatchInlinedDerived : public TraceAfterDispatchInlinedBase {
public:
TraceAfterDispatchInlinedDerived() : TraceAfterDispatchInlinedBase(DERIVED) {}
void TraceAfterDispatch(Visitor* visitor) {
visitor->Trace(x_derived_);
TraceAfterDispatchInlinedBase::TraceAfterDispatch(visitor);
}
private:
Member<X> x_derived_;
};
class TraceAfterDispatchExternBase
: public GarbageCollected<TraceAfterDispatchExternBase> {
public:
explicit TraceAfterDispatchExternBase(ClassTag tag) : tag_(tag) {}
void Trace(Visitor* visitor);
void TraceAfterDispatch(Visitor* visitor);
private:
ClassTag tag_;
Member<X> x_base_;
};
class TraceAfterDispatchExternDerived : public TraceAfterDispatchExternBase {
public:
TraceAfterDispatchExternDerived() : TraceAfterDispatchExternBase(DERIVED) {}
void TraceAfterDispatch(Visitor* visitor);
private:
Member<X> x_derived_;
};
}
#endif // TRACE_AFTER_DISPATCH_IMPL_H_
| null | null | null | null | 38,642 |
813 | null | train_val | c536b6be1a72aefd632d5530106a67c516cb9f4b | 257,200 | openssl | 0 | https://github.com/openssl/openssl | 2016-09-22 23:12:38+01:00 | /*
* Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include <stdio.h>
#include <limits.h>
#include "internal/cryptlib.h"
#include <openssl/asn1.h>
static int asn1_get_length(const unsigned char **pp, int *inf, long *rl,
long max);
static void asn1_put_length(unsigned char **pp, int length);
static int _asn1_check_infinite_end(const unsigned char **p, long len)
{
/*
* If there is 0 or 1 byte left, the length check should pick things up
*/
if (len <= 0)
return (1);
else if ((len >= 2) && ((*p)[0] == 0) && ((*p)[1] == 0)) {
(*p) += 2;
return (1);
}
return (0);
}
int ASN1_check_infinite_end(unsigned char **p, long len)
{
return _asn1_check_infinite_end((const unsigned char **)p, len);
}
int ASN1_const_check_infinite_end(const unsigned char **p, long len)
{
return _asn1_check_infinite_end(p, len);
}
int ASN1_get_object(const unsigned char **pp, long *plength, int *ptag,
int *pclass, long omax)
{
int i, ret;
long l;
const unsigned char *p = *pp;
int tag, xclass, inf;
long max = omax;
if (!max)
goto err;
ret = (*p & V_ASN1_CONSTRUCTED);
xclass = (*p & V_ASN1_PRIVATE);
i = *p & V_ASN1_PRIMITIVE_TAG;
if (i == V_ASN1_PRIMITIVE_TAG) { /* high-tag */
p++;
if (--max == 0)
goto err;
l = 0;
while (*p & 0x80) {
l <<= 7L;
l |= *(p++) & 0x7f;
if (--max == 0)
goto err;
if (l > (INT_MAX >> 7L))
goto err;
}
l <<= 7L;
l |= *(p++) & 0x7f;
tag = (int)l;
if (--max == 0)
goto err;
} else {
tag = i;
p++;
if (--max == 0)
goto err;
}
*ptag = tag;
*pclass = xclass;
if (!asn1_get_length(&p, &inf, plength, max))
goto err;
if (inf && !(ret & V_ASN1_CONSTRUCTED))
goto err;
if (*plength > (omax - (p - *pp))) {
ASN1err(ASN1_F_ASN1_GET_OBJECT, ASN1_R_TOO_LONG);
/*
* Set this so that even if things are not long enough the values are
* set correctly
*/
ret |= 0x80;
}
*pp = p;
return (ret | inf);
err:
ASN1err(ASN1_F_ASN1_GET_OBJECT, ASN1_R_HEADER_TOO_LONG);
return (0x80);
}
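/*
 * Illustrative sketch (not part of the original source): decoding the
 * header of a DER SEQUENCE with ASN1_get_object(). The buffer contents
 * and variable names below are hypothetical.
 *
 *     const unsigned char der[] = { 0x30, 0x03, 0x02, 0x01, 0x05 };
 *     const unsigned char *p = der;
 *     long len;
 *     int tag, xclass;
 *     int ret = ASN1_get_object(&p, &len, &tag, &xclass, sizeof(der));
 *
 * On return, ret has V_ASN1_CONSTRUCTED set, tag == V_ASN1_SEQUENCE,
 * len == 3, and p points at the embedded INTEGER. A set 0x80 bit in the
 * return value signals a malformed or over-long header.
 */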
static int asn1_get_length(const unsigned char **pp, int *inf, long *rl,
long max)
{
const unsigned char *p = *pp;
unsigned long ret = 0;
unsigned long i;
if (max-- < 1)
return 0;
if (*p == 0x80) {
*inf = 1;
ret = 0;
p++;
} else {
*inf = 0;
i = *p & 0x7f;
if (*(p++) & 0x80) {
if (max < (long)i + 1)
return 0;
/* Skip leading zeroes */
while (i && *p == 0) {
p++;
i--;
}
if (i > sizeof(long))
return 0;
while (i-- > 0) {
ret <<= 8L;
ret |= *(p++);
}
} else
ret = i;
}
if (ret > LONG_MAX)
return 0;
*pp = p;
*rl = (long)ret;
return 1;
}
/*
 * constructed: nonzero emits a constructed encoding; a value of 2
 * requests an indefinite-length constructed encoding, in which case the
 * length octets are replaced by 0x80 and the caller must later emit an
 * end-of-contents marker (see ASN1_put_eoc()).
 */
void ASN1_put_object(unsigned char **pp, int constructed, int length, int tag,
int xclass)
{
unsigned char *p = *pp;
int i, ttag;
i = (constructed) ? V_ASN1_CONSTRUCTED : 0;
i |= (xclass & V_ASN1_PRIVATE);
if (tag < 31)
*(p++) = i | (tag & V_ASN1_PRIMITIVE_TAG);
else {
*(p++) = i | V_ASN1_PRIMITIVE_TAG;
for (i = 0, ttag = tag; ttag > 0; i++)
ttag >>= 7;
ttag = i;
while (i-- > 0) {
p[i] = tag & 0x7f;
if (i != (ttag - 1))
p[i] |= 0x80;
tag >>= 7;
}
p += ttag;
}
if (constructed == 2)
*(p++) = 0x80;
else
asn1_put_length(&p, length);
*pp = p;
}
int ASN1_put_eoc(unsigned char **pp)
{
unsigned char *p = *pp;
*p++ = 0;
*p++ = 0;
*pp = p;
return 2;
}
static void asn1_put_length(unsigned char **pp, int length)
{
unsigned char *p = *pp;
int i, l;
if (length <= 127)
*(p++) = (unsigned char)length;
else {
l = length;
for (i = 0; l > 0; i++)
l >>= 8;
*(p++) = i | 0x80;
l = i;
while (i-- > 0) {
p[i] = length & 0xff;
length >>= 8;
}
p += l;
}
*pp = p;
}
int ASN1_object_size(int constructed, int length, int tag)
{
int ret = 1;
if (length < 0)
return -1;
if (tag >= 31) {
while (tag > 0) {
tag >>= 7;
ret++;
}
}
if (constructed == 2) {
ret += 3;
} else {
ret++;
if (length > 127) {
int tmplen = length;
while (tmplen > 0) {
tmplen >>= 8;
ret++;
}
}
}
if (ret >= INT_MAX - length)
return -1;
return ret + length;
}
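/*
 * Worked example (illustrative, not part of the original source): the
 * DER size of a primitive OCTET STRING holding 200 content octets is
 *
 *     ASN1_object_size(0, 200, V_ASN1_OCTET_STRING) == 203
 *
 * i.e. one identifier octet, two length octets (0x81 0xC8, since
 * 200 > 127 requires the long form), plus the 200 content octets.
 */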
int ASN1_STRING_copy(ASN1_STRING *dst, const ASN1_STRING *str)
{
if (str == NULL)
return 0;
dst->type = str->type;
if (!ASN1_STRING_set(dst, str->data, str->length))
return 0;
/* Copy flags but preserve embed value */
dst->flags &= ASN1_STRING_FLAG_EMBED;
dst->flags |= str->flags & ~ASN1_STRING_FLAG_EMBED;
return 1;
}
ASN1_STRING *ASN1_STRING_dup(const ASN1_STRING *str)
{
ASN1_STRING *ret;
if (!str)
return NULL;
ret = ASN1_STRING_new();
if (ret == NULL)
return NULL;
if (!ASN1_STRING_copy(ret, str)) {
ASN1_STRING_free(ret);
return NULL;
}
return ret;
}
int ASN1_STRING_set(ASN1_STRING *str, const void *_data, int len)
{
unsigned char *c;
const char *data = _data;
if (len < 0) {
if (data == NULL)
return (0);
else
len = strlen(data);
}
if ((str->length <= len) || (str->data == NULL)) {
c = str->data;
str->data = OPENSSL_realloc(c, len + 1);
if (str->data == NULL) {
ASN1err(ASN1_F_ASN1_STRING_SET, ERR_R_MALLOC_FAILURE);
str->data = c;
return (0);
}
}
str->length = len;
if (data != NULL) {
memcpy(str->data, data, len);
/* an allowance for strings :-) */
str->data[len] = '\0';
}
return (1);
}
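/*
 * Illustrative sketch (not part of the original source): typical use of
 * ASN1_STRING_set() with a NUL-terminated C string, where len == -1
 * requests an implicit strlen().
 *
 *     ASN1_STRING *str = ASN1_STRING_type_new(V_ASN1_UTF8STRING);
 *     if (str != NULL && ASN1_STRING_set(str, "example", -1)) {
 *         (use str, then release it)
 *     }
 *     ASN1_STRING_free(str);
 */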
void ASN1_STRING_set0(ASN1_STRING *str, void *data, int len)
{
OPENSSL_free(str->data);
str->data = data;
str->length = len;
}
ASN1_STRING *ASN1_STRING_new(void)
{
return (ASN1_STRING_type_new(V_ASN1_OCTET_STRING));
}
ASN1_STRING *ASN1_STRING_type_new(int type)
{
ASN1_STRING *ret;
ret = OPENSSL_zalloc(sizeof(*ret));
if (ret == NULL) {
ASN1err(ASN1_F_ASN1_STRING_TYPE_NEW, ERR_R_MALLOC_FAILURE);
return (NULL);
}
ret->type = type;
return (ret);
}
void ASN1_STRING_free(ASN1_STRING *a)
{
if (a == NULL)
return;
if (!(a->flags & ASN1_STRING_FLAG_NDEF))
OPENSSL_free(a->data);
if (!(a->flags & ASN1_STRING_FLAG_EMBED))
OPENSSL_free(a);
}
void ASN1_STRING_clear_free(ASN1_STRING *a)
{
if (a == NULL)
return;
if (a->data && !(a->flags & ASN1_STRING_FLAG_NDEF))
OPENSSL_cleanse(a->data, a->length);
ASN1_STRING_free(a);
}
int ASN1_STRING_cmp(const ASN1_STRING *a, const ASN1_STRING *b)
{
int i;
i = (a->length - b->length);
if (i == 0) {
i = memcmp(a->data, b->data, a->length);
if (i == 0)
return (a->type - b->type);
else
return (i);
} else
return (i);
}
int ASN1_STRING_length(const ASN1_STRING *x)
{
return x->length;
}
void ASN1_STRING_length_set(ASN1_STRING *x, int len)
{
x->length = len;
}
int ASN1_STRING_type(const ASN1_STRING *x)
{
return x->type;
}
const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *x)
{
return x->data;
}
# if OPENSSL_API_COMPAT < 0x10100000L
unsigned char *ASN1_STRING_data(ASN1_STRING *x)
{
return x->data;
}
#endif
| null | null | null | null | 118,645 |
30,079 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 30,079 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 | /*
** 2016 September 10
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This file contains test code to delete an SQLite database and all
** of its associated files. Associated files include:
**
** * The journal file.
** * The wal file.
** * The SQLITE_ENABLE_8_3_NAMES version of the db, journal or wal files.
** * Files created by the test_multiplex.c module to extend any of the
** above.
*/
#ifndef SQLITE_OS_WIN
# include <unistd.h>
# include <errno.h>
#endif
#include <string.h>
#include <assert.h>
#include "sqlite3.h"
/* The following #defines are copied from test_multiplex.c */
#ifndef MX_CHUNK_NUMBER
# define MX_CHUNK_NUMBER 299
#endif
#ifndef SQLITE_MULTIPLEX_JOURNAL_8_3_OFFSET
# define SQLITE_MULTIPLEX_JOURNAL_8_3_OFFSET 400
#endif
#ifndef SQLITE_MULTIPLEX_WAL_8_3_OFFSET
# define SQLITE_MULTIPLEX_WAL_8_3_OFFSET 700
#endif
/*
** This routine is a copy of (most of) the code from SQLite function
** sqlite3FileSuffix3(). It modifies the filename in buffer z in the
** same way as SQLite does when in 8.3 filenames mode.
*/
static void sqlite3Delete83Name(char *z){
int i, sz;
sz = (int)strlen(z);
for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){}
if( z[i]=='.' && (sz>i+4) ) memmove(&z[i+1], &z[sz-3], 4);
}
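/*
** Illustrative example (not part of the original source). In 8.3
** filenames mode "test.db-journal" collapses to "test.nal": the final
** three characters of the name are moved to just after the last '.',
** matching what sqlite3FileSuffix3() does in SQLite itself.
**
**   char z[] = "test.db-journal";
**   sqlite3Delete83Name(z);
**
** z now holds "test.nal".
*/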
/*
** zFile is a filename. Assuming no error occurs, if this file exists,
** set *pbExists to true and unlink it. Or, if the file does not exist,
** set *pbExists to false before returning.
**
** If an error occurs, non-zero is returned. Or, if no error occurs, zero.
*/
static int sqlite3DeleteUnlinkIfExists(
sqlite3_vfs *pVfs,
const char *zFile,
int *pbExists
){
int rc = SQLITE_ERROR;
#if SQLITE_OS_WIN
if( pVfs ){
if( pbExists ) *pbExists = 1;
rc = pVfs->xDelete(pVfs, zFile, 0);
if( rc==SQLITE_IOERR_DELETE_NOENT ){
if( pbExists ) *pbExists = 0;
rc = SQLITE_OK;
}
}
#else
assert( pVfs==0 );
rc = access(zFile, F_OK);
if( rc ){
if( errno==ENOENT ){
if( pbExists ) *pbExists = 0;
rc = SQLITE_OK;
}
}else{
if( pbExists ) *pbExists = 1;
rc = unlink(zFile);
}
#endif
return rc;
}
/*
** Delete the database file identified by the string argument passed to this
** function. The string must contain a filename, not an SQLite URI.
*/
SQLITE_API int sqlite3_delete_database(
const char *zFile /* File to delete */
){
char *zBuf; /* Buffer to sprintf() filenames to */
int nBuf; /* Size of buffer in bytes */
int rc = 0; /* System error code */
int i; /* Iterate through azFmt[] and aMFile[] */
const char *azFmt[] = { "%s", "%s-journal", "%s-wal", "%s-shm" };
struct MFile {
const char *zFmt;
int iOffset;
int b83;
} aMFile[] = {
{ "%s%03d", 0, 0 },
{ "%s-journal%03d", 0, 0 },
{ "%s-wal%03d", 0, 0 },
{ "%s%03d", 0, 1 },
{ "%s-journal%03d", SQLITE_MULTIPLEX_JOURNAL_8_3_OFFSET, 1 },
{ "%s-wal%03d", SQLITE_MULTIPLEX_WAL_8_3_OFFSET, 1 },
};
#ifdef SQLITE_OS_WIN
sqlite3_vfs *pVfs = sqlite3_vfs_find("win32");
#else
sqlite3_vfs *pVfs = 0;
#endif
/* Allocate a buffer large enough for any of the files that need to be
** deleted. */
nBuf = (int)strlen(zFile) + 100;
zBuf = (char*)sqlite3_malloc(nBuf);
if( zBuf==0 ) return SQLITE_NOMEM;
/* Delete both the regular and 8.3 filenames versions of the database,
** journal, wal and shm files. */
for(i=0; rc==0 && i<sizeof(azFmt)/sizeof(azFmt[0]); i++){
sqlite3_snprintf(nBuf, zBuf, azFmt[i], zFile);
rc = sqlite3DeleteUnlinkIfExists(pVfs, zBuf, 0);
if( rc==0 && i!=0 ){
sqlite3Delete83Name(zBuf);
rc = sqlite3DeleteUnlinkIfExists(pVfs, zBuf, 0);
}
}
/* Delete any multiplexor files */
for(i=0; rc==0 && i<sizeof(aMFile)/sizeof(aMFile[0]); i++){
struct MFile *p = &aMFile[i];
int iChunk;
for(iChunk=1; iChunk<=MX_CHUNK_NUMBER; iChunk++){
int bExists;
sqlite3_snprintf(nBuf, zBuf, p->zFmt, zFile, iChunk+p->iOffset);
if( p->b83 ) sqlite3Delete83Name(zBuf);
rc = sqlite3DeleteUnlinkIfExists(pVfs, zBuf, &bExists);
if( bExists==0 || rc!=0 ) break;
}
}
sqlite3_free(zBuf);
return (rc ? SQLITE_ERROR : SQLITE_OK);
}
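/*
** Illustrative sketch (not part of the original source): deleting a
** database together with its journal, wal, shm and multiplexor files.
** The path is hypothetical and must be a plain filename, not a URI.
**
**   int rc = sqlite3_delete_database("/tmp/test.db");
**   if( rc!=SQLITE_OK ){
**     (handle SQLITE_ERROR or SQLITE_NOMEM here)
**   }
*/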
| null | null | null | null | 26,942 |
8,834 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 8,834 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 | // Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/ssl/ssl_client_session_cache.h"
#include "base/memory/ptr_util.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/test/simple_test_clock.h"
#include "base/time/time.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event_argument.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/boringssl/src/include/openssl/ssl.h"
using testing::Contains;
using testing::Eq;
using testing::Field;
using testing::ByRef;
namespace net {
namespace {
std::unique_ptr<base::SimpleTestClock> MakeTestClock() {
std::unique_ptr<base::SimpleTestClock> clock =
std::make_unique<base::SimpleTestClock>();
// SimpleTestClock starts at the null base::Time which converts to and from
// time_t confusingly.
clock->SetNow(base::Time::FromTimeT(1000000000));
return clock;
}
class SSLClientSessionCacheTest : public testing::Test {
public:
SSLClientSessionCacheTest() : ssl_ctx_(SSL_CTX_new(TLS_method())) {}
protected:
bssl::UniquePtr<SSL_SESSION> NewSSLSession(
uint16_t version = TLS1_2_VERSION) {
SSL_SESSION* session = SSL_SESSION_new(ssl_ctx_.get());
if (!SSL_SESSION_set_protocol_version(session, version))
return nullptr;
return bssl::UniquePtr<SSL_SESSION>(session);
}
bssl::UniquePtr<SSL_SESSION> MakeTestSession(base::Time now,
base::TimeDelta timeout) {
bssl::UniquePtr<SSL_SESSION> session = NewSSLSession();
SSL_SESSION_set_time(session.get(), now.ToTimeT());
SSL_SESSION_set_timeout(session.get(), timeout.InSeconds());
return session;
}
private:
bssl::UniquePtr<SSL_CTX> ssl_ctx_;
};
} // namespace
// Test basic insertion and lookup operations.
TEST_F(SSLClientSessionCacheTest, Basic) {
SSLClientSessionCache::Config config;
SSLClientSessionCache cache(config);
bssl::UniquePtr<SSL_SESSION> session1 = NewSSLSession();
bssl::UniquePtr<SSL_SESSION> session2 = NewSSLSession();
bssl::UniquePtr<SSL_SESSION> session3 = NewSSLSession();
EXPECT_EQ(1u, session1->references);
EXPECT_EQ(1u, session2->references);
EXPECT_EQ(1u, session3->references);
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(0u, cache.size());
cache.Insert("key1", session1.get());
EXPECT_EQ(session1.get(), cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(1u, cache.size());
cache.Insert("key2", session2.get());
EXPECT_EQ(session1.get(), cache.Lookup("key1").get());
EXPECT_EQ(session2.get(), cache.Lookup("key2").get());
EXPECT_EQ(2u, cache.size());
EXPECT_EQ(2u, session1->references);
EXPECT_EQ(2u, session2->references);
cache.Insert("key1", session3.get());
EXPECT_EQ(session3.get(), cache.Lookup("key1").get());
EXPECT_EQ(session2.get(), cache.Lookup("key2").get());
EXPECT_EQ(2u, cache.size());
EXPECT_EQ(1u, session1->references);
EXPECT_EQ(2u, session2->references);
EXPECT_EQ(2u, session3->references);
cache.Flush();
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(nullptr, cache.Lookup("key3").get());
EXPECT_EQ(0u, cache.size());
EXPECT_EQ(1u, session1->references);
EXPECT_EQ(1u, session2->references);
EXPECT_EQ(1u, session3->references);
}
// Test basic insertion and lookup operations with single-use sessions.
TEST_F(SSLClientSessionCacheTest, BasicSingleUse) {
SSLClientSessionCache::Config config;
SSLClientSessionCache cache(config);
bssl::UniquePtr<SSL_SESSION> session1 = NewSSLSession(TLS1_3_VERSION);
bssl::UniquePtr<SSL_SESSION> session2 = NewSSLSession(TLS1_3_VERSION);
bssl::UniquePtr<SSL_SESSION> session3 = NewSSLSession(TLS1_3_VERSION);
EXPECT_EQ(1u, session1->references);
EXPECT_EQ(1u, session2->references);
EXPECT_EQ(1u, session3->references);
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(0u, cache.size());
cache.Insert("key1", session1.get());
EXPECT_EQ(session1.get(), cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(0u, cache.size());
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
cache.Insert("key1", session1.get());
cache.Insert("key1", session1.get());
cache.Insert("key2", session2.get());
EXPECT_EQ(3u, session1->references);
EXPECT_EQ(2u, session2->references);
EXPECT_EQ(session1.get(), cache.Lookup("key1").get());
EXPECT_EQ(session2.get(), cache.Lookup("key2").get());
EXPECT_EQ(1u, cache.size());
EXPECT_EQ(session1.get(), cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(1u, session1->references);
EXPECT_EQ(1u, session2->references);
cache.Insert("key1", session1.get());
cache.Insert("key1", session3.get());
cache.Insert("key2", session2.get());
EXPECT_EQ(session3.get(), cache.Lookup("key1").get());
EXPECT_EQ(session1.get(), cache.Lookup("key1").get());
EXPECT_EQ(session2.get(), cache.Lookup("key2").get());
EXPECT_EQ(0u, cache.size());
EXPECT_EQ(1u, session1->references);
EXPECT_EQ(1u, session2->references);
EXPECT_EQ(1u, session3->references);
cache.Flush();
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(nullptr, cache.Lookup("key3").get());
EXPECT_EQ(0u, cache.size());
cache.Insert("key1", session1.get());
cache.Insert("key1", session2.get());
cache.Insert("key1", session3.get());
EXPECT_EQ(1u, session1->references);
EXPECT_EQ(2u, session2->references);
EXPECT_EQ(2u, session3->references);
EXPECT_EQ(session3.get(), cache.Lookup("key1").get());
EXPECT_EQ(session2.get(), cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
EXPECT_EQ(1u, session1->references);
EXPECT_EQ(1u, session2->references);
EXPECT_EQ(1u, session3->references);
}
// Test insertion and lookup operations with both single-use and reusable
// sessions.
TEST_F(SSLClientSessionCacheTest, MixedUse) {
SSLClientSessionCache::Config config;
SSLClientSessionCache cache(config);
bssl::UniquePtr<SSL_SESSION> session_single = NewSSLSession(TLS1_3_VERSION);
bssl::UniquePtr<SSL_SESSION> session_reuse = NewSSLSession(TLS1_2_VERSION);
EXPECT_EQ(1u, session_single->references);
EXPECT_EQ(1u, session_reuse->references);
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
EXPECT_EQ(0u, cache.size());
cache.Insert("key1", session_reuse.get());
EXPECT_EQ(session_reuse.get(), cache.Lookup("key1").get());
EXPECT_EQ(1u, cache.size());
cache.Insert("key1", session_single.get());
EXPECT_EQ(session_single.get(), cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
EXPECT_EQ(0u, cache.size());
EXPECT_EQ(1u, session_single->references);
EXPECT_EQ(1u, session_reuse->references);
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(0u, cache.size());
cache.Insert("key2", session_single.get());
cache.Insert("key2", session_single.get());
EXPECT_EQ(1u, cache.size());
EXPECT_EQ(session_single.get(), cache.Lookup("key2").get());
EXPECT_EQ(session_single.get(), cache.Lookup("key2").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(0u, cache.size());
cache.Insert("key2", session_single.get());
cache.Insert("key2", session_reuse.get());
EXPECT_EQ(session_reuse.get(), cache.Lookup("key2").get());
EXPECT_EQ(session_reuse.get(), cache.Lookup("key2").get());
EXPECT_EQ(1u, cache.size());
EXPECT_EQ(2u, session_single->references);
EXPECT_EQ(2u, session_reuse->references);
}
// Test that a session may be inserted at two different keys. This should never
// be necessary, but the API doesn't prohibit it.
TEST_F(SSLClientSessionCacheTest, DoubleInsert) {
SSLClientSessionCache::Config config;
SSLClientSessionCache cache(config);
bssl::UniquePtr<SSL_SESSION> session = NewSSLSession();
EXPECT_EQ(1u, session->references);
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(0u, cache.size());
cache.Insert("key1", session.get());
EXPECT_EQ(session.get(), cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(1u, cache.size());
EXPECT_EQ(2u, session->references);
cache.Insert("key2", session.get());
EXPECT_EQ(session.get(), cache.Lookup("key1").get());
EXPECT_EQ(session.get(), cache.Lookup("key2").get());
EXPECT_EQ(2u, cache.size());
EXPECT_EQ(3u, session->references);
cache.Flush();
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
EXPECT_EQ(nullptr, cache.Lookup("key2").get());
EXPECT_EQ(0u, cache.size());
EXPECT_EQ(1u, session->references);
}
// Tests that the session cache's size is correctly bounded.
TEST_F(SSLClientSessionCacheTest, MaxEntries) {
SSLClientSessionCache::Config config;
config.max_entries = 3;
SSLClientSessionCache cache(config);
bssl::UniquePtr<SSL_SESSION> session1 = NewSSLSession();
bssl::UniquePtr<SSL_SESSION> session2 = NewSSLSession();
bssl::UniquePtr<SSL_SESSION> session3 = NewSSLSession();
bssl::UniquePtr<SSL_SESSION> session4 = NewSSLSession();
// Insert three entries.
cache.Insert("key1", session1.get());
cache.Insert("key2", session2.get());
cache.Insert("key3", session3.get());
EXPECT_EQ(session1.get(), cache.Lookup("key1").get());
EXPECT_EQ(session2.get(), cache.Lookup("key2").get());
EXPECT_EQ(session3.get(), cache.Lookup("key3").get());
EXPECT_EQ(3u, cache.size());
// On insertion of a fourth, the first is removed.
cache.Insert("key4", session4.get());
EXPECT_EQ(nullptr, cache.Lookup("key1").get());
EXPECT_EQ(session4.get(), cache.Lookup("key4").get());
EXPECT_EQ(session3.get(), cache.Lookup("key3").get());
EXPECT_EQ(session2.get(), cache.Lookup("key2").get());
EXPECT_EQ(3u, cache.size());
  // Despite being the newest entry, the next to be removed is session4, as it
  // was accessed least recently.
cache.Insert("key1", session1.get());
EXPECT_EQ(session1.get(), cache.Lookup("key1").get());
EXPECT_EQ(session2.get(), cache.Lookup("key2").get());
EXPECT_EQ(session3.get(), cache.Lookup("key3").get());
EXPECT_EQ(nullptr, cache.Lookup("key4").get());
EXPECT_EQ(3u, cache.size());
}
// Tests that session expiration works properly.
TEST_F(SSLClientSessionCacheTest, Expiration) {
const size_t kNumEntries = 20;
const size_t kExpirationCheckCount = 10;
const base::TimeDelta kTimeout = base::TimeDelta::FromSeconds(1000);
SSLClientSessionCache::Config config;
config.expiration_check_count = kExpirationCheckCount;
SSLClientSessionCache cache(config);
std::unique_ptr<base::SimpleTestClock> clock = MakeTestClock();
cache.SetClockForTesting(clock.get());
// Add |kNumEntries - 1| entries.
for (size_t i = 0; i < kNumEntries - 1; i++) {
bssl::UniquePtr<SSL_SESSION> session =
MakeTestSession(clock->Now(), kTimeout);
cache.Insert(base::NumberToString(i), session.get());
}
EXPECT_EQ(kNumEntries - 1, cache.size());
// Expire all the previous entries and insert one more entry.
clock->Advance(kTimeout * 2);
bssl::UniquePtr<SSL_SESSION> session =
MakeTestSession(clock->Now(), kTimeout);
cache.Insert("key", session.get());
// All entries are still in the cache.
EXPECT_EQ(kNumEntries, cache.size());
  // Perform one fewer lookup than is needed to trigger the expiration check.
  // This should not expire any sessions.
for (size_t i = 0; i < kExpirationCheckCount - 1; i++)
cache.Lookup("key");
// All entries are still in the cache.
EXPECT_EQ(kNumEntries, cache.size());
// Perform one more lookup. This will expire all sessions but the last one.
cache.Lookup("key");
EXPECT_EQ(1u, cache.size());
EXPECT_EQ(session.get(), cache.Lookup("key").get());
for (size_t i = 0; i < kNumEntries - 1; i++) {
SCOPED_TRACE(i);
EXPECT_EQ(nullptr, cache.Lookup(base::NumberToString(i)));
}
}
// Tests that Lookup performs an expiration check before returning a cached
// session.
TEST_F(SSLClientSessionCacheTest, LookupExpirationCheck) {
// kExpirationCheckCount is set to a suitably large number so the automated
// pruning never triggers.
const size_t kExpirationCheckCount = 1000;
const base::TimeDelta kTimeout = base::TimeDelta::FromSeconds(1000);
SSLClientSessionCache::Config config;
config.expiration_check_count = kExpirationCheckCount;
SSLClientSessionCache cache(config);
std::unique_ptr<base::SimpleTestClock> clock = MakeTestClock();
cache.SetClockForTesting(clock.get());
// Insert an entry into the session cache.
bssl::UniquePtr<SSL_SESSION> session =
MakeTestSession(clock->Now(), kTimeout);
cache.Insert("key", session.get());
EXPECT_EQ(session.get(), cache.Lookup("key").get());
EXPECT_EQ(1u, cache.size());
// Expire the session.
clock->Advance(kTimeout * 2);
// The entry has not been removed yet.
EXPECT_EQ(1u, cache.size());
// But it will not be returned on lookup and gets pruned at that point.
EXPECT_EQ(nullptr, cache.Lookup("key").get());
EXPECT_EQ(0u, cache.size());
// Re-inserting a session does not refresh the lifetime. The expiration
// information in the session is used.
cache.Insert("key", session.get());
EXPECT_EQ(nullptr, cache.Lookup("key").get());
EXPECT_EQ(0u, cache.size());
// Re-insert a fresh copy of the session.
session = MakeTestSession(clock->Now(), kTimeout);
cache.Insert("key", session.get());
EXPECT_EQ(session.get(), cache.Lookup("key").get());
EXPECT_EQ(1u, cache.size());
  // Sessions are also treated as expired if the clock rewinds.
clock->Advance(base::TimeDelta::FromSeconds(-1));
EXPECT_EQ(nullptr, cache.Lookup("key").get());
EXPECT_EQ(0u, cache.size());
}
// Test that SSL cache is flushed on low memory notifications
TEST_F(SSLClientSessionCacheTest, TestFlushOnMemoryNotifications) {
// kExpirationCheckCount is set to a suitably large number so the automated
// pruning never triggers.
const size_t kExpirationCheckCount = 1000;
const base::TimeDelta kTimeout = base::TimeDelta::FromSeconds(1000);
SSLClientSessionCache::Config config;
config.expiration_check_count = kExpirationCheckCount;
SSLClientSessionCache cache(config);
std::unique_ptr<base::SimpleTestClock> clock = MakeTestClock();
cache.SetClockForTesting(clock.get());
// Insert an entry into the session cache.
bssl::UniquePtr<SSL_SESSION> session1 =
MakeTestSession(clock->Now(), kTimeout);
cache.Insert("key1", session1.get());
EXPECT_EQ(session1.get(), cache.Lookup("key1").get());
EXPECT_EQ(1u, cache.size());
// Expire the session.
clock->Advance(kTimeout * 2);
// Add one more session.
bssl::UniquePtr<SSL_SESSION> session2 =
MakeTestSession(clock->Now(), kTimeout);
cache.Insert("key2", session2.get());
EXPECT_EQ(2u, cache.size());
// Fire a notification that will flush expired sessions.
base::MemoryPressureListener::NotifyMemoryPressure(
base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE);
base::RunLoop().RunUntilIdle();
  // The expired session should have been flushed from the cache.
  // Lookup() returns nullptr when no cache entry is found.
EXPECT_FALSE(cache.Lookup("key1"));
EXPECT_TRUE(cache.Lookup("key2"));
EXPECT_EQ(1u, cache.size());
  // Fire a notification that will flush everything.
base::MemoryPressureListener::NotifyMemoryPressure(
base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
base::RunLoop().RunUntilIdle();
EXPECT_EQ(0u, cache.size());
}
class SSLClientSessionCacheMemoryDumpTest
: public SSLClientSessionCacheTest,
public testing::WithParamInterface<
base::trace_event::MemoryDumpLevelOfDetail> {};
INSTANTIATE_TEST_CASE_P(
/* no prefix */,
SSLClientSessionCacheMemoryDumpTest,
::testing::Values(base::trace_event::MemoryDumpLevelOfDetail::DETAILED,
base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND));
// Basic test for dumping memory stats.
TEST_P(SSLClientSessionCacheMemoryDumpTest, TestDumpMemoryStats) {
SSLClientSessionCache::Config config;
SSLClientSessionCache cache(config);
bssl::UniquePtr<SSL_SESSION> session1 = NewSSLSession();
bssl::UniquePtr<SSL_SESSION> session2 = NewSSLSession();
bssl::UniquePtr<SSL_SESSION> session3 = NewSSLSession();
// Insert three entries.
cache.Insert("key1", session1.get());
cache.Insert("key2", session2.get());
cache.Insert("key3", session3.get());
EXPECT_EQ(session1.get(), cache.Lookup("key1").get());
EXPECT_EQ(session2.get(), cache.Lookup("key2").get());
EXPECT_EQ(session3.get(), cache.Lookup("key3").get());
EXPECT_EQ(3u, cache.size());
base::trace_event::MemoryDumpArgs dump_args = {GetParam()};
std::unique_ptr<base::trace_event::ProcessMemoryDump> process_memory_dump(
new base::trace_event::ProcessMemoryDump(nullptr, dump_args));
cache.DumpMemoryStats(process_memory_dump.get());
using Entry = base::trace_event::MemoryAllocatorDump::Entry;
const base::trace_event::MemoryAllocatorDump* dump =
process_memory_dump->GetAllocatorDump("net/ssl_session_cache");
ASSERT_NE(nullptr, dump);
const std::vector<Entry>& entries = dump->entries();
EXPECT_THAT(entries, Contains(Field(&Entry::name, Eq("cert_count"))));
EXPECT_THAT(entries, Contains(Field(&Entry::name, Eq("cert_size"))));
EXPECT_THAT(entries,
Contains(Field(&Entry::name, Eq("undeduped_cert_size"))));
EXPECT_THAT(entries,
Contains(Field(&Entry::name, Eq("undeduped_cert_count"))));
EXPECT_THAT(
entries,
Contains(Field(&Entry::name,
Eq(base::trace_event::MemoryAllocatorDump::kNameSize))));
}
} // namespace net
| null | null | null | null | 5,697 |
64,653 | null | train_val | 796a0e014bc3985709c0a35538d606ef1da31e1b | 64,653 | Chrome | 0 | https://github.com/chromium/chromium | 2018-04-07 23:43:03+00:00 | // Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_COCOA_INFOBARS_BEFORE_TRANSLATE_INFOBAR_CONTROLLER_H_
#define CHROME_BROWSER_UI_COCOA_INFOBARS_BEFORE_TRANSLATE_INFOBAR_CONTROLLER_H_
#import "chrome/browser/ui/cocoa/infobars/translate_infobar_base.h"
@interface BeforeTranslateInfobarController : TranslateInfoBarControllerBase {
base::scoped_nsobject<NSButton> alwaysTranslateButton_;
base::scoped_nsobject<NSButton> neverTranslateButton_;
}
// Creates and initializes the alwaysTranslate and neverTranslate buttons.
- (void)initializeExtraControls;
@end
@interface BeforeTranslateInfobarController (TestingAPI)
- (NSButton*)alwaysTranslateButton;
- (NSButton*)neverTranslateButton;
@end
#endif // CHROME_BROWSER_UI_COCOA_INFOBARS_BEFORE_TRANSLATE_INFOBAR_CONTROLLER_H_
| null | null | null | null | 61,516 |
29,676 | null | train_val | e4311ee51d1e2676001b2d8fcefd92bdd79aad85 | 194,671 | linux | 0 | https://github.com/torvalds/linux | 2017-05-12 08:32:58+10:00 | /*
* drivers/usb/core/usb.c
*
* (C) Copyright Linus Torvalds 1999
* (C) Copyright Johannes Erdfelt 1999-2001
* (C) Copyright Andreas Gal 1999
* (C) Copyright Gregory P. Smith 1999
* (C) Copyright Deti Fliegl 1999 (new USB architecture)
* (C) Copyright Randy Dunlap 2000
* (C) Copyright David Brownell 2000-2004
* (C) Copyright Yggdrasil Computing, Inc. 2000
* (usb_device_id matching changes by Adam J. Richter)
* (C) Copyright Greg Kroah-Hartman 2002-2003
*
* Released under the GPLv2 only.
* SPDX-License-Identifier: GPL-2.0
*
* NOTE! This is not actually a driver at all, rather this is
* just a collection of helper routines that implement the
* generic USB things that the real drivers can use..
*
* Think of this as a "USB library" rather than anything else.
* It should be considered a slave, with no callbacks. Callbacks
* are evil.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/interrupt.h> /* for in_interrupt() */
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/usb/of.h>
#include <asm/io.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include "usb.h"
const char *usbcore_name = "usbcore";
static bool nousb; /* Disable USB when built into kernel image */
module_param(nousb, bool, 0444);
/*
* for external read access to <nousb>
*/
int usb_disabled(void)
{
return nousb;
}
EXPORT_SYMBOL_GPL(usb_disabled);
#ifdef CONFIG_PM
static int usb_autosuspend_delay = 2; /* Default delay value,
* in seconds */
module_param_named(autosuspend, usb_autosuspend_delay, int, 0644);
MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
#else
#define usb_autosuspend_delay 0
#endif
/**
* usb_find_alt_setting() - Given a configuration, find the alternate setting
* for the given interface.
* @config: the configuration to search (not necessarily the current config).
* @iface_num: interface number to search in
* @alt_num: alternate interface setting number to search for.
*
* Search the configuration's interface cache for the given alt setting.
*
* Return: The alternate setting, if found. %NULL otherwise.
*/
struct usb_host_interface *usb_find_alt_setting(
struct usb_host_config *config,
unsigned int iface_num,
unsigned int alt_num)
{
struct usb_interface_cache *intf_cache = NULL;
int i;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
== iface_num) {
intf_cache = config->intf_cache[i];
break;
}
}
if (!intf_cache)
return NULL;
for (i = 0; i < intf_cache->num_altsetting; i++)
if (intf_cache->altsetting[i].desc.bAlternateSetting == alt_num)
return &intf_cache->altsetting[i];
printk(KERN_DEBUG "Did not find alt setting %u for intf %u, "
"config %u\n", alt_num, iface_num,
config->desc.bConfigurationValue);
return NULL;
}
EXPORT_SYMBOL_GPL(usb_find_alt_setting);
/**
* usb_ifnum_to_if - get the interface object with a given interface number
* @dev: the device whose current configuration is considered
* @ifnum: the desired interface
*
* This walks the device descriptor for the currently active configuration
* to find the interface object with the particular interface number.
*
* Note that configuration descriptors are not required to assign interface
* numbers sequentially, so that it would be incorrect to assume that
* the first interface in that descriptor corresponds to interface zero.
* This routine helps device drivers avoid such mistakes.
* However, you should make sure that you do the right thing with any
* alternate settings available for this interfaces.
*
* Don't call this function unless you are bound to one of the interfaces
* on this device or you have locked the device!
*
* Return: A pointer to the interface that has @ifnum as interface number,
* if found. %NULL otherwise.
*/
struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
unsigned ifnum)
{
struct usb_host_config *config = dev->actconfig;
int i;
if (!config)
return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++)
if (config->interface[i]->altsetting[0]
.desc.bInterfaceNumber == ifnum)
return config->interface[i];
return NULL;
}
EXPORT_SYMBOL_GPL(usb_ifnum_to_if);
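/*
 * Illustrative sketch, not part of the original source: a driver's
 * probe() fetching a companion data interface by number. The interface
 * number 1 is hypothetical.
 *
 *	struct usb_device *udev = interface_to_usbdev(intf);
 *	struct usb_interface *data_intf = usb_ifnum_to_if(udev, 1);
 *
 *	if (!data_intf)
 *		return -ENODEV;
 */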
/**
* usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number.
* @intf: the interface containing the altsetting in question
* @altnum: the desired alternate setting number
*
* This searches the altsetting array of the specified interface for
* an entry with the correct bAlternateSetting value.
*
* Note that altsettings need not be stored sequentially by number, so
* it would be incorrect to assume that the first altsetting entry in
* the array corresponds to altsetting zero. This routine helps device
* drivers avoid such mistakes.
*
* Don't call this function unless you are bound to the intf interface
* or you have locked the device!
*
* Return: A pointer to the entry of the altsetting array of @intf that
* has @altnum as the alternate setting number. %NULL if not found.
*/
struct usb_host_interface *usb_altnum_to_altsetting(
const struct usb_interface *intf,
unsigned int altnum)
{
int i;
for (i = 0; i < intf->num_altsetting; i++) {
if (intf->altsetting[i].desc.bAlternateSetting == altnum)
return &intf->altsetting[i];
}
return NULL;
}
EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting);
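/*
 * Illustrative sketch, not part of the original source: verifying that
 * an interface offers alternate setting 1 before switching to it. The
 * altsetting number is hypothetical.
 *
 *	if (!usb_altnum_to_altsetting(intf, 1))
 *		return -ENODEV;
 *	retval = usb_set_interface(udev,
 *			intf->altsetting[0].desc.bInterfaceNumber, 1);
 */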
struct find_interface_arg {
int minor;
struct device_driver *drv;
};
static int __find_interface(struct device *dev, void *data)
{
struct find_interface_arg *arg = data;
struct usb_interface *intf;
if (!is_usb_interface(dev))
return 0;
if (dev->driver != arg->drv)
return 0;
intf = to_usb_interface(dev);
return intf->minor == arg->minor;
}
/**
* usb_find_interface - find usb_interface pointer for driver and device
* @drv: the driver whose current configuration is considered
* @minor: the minor number of the desired device
*
* This walks the bus device list and returns a pointer to the interface
 * with the matching minor and driver. Note that this only works for
 * devices that share the USB major number.
 *
 * Return: A pointer to the interface with the matching @minor and driver.
*/
struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
{
struct find_interface_arg argb;
struct device *dev;
argb.minor = minor;
argb.drv = &drv->drvwrap.driver;
dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface);
/* Drop reference count from bus_find_device */
put_device(dev);
return dev ? to_usb_interface(dev) : NULL;
}
EXPORT_SYMBOL_GPL(usb_find_interface);
struct each_dev_arg {
void *data;
int (*fn)(struct usb_device *, void *);
};
static int __each_dev(struct device *dev, void *data)
{
struct each_dev_arg *arg = (struct each_dev_arg *)data;
	/* There are struct usb_interface entries on the same bus; filter them out */
if (!is_usb_device(dev))
return 0;
return arg->fn(to_usb_device(dev), arg->data);
}
/**
* usb_for_each_dev - iterate over all USB devices in the system
* @data: data pointer that will be handed to the callback function
* @fn: callback function to be called for each USB device
*
* Iterate over all USB devices and call @fn for each, passing it @data. If it
* returns anything other than 0, we break the iteration prematurely and return
* that value.
*/
int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *))
{
struct each_dev_arg arg = {data, fn};
return bus_for_each_dev(&usb_bus_type, NULL, &arg, __each_dev);
}
EXPORT_SYMBOL_GPL(usb_for_each_dev);
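/*
 * Illustrative sketch, not part of the original source: counting every
 * USB device in the system. The callback name count_one is hypothetical;
 * a nonzero return from the callback would stop the iteration early.
 *
 *	static int count_one(struct usb_device *udev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	usb_for_each_dev(&count, count_one);
 */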
/**
* usb_release_dev - free a usb device structure when all users of it are finished.
* @dev: device that's been disconnected
*
* Will be called only by the device core when all users of this usb device are
* done.
*/
static void usb_release_dev(struct device *dev)
{
struct usb_device *udev;
struct usb_hcd *hcd;
udev = to_usb_device(dev);
hcd = bus_to_hcd(udev->bus);
usb_destroy_configuration(udev);
usb_release_bos_descriptor(udev);
usb_put_hcd(hcd);
kfree(udev->product);
kfree(udev->manufacturer);
kfree(udev->serial);
kfree(udev);
}
static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct usb_device *usb_dev;
usb_dev = to_usb_device(dev);
if (add_uevent_var(env, "BUSNUM=%03d", usb_dev->bus->busnum))
return -ENOMEM;
if (add_uevent_var(env, "DEVNUM=%03d", usb_dev->devnum))
return -ENOMEM;
return 0;
}
#ifdef CONFIG_PM
/* USB device Power-Management thunks.
* There's no need to distinguish here between quiescing a USB device
* and powering it down; the generic_suspend() routine takes care of
* it by skipping the usb_port_suspend() call for a quiesce. And for
* USB interfaces there's no difference at all.
*/
static int usb_dev_prepare(struct device *dev)
{
return 0; /* Implement eventually? */
}
static void usb_dev_complete(struct device *dev)
{
/* Currently used only for rebinding interfaces */
usb_resume_complete(dev);
}
static int usb_dev_suspend(struct device *dev)
{
return usb_suspend(dev, PMSG_SUSPEND);
}
static int usb_dev_resume(struct device *dev)
{
return usb_resume(dev, PMSG_RESUME);
}
static int usb_dev_freeze(struct device *dev)
{
return usb_suspend(dev, PMSG_FREEZE);
}
static int usb_dev_thaw(struct device *dev)
{
return usb_resume(dev, PMSG_THAW);
}
static int usb_dev_poweroff(struct device *dev)
{
return usb_suspend(dev, PMSG_HIBERNATE);
}
static int usb_dev_restore(struct device *dev)
{
return usb_resume(dev, PMSG_RESTORE);
}
static const struct dev_pm_ops usb_device_pm_ops = {
.prepare = usb_dev_prepare,
.complete = usb_dev_complete,
.suspend = usb_dev_suspend,
.resume = usb_dev_resume,
.freeze = usb_dev_freeze,
.thaw = usb_dev_thaw,
.poweroff = usb_dev_poweroff,
.restore = usb_dev_restore,
.runtime_suspend = usb_runtime_suspend,
.runtime_resume = usb_runtime_resume,
.runtime_idle = usb_runtime_idle,
};
#endif /* CONFIG_PM */
static char *usb_devnode(struct device *dev,
umode_t *mode, kuid_t *uid, kgid_t *gid)
{
struct usb_device *usb_dev;
usb_dev = to_usb_device(dev);
return kasprintf(GFP_KERNEL, "bus/usb/%03d/%03d",
usb_dev->bus->busnum, usb_dev->devnum);
}
struct device_type usb_device_type = {
.name = "usb_device",
.release = usb_release_dev,
.uevent = usb_dev_uevent,
.devnode = usb_devnode,
#ifdef CONFIG_PM
.pm = &usb_device_pm_ops,
#endif
};
/* Returns 1 if @bus is WUSB, 0 otherwise */
static unsigned usb_bus_is_wusb(struct usb_bus *bus)
{
struct usb_hcd *hcd = bus_to_hcd(bus);
return hcd->wireless;
}
/**
* usb_alloc_dev - usb device constructor (usbcore-internal)
* @parent: hub to which device is connected; null to allocate a root hub
* @bus: bus used to access the device
* @port1: one-based index of port; ignored for root hubs
* Context: !in_interrupt()
*
* Only hub drivers (including virtual root hub drivers for host
* controllers) should ever call this.
*
* This call may not be used in a non-sleeping context.
*
* Return: On success, a pointer to the allocated usb device. %NULL on
* failure.
*/
struct usb_device *usb_alloc_dev(struct usb_device *parent,
struct usb_bus *bus, unsigned port1)
{
struct usb_device *dev;
struct usb_hcd *usb_hcd = bus_to_hcd(bus);
unsigned root_hub = 0;
unsigned raw_port = port1;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return NULL;
if (!usb_get_hcd(usb_hcd)) {
kfree(dev);
return NULL;
}
/* Root hubs aren't true devices, so don't allocate HCD resources */
if (usb_hcd->driver->alloc_dev && parent &&
!usb_hcd->driver->alloc_dev(usb_hcd, dev)) {
usb_put_hcd(bus_to_hcd(bus));
kfree(dev);
return NULL;
}
device_initialize(&dev->dev);
dev->dev.bus = &usb_bus_type;
dev->dev.type = &usb_device_type;
dev->dev.groups = usb_device_groups;
/*
* Fake a dma_mask/offset for the USB device:
* We cannot really use the dma-mapping API (dma_alloc_* and
* dma_map_*) for USB devices but instead need to use
* usb_alloc_coherent and pass data in 'urb's, but some subsystems
* manually look into the mask/offset pair to determine whether
* they need bounce buffers.
* Note: calling dma_set_mask() on a USB device would set the
* mask for the entire HCD, so don't do that.
*/
dev->dev.dma_mask = bus->controller->dma_mask;
dev->dev.dma_pfn_offset = bus->controller->dma_pfn_offset;
set_dev_node(&dev->dev, dev_to_node(bus->controller));
dev->state = USB_STATE_ATTACHED;
dev->lpm_disable_count = 1;
atomic_set(&dev->urbnum, 0);
INIT_LIST_HEAD(&dev->ep0.urb_list);
dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT;
/* ep0 maxpacket comes later, from device descriptor */
usb_enable_endpoint(dev, &dev->ep0, false);
dev->can_submit = 1;
/* Save readable and stable topology id, distinguishing devices
* by location for diagnostics, tools, driver model, etc. The
* string is a path along hub ports, from the root. Each device's
* dev->devpath will be stable until USB is re-cabled, and hubs
* are often labeled with these port numbers. The name isn't
* as stable: bus->busnum changes easily from modprobe order,
* cardbus or pci hotplugging, and so on.
*/
if (unlikely(!parent)) {
dev->devpath[0] = '0';
dev->route = 0;
dev->dev.parent = bus->controller;
dev_set_name(&dev->dev, "usb%d", bus->busnum);
root_hub = 1;
} else {
/* match any labeling on the hubs; it's one-based */
if (parent->devpath[0] == '0') {
snprintf(dev->devpath, sizeof dev->devpath,
"%d", port1);
/* Root ports are not counted in route string */
dev->route = 0;
} else {
snprintf(dev->devpath, sizeof dev->devpath,
"%s.%d", parent->devpath, port1);
/* Route string assumes hubs have less than 16 ports */
if (port1 < 15)
dev->route = parent->route +
(port1 << ((parent->level - 1)*4));
else
dev->route = parent->route +
(15 << ((parent->level - 1)*4));
}
dev->dev.parent = &parent->dev;
dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath);
if (!parent->parent) {
/* device under root hub's port */
raw_port = usb_hcd_find_raw_port_number(usb_hcd,
port1);
}
dev->dev.of_node = usb_of_get_child_node(parent->dev.of_node,
raw_port);
/* hub driver sets up TT records */
}
dev->portnum = port1;
dev->bus = bus;
dev->parent = parent;
INIT_LIST_HEAD(&dev->filelist);
#ifdef CONFIG_PM
pm_runtime_set_autosuspend_delay(&dev->dev,
usb_autosuspend_delay * 1000);
dev->connect_time = jiffies;
dev->active_duration = -jiffies;
#endif
if (root_hub) /* Root hub always ok [and always wired] */
dev->authorized = 1;
else {
dev->authorized = !!HCD_DEV_AUTHORIZED(usb_hcd);
dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0;
}
return dev;
}
EXPORT_SYMBOL_GPL(usb_alloc_dev);
/**
* usb_get_dev - increments the reference count of the usb device structure
* @dev: the device being referenced
*
* Each live reference to a device should be refcounted.
*
* Drivers for USB interfaces should normally record such references in
* their probe() methods, when they bind to an interface, and release
* them by calling usb_put_dev(), in their disconnect() methods.
*
* Return: A pointer to the device with the incremented reference counter.
*/
struct usb_device *usb_get_dev(struct usb_device *dev)
{
if (dev)
get_device(&dev->dev);
return dev;
}
EXPORT_SYMBOL_GPL(usb_get_dev);
/**
* usb_put_dev - release a use of the usb device structure
* @dev: device that's been disconnected
*
* Must be called when a user of a device is finished with it. When the last
* user of the device calls this function, the memory of the device is freed.
*/
void usb_put_dev(struct usb_device *dev)
{
if (dev)
put_device(&dev->dev);
}
EXPORT_SYMBOL_GPL(usb_put_dev);
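/*
 * Illustrative sketch, not part of the original source: the usual
 * pairing of usb_get_dev() and usb_put_dev() in an interface driver,
 * where "priv" stands for a hypothetical driver-private structure.
 *
 *	probe:		priv->udev = usb_get_dev(interface_to_usbdev(intf));
 *	disconnect:	usb_put_dev(priv->udev);
 */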
/**
* usb_get_intf - increments the reference count of the usb interface structure
* @intf: the interface being referenced
*
 * Each live reference to an interface must be refcounted.
*
* Drivers for USB interfaces should normally record such references in
* their probe() methods, when they bind to an interface, and release
* them by calling usb_put_intf(), in their disconnect() methods.
*
* Return: A pointer to the interface with the incremented reference counter.
*/
struct usb_interface *usb_get_intf(struct usb_interface *intf)
{
if (intf)
get_device(&intf->dev);
return intf;
}
EXPORT_SYMBOL_GPL(usb_get_intf);
/**
* usb_put_intf - release a use of the usb interface structure
* @intf: interface that's been decremented
*
* Must be called when a user of an interface is finished with it. When the
* last user of the interface calls this function, the memory of the interface
* is freed.
*/
void usb_put_intf(struct usb_interface *intf)
{
if (intf)
put_device(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_put_intf);
/* USB device locking
*
* USB devices and interfaces are locked using the semaphore in their
* embedded struct device. The hub driver guarantees that whenever a
* device is connected or disconnected, drivers are called with the
* USB device locked as well as their particular interface.
*
* Complications arise when several devices are to be locked at the same
* time. Only hub-aware drivers that are part of usbcore ever have to
* do this; nobody else needs to worry about it. The rule for locking
* is simple:
*
 *	When locking both a device and its parent, always lock the
 *	parent first.
*/
/**
* usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure
* @udev: device that's being locked
* @iface: interface bound to the driver making the request (optional)
*
* Attempts to acquire the device lock, but fails if the device is
* NOTATTACHED or SUSPENDED, or if iface is specified and the interface
* is neither BINDING nor BOUND. Rather than sleeping to wait for the
* lock, the routine polls repeatedly. This is to prevent deadlock with
* disconnect; in some drivers (such as usb-storage) the disconnect()
* or suspend() method will block waiting for a device reset to complete.
*
* Return: A negative error code for failure, otherwise 0.
*/
int usb_lock_device_for_reset(struct usb_device *udev,
const struct usb_interface *iface)
{
unsigned long jiffies_expire = jiffies + HZ;
if (udev->state == USB_STATE_NOTATTACHED)
return -ENODEV;
if (udev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
if (iface && (iface->condition == USB_INTERFACE_UNBINDING ||
iface->condition == USB_INTERFACE_UNBOUND))
return -EINTR;
while (!usb_trylock_device(udev)) {
/* If we can't acquire the lock after waiting one second,
* we're probably deadlocked */
if (time_after(jiffies, jiffies_expire))
return -EBUSY;
msleep(15);
if (udev->state == USB_STATE_NOTATTACHED)
return -ENODEV;
if (udev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
if (iface && (iface->condition == USB_INTERFACE_UNBINDING ||
iface->condition == USB_INTERFACE_UNBOUND))
return -EINTR;
}
return 0;
}
EXPORT_SYMBOL_GPL(usb_lock_device_for_reset);
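/*
 * Illustrative sketch, not part of the original source: a driver
 * resetting its device outside of probe()/disconnect() might do
 *
 *	ret = usb_lock_device_for_reset(udev, intf);
 *	if (ret == 0) {
 *		ret = usb_reset_device(udev);
 *		usb_unlock_device(udev);
 *	}
 *
 * so that the reset never races with disconnect or suspend.
 */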
/**
* usb_get_current_frame_number - return current bus frame number
* @dev: the device whose bus is being queried
*
* Return: The current frame number for the USB host controller used
* with the given USB device. This can be used when scheduling
* isochronous requests.
*
* Note: Different kinds of host controller have different "scheduling
* horizons". While one type might support scheduling only 32 frames
* into the future, others could support scheduling up to 1024 frames
* into the future.
 */
int usb_get_current_frame_number(struct usb_device *dev)
{
return usb_hcd_get_frame_number(dev);
}
EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
/*-------------------------------------------------------------------*/
/*
* __usb_get_extra_descriptor() finds a descriptor of specific type in the
* extra field of the interface and endpoint descriptor structs.
*/
EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor);
/**
* usb_alloc_coherent - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP
* @dev: device the buffer will be used with
* @size: requested buffer size
* @mem_flags: affect whether allocation may block
* @dma: used to return DMA address of buffer
*
* Return: Either null (indicating no buffer could be allocated), or the
* cpu-space pointer to a buffer that may be used to perform DMA to the
* specified device. Such cpu-space buffers are returned along with the DMA
* address (through the pointer provided).
*
* Note:
* These buffers are used with URB_NO_xxx_DMA_MAP set in urb->transfer_flags
* to avoid behaviors like using "DMA bounce buffers", or thrashing IOMMU
* hardware during URB completion/resubmit. The implementation varies between
* platforms, depending on details of how DMA will work to this device.
* Using these buffers also eliminates cacheline sharing problems on
* architectures where CPU caches are not DMA-coherent. On systems without
* bus-snooping caches, these buffers are uncached.
*
* When the buffer is no longer used, free it with usb_free_coherent().
*/
void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags,
dma_addr_t *dma)
{
if (!dev || !dev->bus)
return NULL;
return hcd_buffer_alloc(dev->bus, size, mem_flags, dma);
}
EXPORT_SYMBOL_GPL(usb_alloc_coherent);
/**
* usb_free_coherent - free memory allocated with usb_alloc_coherent()
* @dev: device the buffer was used with
* @size: requested buffer size
* @addr: CPU address of buffer
* @dma: DMA address of buffer
*
* This reclaims an I/O buffer, letting it be reused. The memory must have
* been allocated using usb_alloc_coherent(), and the parameters must match
* those provided in that allocation request.
*/
void usb_free_coherent(struct usb_device *dev, size_t size, void *addr,
dma_addr_t dma)
{
if (!dev || !dev->bus)
return;
if (!addr)
return;
hcd_buffer_free(dev->bus, size, addr, dma);
}
EXPORT_SYMBOL_GPL(usb_free_coherent);
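/*
 * Illustrative sketch, not part of the original source: pairing
 * usb_alloc_coherent() with usb_free_coherent() for an URB transfer
 * buffer. The 64-byte size is hypothetical.
 *
 *	buf = usb_alloc_coherent(udev, 64, GFP_KERNEL, &urb->transfer_dma);
 *	if (!buf)
 *		return -ENOMEM;
 *	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 *	(submit the urb and use buf, then release the buffer:)
 *	usb_free_coherent(udev, 64, buf, urb->transfer_dma);
 */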
/**
* usb_buffer_map - create DMA mapping(s) for an urb
* @urb: urb whose transfer_buffer/setup_packet will be mapped
*
* URB_NO_TRANSFER_DMA_MAP is added to urb->transfer_flags if the operation
* succeeds. If the device is connected to this system through a non-DMA
* controller, this operation always succeeds.
*
* This call would normally be used for an urb which is reused, perhaps
* as the target of a large periodic transfer, with usb_buffer_dmasync()
* calls to synchronize memory and dma state.
*
* Reverse the effect of this call with usb_buffer_unmap().
*
* Return: Either %NULL (indicating no buffer could be mapped), or @urb.
*
*/
#if 0
struct urb *usb_buffer_map(struct urb *urb)
{
struct usb_bus *bus;
struct device *controller;
if (!urb
|| !urb->dev
|| !(bus = urb->dev->bus)
|| !(controller = bus->controller))
return NULL;
if (controller->dma_mask) {
urb->transfer_dma = dma_map_single(controller,
urb->transfer_buffer, urb->transfer_buffer_length,
usb_pipein(urb->pipe)
? DMA_FROM_DEVICE : DMA_TO_DEVICE);
/* FIXME generic api broken like pci, can't report errors */
/* if (urb->transfer_dma == DMA_ADDR_INVALID) return 0; */
} else
urb->transfer_dma = ~0;
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
return urb;
}
EXPORT_SYMBOL_GPL(usb_buffer_map);
#endif /* 0 */
/* XXX DISABLED, no users currently. If you wish to re-enable this
* XXX please determine whether the sync is to transfer ownership of
 * XXX the buffer from device to cpu or vice versa, and thusly use the
* XXX appropriate _for_{cpu,device}() method. -DaveM
*/
#if 0
/**
* usb_buffer_dmasync - synchronize DMA and CPU view of buffer(s)
* @urb: urb whose transfer_buffer/setup_packet will be synchronized
*/
void usb_buffer_dmasync(struct urb *urb)
{
struct usb_bus *bus;
struct device *controller;
if (!urb
|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
|| !urb->dev
|| !(bus = urb->dev->bus)
|| !(controller = bus->controller))
return;
if (controller->dma_mask) {
dma_sync_single_for_cpu(controller,
urb->transfer_dma, urb->transfer_buffer_length,
usb_pipein(urb->pipe)
? DMA_FROM_DEVICE : DMA_TO_DEVICE);
if (usb_pipecontrol(urb->pipe))
dma_sync_single_for_cpu(controller,
urb->setup_dma,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
}
}
EXPORT_SYMBOL_GPL(usb_buffer_dmasync);
#endif
/**
* usb_buffer_unmap - free DMA mapping(s) for an urb
* @urb: urb whose transfer_buffer will be unmapped
*
* Reverses the effect of usb_buffer_map().
*/
#if 0
void usb_buffer_unmap(struct urb *urb)
{
struct usb_bus *bus;
struct device *controller;
if (!urb
|| !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
|| !urb->dev
|| !(bus = urb->dev->bus)
|| !(controller = bus->controller))
return;
if (controller->dma_mask) {
dma_unmap_single(controller,
urb->transfer_dma, urb->transfer_buffer_length,
usb_pipein(urb->pipe)
? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap);
#endif /* 0 */
#if 0
/**
* usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
* @dev: device to which the scatterlist will be mapped
* @is_in: mapping transfer direction
* @sg: the scatterlist to map
* @nents: the number of entries in the scatterlist
*
* Return: Either < 0 (indicating no buffers could be mapped), or the
* number of DMA mapping array entries in the scatterlist.
*
* Note:
* The caller is responsible for placing the resulting DMA addresses from
* the scatterlist into URB transfer buffer pointers, and for setting the
* URB_NO_TRANSFER_DMA_MAP transfer flag in each of those URBs.
*
* Top I/O rates come from queuing URBs, instead of waiting for each one
* to complete before starting the next I/O. This is particularly easy
* to do with scatterlists. Just allocate and submit one URB for each DMA
* mapping entry returned, stopping on the first error or when all succeed.
* Better yet, use the usb_sg_*() calls, which do that (and more) for you.
*
* This call would normally be used when translating scatterlist requests,
* rather than usb_buffer_map(), since on some hardware (with IOMMUs) it
* may be able to coalesce mappings for improved I/O efficiency.
*
* Reverse the effect of this call with usb_buffer_unmap_sg().
*/
int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
struct scatterlist *sg, int nents)
{
struct usb_bus *bus;
struct device *controller;
if (!dev
|| !(bus = dev->bus)
|| !(controller = bus->controller)
|| !controller->dma_mask)
return -EINVAL;
/* FIXME generic api broken like pci, can't report errors */
return dma_map_sg(controller, sg, nents,
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
#endif
/* XXX DISABLED, no users currently. If you wish to re-enable this
* XXX please determine whether the sync is to transfer ownership of
 * XXX the buffer from device to cpu or vice versa, and thusly use the
* XXX appropriate _for_{cpu,device}() method. -DaveM
*/
#if 0
/**
* usb_buffer_dmasync_sg - synchronize DMA and CPU view of scatterlist buffer(s)
* @dev: device to which the scatterlist will be mapped
* @is_in: mapping transfer direction
* @sg: the scatterlist to synchronize
* @n_hw_ents: the positive return value from usb_buffer_map_sg
*
* Use this when you are re-using a scatterlist's data buffers for
* another USB request.
*/
void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
struct scatterlist *sg, int n_hw_ents)
{
struct usb_bus *bus;
struct device *controller;
if (!dev
|| !(bus = dev->bus)
|| !(controller = bus->controller)
|| !controller->dma_mask)
return;
dma_sync_sg_for_cpu(controller, sg, n_hw_ents,
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
#endif
#if 0
/**
* usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
* @dev: device to which the scatterlist will be mapped
* @is_in: mapping transfer direction
* @sg: the scatterlist to unmap
* @n_hw_ents: the positive return value from usb_buffer_map_sg
*
* Reverses the effect of usb_buffer_map_sg().
*/
void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
struct scatterlist *sg, int n_hw_ents)
{
struct usb_bus *bus;
struct device *controller;
if (!dev
|| !(bus = dev->bus)
|| !(controller = bus->controller)
|| !controller->dma_mask)
return;
dma_unmap_sg(controller, sg, n_hw_ents,
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
#endif
/*
* Notifications of device and interface registration
*/
static int usb_bus_notify(struct notifier_block *nb, unsigned long action,
void *data)
{
struct device *dev = data;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
if (dev->type == &usb_device_type)
(void) usb_create_sysfs_dev_files(to_usb_device(dev));
else if (dev->type == &usb_if_device_type)
usb_create_sysfs_intf_files(to_usb_interface(dev));
break;
case BUS_NOTIFY_DEL_DEVICE:
if (dev->type == &usb_device_type)
usb_remove_sysfs_dev_files(to_usb_device(dev));
else if (dev->type == &usb_if_device_type)
usb_remove_sysfs_intf_files(to_usb_interface(dev));
break;
}
return 0;
}
static struct notifier_block usb_bus_nb = {
.notifier_call = usb_bus_notify,
};
struct dentry *usb_debug_root;
EXPORT_SYMBOL_GPL(usb_debug_root);
static struct dentry *usb_debug_devices;
static int usb_debugfs_init(void)
{
usb_debug_root = debugfs_create_dir("usb", NULL);
if (!usb_debug_root)
return -ENOENT;
usb_debug_devices = debugfs_create_file("devices", 0444,
usb_debug_root, NULL,
&usbfs_devices_fops);
if (!usb_debug_devices) {
debugfs_remove(usb_debug_root);
usb_debug_root = NULL;
return -ENOENT;
}
return 0;
}
static void usb_debugfs_cleanup(void)
{
debugfs_remove(usb_debug_devices);
debugfs_remove(usb_debug_root);
}
/*
* Init
*/
static int __init usb_init(void)
{
int retval;
if (usb_disabled()) {
pr_info("%s: USB support disabled\n", usbcore_name);
return 0;
}
usb_init_pool_max();
retval = usb_debugfs_init();
if (retval)
goto out;
usb_acpi_register();
retval = bus_register(&usb_bus_type);
if (retval)
goto bus_register_failed;
retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb);
if (retval)
goto bus_notifier_failed;
retval = usb_major_init();
if (retval)
goto major_init_failed;
retval = usb_register(&usbfs_driver);
if (retval)
goto driver_register_failed;
retval = usb_devio_init();
if (retval)
goto usb_devio_init_failed;
retval = usb_hub_init();
if (retval)
goto hub_init_failed;
retval = usb_register_device_driver(&usb_generic_driver, THIS_MODULE);
if (!retval)
goto out;
usb_hub_cleanup();
hub_init_failed:
usb_devio_cleanup();
usb_devio_init_failed:
usb_deregister(&usbfs_driver);
driver_register_failed:
usb_major_cleanup();
major_init_failed:
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_notifier_failed:
bus_unregister(&usb_bus_type);
bus_register_failed:
usb_acpi_unregister();
usb_debugfs_cleanup();
out:
return retval;
}
/*
* Cleanup
*/
static void __exit usb_exit(void)
{
/* This will matter if shutdown/reboot does exitcalls. */
if (usb_disabled())
return;
usb_deregister_device_driver(&usb_generic_driver);
usb_major_cleanup();
usb_deregister(&usbfs_driver);
usb_devio_cleanup();
usb_hub_cleanup();
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_unregister(&usb_bus_type);
usb_acpi_unregister();
usb_debugfs_cleanup();
idr_destroy(&usb_bus_idr);
}
subsys_initcall(usb_init);
module_exit(usb_exit);
MODULE_LICENSE("GPL");
CVE: null | CWE: null | commit_link: null | severity: null | __index_level_0__: 103,018
file_name: 30,530 | vulnerable_line_numbers: null | dataset_type: train_val | commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b | unique_id: 30,530 | project: Chrome | target: 0 | repo_url: https://github.com/chromium/chromium | date: 2018-04-07 23:43:03+00:00 | code:
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_WEBKIT_PUBLIC_PLATFORM_SCHEDULER_WEB_MAIN_THREAD_SCHEDULER_H_
#define THIRD_PARTY_WEBKIT_PUBLIC_PLATFORM_SCHEDULER_WEB_MAIN_THREAD_SCHEDULER_H_
#include <memory>
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/optional.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "third_party/blink/public/platform/scheduler/single_thread_idle_task_runner.h"
#include "third_party/blink/public/platform/scheduler/web_render_widget_scheduling_state.h"
#include "third_party/blink/public/platform/scheduler/web_thread_scheduler.h"
#include "third_party/blink/public/platform/web_common.h"
#include "third_party/blink/public/platform/web_input_event_result.h"
#include "third_party/blink/public/platform/web_scoped_virtual_time_pauser.h"
#include "v8/include/v8.h"
namespace base {
namespace trace_event {
class BlameContext;
}
} // namespace base
namespace blink {
class WebThread;
class WebInputEvent;
} // namespace blink
namespace viz {
struct BeginFrameArgs;
}
namespace blink {
namespace scheduler {
enum class RendererProcessType;
class WebRenderWidgetSchedulingState;
class BLINK_PLATFORM_EXPORT WebMainThreadScheduler : public WebThreadScheduler {
public:
class BLINK_PLATFORM_EXPORT RAILModeObserver {
public:
virtual ~RAILModeObserver();
virtual void OnRAILModeChanged(v8::RAILMode rail_mode) = 0;
};
~WebMainThreadScheduler() override;
// If |initial_virtual_time| is specified then the scheduler will be created
// with virtual time enabled and paused, and base::Time will be overridden to
// start at |initial_virtual_time|.
static std::unique_ptr<WebMainThreadScheduler> Create(
base::Optional<base::Time> initial_virtual_time = base::nullopt);
// Returns the compositor task runner.
virtual scoped_refptr<base::SingleThreadTaskRunner>
CompositorTaskRunner() = 0;
// Returns the input task runner.
virtual scoped_refptr<base::SingleThreadTaskRunner> InputTaskRunner() = 0;
// Creates a WebThread implementation for the renderer main thread.
virtual std::unique_ptr<WebThread> CreateMainThread() = 0;
// Returns a new WebRenderWidgetSchedulingState. The signals from this will
// be used to make scheduling decisions.
virtual std::unique_ptr<WebRenderWidgetSchedulingState>
NewRenderWidgetSchedulingState() = 0;
// Called to notify about the start of an extended period where no frames
// need to be drawn. Must be called from the main thread.
virtual void BeginFrameNotExpectedSoon() = 0;
// Called to notify about the start of a period where main frames are not
// scheduled and so short idle work can be scheduled. This will precede
// BeginFrameNotExpectedSoon and is also called when the compositor may be
// busy but the main thread is not.
virtual void BeginMainFrameNotExpectedUntil(base::TimeTicks time) = 0;
// Called to notify about the start of a new frame. Must be called from the
// main thread.
virtual void WillBeginFrame(const viz::BeginFrameArgs& args) = 0;
// Called to notify that a previously begun frame was committed. Must be
// called from the main thread.
virtual void DidCommitFrameToCompositor() = 0;
// Keep InputEventStateToString() in sync with this enum.
enum class InputEventState {
EVENT_CONSUMED_BY_COMPOSITOR,
EVENT_FORWARDED_TO_MAIN_THREAD,
};
static const char* InputEventStateToString(InputEventState input_event_state);
// Tells the scheduler that the system processed an input event. Called by the
// compositor (impl) thread. Note it's expected that every call to
// DidHandleInputEventOnCompositorThread where |event_state| is
// EVENT_FORWARDED_TO_MAIN_THREAD will be followed by a corresponding call
// to DidHandleInputEventOnMainThread.
virtual void DidHandleInputEventOnCompositorThread(
const WebInputEvent& web_input_event,
InputEventState event_state) = 0;
// Tells the scheduler that the system processed an input event. Must be
// called from the main thread.
virtual void DidHandleInputEventOnMainThread(
const WebInputEvent& web_input_event,
WebInputEventResult result) = 0;
// Returns the most recently reported expected queueing time, computed over
// the past 1 second window.
virtual base::TimeDelta MostRecentExpectedQueueingTime() = 0;
// Tells the scheduler that the system is displaying an input animation (e.g.
// a fling). Called by the compositor (impl) thread.
virtual void DidAnimateForInputOnCompositorThread() = 0;
// Tells the scheduler about the change of renderer visibility status (e.g.
// "all widgets are hidden" condition). Used mostly for metric purposes.
// Must be called on the main thread.
virtual void SetRendererHidden(bool hidden) = 0;
// Tells the scheduler about the change of renderer background status, i.e.,
// there are no critical, user facing activities (visual, audio, etc...)
// driven by this process. A stricter condition than |SetRendererHidden()|,
// the process is assumed to be foregrounded when the scheduler is
// constructed. Must be called on the main thread.
virtual void SetRendererBackgrounded(bool backgrounded) = 0;
// Tells the scheduler about "keep-alive" state which can be due to:
// service workers, shared workers, or fetch keep-alive.
// If set to true, then the scheduler should not freeze the renderer.
virtual void SetSchedulerKeepActive(bool keep_active) = 0;
#if defined(OS_ANDROID)
// Android WebView has very strange WebView.pauseTimers/resumeTimers API.
// It's very old and very inconsistent. The API promises that this
// "pauses all layout, parsing, and JavaScript timers for all WebViews".
// Also CTS tests expect that loading tasks continue to run.
// We should change it to something consistent (e.g. stop all javascript)
// but changing WebView and CTS is a slow and painful process, so for
// the time being we're doing our best.
// DO NOT USE FOR ANYTHING EXCEPT ANDROID WEBVIEW API IMPLEMENTATION.
virtual void PauseTimersForAndroidWebView() = 0;
virtual void ResumeTimersForAndroidWebView() = 0;
#endif // defined(OS_ANDROID)
// RAII handle for pausing the renderer. Renderer is paused while
// at least one pause handle exists.
class BLINK_PLATFORM_EXPORT RendererPauseHandle {
public:
RendererPauseHandle() = default;
virtual ~RendererPauseHandle() = default;
private:
DISALLOW_COPY_AND_ASSIGN(RendererPauseHandle);
};
// Tells the scheduler that the renderer process should be paused.
// Pausing means that all javascript callbacks should not fire.
// https://html.spec.whatwg.org/#pause
//
// Renderer will be resumed when the handle is destroyed.
// Handle should be destroyed before the renderer.
virtual std::unique_ptr<RendererPauseHandle> PauseRenderer()
WARN_UNUSED_RESULT = 0;
enum class NavigatingFrameType { kMainFrame, kChildFrame };
// Tells the scheduler that a navigation task is pending. While any main-frame
// navigation tasks are pending, the scheduler will ensure that loading tasks
// are not blocked even if they are expensive. Must be called on the main
// thread.
virtual void AddPendingNavigation(NavigatingFrameType type) = 0;
// Tells the scheduler that a navigation task is no longer pending.
// Must be called on the main thread.
virtual void RemovePendingNavigation(NavigatingFrameType type) = 0;
// Returns true if the scheduler has reason to believe that high priority work
// may soon arrive on the main thread, e.g., if gesture events were observed
// recently.
// Must be called from the main thread.
virtual bool IsHighPriorityWorkAnticipated() = 0;
// Sets whether to allow suspension of tasks after the backgrounded signal is
// received via SetRendererBackgrounded(true). Defaults to disabled.
virtual void SetStoppingWhenBackgroundedEnabled(bool enabled) = 0;
// Sets the default blame context to which top level work should be
// attributed in this renderer. |blame_context| must outlive this scheduler.
virtual void SetTopLevelBlameContext(
base::trace_event::BlameContext* blame_context) = 0;
// The renderer scheduler maintains an estimated RAIL mode[1]. This observer
// can be used to get notified when the mode changes. The observer will be
// called on the main thread and must outlive this class.
// [1]
// https://developers.google.com/web/tools/chrome-devtools/profile/evaluate-performance/rail
virtual void SetRAILModeObserver(RAILModeObserver* observer) = 0;
// Returns whether or not the main thread appears unresponsive, based on the
// length and frequency of recent main thread tasks. To be called from the
// compositor thread.
virtual bool MainThreadSeemsUnresponsive(
base::TimeDelta main_thread_responsiveness_threshold) = 0;
// Sets the kind of renderer process. Should be called on the main thread
// once.
virtual void SetRendererProcessType(RendererProcessType type) = 0;
// Returns a WebScopedVirtualTimePauser which can be used to vote for pausing
// virtual time. Virtual time will be paused if any WebScopedVirtualTimePauser
// votes to pause it, and only unpaused only if all
// WebScopedVirtualTimePausers are either destroyed or vote to unpause. Note
// the WebScopedVirtualTimePauser returned by this method is initially
// unpaused.
virtual WebScopedVirtualTimePauser CreateWebScopedVirtualTimePauser(
const char* name,
WebScopedVirtualTimePauser::VirtualTaskDuration duration =
WebScopedVirtualTimePauser::VirtualTaskDuration::kNonInstant) = 0;
protected:
WebMainThreadScheduler();
DISALLOW_COPY_AND_ASSIGN(WebMainThreadScheduler);
};
} // namespace scheduler
} // namespace blink
#endif // THIRD_PARTY_WEBKIT_PUBLIC_PLATFORM_SCHEDULER_WEB_MAIN_THREAD_SCHEDULER_H_
CVE: null | CWE: null | commit_link: null | severity: null | __index_level_0__: 27,393
file_name: 23,070 | vulnerable_line_numbers: null | dataset_type: train_val | commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b | unique_id: 23,070 | project: Chrome | target: 0 | repo_url: https://github.com/chromium/chromium | date: 2018-04-07 23:43:03+00:00 | code:
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/browser/service_worker/service_worker_navigation_loader.h"
#include "base/run_loop.h"
#include "base/test/scoped_feature_list.h"
#include "content/browser/loader/navigation_loader_interceptor.h"
#include "content/browser/service_worker/embedded_worker_test_helper.h"
#include "content/browser/service_worker/service_worker_context_core.h"
#include "content/browser/service_worker/service_worker_registration.h"
#include "content/browser/service_worker/service_worker_test_utils.h"
#include "content/browser/service_worker/service_worker_version.h"
#include "content/common/service_worker/service_worker_event_dispatcher.mojom.h"
#include "content/common/service_worker/service_worker_utils.h"
#include "content/common/single_request_url_loader_factory.h"
#include "content/public/test/test_browser_thread_bundle.h"
#include "mojo/public/cpp/bindings/strong_binding.h"
#include "mojo/public/cpp/system/data_pipe_utils.h"
#include "net/http/http_util.h"
#include "net/ssl/ssl_info.h"
#include "net/test/cert_test_util.h"
#include "net/test/test_data_directory.h"
#include "services/network/public/cpp/features.h"
#include "services/network/public/cpp/resource_response.h"
#include "services/network/public/mojom/fetch_api.mojom.h"
#include "services/network/test/test_url_loader_client.h"
#include "storage/browser/blob/blob_data_builder.h"
#include "storage/browser/blob/blob_data_handle.h"
#include "storage/browser/blob/blob_impl.h"
#include "storage/browser/blob/blob_storage_context.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/public/mojom/service_worker/service_worker_event_status.mojom.h"
#include "third_party/blink/public/mojom/service_worker/service_worker_registration.mojom.h"
namespace content {
namespace service_worker_navigation_loader_unittest {
void ReceiveRequestHandler(
SingleRequestURLLoaderFactory::RequestHandler* out_handler,
SingleRequestURLLoaderFactory::RequestHandler handler) {
*out_handler = std::move(handler);
}
// NavigationPreloadLoaderClient mocks the renderer-side URLLoaderClient for the
// navigation preload network request performed by the browser. In production
// code, this is ServiceWorkerContextClient::NavigationPreloadRequest,
// which it forwards the response to FetchEvent#preloadResponse. Here, it
// simulates passing the response to FetchEvent#respondWith.
//
// The navigation preload test is quite involved. The flow of data is:
// 1. ServiceWorkerNavigationLoader asks ServiceWorkerFetchDispatcher to start
// navigation preload.
// 2. ServiceWorkerFetchDispatcher starts the network request which is mocked
// by MockNetworkURLLoaderFactory. The response is sent to
// ServiceWorkerFetchDispatcher::DelegatingURLLoaderClient.
// 3. DelegatingURLLoaderClient sends the response to the |preload_handle|
// that was passed to Helper::OnFetchEvent().
// 4. Helper::OnFetchEvent() creates NavigationPreloadLoaderClient, which
// receives the response.
// 5. NavigationPreloadLoaderClient calls OnFetchEvent()'s callbacks
// with the response.
// 6. Like all FetchEvent responses, the response is sent to
// ServiceWorkerNavigationLoader::DidDispatchFetchEvent, and the
// RequestHandler is returned.
class NavigationPreloadLoaderClient final
: public network::mojom::URLLoaderClient {
public:
NavigationPreloadLoaderClient(
mojom::FetchEventPreloadHandlePtr preload_handle,
mojom::ServiceWorkerFetchResponseCallbackPtr response_callback,
mojom::ServiceWorkerEventDispatcher::DispatchFetchEventCallback
finish_callback)
: url_loader_(std::move(preload_handle->url_loader)),
binding_(this, std::move(preload_handle->url_loader_client_request)),
response_callback_(std::move(response_callback)),
finish_callback_(std::move(finish_callback)) {
binding_.set_connection_error_handler(
base::BindOnce(&NavigationPreloadLoaderClient::OnConnectionError,
base::Unretained(this)));
}
~NavigationPreloadLoaderClient() override = default;
// network::mojom::URLLoaderClient implementation
void OnReceiveResponse(
const network::ResourceResponseHead& response_head,
network::mojom::DownloadedTempFilePtr downloaded_file) override {
response_head_ = response_head;
}
void OnStartLoadingResponseBody(
mojo::ScopedDataPipeConsumerHandle body) override {
body_ = std::move(body);
// We could call OnResponseStream() here, but for simplicity, don't do
// anything until OnComplete().
}
void OnComplete(const network::URLLoaderCompletionStatus& status) override {
blink::mojom::ServiceWorkerStreamCallbackPtr stream_callback;
auto stream_handle = blink::mojom::ServiceWorkerStreamHandle::New();
stream_handle->callback_request = mojo::MakeRequest(&stream_callback);
stream_handle->stream = std::move(body_);
// Simulate passing the navigation preload response to
// FetchEvent#respondWith.
response_callback_->OnResponseStream(
ServiceWorkerResponse(
std::make_unique<std::vector<GURL>>(
response_head_.url_list_via_service_worker),
response_head_.headers->response_code(),
response_head_.headers->GetStatusText(),
response_head_.response_type_via_service_worker,
std::make_unique<ServiceWorkerHeaderMap>(), "" /* blob_uuid */,
0 /* blob_size */, nullptr /* blob */,
blink::mojom::ServiceWorkerResponseError::kUnknown, base::Time(),
false /* response_is_in_cache_storage */,
std::string() /* response_cache_storage_cache_name */,
std::make_unique<
ServiceWorkerHeaderList>() /* cors_exposed_header_names */),
std::move(stream_handle), base::Time::Now());
std::move(finish_callback_)
.Run(blink::mojom::ServiceWorkerEventStatus::COMPLETED,
base::Time::Now());
stream_callback->OnCompleted();
delete this;
}
void OnReceiveRedirect(
const net::RedirectInfo& redirect_info,
const network::ResourceResponseHead& response_head) override {}
void OnDataDownloaded(int64_t data_length,
int64_t encoded_data_length) override {}
void OnUploadProgress(int64_t current_position,
int64_t total_size,
OnUploadProgressCallback ack_callback) override {}
void OnReceiveCachedMetadata(const std::vector<uint8_t>& data) override {}
void OnTransferSizeUpdated(int32_t transfer_size_diff) override {}
void OnConnectionError() { delete this; }
private:
network::mojom::URLLoaderPtr url_loader_;
mojo::Binding<network::mojom::URLLoaderClient> binding_;
network::ResourceResponseHead response_head_;
mojo::ScopedDataPipeConsumerHandle body_;
// Callbacks that complete Helper::OnFetchEvent().
mojom::ServiceWorkerFetchResponseCallbackPtr response_callback_;
mojom::ServiceWorkerEventDispatcher::DispatchFetchEventCallback
finish_callback_;
DISALLOW_COPY_AND_ASSIGN(NavigationPreloadLoaderClient);
};
// A URLLoaderFactory that returns 200 OK with a simple body to any request.
//
// ServiceWorkerNavigationLoaderTest sets the network factory for
// ServiceWorkerContextCore to MockNetworkURLLoaderFactory. So far, it's only
// used for navigation preload in these tests.
class MockNetworkURLLoaderFactory final
: public network::mojom::URLLoaderFactory {
public:
MockNetworkURLLoaderFactory() = default;
// network::mojom::URLLoaderFactory implementation.
void CreateLoaderAndStart(network::mojom::URLLoaderRequest request,
int32_t routing_id,
int32_t request_id,
uint32_t options,
const network::ResourceRequest& url_request,
network::mojom::URLLoaderClientPtr client,
const net::MutableNetworkTrafficAnnotationTag&
traffic_annotation) override {
std::string headers = "HTTP/1.1 200 OK\n\n";
net::HttpResponseInfo info;
info.headers = new net::HttpResponseHeaders(
net::HttpUtil::AssembleRawHeaders(headers.c_str(), headers.length()));
network::ResourceResponseHead response;
response.headers = info.headers;
response.headers->GetMimeType(&response.mime_type);
client->OnReceiveResponse(response, nullptr);
std::string body = "this body came from the network";
uint32_t bytes_written = body.size();
mojo::DataPipe data_pipe;
data_pipe.producer_handle->WriteData(body.data(), &bytes_written,
MOJO_WRITE_DATA_FLAG_ALL_OR_NONE);
client->OnStartLoadingResponseBody(std::move(data_pipe.consumer_handle));
network::URLLoaderCompletionStatus status;
status.error_code = net::OK;
client->OnComplete(status);
}
void Clone(network::mojom::URLLoaderFactoryRequest factory) override {
NOTREACHED();
}
private:
DISALLOW_COPY_AND_ASSIGN(MockNetworkURLLoaderFactory);
};
// Helper simulates a service worker handling fetch events. The response can be
// customized via RespondWith* functions.
class Helper : public EmbeddedWorkerTestHelper {
public:
Helper()
: EmbeddedWorkerTestHelper(
base::FilePath(),
base::MakeRefCounted<URLLoaderFactoryGetter>()) {
url_loader_factory_getter()->SetNetworkFactoryForTesting(
&mock_url_loader_factory_);
}
~Helper() override = default;
// Tells this helper to respond to fetch events with the specified blob.
void RespondWithBlob(blink::mojom::BlobPtr blob) {
response_mode_ = ResponseMode::kBlob;
blob_body_ = std::move(blob);
}
// Tells this helper to respond to fetch events with the specified stream.
void RespondWithStream(
blink::mojom::ServiceWorkerStreamCallbackRequest callback_request,
mojo::ScopedDataPipeConsumerHandle consumer_handle) {
response_mode_ = ResponseMode::kStream;
stream_handle_ = blink::mojom::ServiceWorkerStreamHandle::New();
stream_handle_->callback_request = std::move(callback_request);
stream_handle_->stream = std::move(consumer_handle);
}
// Tells this helper to respond to fetch events with network fallback.
// i.e., simulate the service worker not calling respondWith().
void RespondWithFallback() {
response_mode_ = ResponseMode::kFallbackResponse;
}
// Tells this helper to respond to fetch events with an error response.
void RespondWithError() { response_mode_ = ResponseMode::kErrorResponse; }
// Tells this helper to respond to fetch events with
// FetchEvent#preloadResponse. See NavigationPreloadLoaderClient's
// documentation for details.
void RespondWithNavigationPreloadResponse() {
response_mode_ = ResponseMode::kNavigationPreloadResponse;
}
// Tells this helper to respond to fetch events with the redirect response.
void RespondWithRedirectResponse(const GURL& new_url) {
response_mode_ = ResponseMode::kRedirect;
redirected_url_ = new_url;
}
// Tells this helper to simulate failure to dispatch the fetch event to the
// service worker.
void FailToDispatchFetchEvent() {
response_mode_ = ResponseMode::kFailFetchEventDispatch;
}
// Tells this helper to simulate "early response", where the respondWith()
// promise resolves before the waitUntil() promise. In this mode, the
// helper sets the response mode to "early response", which simulates the
// promise passed to respondWith() resolving before the waitUntil() promise
// resolves. In this mode, the helper will respond to fetch events
// immediately, but will not finish the fetch event until FinishWaitUntil() is
// called.
void RespondEarly() { response_mode_ = ResponseMode::kEarlyResponse; }
void FinishWaitUntil() {
std::move(finish_callback_)
.Run(blink::mojom::ServiceWorkerEventStatus::COMPLETED,
base::Time::Now());
base::RunLoop().RunUntilIdle();
}
void ReadRequestBody(std::string* out_string) {
ASSERT_TRUE(request_body_);
const std::vector<network::DataElement>* elements =
request_body_->elements();
// So far this test expects a single bytes element.
ASSERT_EQ(1u, elements->size());
const network::DataElement& element = elements->front();
ASSERT_EQ(network::DataElement::TYPE_BYTES, element.type());
*out_string = std::string(element.bytes(), element.length());
}
protected:
void OnFetchEvent(
int embedded_worker_id,
const network::ResourceRequest& request,
mojom::FetchEventPreloadHandlePtr preload_handle,
mojom::ServiceWorkerFetchResponseCallbackPtr response_callback,
mojom::ServiceWorkerEventDispatcher::DispatchFetchEventCallback
finish_callback) override {
// Basic checks on DispatchFetchEvent parameters.
EXPECT_TRUE(ServiceWorkerUtils::IsMainResourceType(
static_cast<ResourceType>(request.resource_type)));
request_body_ = request.request_body;
switch (response_mode_) {
case ResponseMode::kDefault:
EmbeddedWorkerTestHelper::OnFetchEvent(
embedded_worker_id, request, std::move(preload_handle),
std::move(response_callback), std::move(finish_callback));
return;
case ResponseMode::kBlob:
response_callback->OnResponseBlob(
ServiceWorkerResponse(
std::make_unique<std::vector<GURL>>(), 200, "OK",
network::mojom::FetchResponseType::kDefault,
std::make_unique<ServiceWorkerHeaderMap>(), "" /* blob_uuid */,
0 /* blob_size */, nullptr /* blob */,
blink::mojom::ServiceWorkerResponseError::kUnknown,
base::Time(), false /* response_is_in_cache_storage */,
std::string() /* response_cache_storage_cache_name */,
std::make_unique<
ServiceWorkerHeaderList>() /* cors_exposed_header_names */),
std::move(blob_body_), base::Time::Now());
std::move(finish_callback)
.Run(blink::mojom::ServiceWorkerEventStatus::COMPLETED,
base::Time::Now());
return;
case ResponseMode::kStream:
response_callback->OnResponseStream(
ServiceWorkerResponse(
std::make_unique<std::vector<GURL>>(), 200, "OK",
network::mojom::FetchResponseType::kDefault,
std::make_unique<ServiceWorkerHeaderMap>(), "" /* blob_uuid */,
0 /* blob_size */, nullptr /* blob */,
blink::mojom::ServiceWorkerResponseError::kUnknown,
base::Time(), false /* response_is_in_cache_storage */,
std::string() /* response_cache_storage_cache_name */,
std::make_unique<
ServiceWorkerHeaderList>() /* cors_exposed_header_names */),
std::move(stream_handle_), base::Time::Now());
std::move(finish_callback)
.Run(blink::mojom::ServiceWorkerEventStatus::COMPLETED,
base::Time::Now());
return;
case ResponseMode::kFallbackResponse:
response_callback->OnFallback(base::Time::Now());
std::move(finish_callback)
.Run(blink::mojom::ServiceWorkerEventStatus::COMPLETED,
base::Time::Now());
return;
case ResponseMode::kErrorResponse:
response_callback->OnResponse(
ServiceWorkerResponse(
std::make_unique<std::vector<GURL>>(), 0 /* status_code */,
"" /* status_text */,
network::mojom::FetchResponseType::kDefault,
std::make_unique<ServiceWorkerHeaderMap>(), "" /* blob_uuid */,
0 /* blob_size */, nullptr /* blob */,
blink::mojom::ServiceWorkerResponseError::kPromiseRejected,
base::Time(), false /* response_is_in_cache_storage */,
std::string() /* response_cache_storage_cache_name */,
std::make_unique<
ServiceWorkerHeaderList>() /* cors_exposed_header_names */),
base::Time::Now());
std::move(finish_callback)
.Run(blink::mojom::ServiceWorkerEventStatus::REJECTED,
base::Time::Now());
return;
case ResponseMode::kNavigationPreloadResponse:
// Deletes itself when done.
new NavigationPreloadLoaderClient(std::move(preload_handle),
std::move(response_callback),
std::move(finish_callback));
return;
case ResponseMode::kFailFetchEventDispatch:
// Simulate failure by stopping the worker before the event finishes.
// This causes ServiceWorkerVersion::StartRequest() to call its error
// callback, which triggers ServiceWorkerNavigationLoader's dispatch
// failed behavior.
SimulateWorkerStopped(embedded_worker_id);
// Finish the event by calling |finish_callback|.
// This is the Mojo callback for
// mojom::ServiceWorkerEventDispatcher::DispatchFetchEvent().
// If this is not called, Mojo will complain. In production code,
// ServiceWorkerContextClient would call this when it aborts all
// callbacks after an unexpected stop.
std::move(finish_callback)
.Run(blink::mojom::ServiceWorkerEventStatus::ABORTED,
base::Time::Now());
return;
case ResponseMode::kEarlyResponse:
finish_callback_ = std::move(finish_callback);
response_callback->OnResponse(
ServiceWorkerResponse(
std::make_unique<std::vector<GURL>>(), 200, "OK",
network::mojom::FetchResponseType::kDefault,
std::make_unique<ServiceWorkerHeaderMap>(), "" /* blob_uuid */,
0 /* blob_size */, nullptr /* blob */,
blink::mojom::ServiceWorkerResponseError::kUnknown,
base::Time(), false /* response_is_in_cache_storage */,
std::string() /* response_cache_storage_cache_name */,
std::make_unique<
ServiceWorkerHeaderList>() /* cors_exposed_header_names */),
base::Time::Now());
// Now the caller must call FinishWaitUntil() to finish the event.
return;
case ResponseMode::kRedirect:
auto headers = std::make_unique<ServiceWorkerHeaderMap>();
(*headers)["location"] = redirected_url_.spec();
response_callback->OnResponse(
ServiceWorkerResponse(
std::make_unique<std::vector<GURL>>(), 301, "Moved Permanently",
network::mojom::FetchResponseType::kDefault, std::move(headers),
"" /* blob_uuid */, 0 /* blob_size */, nullptr /* blob */,
blink::mojom::ServiceWorkerResponseError::kUnknown,
base::Time(), false /* response_is_in_cache_storage */,
std::string() /* response_cache_storage_cache_name */,
std::make_unique<
ServiceWorkerHeaderList>() /* cors_exposed_header_names */),
base::Time::Now());
std::move(finish_callback)
.Run(blink::mojom::ServiceWorkerEventStatus::COMPLETED,
base::Time::Now());
return;
}
NOTREACHED();
}
private:
enum class ResponseMode {
kDefault,
kBlob,
kStream,
kFallbackResponse,
kErrorResponse,
kNavigationPreloadResponse,
kFailFetchEventDispatch,
kEarlyResponse,
kRedirect
};
ResponseMode response_mode_ = ResponseMode::kDefault;
scoped_refptr<network::ResourceRequestBody> request_body_;
// For ResponseMode::kBlob.
blink::mojom::BlobPtr blob_body_;
// For ResponseMode::kStream.
blink::mojom::ServiceWorkerStreamHandlePtr stream_handle_;
// For ResponseMode::kEarlyResponse.
mojom::ServiceWorkerEventDispatcher::DispatchFetchEventCallback
finish_callback_;
// For ResponseMode::kRedirect.
GURL redirected_url_;
MockNetworkURLLoaderFactory mock_url_loader_factory_;
DISALLOW_COPY_AND_ASSIGN(Helper);
};
// Returns typical response info for a resource load that went through a service
// worker.
std::unique_ptr<network::ResourceResponseHead>
CreateResponseInfoFromServiceWorker() {
auto head = std::make_unique<network::ResourceResponseHead>();
head->was_fetched_via_service_worker = true;
head->was_fallback_required_by_service_worker = false;
head->url_list_via_service_worker = std::vector<GURL>();
head->response_type_via_service_worker =
network::mojom::FetchResponseType::kDefault;
head->is_in_cache_storage = false;
head->cache_storage_cache_name = std::string();
head->did_service_worker_navigation_preload = false;
return head;
}
// ServiceWorkerNavigationLoaderTest is for testing the handling of requests
// by a service worker via ServiceWorkerNavigationLoader.
//
// Of course, no actual service worker runs in the unit test, it is simulated
// via EmbeddedWorkerTestHelper receiving IPC messages from the browser and
// responding as if a service worker is running in the renderer.
//
// ServiceWorkerNavigationLoaderTest is also a
// ServiceWorkerNavigationLoader::Delegate. In production code,
// ServiceWorkerControlleeRequestHandler is the Delegate. So this class also
// basically mocks that part of ServiceWorkerControlleeRequestHandler.
class ServiceWorkerNavigationLoaderTest
: public testing::Test,
public ServiceWorkerNavigationLoader::Delegate {
public:
ServiceWorkerNavigationLoaderTest()
: thread_bundle_(TestBrowserThreadBundle::IO_MAINLOOP),
helper_(std::make_unique<Helper>()) {}
~ServiceWorkerNavigationLoaderTest() override = default;
void SetUp() override {
feature_list_.InitAndEnableFeature(network::features::kNetworkService);
// Create an active service worker.
storage()->LazyInitializeForTest(base::DoNothing());
base::RunLoop().RunUntilIdle();
blink::mojom::ServiceWorkerRegistrationOptions options;
options.scope = GURL("https://example.com/");
registration_ =
new ServiceWorkerRegistration(options, storage()->NewRegistrationId(),
helper_->context()->AsWeakPtr());
version_ = new ServiceWorkerVersion(
registration_.get(), GURL("https://example.com/service_worker.js"),
storage()->NewVersionId(), helper_->context()->AsWeakPtr());
std::vector<ServiceWorkerDatabase::ResourceRecord> records;
records.push_back(WriteToDiskCacheSync(
storage(), version_->script_url(), storage()->NewResourceId(),
{} /* headers */, "I'm the body", "I'm the meta data"));
version_->script_cache_map()->SetResources(records);
version_->set_fetch_handler_existence(
ServiceWorkerVersion::FetchHandlerExistence::EXISTS);
version_->SetStatus(ServiceWorkerVersion::ACTIVATED);
registration_->SetActiveVersion(version_);
// Make the registration findable via storage functions.
registration_->set_last_update_check(base::Time::Now());
ServiceWorkerStatusCode status = SERVICE_WORKER_ERROR_FAILED;
storage()->StoreRegistration(registration_.get(), version_.get(),
CreateReceiverOnCurrentThread(&status));
base::RunLoop().RunUntilIdle();
ASSERT_EQ(SERVICE_WORKER_OK, status);
}
ServiceWorkerStorage* storage() { return helper_->context()->storage(); }
// Indicates whether ServiceWorkerNavigationLoader decided to handle a
// request, i.e., it returned a non-null RequestHandler for the request.
enum class LoaderResult {
kHandledRequest,
kDidNotHandleRequest,
};
// Returns whether ServiceWorkerNavigationLoader handled the request. If
// kHandledRequest was returned, the request is ongoing and the caller can use
// functions like client_.RunUntilComplete() to wait for completion.
LoaderResult StartRequest(std::unique_ptr<network::ResourceRequest> request) {
// Start a ServiceWorkerNavigationLoader. It should return a
// RequestHandler.
SingleRequestURLLoaderFactory::RequestHandler handler;
loader_ = std::make_unique<ServiceWorkerNavigationLoader>(
base::BindOnce(&ReceiveRequestHandler, &handler), this, *request,
base::WrapRefCounted<URLLoaderFactoryGetter>(
helper_->context()->loader_factory_getter()));
loader_->ForwardToServiceWorker();
base::RunLoop().RunUntilIdle();
if (!handler)
return LoaderResult::kDidNotHandleRequest;
// Run the handler. It will load |request.url|.
std::move(handler).Run(mojo::MakeRequest(&loader_ptr_),
client_.CreateInterfacePtr());
return LoaderResult::kHandledRequest;
}
void ExpectResponseInfo(const network::ResourceResponseHead& info,
const network::ResourceResponseHead& expected_info) {
EXPECT_EQ(expected_info.was_fetched_via_service_worker,
info.was_fetched_via_service_worker);
EXPECT_EQ(expected_info.was_fallback_required_by_service_worker,
info.was_fallback_required_by_service_worker);
EXPECT_EQ(expected_info.url_list_via_service_worker,
info.url_list_via_service_worker);
EXPECT_EQ(expected_info.response_type_via_service_worker,
info.response_type_via_service_worker);
EXPECT_FALSE(info.service_worker_start_time.is_null());
EXPECT_FALSE(info.service_worker_ready_time.is_null());
EXPECT_LT(info.service_worker_start_time, info.service_worker_ready_time);
EXPECT_EQ(expected_info.is_in_cache_storage, info.is_in_cache_storage);
EXPECT_EQ(expected_info.cache_storage_cache_name,
info.cache_storage_cache_name);
EXPECT_EQ(expected_info.did_service_worker_navigation_preload,
info.did_service_worker_navigation_preload);
}
std::unique_ptr<network::ResourceRequest> CreateRequest() {
std::unique_ptr<network::ResourceRequest> request =
std::make_unique<network::ResourceRequest>();
request->url = GURL("https://www.example.com/");
request->method = "GET";
request->fetch_request_mode = network::mojom::FetchRequestMode::kNavigate;
request->fetch_credentials_mode =
network::mojom::FetchCredentialsMode::kInclude;
request->fetch_redirect_mode = network::mojom::FetchRedirectMode::kManual;
return request;
}
protected:
// ServiceWorkerNavigationLoader::Delegate -----------------------------------
void OnPrepareToRestart() override {}
ServiceWorkerVersion* GetServiceWorkerVersion(
ServiceWorkerMetrics::URLRequestJobResult* result) override {
return version_.get();
}
bool RequestStillValid(
ServiceWorkerMetrics::URLRequestJobResult* result) override {
return true;
}
void MainResourceLoadFailed() override {
was_main_resource_load_failed_called_ = true;
}
// --------------------------------------------------------------------------
TestBrowserThreadBundle thread_bundle_;
std::unique_ptr<Helper> helper_;
scoped_refptr<ServiceWorkerRegistration> registration_;
scoped_refptr<ServiceWorkerVersion> version_;
storage::BlobStorageContext blob_context_;
network::TestURLLoaderClient client_;
bool was_main_resource_load_failed_called_ = false;
std::unique_ptr<ServiceWorkerNavigationLoader> loader_;
network::mojom::URLLoaderPtr loader_ptr_;
base::test::ScopedFeatureList feature_list_;
};
TEST_F(ServiceWorkerNavigationLoaderTest, Basic) {
// Perform the request
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kHandledRequest, result);
client_.RunUntilComplete();
EXPECT_EQ(net::OK, client_.completion_status().error_code);
const network::ResourceResponseHead& info = client_.response_head();
EXPECT_EQ(200, info.headers->response_code());
ExpectResponseInfo(info, *CreateResponseInfoFromServiceWorker());
}
TEST_F(ServiceWorkerNavigationLoaderTest, NoActiveWorker) {
// Clear |version_| to make GetServiceWorkerVersion() return null.
version_ = nullptr;
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kHandledRequest, result);
client_.RunUntilComplete();
EXPECT_EQ(net::ERR_FAILED, client_.completion_status().error_code);
}
// Test that the request body is passed to the fetch event.
TEST_F(ServiceWorkerNavigationLoaderTest, RequestBody) {
const std::string kData = "hi this is the request body";
// Create a request with a body.
auto request_body = base::MakeRefCounted<network::ResourceRequestBody>();
request_body->AppendBytes(kData.c_str(), kData.length());
std::unique_ptr<network::ResourceRequest> request = CreateRequest();
request->method = "POST";
request->request_body = request_body;
// This test doesn't use the response to the fetch event, so just have the
// service worker do simple network fallback.
helper_->RespondWithFallback();
LoaderResult result = StartRequest(std::move(request));
EXPECT_EQ(LoaderResult::kDidNotHandleRequest, result);
// Verify that the request body was passed to the fetch event.
std::string body;
helper_->ReadRequestBody(&body);
EXPECT_EQ(kData, body);
}
TEST_F(ServiceWorkerNavigationLoaderTest, BlobResponse) {
// Construct the blob to respond with.
const std::string kResponseBody = "Here is sample text for the blob.";
auto blob_data = std::make_unique<storage::BlobDataBuilder>("blob-id:myblob");
blob_data->AppendData(kResponseBody);
std::unique_ptr<storage::BlobDataHandle> blob_handle =
blob_context_.AddFinishedBlob(std::move(blob_data));
blink::mojom::BlobPtr blob_ptr;
blink::mojom::BlobRequest request = mojo::MakeRequest(&blob_ptr);
storage::BlobImpl::Create(std::move(blob_handle), std::move(request));
helper_->RespondWithBlob(std::move(blob_ptr));
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kHandledRequest, result);
client_.RunUntilComplete();
const network::ResourceResponseHead& info = client_.response_head();
EXPECT_EQ(200, info.headers->response_code());
ExpectResponseInfo(info, *CreateResponseInfoFromServiceWorker());
// Test the body.
std::string body;
EXPECT_TRUE(client_.response_body().is_valid());
EXPECT_TRUE(
mojo::BlockingCopyToString(client_.response_body_release(), &body));
EXPECT_EQ(kResponseBody, body);
EXPECT_EQ(net::OK, client_.completion_status().error_code);
}
// Tell the helper to respond with a non-existent Blob.
TEST_F(ServiceWorkerNavigationLoaderTest, BrokenBlobResponse) {
const std::string kBrokenUUID = "broken_uuid";
// Create the broken blob.
std::unique_ptr<storage::BlobDataHandle> blob_handle =
blob_context_.AddBrokenBlob(kBrokenUUID, "", "",
storage::BlobStatus::ERR_OUT_OF_MEMORY);
blink::mojom::BlobPtr blob_ptr;
blink::mojom::BlobRequest request = mojo::MakeRequest(&blob_ptr);
storage::BlobImpl::Create(std::move(blob_handle), std::move(request));
helper_->RespondWithBlob(std::move(blob_ptr));
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kHandledRequest, result);
// We should get a valid response once the headers arrive.
client_.RunUntilResponseReceived();
const network::ResourceResponseHead& info = client_.response_head();
EXPECT_EQ(200, info.headers->response_code());
ExpectResponseInfo(info, *CreateResponseInfoFromServiceWorker());
// However, since the blob is broken we should get an error while transferring
// the body.
client_.RunUntilComplete();
EXPECT_EQ(net::ERR_OUT_OF_MEMORY, client_.completion_status().error_code);
}
TEST_F(ServiceWorkerNavigationLoaderTest, StreamResponse) {
// Construct the Stream to respond with.
const char kResponseBody[] = "Here is sample text for the Stream.";
blink::mojom::ServiceWorkerStreamCallbackPtr stream_callback;
mojo::DataPipe data_pipe;
helper_->RespondWithStream(mojo::MakeRequest(&stream_callback),
std::move(data_pipe.consumer_handle));
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kHandledRequest, result);
client_.RunUntilResponseReceived();
const network::ResourceResponseHead& info = client_.response_head();
EXPECT_EQ(200, info.headers->response_code());
ExpectResponseInfo(info, *CreateResponseInfoFromServiceWorker());
EXPECT_TRUE(version_->HasWorkInBrowser());
// Write the body stream.
uint32_t written_bytes = sizeof(kResponseBody) - 1;
MojoResult mojo_result = data_pipe.producer_handle->WriteData(
kResponseBody, &written_bytes, MOJO_WRITE_DATA_FLAG_NONE);
ASSERT_EQ(MOJO_RESULT_OK, mojo_result);
EXPECT_EQ(sizeof(kResponseBody) - 1, written_bytes);
stream_callback->OnCompleted();
data_pipe.producer_handle.reset();
client_.RunUntilComplete();
EXPECT_EQ(net::OK, client_.completion_status().error_code);
// Test the body.
std::string response;
EXPECT_TRUE(client_.response_body().is_valid());
EXPECT_TRUE(
mojo::BlockingCopyToString(client_.response_body_release(), &response));
EXPECT_EQ(kResponseBody, response);
}
// Test when a stream response body is aborted.
TEST_F(ServiceWorkerNavigationLoaderTest, StreamResponse_Abort) {
// Construct the Stream to respond with.
const char kResponseBody[] = "Here is sample text for the Stream.";
blink::mojom::ServiceWorkerStreamCallbackPtr stream_callback;
mojo::DataPipe data_pipe;
helper_->RespondWithStream(mojo::MakeRequest(&stream_callback),
std::move(data_pipe.consumer_handle));
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kHandledRequest, result);
client_.RunUntilResponseReceived();
const network::ResourceResponseHead& info = client_.response_head();
EXPECT_EQ(200, info.headers->response_code());
ExpectResponseInfo(info, *CreateResponseInfoFromServiceWorker());
// Start writing the body stream, then abort before finishing.
uint32_t written_bytes = sizeof(kResponseBody) - 1;
MojoResult mojo_result = data_pipe.producer_handle->WriteData(
kResponseBody, &written_bytes, MOJO_WRITE_DATA_FLAG_NONE);
ASSERT_EQ(MOJO_RESULT_OK, mojo_result);
EXPECT_EQ(sizeof(kResponseBody) - 1, written_bytes);
stream_callback->OnAborted();
data_pipe.producer_handle.reset();
client_.RunUntilComplete();
EXPECT_EQ(net::ERR_ABORTED, client_.completion_status().error_code);
// Test the body.
std::string response;
EXPECT_TRUE(client_.response_body().is_valid());
EXPECT_TRUE(
mojo::BlockingCopyToString(client_.response_body_release(), &response));
EXPECT_EQ(kResponseBody, response);
}
// Test when the loader is cancelled while a stream response is being written.
TEST_F(ServiceWorkerNavigationLoaderTest, StreamResponseAndCancel) {
// Construct the Stream to respond with.
const char kResponseBody[] = "Here is sample text for the Stream.";
blink::mojom::ServiceWorkerStreamCallbackPtr stream_callback;
mojo::DataPipe data_pipe;
helper_->RespondWithStream(mojo::MakeRequest(&stream_callback),
std::move(data_pipe.consumer_handle));
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kHandledRequest, result);
client_.RunUntilResponseReceived();
const network::ResourceResponseHead& info = client_.response_head();
EXPECT_EQ(200, info.headers->response_code());
ExpectResponseInfo(info, *CreateResponseInfoFromServiceWorker());
// Start writing the body stream, then cancel the loader before finishing.
uint32_t written_bytes = sizeof(kResponseBody) - 1;
MojoResult mojo_result = data_pipe.producer_handle->WriteData(
kResponseBody, &written_bytes, MOJO_WRITE_DATA_FLAG_NONE);
ASSERT_EQ(MOJO_RESULT_OK, mojo_result);
EXPECT_EQ(sizeof(kResponseBody) - 1, written_bytes);
EXPECT_TRUE(data_pipe.producer_handle.is_valid());
EXPECT_FALSE(loader_->WasCanceled());
EXPECT_TRUE(version_->HasWorkInBrowser());
loader_->Cancel();
EXPECT_TRUE(loader_->WasCanceled());
EXPECT_FALSE(version_->HasWorkInBrowser());
// Although ServiceWorkerNavigationLoader resets its URLLoaderClient pointer
// in Cancel(), the URLLoaderClient still exists. In this test, it is
// |client_| which owns the data pipe, so it's still valid to write data to
// it.
mojo_result = data_pipe.producer_handle->WriteData(
kResponseBody, &written_bytes, MOJO_WRITE_DATA_FLAG_NONE);
// TODO(falken): This should probably be an error.
EXPECT_EQ(MOJO_RESULT_OK, mojo_result);
client_.RunUntilComplete();
EXPECT_FALSE(data_pipe.consumer_handle.is_valid());
EXPECT_EQ(net::ERR_ABORTED, client_.completion_status().error_code);
}
// Test when the service worker responds with network fallback.
// i.e., does not call respondWith().
TEST_F(ServiceWorkerNavigationLoaderTest, FallbackResponse) {
helper_->RespondWithFallback();
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kDidNotHandleRequest, result);
// The request should not be handled by the loader, but it shouldn't be a
// failure.
EXPECT_FALSE(was_main_resource_load_failed_called_);
}
// Test when the service worker rejects the FetchEvent.
TEST_F(ServiceWorkerNavigationLoaderTest, ErrorResponse) {
helper_->RespondWithError();
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kHandledRequest, result);
client_.RunUntilComplete();
EXPECT_EQ(net::ERR_FAILED, client_.completion_status().error_code);
}
// Test when dispatching the fetch event to the service worker failed.
TEST_F(ServiceWorkerNavigationLoaderTest, FailFetchDispatch) {
helper_->FailToDispatchFetchEvent();
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kDidNotHandleRequest, result);
EXPECT_TRUE(was_main_resource_load_failed_called_);
}
// Test when the respondWith() promise resolves before the waitUntil() promise
// resolves. The response should be received before the event finishes.
TEST_F(ServiceWorkerNavigationLoaderTest, EarlyResponse) {
helper_->RespondEarly();
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kHandledRequest, result);
client_.RunUntilComplete();
const network::ResourceResponseHead& info = client_.response_head();
EXPECT_EQ(200, info.headers->response_code());
ExpectResponseInfo(info, *CreateResponseInfoFromServiceWorker());
// Although the response was already received, the event remains outstanding
// until waitUntil() resolves.
EXPECT_TRUE(version_->HasWorkInBrowser());
helper_->FinishWaitUntil();
EXPECT_FALSE(version_->HasWorkInBrowser());
}
// Test asking the loader to fallback to network. In production code, this
// happens when there is no active service worker for the URL, or it must be
// skipped, etc.
TEST_F(ServiceWorkerNavigationLoaderTest, FallbackToNetwork) {
network::ResourceRequest request;
request.url = GURL("https://www.example.com/");
request.method = "GET";
request.fetch_request_mode = network::mojom::FetchRequestMode::kNavigate;
request.fetch_credentials_mode =
network::mojom::FetchCredentialsMode::kInclude;
request.fetch_redirect_mode = network::mojom::FetchRedirectMode::kManual;
SingleRequestURLLoaderFactory::RequestHandler handler;
auto loader = std::make_unique<ServiceWorkerNavigationLoader>(
base::BindOnce(&ReceiveRequestHandler, &handler), this, request,
base::WrapRefCounted<URLLoaderFactoryGetter>(
helper_->context()->loader_factory_getter()));
// Ask the loader to fallback to network. In production code,
// ServiceWorkerControlleeRequestHandler calls FallbackToNetwork() to do this.
loader->FallbackToNetwork();
base::RunLoop().RunUntilIdle();
EXPECT_FALSE(handler);
}
// Test responding to the fetch event with the navigation preload response.
TEST_F(ServiceWorkerNavigationLoaderTest, NavigationPreload) {
registration_->EnableNavigationPreload(true);
helper_->RespondWithNavigationPreloadResponse();
// Perform the request
LoaderResult result = StartRequest(CreateRequest());
ASSERT_EQ(LoaderResult::kHandledRequest, result);
client_.RunUntilComplete();
EXPECT_EQ(net::OK, client_.completion_status().error_code);
const network::ResourceResponseHead& info = client_.response_head();
EXPECT_EQ(200, info.headers->response_code());
std::unique_ptr<network::ResourceResponseHead> expected_info =
CreateResponseInfoFromServiceWorker();
expected_info->did_service_worker_navigation_preload = true;
ExpectResponseInfo(info, *expected_info);
std::string response;
EXPECT_TRUE(client_.response_body().is_valid());
EXPECT_TRUE(
mojo::BlockingCopyToString(client_.response_body_release(), &response));
EXPECT_EQ("this body came from the network", response);
}
// Test responding to the fetch event with a redirect response.
TEST_F(ServiceWorkerNavigationLoaderTest, Redirect) {
GURL new_url("https://example.com/redirected");
helper_->RespondWithRedirectResponse(new_url);
// Perform the request.
LoaderResult result = StartRequest(CreateRequest());
EXPECT_EQ(LoaderResult::kHandledRequest, result);
client_.RunUntilRedirectReceived();
const network::ResourceResponseHead& info = client_.response_head();
EXPECT_EQ(301, info.headers->response_code());
ExpectResponseInfo(info, *CreateResponseInfoFromServiceWorker());
const net::RedirectInfo& redirect_info = client_.redirect_info();
EXPECT_EQ(301, redirect_info.status_code);
EXPECT_EQ("GET", redirect_info.new_method);
EXPECT_EQ(new_url, redirect_info.new_url);
}
} // namespace service_worker_navigation_loader_unittest
} // namespace content
CVE: null | CWE: null | commit_link: null | severity: null | __index_level_0__: 19,933
file_name: 4,139 | vulnerable_line_numbers: null | dataset_type: train_val | commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b | unique_id: 4,139 | project: Chrome | target: 0 | repo_url: https://github.com/chromium/chromium | date: 2018-04-07 23:43:03+00:00 | code:
// Copyright 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_CHROME_BROWSER_INFOBARS_INFOBAR_CONTAINER_VIEW_H_
#define IOS_CHROME_BROWSER_INFOBARS_INFOBAR_CONTAINER_VIEW_H_
#import <UIKit/UIKit.h>
class InfoBarIOS;
@interface InfoBarContainerView : UIView {
}
// Add a new infobar to the container view at position |position|.
- (void)addInfoBar:(InfoBarIOS*)infoBarIOS position:(NSInteger)position;
// Height of the frontmost infobar that is not hidden.
- (CGFloat)topmostVisibleInfoBarHeight;
@end
#endif // IOS_CHROME_BROWSER_INFOBARS_INFOBAR_CONTAINER_VIEW_H_
CVE: null | CWE: null | commit_link: null | severity: null | __index_level_0__: 1,002
file_name: 15,720 | vulnerable_line_numbers: null | dataset_type: train_val | commit_hash: 796a0e014bc3985709c0a35538d606ef1da31e1b | unique_id: 15,720 | project: Chrome | target: 0 | repo_url: https://github.com/chromium/chromium | date: 2018-04-07 23:43:03+00:00 | code:
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_COMPONENT_UPDATER_COMPONENT_UPDATER_SERVICE_H_
#define COMPONENTS_COMPONENT_UPDATER_COMPONENT_UPDATER_SERVICE_H_
#include <stdint.h>
#include <memory>
#include <string>
#include <vector>
#include "base/callback_forward.h"
#include "base/gtest_prod_util.h"
#include "base/memory/ref_counted.h"
#include "base/version.h"
#include "build/build_config.h"
#include "components/update_client/update_client.h"
#include "url/gurl.h"
class ComponentsUI;
class PluginObserver;
namespace policy {
class ComponentUpdaterPolicyTest;
}
namespace update_client {
class ComponentInstaller;
class Configurator;
struct CrxComponent;
struct CrxUpdateItem;
}
namespace component_updater {
// Called when a non-blocking call in this module completes.
using Callback = update_client::Callback;
class OnDemandUpdater;
using Configurator = update_client::Configurator;
using CrxComponent = update_client::CrxComponent;
using CrxUpdateItem = update_client::CrxUpdateItem;
struct ComponentInfo {
ComponentInfo(const std::string& id,
const std::string& fingerprint,
const base::string16& name,
const base::Version& version);
ComponentInfo(const ComponentInfo& other);
ComponentInfo(ComponentInfo&& other);
~ComponentInfo();
const std::string id;
const std::string fingerprint;
const base::string16 name;
const base::Version version;
};
// The component update service is in charge of installing or upgrading
// select parts of chrome. Each part is called a component and managed by
// instances of CrxComponent registered using RegisterComponent(). On the
// server, each component is packaged as a CRX which is the same format used
// to package extensions. To the update service each component is identified
// by its public key hash (CrxComponent::pk_hash). If there is an update
// available and its version is bigger than (CrxComponent::version), it will
// be downloaded, verified and unpacked. Then component-specific installer
// ComponentInstaller::Install (of CrxComponent::installer) will be called.
//
// During the normal operation of the component updater some specific
// notifications are fired, like COMPONENT_UPDATER_STARTED and
// COMPONENT_UPDATE_FOUND. See notification_type.h for more details.
//
// All methods are safe to call ONLY from the browser's main thread.
class ComponentUpdateService {
public:
using Observer = update_client::UpdateClient::Observer;
// Adds an observer for this class. An observer should not be added more
// than once. The caller retains the ownership of the observer object.
virtual void AddObserver(Observer* observer) = 0;
// Removes an observer. It is safe for an observer to be removed while
// the observers are being notified.
virtual void RemoveObserver(Observer* observer) = 0;
// Add component to be checked for updates.
virtual bool RegisterComponent(const CrxComponent& component) = 0;
// Unregisters the component with the given ID. This means that the component
// is not going to be included in future update checks. If a download or
// update operation for the component is currently in progress, it will
// silently finish without triggering the next step.
// Note that the installer for the component is responsible for removing any
// existing versions of the component from disk. Returns true if the
// uninstall has completed successfully and the component files have been
// removed, or if the uninstalled has been deferred because the component
// is being updated. Returns false if the component id is not known or the
/// uninstall encountered an error.
virtual bool UnregisterComponent(const std::string& id) = 0;
// Returns a list of registered components.
virtual std::vector<std::string> GetComponentIDs() const = 0;
// Returns a ComponentInfo describing a registered component that implements a
// handler for the specified |mime_type|. If multiple such components exist,
// returns information for the one that was most recently registered. If no
// such components exist, returns nullptr.
virtual std::unique_ptr<ComponentInfo> GetComponentForMimeType(
const std::string& mime_type) const = 0;
// Returns a list of ComponentInfo objects describing all registered
// components.
virtual std::vector<ComponentInfo> GetComponents() const = 0;
// Returns an interface for on-demand updates. On-demand updates are
// proactively triggered outside the normal component update service schedule.
virtual OnDemandUpdater& GetOnDemandUpdater() = 0;
// This method is used to trigger an on-demand update for component |id|.
// This can be used when loading a resource that depends on this component.
//
// |callback| is called on the main thread once the on-demand update is
// complete, regardless of success. |callback| may be called immediately
// within the method body.
//
// Additionally, this function implements an embedder-defined cooldown
// interval between on demand update attempts. This behavior is intended
// to be defensive against programming bugs, usually triggered by web fetches,
// where the on-demand functionality is invoked too often. If this function
// is called while still on cooldown, |callback| will be called immediately.
virtual void MaybeThrottle(const std::string& id,
base::OnceClosure callback) = 0;
virtual ~ComponentUpdateService() {}
private:
// Returns details about registered component in the |item| parameter. The
// function returns true in case of success and false in case of errors.
virtual bool GetComponentDetails(const std::string& id,
CrxUpdateItem* item) const = 0;
friend class ::ComponentsUI;
FRIEND_TEST_ALL_PREFIXES(ComponentInstallerTest, RegisterComponent);
};
using ServiceObserver = ComponentUpdateService::Observer;
class OnDemandUpdater {
public:
virtual ~OnDemandUpdater() {}
private:
friend class OnDemandTester;
friend class policy::ComponentUpdaterPolicyTest;
friend class SupervisedUserWhitelistInstaller;
friend class ::ComponentsUI;
friend class ::PluginObserver;
friend class SwReporterOnDemandFetcher;
#if defined(OS_CHROMEOS)
friend class CrOSComponentManager;
#endif // defined(OS_CHROMEOS)
friend class VrAssetsComponentInstallerPolicy;
// Triggers an update check for a component. |id| is a value
// returned by GetCrxComponentID(). If an update for this component is already
// in progress, the function returns |kInProgress|. If an update is available,
// the update will be applied. The caller can subscribe to component update
// service notifications and provide an optional callback to get the result
// of the call. The function does not implement any cooldown interval.
virtual void OnDemandUpdate(const std::string& id, Callback callback) = 0;
};
// Creates the component updater.
std::unique_ptr<ComponentUpdateService> ComponentUpdateServiceFactory(
scoped_refptr<Configurator> config);
} // namespace component_updater
#endif // COMPONENTS_COMPONENT_UPDATER_COMPONENT_UPDATER_SERVICE_H_
CVE: null | CWE: null | commit_link: null | severity: null | __index_level_0__: 12,583
End of preview.
Details
This is a C++ vulnerability detection dataset that follows realistic settings. For details, please see our study, Revisiting the Performance of Deep Learning-Based Vulnerability Detection on Realistic Datasets (Chakraborty et al., 2024).
The column names are self-describing. The two most important columns are:
- target (int): whether the code segment is vulnerable or not.
- code (str): the code segment itself (see the loading sketch below).
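Below is a minimal sketch of loading the data with the Hugging Face `datasets` library. The repository id and the split name are assumptions (neither is stated above), so substitute the values from this dataset's page, and confirm the label convention for `target` against the paper.

```python
# Minimal sketch: load the dataset and inspect one row.
# Assumptions: the repository id "<user>/<dataset-name>" and the split
# name "train" are placeholders, and reading target == 1 as
# "vulnerable" is the usual convention -- confirm both on this page.
from datasets import load_dataset

ds = load_dataset("<user>/<dataset-name>", split="train")

row = ds[0]
print(row["target"])       # int label: vulnerable or not
print(row["project"])      # e.g. "Chrome" in the preview rows above
print(row["code"][:200])   # first 200 characters of the code segment

# Rough label balance -- relevant because the card advertises a
# realistic (typically skewed) setting.
num_vulnerable = sum(ds["target"])
print(f"{num_vulnerable}/{len(ds)} rows labeled vulnerable")
```

In the preview rows above, CVE, CWE, commit_link, and severity are null wherever target is 0, so expect those columns to carry information mainly for the vulnerable rows.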
Citation Information
@article{Chakraborty2024,
title = {Revisiting the Performance of Deep Learning-Based Vulnerability Detection on Realistic Datasets},
ISSN = {2326-3881},
url = {http://dx.doi.org/10.1109/TSE.2024.3423712},
DOI = {10.1109/tse.2024.3423712},
journal = {IEEE Transactions on Software Engineering},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
author = {Chakraborty, Partha and Arumugam, Krishna Kanth and Alfadel, Mahmoud and Nagappan, Meiyappan and McIntosh, Shane},
year = {2024},
pages = {1--15}
}