text | idx
---|---
stringlengths 213–7.14k | int64 16–12.5k
--- initial
+++ final
@@ -1,17 +1,17 @@
static int ulite_transmit(struct uart_port *port, int stat) {
struct circ_buf *xmit = &port->state->xmit;
if (stat & ULITE_STATUS_TXFULL) return 0;
if (port->x_char) {
- iowrite32be(port->x_char, port->membase + ULITE_TX);
+ uart_out32(port->x_char, ULITE_TX, port);
port->x_char = 0;
port->icount.tx++;
return 1;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) return 0;
- iowrite32be(xmit->buf[xmit->tail], port->membase + ULITE_TX);
+ uart_out32(xmit->buf[xmit->tail], ULITE_TX, port);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
/* wake up */
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port);
return 1;
}<sep>@@
expression e1,e2,port;
@@
- iowrite32be(e1, port->membase + e2);
+ uart_out32(e1, e2, port);
<|end_of_text|> | 7,088 |
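Each record in this dump pairs a kernel diff (initial → final) with the Coccinelle semantic patch, after the <sep> marker, that performs the same rewrite mechanically. In this first record the hard-coded big-endian MMIO write is routed through an accessor so the driver no longer bakes in register endianness. A minimal sketch of what such an accessor could look like — the struct and field names here are illustrative assumptions, not taken from the record:

/* Hypothetical indirection layer; names are assumptions. */
struct uartlite_reg_ops {
	u32 (*in)(void __iomem *addr);
	void (*out)(u32 val, void __iomem *addr);
};

static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
{
	const struct uartlite_reg_ops *ops = port->private_data;

	ops->out(val, port->membase + offset); /* previously iowrite32be() */
}

A rule like the one above is typically applied with something along the lines of `spatch --sp-file rule.cocci --in-place drivers/tty/serial/uartlite.c`.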
--- initial
+++ final
@@ -1,33 +1,32 @@
static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) {
struct net_device_stats *stats = &dev->stats;
struct c_can_priv *priv = netdev_priv(dev);
struct can_frame *frame;
struct sk_buff *skb;
u32 arb, data;
skb = alloc_can_skb(dev, &frame);
if (!skb) {
stats->rx_dropped++;
return -ENOMEM;
}
frame->can_dlc = get_can_dlc(ctrl & 0x0F);
- arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
- arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
+ arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));
if (arb & IF_ARB_MSGXTD)
frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
frame->can_id = (arb >> 18) & CAN_SFF_MASK;
if (arb & IF_ARB_TRANSMIT) {
frame->can_id |= CAN_RTR_FLAG;
} else {
int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
data = priv->read_reg(priv, dreg);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
}
}
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
netif_receive_skb(skb);
return 0;
}<sep>@@
expression e,e1,e2,m1,m2;
@@
- e = e1->read_reg(e1, C_CAN_IFACE(m1, e2));
- e |= e1->read_reg(e1, C_CAN_IFACE(m2, e2)) << 16;
+ e = e1->read_reg32(e1, C_CAN_IFACE(m1, e2));
<|end_of_text|> | 7,090 |
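The SmPL rule here collapses the lo/hi 16-bit read pair into a single read_reg32 call; because the `e |= ... << 16` line must immediately follow the first read, only complete pairs match and a lone 16-bit read is left untouched. A plausible fallback implementation on 16-bit-register hardware, composing the value exactly as the replaced open-coded sequence did — the function name and the index+1 convention are assumptions:

/* Sketch: 32-bit read built from two 16-bit register reads. */
static u32 c_can_read_reg32_16bit(const struct c_can_priv *priv,
				  enum reg index)
{
	u32 val = priv->read_reg(priv, index);

	val |= ((u32)priv->read_reg(priv, index + 1)) << 16;
	return val;
}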
--- initial
+++ final
@@ -1,11 +1,9 @@
static void c_can_setup_receive_object(struct net_device *dev, int iface, u32 obj, u32 mask, u32 id, u32 mcont) {
struct c_can_priv *priv = netdev_priv(dev);
mask |= BIT(29);
- priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
- priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
id |= IF_ARB_MSGVAL;
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
}<sep>@@
expression e1,e2,e3,m1,m2;
@@
- e1->write_reg(e1, C_CAN_IFACE(m1, e2), e3);
- e1->write_reg(e1, C_CAN_IFACE(m2, e2), e3 >> 16);
+ e1->write_reg32(e1, C_CAN_IFACE(m1, e2), e3);
<|end_of_text|> | 7,091 |
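The write side is symmetric: the rule keys on the adjacent `e3` / `e3 >> 16` pair with matching iface expressions, so it cannot half-match a single 16-bit write. A sketch of the corresponding 32-bit write helper, under the same naming assumptions as above (the high-half-first ordering is likewise assumed):

/* Sketch: 32-bit write split into two 16-bit register writes. */
static void c_can_write_reg32_16bit(const struct c_can_priv *priv,
				    enum reg index, u32 val)
{
	priv->write_reg(priv, index + 1, val >> 16);
	priv->write_reg(priv, index, val);
}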
--- initial
+++ final
@@ -1,29 +1,28 @@
static void c_can_setup_tx_object(struct net_device *dev, int iface, struct can_frame *frame, int idx) {
struct c_can_priv *priv = netdev_priv(dev);
u16 ctrl = IF_MCONT_TX | frame->can_dlc;
bool rtr = frame->can_id & CAN_RTR_FLAG;
u32 arb = IF_ARB_MSGVAL;
int i;
if (frame->can_id & CAN_EFF_FLAG) {
arb |= frame->can_id & CAN_EFF_MASK;
arb |= IF_ARB_MSGXTD;
} else {
arb |= (frame->can_id & CAN_SFF_MASK) << 18;
}
if (!rtr) arb |= IF_ARB_TRANSMIT;
/*
* If we change the DIR bit, we need to invalidate the buffer
* first, i.e. clear the MSGVAL flag in the arbiter.
*/
if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
c_can_inval_msg_object(dev, iface, obj);
change_bit(idx, &priv->tx_dir);
}
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
for (i = 0; i < frame->can_dlc; i += 2) {
priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2, frame->data[i] | (frame->data[i + 1] << 8));
}
}<sep>@@
expression e1,e2,e3,m1,m2;
@@
- e1->write_reg(e1, C_CAN_IFACE(m1, e2), e3);
- e1->write_reg(e1, C_CAN_IFACE(m2, e2), e3 >> 16);
+ e1->write_reg32(e1, C_CAN_IFACE(m1, e2), e3);
<|end_of_text|> | 7,092 |
--- initial
+++ final
@@ -1,35 +1,33 @@
void consistent_free(size_t size, void *vaddr) {
struct page *page;
if (in_interrupt()) BUG();
size = PAGE_ALIGN(size);
#ifndef CONFIG_MMU
/* Clear SHADOW_MASK bit in address, and free as per usual */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
#endif
page = virt_to_page(vaddr);
do {
- ClearPageReserved(page);
- __free_page(page);
+ __free_reserved_page(page);
page++;
} while (size -= PAGE_SIZE);
#else
do {
pte_t *ptep;
unsigned long pfn;
ptep = pte_offset_kernel(pmd_offset(pgd_offset_k((unsigned int)vaddr), (unsigned int)vaddr), (unsigned int)vaddr);
if (!pte_none(*ptep) && pte_present(*ptep)) {
pfn = pte_pfn(*ptep);
pte_clear(&init_mm, (unsigned int)vaddr, ptep);
if (pfn_valid(pfn)) {
page = pfn_to_page(pfn);
- ClearPageReserved(page);
- __free_page(page);
+ __free_reserved_page(page);
}
}
vaddr += PAGE_SIZE;
} while (size -= PAGE_SIZE);
/* flush tlb */
flush_tlb_all();
#endif
}<sep>@@
expression page;
@@
- ClearPageReserved(page);
- __free_page(page);
+ __free_reserved_page(page);
<|end_of_text|> | 7,093 |
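The removed ClearPageReserved/__free_page pair is exactly what the mm helper folds together; from recollection (so treat as approximate) the include/linux/mm.h definition reads:

static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}

Note the helper also reinitializes the page refcount; the call sites in this and the next few records omitted init_page_count while the acornfb record further below had it explicitly, and either way the result is the same for a reserved page being handed back to the allocator.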
--- initial
+++ final
@@ -1,39 +1,38 @@
void __dma_free_coherent(size_t size, void *vaddr) {
struct ppc_vm_region *c;
unsigned long flags, addr;
size = PAGE_ALIGN(size);
spin_lock_irqsave(&consistent_lock, flags);
c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
if (!c) goto no_area;
if ((c->vm_end - c->vm_start) != size) {
printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", __func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
}
addr = c->vm_start;
do {
pte_t *ptep;
unsigned long pfn;
ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr), addr);
if (!pte_none(*ptep) && pte_present(*ptep)) {
pfn = pte_pfn(*ptep);
pte_clear(&init_mm, addr, ptep);
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
- ClearPageReserved(page);
- __free_page(page);
+ __free_reserved_page(page);
}
}
addr += PAGE_SIZE;
} while (size -= PAGE_SIZE);
flush_tlb_kernel_range(c->vm_start, c->vm_end);
list_del(&c->vm_list);
spin_unlock_irqrestore(&consistent_lock, flags);
kfree(c);
return;
no_area:
spin_unlock_irqrestore(&consistent_lock, flags);
printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", __func__, vaddr);
dump_stack();
}<sep>@@
expression page;
@@
- ClearPageReserved(page);
- __free_page(page);
+ __free_reserved_page(page);
<|end_of_text|> | 7,094 |
--- initial
+++ final
@@ -1,42 +1,41 @@
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) {
struct metag_vm_region *c;
unsigned long flags, addr;
pte_t *ptep;
size = PAGE_ALIGN(size);
spin_lock_irqsave(&consistent_lock, flags);
c = metag_vm_region_find(&consistent_head, (unsigned long)vaddr);
if (!c) goto no_area;
c->vm_active = 0;
if ((c->vm_end - c->vm_start) != size) {
pr_err("%s: freeing wrong coherent size (%ld != %d)\n", __func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
}
ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
addr = c->vm_start;
do {
pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
unsigned long pfn;
ptep++;
addr += PAGE_SIZE;
if (!pte_none(pte) && pte_present(pte)) {
pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
- ClearPageReserved(page);
- __free_page(page);
+ __free_reserved_page(page);
continue;
}
}
pr_crit("%s: bad page in kernel page table\n", __func__);
} while (size -= PAGE_SIZE);
flush_tlb_kernel_range(c->vm_start, c->vm_end);
list_del(&c->vm_list);
spin_unlock_irqrestore(&consistent_lock, flags);
kfree(c);
return;
no_area:
spin_unlock_irqrestore(&consistent_lock, flags);
pr_err("%s: trying to free invalid coherent area: %p\n", __func__, vaddr);
dump_stack();
}<sep>@@
expression page;
@@
- ClearPageReserved(page);
- __free_page(page);
+ __free_reserved_page(page);
<|end_of_text|> | 7,095 |
--- initial
+++ final
@@ -1,23 +1,21 @@
static inline void free_unused_pages(unsigned int virtual_start, unsigned int virtual_end) {
int mb_freed = 0;
/*
* Align addresses
*/
virtual_start = PAGE_ALIGN(virtual_start);
virtual_end = PAGE_ALIGN(virtual_end);
while (virtual_start < virtual_end) {
struct page *page;
/*
* Clear page reserved bit,
* set count to 1, and free
* the page.
*/
page = virt_to_page(virtual_start);
- ClearPageReserved(page);
- init_page_count(page);
- free_page(virtual_start);
+ __free_reserved_page(page);
virtual_start += PAGE_SIZE;
mb_freed += PAGE_SIZE / 1024;
}
printk("acornfb: freed %dK memory\n", mb_freed);
}<sep>@@
expression page,e;
@@
page = virt_to_page(e);
- ClearPageReserved(page);
- init_page_count(page);
- free_page(e);
+ __free_reserved_page(page);
<|end_of_text|> | 7,096 |
--- initial
+++ final
@@ -1,9 +1,9 @@
static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) {
dev_dbg(dev, "%s(): cmd=%08x\n", __func__, enabled);
if (enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write_ier(AT91_RTC_ALARM);
} else
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write_idr(AT91_RTC_ALARM);
return 0;
}<sep>@@
expression e;
@@
- at91_rtc_write(AT91_RTC_IER, e);
+ at91_rtc_write_ier(e);
@@
expression e;
@@
- at91_rtc_write(AT91_RTC_IDR, e);
+ at91_rtc_write_idr(e);
<|end_of_text|> | 7,097 |
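This and the following at91_rtc records reflect the same change: on some AT91 revisions the IMR register cannot be read back reliably, so interrupt enable/disable is funneled through wrappers that maintain a software shadow of the mask, and at91_rtc_read_imr() returns that shadow. A sketch of the pattern under those assumptions (locking and the still-readable-IMR fallback omitted):

static u32 at91_rtc_shadow_imr;	/* assumed shadow of enabled sources */

static void at91_rtc_write_ier(u32 mask)
{
	at91_rtc_shadow_imr |= mask;
	at91_rtc_write(AT91_RTC_IER, mask);
}

static void at91_rtc_write_idr(u32 mask)
{
	at91_rtc_write(AT91_RTC_IDR, mask);
	at91_rtc_shadow_imr &= ~mask;
}

static u32 at91_rtc_read_imr(void)
{
	return at91_rtc_shadow_imr;
}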
--- initial
+++ final
@@ -1,17 +1,17 @@
static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) {
struct platform_device *pdev = dev_id;
struct rtc_device *rtc = platform_get_drvdata(pdev);
unsigned int rtsr;
unsigned long events = 0;
- rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
+ rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM) events |= (RTC_AF | RTC_IRQF);
if (rtsr & AT91_RTC_SECEV) events |= (RTC_UF | RTC_IRQF);
if (rtsr & AT91_RTC_ACKUPD) complete(&at91_rtc_updated);
at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
rtc_update_irq(rtc, 1, events);
dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__, events >> 8, events & 0x000000FF);
return IRQ_HANDLED;
}
return IRQ_NONE; /* not handled */
}<sep>@@
@@
- at91_rtc_read(AT91_RTC_IMR)
+ at91_rtc_read_imr()
<|end_of_text|> | 7,098 |
--- initial
+++ final
@@ -1,48 +1,48 @@
static int __init at91_rtc_probe(struct platform_device *pdev) {
struct rtc_device *rtc;
struct resource *regs;
int ret = 0;
at91_rtc_config = at91_rtc_get_config(pdev);
if (!at91_rtc_config) return -ENODEV;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_err(&pdev->dev, "no mmio resource defined\n");
return -ENXIO;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no irq resource defined\n");
return -ENXIO;
}
at91_rtc_regs = ioremap(regs->start, resource_size(regs));
if (!at91_rtc_regs) {
dev_err(&pdev->dev, "failed to map registers, aborting.\n");
return -ENOMEM;
}
at91_rtc_write(AT91_RTC_CR, 0);
at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
/* Disable all interrupts */
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV);
+ at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV);
ret = request_irq(irq, at91_rtc_interrupt, IRQF_SHARED, "at91_rtc", pdev);
if (ret) {
dev_err(&pdev->dev, "IRQ %d already in use.\n", irq);
goto err_unmap;
}
/* cpu init code should really have flagged this device as
* being wake-capable; if it didn't, do that here.
*/
if (!device_can_wakeup(&pdev->dev)) device_init_wakeup(&pdev->dev, 1);
rtc = rtc_device_register(pdev->name, &pdev->dev, &at91_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
ret = PTR_ERR(rtc);
goto err_free_irq;
}
platform_set_drvdata(pdev, rtc);
dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
return 0;
err_free_irq:
free_irq(irq, pdev);
err_unmap:
iounmap(at91_rtc_regs);
return ret;
}<sep>@@
expression e;
@@
- at91_rtc_write(AT91_RTC_IDR, e);
+ at91_rtc_write_idr(e);
<|end_of_text|> | 7,099 |
--- initial
+++ final
@@ -1,6 +1,6 @@
static int at91_rtc_proc(struct device *dev, struct seq_file *seq) {
- unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
+ unsigned long imr = at91_rtc_read_imr();
seq_printf(seq, "update_IRQ\t: %s\n", (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
seq_printf(seq, "periodic_IRQ\t: %s\n", (imr & AT91_RTC_SECEV) ? "yes" : "no");
return 0;
}<sep>@@
@@
- at91_rtc_read(AT91_RTC_IMR)
+ at91_rtc_read_imr()
<|end_of_text|> | 7,100 |
--- initial
+++ final
@@ -1,9 +1,9 @@
static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) {
struct rtc_time *tm = &alrm->time;
at91_rtc_decodetime(AT91_RTC_TIMALR, AT91_RTC_CALALR, tm);
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = at91_alarm_year - 1900;
- alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) ? 1 : 0;
+ alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM) ? 1 : 0;
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
return 0;
}<sep>@@
@@
- at91_rtc_read(AT91_RTC_IMR)
+ at91_rtc_read_imr()
<|end_of_text|> | 7,101 |
--- initial
+++ final
@@ -1,10 +1,10 @@
static int __exit at91_rtc_remove(struct platform_device *pdev) {
struct rtc_device *rtc = platform_get_drvdata(pdev);
/* Disable all interrupts */
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV);
+ at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM | AT91_RTC_SECEV | AT91_RTC_TIMEV | AT91_RTC_CALEV);
free_irq(irq, pdev);
rtc_device_unregister(rtc);
iounmap(at91_rtc_regs);
platform_set_drvdata(pdev, NULL);
return 0;
}<sep>@@
expression e;
@@
- at91_rtc_write(AT91_RTC_IDR, e);
+ at91_rtc_write_idr(e);
<|end_of_text|> | 7,102 |
--- initial
+++ final
@@ -1,9 +1,9 @@
static int at91_rtc_resume(struct device *dev) {
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
disable_irq_wake(irq);
else
- at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
+ at91_rtc_write_ier(at91_rtc_imr);
}
return 0;
}<sep>@@
expression e;
@@
- at91_rtc_write(AT91_RTC_IER, e);
+ at91_rtc_write_ier(e);
<|end_of_text|> | 7,103 |
--- initial
+++ final
@@ -1,18 +1,18 @@
static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) {
struct rtc_time tm;
at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);
at91_alarm_year = tm.tm_year;
tm.tm_hour = alrm->time.tm_hour;
tm.tm_min = alrm->time.tm_min;
tm.tm_sec = alrm->time.tm_sec;
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write_idr(AT91_RTC_ALARM);
at91_rtc_write(AT91_RTC_TIMALR, bin2bcd(tm.tm_sec) << 0 | bin2bcd(tm.tm_min) << 8 | bin2bcd(tm.tm_hour) << 16 | AT91_RTC_HOUREN | AT91_RTC_MINEN | AT91_RTC_SECEN);
at91_rtc_write(AT91_RTC_CALALR, bin2bcd(tm.tm_mon + 1) << 16 /* tm_mon starts at zero */
| bin2bcd(tm.tm_mday) << 24 | AT91_RTC_DATEEN | AT91_RTC_MTHEN);
if (alrm->enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write_ier(AT91_RTC_ALARM);
}
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
return 0;
}<sep>@@
expression e;
@@
- at91_rtc_write(AT91_RTC_IER, e);
+ at91_rtc_write_ier(e);
@@
expression e;
@@
- at91_rtc_write(AT91_RTC_IDR, e);
+ at91_rtc_write_idr(e);
<|end_of_text|> | 7,104 |
--- initial
+++ final
@@ -1,20 +1,20 @@
static int at91_rtc_settime(struct device *dev, struct rtc_time *tm) {
unsigned long cr;
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
/* Stop Time/Calendar from counting */
cr = at91_rtc_read(AT91_RTC_CR);
at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
- at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
+ at91_rtc_write_ier(AT91_RTC_ACKUPD);
wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+ at91_rtc_write_idr(AT91_RTC_ACKUPD);
at91_rtc_write(AT91_RTC_TIMR, bin2bcd(tm->tm_sec) << 0 | bin2bcd(tm->tm_min) << 8 | bin2bcd(tm->tm_hour) << 16);
at91_rtc_write(AT91_RTC_CALR, bin2bcd((tm->tm_year + 1900) / 100) /* century */
| bin2bcd(tm->tm_year % 100) << 8 /* year */
| bin2bcd(tm->tm_mon + 1) << 16 /* tm_mon starts at zero */
| bin2bcd(tm->tm_wday + 1) << 21 /* day of the week [0-6], Sunday=0 */
| bin2bcd(tm->tm_mday) << 24);
/* Restart Time/Calendar */
cr = at91_rtc_read(AT91_RTC_CR);
at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
return 0;
}<sep>@@
expression e;
@@
- at91_rtc_write(AT91_RTC_IER, e);
+ at91_rtc_write_ier(e);
@@
expression e;
@@
- at91_rtc_write(AT91_RTC_IDR, e);
+ at91_rtc_write_idr(e);
<|end_of_text|> | 7,105 |
--- initial
+++ final
@@ -1,13 +1,13 @@
static int at91_rtc_suspend(struct device *dev) {
/* this IRQ is shared with DBGU and other hardware which isn't
* necessarily doing PM like we are...
*/
- at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) & (AT91_RTC_ALARM | AT91_RTC_SECEV);
+ at91_rtc_imr = at91_rtc_read_imr() & (AT91_RTC_ALARM | AT91_RTC_SECEV);
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
enable_irq_wake(irq);
else
- at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
+ at91_rtc_write_idr(at91_rtc_imr);
}
return 0;
}<sep>@@
expression e;
@@
- at91_rtc_write(AT91_RTC_IDR, e);
+ at91_rtc_write_idr(e);
@@
@@
- at91_rtc_read(AT91_RTC_IMR)
+ at91_rtc_read_imr()
<|end_of_text|> | 7,106 |
--- initial
+++ final
@@ -1,16 +1,16 @@
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb) {
int count;
/*
* If sg table allocation fails, requeue request later.
*/
if (unlikely(sg_alloc_table_chained(&sdb->table, blk_rq_nr_phys_segments(req), sdb->table.sgl))) return BLKPREP_DEFER;
/*
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
*/
count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
BUG_ON(count > sdb->table.nents);
sdb->table.nents = count;
- sdb->length = blk_rq_bytes(req);
+ sdb->length = blk_rq_payload_bytes(req);
return BLKPREP_OK;
}<sep>@@
expression e,rq;
@@
- e = blk_rq_bytes(rq);
+ e = blk_rq_payload_bytes(rq);
<|end_of_text|> | 7,167 |
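blk_rq_bytes() reports the logical length of a request, which for requests carrying a driver-built special payload (e.g. discard or Write Same emulation) is not the number of bytes actually mapped into the scatterlist; blk_rq_payload_bytes() reports the mapped payload, which is what this and the following records switch to. From recollection the block-layer helper looks roughly like:

static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}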
--- initial
+++ final
@@ -1,20 +1,20 @@
static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) {
struct scsi_cmnd *cmd = req->special;
/*
* BLOCK_PC requests may transfer data, in which case they must have
* a bio attached to them. Or they might contain a SCSI command
* that does not transfer data, in which case they may optionally
* submit a request without an attached bio.
*/
if (req->bio) {
int ret = scsi_init_io(cmd);
if (unlikely(ret)) return ret;
} else {
BUG_ON(blk_rq_bytes(req));
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
}
cmd->cmd_len = req->cmd_len;
- cmd->transfersize = blk_rq_bytes(req);
+ cmd->transfersize = blk_rq_payload_bytes(req);
cmd->allowed = req->retries;
return BLKPREP_OK;
}<sep>@@
expression e,rq;
@@
- e = blk_rq_bytes(rq);
+ e = blk_rq_payload_bytes(rq);
<|end_of_text|> | 7,168 |
--- initial
+++ final
@@ -1,24 +1,24 @@
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, struct request *rq, struct nvme_command *c) {
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
int count, ret;
req->num_sge = 1;
refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
- if (!blk_rq_bytes(rq)) return nvme_rdma_set_sg_null(c);
+ if (!blk_rq_payload_bytes(rq)) return nvme_rdma_set_sg_null(c);
req->sg_table.sgl = req->first_sgl;
ret = sg_alloc_table_chained(&req->sg_table, blk_rq_nr_phys_segments(rq), req->sg_table.sgl);
if (ret) return -ENOMEM;
req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents, rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (unlikely(count <= 0)) {
sg_free_table_chained(&req->sg_table, true);
return -EIO;
}
if (count == 1) {
if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && blk_rq_payload_bytes(rq) <= nvme_rdma_inline_data_size(queue)) return nvme_rdma_map_sg_inline(queue, req, c);
if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) return nvme_rdma_map_sg_single(queue, req, c);
}
return nvme_rdma_map_sg_fr(queue, req, c, count);
}<sep>@@
expression rq;
statement S1,S2;
@@
- if (!blk_rq_bytes(rq))
+ if (!blk_rq_payload_bytes(rq))
S1
<|end_of_text|> | 7,169 |
--- initial
+++ final
@@ -1,13 +1,13 @@
static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, struct request *rq) {
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
- if (!blk_rq_bytes(rq)) return;
+ if (!blk_rq_payload_bytes(rq)) return;
if (req->mr) {
ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
req->mr = NULL;
}
ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
nvme_cleanup_cmd(rq);
sg_free_table_chained(&req->sg_table, true);
}<sep>@@
expression rq;
statement S1,S2;
@@
- if (!blk_rq_bytes(rq))
+ if (!blk_rq_payload_bytes(rq))
S1
<|end_of_text|> | 7,170 |
--- initial
+++ final
@@ -1,29 +1,29 @@
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) {
struct nvme_ns *ns = hctx->queue->queuedata;
struct nvme_loop_queue *queue = hctx->driver_data;
struct request *req = bd->rq;
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret;
ret = nvme_loop_is_ready(queue, req);
if (unlikely(ret)) return ret;
ret = nvme_setup_cmd(ns, req, &iod->cmd);
if (ret) return ret;
iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
iod->req.port = nvmet_loop_port;
if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq, &nvme_loop_ops)) {
nvme_cleanup_cmd(req);
blk_mq_start_request(req);
nvme_loop_queue_response(&iod->req);
return BLK_STS_OK;
}
- if (blk_rq_bytes(req)) {
+ if (blk_rq_payload_bytes(req)) {
iod->sg_table.sgl = iod->first_sgl;
if (sg_alloc_table_chained(&iod->sg_table, blk_rq_nr_phys_segments(req), iod->sg_table.sgl)) return BLK_STS_RESOURCE;
iod->req.sg = iod->sg_table.sgl;
iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
- iod->req.transfer_len = blk_rq_bytes(req);
+ iod->req.transfer_len = blk_rq_payload_bytes(req);
}
blk_mq_start_request(req);
schedule_work(&iod->work);
return BLK_STS_OK;
}<sep>@@
expression rq;
statement S1,S2;
@@
- if (blk_rq_bytes(rq))
+ if (blk_rq_payload_bytes(rq))
S1
@@
expression e,rq;
@@
- e = blk_rq_bytes(rq);
+ e = blk_rq_payload_bytes(rq);
<|end_of_text|> | 7,171 |
--- initial
+++ final
@@ -1,48 +1,37 @@
static int bnx2x_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, void *buffer) {
u32 *p = buffer;
struct bnx2x *bp = netdev_priv(dev);
struct dump_header dump_hdr = {0};
/* Disable parity attentions as long as following dump may
* cause false alarms by reading never written registers. We
* will re-enable parity attentions right after the dump.
*/
/* Disable parity on path 0 */
- bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
/* Disable parity on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_disable_blocks_parity(bp);
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
dump_hdr.preset = bp->dump_preset_idx;
dump_hdr.version = BNX2X_DUMP_VERSION;
DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);
/* dump_meta_data presents OR of CHIP and PATH. */
if (CHIP_IS_E1(bp)) {
dump_hdr.dump_meta_data = DUMP_CHIP_E1;
} else if (CHIP_IS_E1H(bp)) {
dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
} else if (CHIP_IS_E2(bp)) {
dump_hdr.dump_meta_data = DUMP_CHIP_E2 | (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
} else if (CHIP_IS_E3A0(bp)) {
dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 | (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
} else if (CHIP_IS_E3B0(bp)) {
dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 | (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
}
memcpy(p, &dump_hdr, sizeof(struct dump_header));
p += dump_hdr.header_size + 1;
/* Actually read the registers */
__bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
/* Re-enable parity attentions on path 0 */
- bnx2x_pretend_func(bp, 0);
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
/* Re-enable parity attentions on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_clear_blocks_parity(bp);
- bnx2x_enable_blocks_parity(bp);
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
return 0;
}<sep>@@
expression bp;
@@
- bnx2x_pretend_func(bp, 0);
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
- bnx2x_pretend_func(bp, 1);
- bnx2x_clear_blocks_parity(bp);
- bnx2x_enable_blocks_parity(bp);
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
@@
expression bp;
@@
- bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
- bnx2x_pretend_func(bp, 1);
- bnx2x_disable_blocks_parity(bp);
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
<|end_of_text|> | 7,172 |
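The call sites in this and the next record stop juggling bnx2x_pretend_func() per path, which only makes sense if the parity helpers now cover both paths internally; the records show only the call-site side of the change, so the following is purely an assumed shape of the reworked helper, with __bnx2x_disable_blocks_parity() a hypothetical per-path worker:

/* Assumed internalized per-path loop; not taken from the records. */
static void bnx2x_disable_blocks_parity(struct bnx2x *bp)
{
	int path;

	for (path = 0; path < 2; path++) {
		bnx2x_pretend_func(bp, path);
		__bnx2x_disable_blocks_parity(bp);
	}
	/* Return to the current function */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}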
--- initial
+++ final
@@ -1,49 +1,37 @@
static void bnx2x_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) {
u32 *p = _p;
struct bnx2x *bp = netdev_priv(dev);
struct dump_header dump_hdr = {0};
regs->version = 2;
memset(p, 0, regs->len);
if (!netif_running(bp->dev)) return;
/* Disable parity attentions as long as following dump may
* cause false alarms by reading never written registers. We
* will re-enable parity attentions right after the dump.
*/
/* Disable parity on path 0 */
- bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
/* Disable parity on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_disable_blocks_parity(bp);
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
dump_hdr.preset = DUMP_ALL_PRESETS;
dump_hdr.version = BNX2X_DUMP_VERSION;
/* dump_meta_data presents OR of CHIP and PATH. */
if (CHIP_IS_E1(bp)) {
dump_hdr.dump_meta_data = DUMP_CHIP_E1;
} else if (CHIP_IS_E1H(bp)) {
dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
} else if (CHIP_IS_E2(bp)) {
dump_hdr.dump_meta_data = DUMP_CHIP_E2 | (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
} else if (CHIP_IS_E3A0(bp)) {
dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 | (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
} else if (CHIP_IS_E3B0(bp)) {
dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 | (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
}
memcpy(p, &dump_hdr, sizeof(struct dump_header));
p += dump_hdr.header_size + 1;
/* Actually read the registers */
__bnx2x_get_regs(bp, p);
/* Re-enable parity attentions on path 0 */
- bnx2x_pretend_func(bp, 0);
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
- /* Re-enable parity attentions on path 1 */
- bnx2x_pretend_func(bp, 1);
- bnx2x_clear_blocks_parity(bp);
- bnx2x_enable_blocks_parity(bp);
- /* Return to current function */
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}<sep>@@
expression bp;
@@
- bnx2x_pretend_func(bp, 0);
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
- bnx2x_pretend_func(bp, 1);
- bnx2x_clear_blocks_parity(bp);
- bnx2x_enable_blocks_parity(bp);
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
@@
expression bp;
@@
- bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
- bnx2x_pretend_func(bp, 1);
- bnx2x_disable_blocks_parity(bp);
- bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
<|end_of_text|> | 7,173 |
--- initial
+++ final
@@ -1,148 +1,147 @@
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) {
unsigned int i;
unsigned int valid_states = 0;
unsigned int cpu = policy->cpu;
struct acpi_cpufreq_data *data;
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
pr_debug("acpi_cpufreq_cpu_init\n");
#ifdef CONFIG_SMP
if (blacklisted) return blacklisted;
blacklisted = acpi_cpufreq_blacklist(c);
if (blacklisted) return blacklisted;
#endif
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) return -ENOMEM;
if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
result = -ENOMEM;
goto err_free;
}
data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
per_cpu(acfreq_data, cpu) = data;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
result = acpi_processor_register_performance(data->acpi_data, cpu);
if (result) goto err_free_mask;
perf = data->acpi_data;
policy->shared_type = perf->shared_type;
/*
* Will let policy->cpus know about dependency only when software
* coordination is required.
*/
if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { cpumask_copy(policy->cpus, perf->shared_cpu_map); }
cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);
#ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table);
if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->cpus, cpu_core_mask(cpu));
}
if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu));
policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
pr_info_once(PFX "overriding BIOS provided _PSD data\n");
}
#endif
/* capability check */
if (perf->state_count <= 1) {
pr_debug("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if (perf->control_register.space_id != perf->status_register.space_id) {
result = -ENODEV;
goto err_unreg;
}
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86 == 0xf) {
pr_debug("AMD K8 systems must use native drivers.\n");
result = -ENODEV;
goto err_unreg;
}
pr_debug("SYSTEM IO addr space\n");
data->cpu_feature = SYSTEM_IO_CAPABLE;
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
pr_debug("HARDWARE addr space\n");
if (check_est_cpu(cpu)) {
data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
break;
}
if (check_amd_hwpstate_cpu(cpu)) {
data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
break;
}
result = -ENODEV;
goto err_unreg;
default:
pr_debug("Unknown addr space %d\n", (u32)(perf->control_register.space_id));
result = -ENODEV;
goto err_unreg;
}
data->freq_table = kmalloc(sizeof(*data->freq_table) * (perf->state_count + 1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
goto err_unreg;
}
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
for (i = 0; i < perf->state_count; i++) {
if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
}
/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && policy->cpuinfo.transition_latency > 20 * 1000) {
policy->cpuinfo.transition_latency = 20 * 1000;
printk_once(KERN_INFO "P-state transition latency capped at 20 uS\n");
}
/* table init */
for (i = 0; i < perf->state_count; i++) {
if (i > 0 && perf->states[i].core_frequency >= data->freq_table[valid_states - 1].frequency / 1000) continue;
data->freq_table[valid_states].driver_data = i;
data->freq_table[valid_states].frequency = perf->states[i].core_frequency * 1000;
valid_states++;
}
data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
perf->state = 0;
- result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, data->freq_table);
if (result) goto err_freqfree;
if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
/* Current speed is unknown and not detectable by IO port */
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
policy->cur = get_cur_freq_on_cpu(cpu);
break;
default: break;
}
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
for (i = 0; i < perf->state_count; i++)
pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", (i == perf->state ? '*' : ' '), i, (u32)perf->states[i].core_frequency, (u32)perf->states[i].power, (u32)perf->states[i].transition_latency);
- cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
/*
* the first call to ->target() should result in us actually
* writing something to the appropriate registers.
*/
data->resume = 1;
return result;
err_freqfree:
kfree(data->freq_table);
err_unreg:
acpi_processor_unregister_performance(perf, cpu);
err_free_mask:
free_cpumask_var(data->freqdomain_cpus);
err_free:
kfree(data);
per_cpu(acfreq_data, cpu) = NULL;
return result;
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, table)
...
- cpufreq_frequency_table_get_attr(table, policy->cpu);
<|end_of_text|> | 7,174 |
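The cpufreq records from here on share one transformation: cpufreq_table_validate_and_show() fuses cpuinfo validation with registering the table for the sysfs scaling_available_frequencies attribute, so the separate get_attr call disappears from every driver. From recollection it was introduced roughly as:

int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table)
{
	int ret = cpufreq_frequency_table_cpuinfo(policy, table);

	if (!ret)
		cpufreq_frequency_table_get_attr(table, policy->cpu);

	return ret;
}

Note the rule's `...` lets arbitrary code sit between the two calls, and a second rule variant (see the bfin and p4clockmod records below) handles call sites where get_attr precedes the cpuinfo call.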
--- initial
+++ final
@@ -1,27 +1,26 @@
static int bL_cpufreq_init(struct cpufreq_policy *policy) {
u32 cur_cluster = cpu_to_cluster(policy->cpu);
struct device *cpu_dev;
int ret;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
return -ENODEV;
}
ret = get_cluster_clk_and_freq_table(cpu_dev);
if (ret) return ret;
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+ ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
if (ret) {
dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n", policy->cpu, cur_cluster);
put_cluster_clk_and_freq_table(cpu_dev);
return ret;
}
- cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
if (arm_bL_ops->get_transition_latency)
policy->cpuinfo.transition_latency = arm_bL_ops->get_transition_latency(cpu_dev);
else
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = bL_cpufreq_get(policy->cpu);
cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
return 0;
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, table)
...
- cpufreq_frequency_table_get_attr(table, policy->cpu);
<|end_of_text|> | 7,175 |
--- initial
+++ final
@@ -1,10 +1,9 @@
static int __bfin_cpu_init(struct cpufreq_policy *policy) {
unsigned long cclk, sclk;
cclk = get_cclk() / 1000;
sclk = get_sclk() / 1000;
if (policy->cpu == CPUFREQ_CPU) bfin_init_tables(cclk, sclk);
policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
policy->cur = cclk;
- cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
+ return cpufreq_table_validate_and_show(policy, bfin_freq_table);
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_get_attr(table, policy->cpu);
...
- cpufreq_frequency_table_cpuinfo(policy, table)
+ cpufreq_table_validate_and_show(policy, table)
<|end_of_text|> | 7,176 |
--- initial
+++ final
@@ -1,18 +1,17 @@
static int cpu0_cpufreq_init(struct cpufreq_policy *policy) {
int ret;
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
if (ret) {
pr_err("invalid frequency table: %d\n", ret);
return ret;
}
policy->cpuinfo.transition_latency = transition_latency;
policy->cur = clk_get_rate(cpu_clk) / 1000;
/*
* The driver only supports the SMP configuration where all processors
* share the same clock and voltage. Use cpufreq affected_cpus
* interface to have all CPUs scaled together.
*/
cpumask_setall(policy->cpus);
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
return 0;
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, table)
...
- cpufreq_frequency_table_get_attr(table, policy->cpu);
<|end_of_text|> | 7,177 |
--- initial
+++ final
@@ -1,161 +1,160 @@
static int eps_cpu_init(struct cpufreq_policy *policy) {
unsigned int i;
u32 lo, hi;
u64 val;
u8 current_multiplier, current_voltage;
u8 max_multiplier, max_voltage;
u8 min_multiplier, min_voltage;
u8 brand = 0;
u32 fsb;
struct eps_cpu_data *centaur;
struct cpuinfo_x86 *c = &cpu_data(0);
struct cpufreq_frequency_table *f_table;
int k, step, voltage;
int ret;
int states;
#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
unsigned int limit;
#endif
if (policy->cpu != 0) return -ENODEV;
/* Check brand */
printk(KERN_INFO "eps: Detected VIA ");
switch (c->x86_model) {
case 10:
rdmsr(0x1153, lo, hi);
brand = (((lo >> 2) ^ lo) >> 18) & 3;
printk(KERN_CONT "Model A ");
break;
case 13:
rdmsr(0x1154, lo, hi);
brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
printk(KERN_CONT "Model D ");
break;
}
switch (brand) {
case EPS_BRAND_C7M: printk(KERN_CONT "C7-M\n"); break;
case EPS_BRAND_C7: printk(KERN_CONT "C7\n"); break;
case EPS_BRAND_EDEN: printk(KERN_CONT "Eden\n"); break;
case EPS_BRAND_C7D: printk(KERN_CONT "C7-D\n"); break;
case EPS_BRAND_C3:
printk(KERN_CONT "C3\n");
return -ENODEV;
break;
}
/* Enable Enhanced PowerSaver */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
wrmsrl(MSR_IA32_MISC_ENABLE, val);
/* Can be locked at 0 */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
return -ENODEV;
}
}
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
printk(KERN_INFO "eps: Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
/* Print limits */
max_voltage = hi & 0xff;
printk(KERN_INFO "eps: Highest voltage = %dmV\n", max_voltage * 16 + 700);
max_multiplier = (hi >> 8) & 0xff;
printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
min_voltage = (hi >> 16) & 0xff;
printk(KERN_INFO "eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700);
min_multiplier = (hi >> 24) & 0xff;
printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
/* Sanity checks */
if (current_multiplier == 0 || max_multiplier == 0 || min_multiplier == 0) return -EINVAL;
if (current_multiplier > max_multiplier || max_multiplier <= min_multiplier) return -EINVAL;
if (current_voltage > 0x1f || max_voltage > 0x1f) return -EINVAL;
if (max_voltage < min_voltage || current_voltage < min_voltage || current_voltage > max_voltage) return -EINVAL;
/* Check for systems using underclocked CPU */
if (!freq_failsafe_off && max_multiplier != current_multiplier) {
printk(KERN_INFO "eps: Your processor is running at different "
"frequency then its maximum. Aborting.\n");
printk(KERN_INFO "eps: You can use freq_failsafe_off option "
"to disable this check.\n");
return -EINVAL;
}
if (!voltage_failsafe_off && max_voltage != current_voltage) {
printk(KERN_INFO "eps: Your processor is running at different "
"voltage then its maximum. Aborting.\n");
printk(KERN_INFO "eps: You can use voltage_failsafe_off "
"option to disable this check.\n");
return -EINVAL;
}
/* Calc FSB speed */
fsb = cpu_khz / current_multiplier;
#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
/* Check for ACPI processor speed limit */
if (!ignore_acpi_limit && !eps_acpi_init()) {
if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
printk(KERN_INFO "eps: ACPI limit %u.%uGHz\n", limit / 1000000, (limit % 1000000) / 10000);
eps_acpi_exit(policy);
/* Check if max_multiplier is in BIOS limits */
if (limit && max_multiplier * fsb > limit) {
printk(KERN_INFO "eps: Aborting.\n");
return -EINVAL;
}
}
}
#endif
/* Allow user to set lower maximum voltage then that reported
* by processor */
if (brand == EPS_BRAND_C7M && set_max_voltage) {
u32 v;
/* Change mV to something hardware can use */
v = (set_max_voltage - 700) / 16;
/* Check if voltage is within limits */
if (v >= min_voltage && v <= max_voltage) {
printk(KERN_INFO "eps: Setting %dmV as maximum.\n", v * 16 + 700);
max_voltage = v;
}
}
/* Calc number of p-states supported */
if (brand == EPS_BRAND_C7M)
states = max_multiplier - min_multiplier + 1;
else
states = 2;
/* Allocate private data and frequency table for current cpu */
centaur = kzalloc(sizeof(*centaur) + (states + 1) * sizeof(struct cpufreq_frequency_table), GFP_KERNEL);
if (!centaur) return -ENOMEM;
eps_cpu[0] = centaur;
/* Copy basic values */
centaur->fsb = fsb;
#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
centaur->bios_limit = limit;
#endif
/* Fill frequency and MSR value table */
f_table = &centaur->freq_table[0];
if (brand != EPS_BRAND_C7M) {
f_table[0].frequency = fsb * min_multiplier;
f_table[0].driver_data = (min_multiplier << 8) | min_voltage;
f_table[1].frequency = fsb * max_multiplier;
f_table[1].driver_data = (max_multiplier << 8) | max_voltage;
f_table[2].frequency = CPUFREQ_TABLE_END;
} else {
k = 0;
step = ((max_voltage - min_voltage) * 256) / (max_multiplier - min_multiplier);
for (i = min_multiplier; i <= max_multiplier; i++) {
voltage = (k * step) / 256 + min_voltage;
f_table[k].frequency = fsb * i;
f_table[k].driver_data = (i << 8) | voltage;
k++;
}
f_table[k].frequency = CPUFREQ_TABLE_END;
}
policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
policy->cur = fsb * current_multiplier;
- ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
+ ret = cpufreq_table_validate_and_show(policy, &centaur->freq_table[0]);
if (ret) {
kfree(centaur);
return ret;
}
- cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
return 0;
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, table)
...
- cpufreq_frequency_table_get_attr(table, policy->cpu);
<|end_of_text|> | 7,178 |
--- initial
+++ final
@@ -1,8 +1,7 @@
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) {
policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
- cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
/* set the transition latency value */
policy->cpuinfo.transition_latency = 100000;
cpumask_setall(policy->cpus);
- return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
+ return cpufreq_table_validate_and_show(policy, exynos_info->freq_table);
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_get_attr(table, policy->cpu);
...
- cpufreq_frequency_table_cpuinfo(policy, table)
+ cpufreq_table_validate_and_show(policy, table)
<|end_of_text|> | 7,179 |
--- initial
+++ final
@@ -1,13 +1,12 @@
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) {
int ret;
- ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
+ ret = cpufreq_table_validate_and_show(policy, dvfs_info->freq_table);
if (ret) {
dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
return ret;
}
policy->cur = dvfs_info->cur_frequency;
policy->cpuinfo.transition_latency = dvfs_info->latency;
cpumask_setall(policy->cpus);
- cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
return 0;
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, table)
...
- cpufreq_frequency_table_get_attr(table, policy->cpu);
<|end_of_text|> | 7,180 |
--- initial
+++ final
@@ -1,66 +1,65 @@
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) {
unsigned int i;
unsigned int cpu = policy->cpu;
struct cpufreq_acpi_io *data;
unsigned int result = 0;
pr_debug("acpi_cpufreq_cpu_init\n");
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) return (-ENOMEM);
acpi_io_data[cpu] = data;
result = acpi_processor_register_performance(&data->acpi_data, cpu);
if (result) goto err_free;
/* capability check */
if (data->acpi_data.state_count <= 1) {
pr_debug("No P-States\n");
result = -ENODEV;
goto err_unreg;
}
if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
pr_debug("Unsupported address space [%d, %d]\n", (u32)(data->acpi_data.control_register.space_id), (u32)(data->acpi_data.status_register.space_id));
result = -ENODEV;
goto err_unreg;
}
/* alloc freq_table */
data->freq_table = kmalloc(sizeof(*data->freq_table) * (data->acpi_data.state_count + 1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
goto err_unreg;
}
/* detect transition latency */
policy->cpuinfo.transition_latency = 0;
for (i = 0; i < data->acpi_data.state_count; i++) {
if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency) { policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000; }
}
policy->cur = processor_get_freq(data, policy->cpu);
/* table init */
for (i = 0; i <= data->acpi_data.state_count; i++) {
data->freq_table[i].driver_data = i;
if (i < data->acpi_data.state_count) {
data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
} else {
data->freq_table[i].frequency = CPUFREQ_TABLE_END;
}
}
- result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, data->freq_table);
if (result) { goto err_freqfree; }
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
"activated.\n",
cpu);
for (i = 0; i < data->acpi_data.state_count; i++)
pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", (i == data->acpi_data.state ? '*' : ' '), i, (u32)data->acpi_data.states[i].core_frequency, (u32)data->acpi_data.states[i].power, (u32)data->acpi_data.states[i].transition_latency, (u32)data->acpi_data.states[i].bus_master_latency, (u32)data->acpi_data.states[i].status, (u32)data->acpi_data.states[i].control);
- cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
/* the first call to ->target() should result in us actually
* writing something to the appropriate registers. */
data->resume = 1;
return (result);
err_freqfree:
kfree(data->freq_table);
err_unreg:
acpi_processor_unregister_performance(&data->acpi_data, cpu);
err_free:
kfree(data);
acpi_io_data[cpu] = NULL;
return (result);
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, table)
...
- cpufreq_frequency_table_get_attr(table, policy->cpu);
<|end_of_text|> | 7,181 |
--- initial
+++ final
@@ -1,13 +1,12 @@
static int imx6q_cpufreq_init(struct cpufreq_policy *policy) {
int ret;
- ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
if (ret) {
dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
return ret;
}
policy->cpuinfo.transition_latency = transition_latency;
policy->cur = clk_get_rate(arm_clk) / 1000;
cpumask_setall(policy->cpus);
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
return 0;
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, table)
...
- cpufreq_frequency_table_get_attr(table, policy->cpu);
<|end_of_text|> | 7,182 |
--- initial
+++ final
@@ -1,26 +1,25 @@
static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) {
int i;
unsigned long rate;
int ret;
cpuclk = clk_get(NULL, "cpu_clk");
if (IS_ERR(cpuclk)) {
printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
return PTR_ERR(cpuclk);
}
rate = cpu_clock_freq / 1000;
if (!rate) {
clk_put(cpuclk);
return -EINVAL;
}
/* clock table init */
for (i = 2; (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
ret = clk_set_rate(cpuclk, rate);
if (ret) {
clk_put(cpuclk);
return ret;
}
policy->cur = loongson2_cpufreq_get(policy->cpu);
- cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, &loongson2_clockmod_table[0]);
+ return cpufreq_table_validate_and_show(policy, &loongson2_clockmod_table[0]);
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_get_attr(table, policy->cpu);
...
- cpufreq_frequency_table_cpuinfo(policy, table)
+ cpufreq_table_validate_and_show(policy, table)
<|end_of_text|> | 7,183 |
--- initial
+++ final
@@ -1,10 +1,9 @@
static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) {
policy->cpuinfo.transition_latency = 12000;
policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
/* secondary CPUs are tied to the primary one by the
* cpufreq core if in the secondary policy we tell it that
* it actually must be one policy together with all others. */
cpumask_setall(policy->cpus);
- cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, maple_cpu_freqs);
+ return cpufreq_table_validate_and_show(policy, maple_cpu_freqs);
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_get_attr(table, policy->cpu);
...
- cpufreq_frequency_table_cpuinfo(policy, table)
+ cpufreq_table_validate_and_show(policy, table)
<|end_of_text|> | 7,184 |
--- initial
+++ final
@@ -1,36 +1,35 @@
static int omap_cpu_init(struct cpufreq_policy *policy) {
int result = 0;
mpu_clk = clk_get(NULL, "cpufreq_ck");
if (IS_ERR(mpu_clk)) return PTR_ERR(mpu_clk);
if (policy->cpu >= NR_CPUS) {
result = -EINVAL;
goto fail_ck;
}
policy->cur = omap_getspeed(policy->cpu);
if (!freq_table) result = opp_init_cpufreq_table(mpu_dev, &freq_table);
if (result) {
dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n", __func__, policy->cpu, result);
goto fail_ck;
}
atomic_inc_return(&freq_table_users);
- result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ result = cpufreq_table_validate_and_show(policy, freq_table);
if (result) goto fail_table;
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
policy->cur = omap_getspeed(policy->cpu);
/*
* On OMAP SMP configuration, both processors share the voltage
* and clock. So both CPUs need to be scaled together and hence
* needs software co-ordination. Use cpufreq affected_cpus
* interface to handle this scenario. Additional is_smp() check
* is to keep SMP_ON_UP build working.
*/
if (is_smp()) cpumask_setall(policy->cpus);
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
return 0;
fail_table:
freq_table_free();
fail_ck:
clk_put(mpu_clk);
return result;
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, table)
...
- cpufreq_frequency_table_get_attr(table, policy->cpu);
<|end_of_text|> | 7,185 |
--- initial
+++ final
@@ -1,38 +1,37 @@
static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) {
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
int cpuid = 0;
unsigned int i;
#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
/* Errata workaround */
cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
switch (cpuid) {
case 0x0f07:
case 0x0f0a:
case 0x0f11:
case 0x0f12: has_N44_O17_errata[policy->cpu] = 1; pr_debug("has errata -- disabling low frequencies\n");
}
if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D && c->x86_model < 2) {
/* switch to maximum frequency and measure result */
cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
recalibrate_cpu_khz();
}
/* get max frequency */
stock_freq = cpufreq_p4_get_frequency(c);
if (!stock_freq) return -EINVAL;
/* table init */
for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
else
p4clockmod_table[i].frequency = (stock_freq * i) / 8;
}
- cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
/* cpuinfo and default policy values */
/* the transition latency is set to be 1 higher than the maximum
* transition latency of the ondemand governor */
policy->cpuinfo.transition_latency = 10000001;
policy->cur = stock_freq;
- return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
+ return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]);
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_get_attr(table, policy->cpu);
...
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, &table[0])
<|end_of_text|> | 7,186 |
--- initial
+++ final
@@ -1,67 +1,66 @@
static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) {
const u32 *max_freqp;
u32 max_freq;
int i, cur_astate;
struct resource res;
struct device_node *cpu, *dn;
int err = -ENODEV;
cpu = of_get_cpu_node(policy->cpu, NULL);
if (!cpu) goto out;
dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
if (!dn) dn = of_find_compatible_node(NULL, NULL, "pasemi,pwrficient-sdc");
if (!dn) goto out;
err = of_address_to_resource(dn, 0, &res);
of_node_put(dn);
if (err) goto out;
sdcasr_mapbase = ioremap(res.start + SDCASR_OFFSET, 0x2000);
if (!sdcasr_mapbase) {
err = -EINVAL;
goto out;
}
dn = of_find_compatible_node(NULL, NULL, "1682m-gizmo");
if (!dn) dn = of_find_compatible_node(NULL, NULL, "pasemi,pwrficient-gizmo");
if (!dn) {
err = -ENODEV;
goto out_unmap_sdcasr;
}
err = of_address_to_resource(dn, 0, &res);
of_node_put(dn);
if (err) goto out_unmap_sdcasr;
sdcpwr_mapbase = ioremap(res.start, 0x1000);
if (!sdcpwr_mapbase) {
err = -EINVAL;
goto out_unmap_sdcasr;
}
pr_debug("init cpufreq on CPU %d\n", policy->cpu);
max_freqp = of_get_property(cpu, "clock-frequency", NULL);
if (!max_freqp) {
err = -EINVAL;
goto out_unmap_sdcpwr;
}
/* we need the freq in kHz */
max_freq = *max_freqp / 1000;
pr_debug("max clock-frequency is at %u kHz\n", max_freq);
pr_debug("initializing frequency table\n");
/* initialize frequency table */
for (i = 0; pas_freqs[i].frequency != CPUFREQ_TABLE_END; i++) {
pas_freqs[i].frequency = get_astate_freq(pas_freqs[i].driver_data) * 100000;
pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
}
policy->cpuinfo.transition_latency = get_gizmo_latency();
cur_astate = get_cur_astate(policy->cpu);
pr_debug("current astate is at %d\n", cur_astate);
policy->cur = pas_freqs[cur_astate].frequency;
cpumask_copy(policy->cpus, cpu_online_mask);
ppc_proc_freq = policy->cur * 1000ul;
- cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
/* this ensures that policy->cpuinfo_min and policy->cpuinfo_max
* are set correctly
*/
- return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
+ return cpufreq_table_validate_and_show(policy, pas_freqs);
out_unmap_sdcpwr:
iounmap(sdcpwr_mapbase);
out_unmap_sdcasr:
iounmap(sdcasr_mapbase);
out:
return err;
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_get_attr(table, policy->cpu);
...
- cpufreq_frequency_table_cpuinfo(policy, table)
+ cpufreq_table_validate_and_show(policy, table)
<|end_of_text|> | 7,187 |
--- initial
+++ final
@@ -1,7 +1,6 @@
static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) {
if (policy->cpu != 0) return -ENODEV;
policy->cpuinfo.transition_latency = transition_latency;
policy->cur = cur_freq;
- cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
+ return cpufreq_table_validate_and_show(policy, pmac_cpu_freqs);
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_get_attr(table, policy->cpu);
...
- cpufreq_frequency_table_cpuinfo(policy, table)
+ cpufreq_table_validate_and_show(policy, table)
<|end_of_text|> | 7,188 |
--- initial
+++ final
@@ -1,10 +1,9 @@
static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) {
policy->cpuinfo.transition_latency = transition_latency;
policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
/* secondary CPUs are tied to the primary one by the
* cpufreq core if in the secondary policy we tell it that
* it actually must be one policy together with all others. */
cpumask_copy(policy->cpus, cpu_online_mask);
- cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
- return cpufreq_frequency_table_cpuinfo(policy, g5_cpu_freqs);
+ return cpufreq_table_validate_and_show(policy, g5_cpu_freqs);
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_get_attr(table, policy->cpu);
...
- cpufreq_frequency_table_cpuinfo(policy, table)
+ cpufreq_table_validate_and_show(policy, table)
<|end_of_text|> | 7,189 |
--- initial
+++ final
@@ -1,78 +1,77 @@
static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) {
struct device_node *np;
int i, count, ret;
u32 freq, mask;
struct clk *clk;
struct cpufreq_frequency_table *table;
struct cpu_data *data;
unsigned int cpu = policy->cpu;
np = of_get_cpu_node(cpu, NULL);
if (!np) return -ENODEV;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
pr_err("%s: no memory\n", __func__);
goto err_np;
}
data->clk = of_clk_get(np, 0);
if (IS_ERR(data->clk)) {
pr_err("%s: no clock information\n", __func__);
goto err_nomem2;
}
data->parent = of_parse_phandle(np, "clocks", 0);
if (!data->parent) {
pr_err("%s: could not get clock information\n", __func__);
goto err_nomem2;
}
count = of_property_count_strings(data->parent, "clock-names");
table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL);
if (!table) {
pr_err("%s: no memory\n", __func__);
goto err_node;
}
if (fmask)
mask = fmask[get_hard_smp_processor_id(cpu)];
else
mask = 0x0;
for (i = 0; i < count; i++) {
clk = of_clk_get(data->parent, i);
freq = clk_get_rate(clk);
/*
* the clock is valid if its frequency is not masked
* and larger than the minimum allowed frequency.
*/
if (freq < min_cpufreq || (mask & (1 << i)))
table[i].frequency = CPUFREQ_ENTRY_INVALID;
else
table[i].frequency = freq / 1000;
table[i].driver_data = i;
}
freq_table_redup(table, count);
freq_table_sort(table, count);
table[i].frequency = CPUFREQ_TABLE_END;
/* set the min and max frequency properly */
- ret = cpufreq_frequency_table_cpuinfo(policy, table);
+ ret = cpufreq_table_validate_and_show(policy, table);
if (ret) {
pr_err("invalid frequency table: %d\n", ret);
goto err_nomem1;
}
data->table = table;
per_cpu(cpu_data, cpu) = data;
/* update ->cpus if we have cluster, no harm if not */
cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu));
for_each_cpu(i, per_cpu(cpu_mask, cpu)) per_cpu(cpu_data, i) = data;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = corenet_cpufreq_get_speed(policy->cpu);
- cpufreq_frequency_table_get_attr(table, cpu);
of_node_put(np);
return 0;
err_nomem1:
kfree(table);
err_node:
of_node_put(data->parent);
err_nomem2:
per_cpu(cpu_data, cpu) = NULL;
kfree(data);
err_np:
of_node_put(np);
return -ENODEV;
}<sep>@@
expression policy,table,e1,e2;
identifier c;
@@
unsigned int c = policy->cpu;
... when != c = e1
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, table)
... when != c = e2
- cpufreq_frequency_table_get_attr(table, c);
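// Variant for call sites that cache policy->cpu in a local: the
// "when != c = e1/e2" constraints require that c is not reassigned between
// its initialisation and the get_attr() call being removed.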
<|end_of_text|> | 7,190 |
--- initial
+++ final
@@ -1,41 +1,40 @@
static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) {
const u32 *max_freqp;
u32 max_freq;
int i, cur_pmode;
struct device_node *cpu;
cpu = of_get_cpu_node(policy->cpu, NULL);
if (!cpu) return -ENODEV;
pr_debug("init cpufreq on CPU %d\n", policy->cpu);
/*
* Let's check we can actually get to the CELL regs
*/
if (!cbe_get_cpu_pmd_regs(policy->cpu) || !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
pr_info("invalid CBE regs pointers for cpufreq\n");
return -EINVAL;
}
max_freqp = of_get_property(cpu, "clock-frequency", NULL);
of_node_put(cpu);
if (!max_freqp) return -EINVAL;
/* we need the freq in kHz */
max_freq = *max_freqp / 1000;
pr_debug("max clock-frequency is at %u kHz\n", max_freq);
pr_debug("initializing frequency table\n");
/* initialize frequency table */
for (i = 0; cbe_freqs[i].frequency != CPUFREQ_TABLE_END; i++) {
cbe_freqs[i].frequency = max_freq / cbe_freqs[i].driver_data;
pr_debug("%d: %d\n", i, cbe_freqs[i].frequency);
}
/* if DEBUG is enabled set_pmode() measures the latency
* of a transition */
policy->cpuinfo.transition_latency = 25000;
cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
pr_debug("current pmode is at %d\n", cur_pmode);
policy->cur = cbe_freqs[cur_pmode].frequency;
#ifdef CONFIG_SMP
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
- cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
/* this ensures that policy->cpuinfo_min
* and policy->cpuinfo_max are set correctly */
- return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
+ return cpufreq_table_validate_and_show(policy, cbe_freqs);
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_get_attr(table, policy->cpu);
...
- cpufreq_frequency_table_cpuinfo(policy, table)
+ cpufreq_table_validate_and_show(policy, table)
<|end_of_text|> | 7,191 |
--- initial
+++ final
@@ -1,44 +1,43 @@
static int __init s5pv210_cpu_init(struct cpufreq_policy *policy) {
unsigned long mem_type;
int ret;
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk);
dmc0_clk = clk_get(NULL, "sclk_dmc0");
if (IS_ERR(dmc0_clk)) {
ret = PTR_ERR(dmc0_clk);
goto out_dmc0;
}
dmc1_clk = clk_get(NULL, "hclk_msys");
if (IS_ERR(dmc1_clk)) {
ret = PTR_ERR(dmc1_clk);
goto out_dmc1;
}
if (policy->cpu != 0) {
ret = -EINVAL;
goto out_dmc1;
}
/*
* check_mem_type : This driver only supports LPDDR & LPDDR2.
* Other memory types are not supported.
*/
mem_type = check_mem_type(S5P_VA_DMC0);
if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
ret = -EINVAL;
goto out_dmc1;
}
/* Find the current refresh counter and frequency for each DMC */
s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
- cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
policy->cpuinfo.transition_latency = 40000;
- return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+ return cpufreq_table_validate_and_show(policy, s5pv210_freq_table);
out_dmc1:
clk_put(dmc0_clk);
out_dmc0:
clk_put(cpu_clk);
return ret;
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_get_attr(table, policy->cpu);
...
- cpufreq_frequency_table_cpuinfo(policy, table)
+ cpufreq_table_validate_and_show(policy, table)
<|end_of_text|> | 7,192 |
--- initial
+++ final
@@ -1,14 +1,13 @@
static int tegra_cpu_init(struct cpufreq_policy *policy) {
if (policy->cpu >= NUM_CPUS) return -EINVAL;
clk_prepare_enable(emc_clk);
clk_prepare_enable(cpu_clk);
- cpufreq_frequency_table_cpuinfo(policy, freq_table);
- cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+ cpufreq_table_validate_and_show(policy, freq_table);
policy->cur = tegra_getspeed(policy->cpu);
target_cpu_speed[policy->cpu] = policy->cur;
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
cpumask_copy(policy->cpus, cpu_possible_mask);
if (policy->cpu == 0) register_pm_notifier(&tegra_cpu_pm_notifier);
return 0;
}<sep>@@
expression policy,table;
@@
- cpufreq_frequency_table_cpuinfo
+ cpufreq_table_validate_and_show
(policy, table)
...
- cpufreq_frequency_table_get_attr(table, policy->cpu);
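// Same merge, but matching call sites where the cpuinfo() call comes first
// and get_attr() follows it.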
<|end_of_text|> | 7,193 |
--- initial
+++ final
@@ -1,47 +1,47 @@
static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy, unsigned int freq_next, unsigned int relation) {
int sensitivity;
long d_actual, d_reference;
struct msr actual, reference;
struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *od_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = od_data->tuners;
if (!policy->freq_table) return freq_next;
rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &actual.l, &actual.h);
rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE, &reference.l, &reference.h);
actual.h &= 0x00ffffff;
reference.h &= 0x00ffffff;
/* counter wrapped around, so stay on current frequency */
if (actual.q < data->actual || reference.q < data->reference) {
freq_next = policy->cur;
goto out;
}
d_actual = actual.q - data->actual;
d_reference = reference.q - data->reference;
/* divide by 0, so stay on current frequency as well */
if (d_reference == 0) {
freq_next = policy->cur;
goto out;
}
sensitivity = POWERSAVE_BIAS_MAX - (POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);
clamp(sensitivity, 0, POWERSAVE_BIAS_MAX);
/* this workload is not CPU bound, so choose a lower freq */
if (sensitivity < od_tuners->powersave_bias) {
if (data->freq_prev == policy->cur) freq_next = policy->cur;
if (freq_next > policy->cur)
freq_next = policy->cur;
else if (freq_next < policy->cur)
freq_next = policy->min;
else {
unsigned int index;
- index = cpufreq_frequency_table_target(policy, policy->cur - 1, CPUFREQ_RELATION_H);
+ index = cpufreq_table_find_index_h(policy, policy->cur - 1);
freq_next = policy->freq_table[index].frequency;
}
data->freq_prev = freq_next;
} else
data->freq_prev = 0;
out:
data->actual = actual.q;
data->reference = reference.q;
return freq_next;
}<sep>@@
expression policy, old_freq;
@@
- cpufreq_frequency_table_target(policy, old_freq,
- CPUFREQ_RELATION_H)
+ cpufreq_table_find_index_h(policy, old_freq)
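// With the relation argument fixed to a constant, the generic table lookup
// collapses into the dedicated helper; the _l and _c variants below follow
// the same pattern.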
<|end_of_text|> | 7,194 |
--- initial
+++ final
@@ -1,38 +1,38 @@
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy, unsigned int freq_next, unsigned int relation) {
unsigned int freq_req, freq_reduc, freq_avg;
unsigned int freq_hi, freq_lo;
unsigned int index;
unsigned int delay_hi_us;
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct cpufreq_frequency_table *freq_table = policy->freq_table;
if (!freq_table) {
dbs_info->freq_lo = 0;
dbs_info->freq_lo_delay_us = 0;
return freq_next;
}
index = cpufreq_frequency_table_target(policy, freq_next, relation);
freq_req = freq_table[index].frequency;
freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
freq_avg = freq_req - freq_reduc;
/* Find freq bounds for freq_avg in freq_table */
- index = cpufreq_frequency_table_target(policy, freq_avg, CPUFREQ_RELATION_H);
+ index = cpufreq_table_find_index_h(policy, freq_avg);
freq_lo = freq_table[index].frequency;
- index = cpufreq_frequency_table_target(policy, freq_avg, CPUFREQ_RELATION_L);
+ index = cpufreq_table_find_index_l(policy, freq_avg);
freq_hi = freq_table[index].frequency;
/* Find out how long we have to be in hi and lo freqs */
if (freq_hi == freq_lo) {
dbs_info->freq_lo = 0;
dbs_info->freq_lo_delay_us = 0;
return freq_lo;
}
delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
delay_hi_us += (freq_hi - freq_lo) / 2;
delay_hi_us /= freq_hi - freq_lo;
dbs_info->freq_hi_delay_us = delay_hi_us;
dbs_info->freq_lo = freq_lo;
dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
return freq_hi;
}<sep>@@
expression policy, old_freq;
@@
- cpufreq_frequency_table_target(policy, old_freq,
- CPUFREQ_RELATION_H)
+ cpufreq_table_find_index_h(policy, old_freq)
@@
expression policy, old_freq;
@@
- cpufreq_frequency_table_target(policy, old_freq,
- CPUFREQ_RELATION_L)
+ cpufreq_table_find_index_l(policy, old_freq)
<|end_of_text|> | 7,195 |
--- initial
+++ final
@@ -1,20 +1,20 @@
void powernv_cpufreq_work_fn(struct work_struct *work) {
struct chip *chip = container_of(work, struct chip, throttle);
unsigned int cpu;
cpumask_t mask;
get_online_cpus();
cpumask_and(&mask, &chip->mask, cpu_online_mask);
smp_call_function_any(&mask, powernv_cpufreq_throttle_check, NULL, 0);
if (!chip->restore) goto out;
chip->restore = false;
for_each_cpu(cpu, &mask) {
int index;
struct cpufreq_policy policy;
cpufreq_get_policy(&policy, cpu);
- index = cpufreq_frequency_table_target(&policy, policy.cur, CPUFREQ_RELATION_C);
+ index = cpufreq_table_find_index_c(&policy, policy.cur);
powernv_cpufreq_target_index(&policy, index);
cpumask_andnot(&mask, &mask, policy.cpus);
}
out:
put_online_cpus();
}<sep>@@
expression policy, old_freq;
@@
- cpufreq_frequency_table_target(policy, old_freq,
- CPUFREQ_RELATION_C)
+ cpufreq_table_find_index_c(policy, old_freq)
<|end_of_text|> | 7,196 |
--- initial
+++ final
@@ -1,22 +1,22 @@
static int charger_extcon_notifier(struct notifier_block *self, unsigned long event, void *ptr) {
struct charger_cable *cable = container_of(self, struct charger_cable, nb);
/*
* The newly state of charger cable.
* If cable is attached, cable->attached is true.
*/
cable->attached = event;
/*
* Setup monitoring to check battery state
* when charger cable is attached.
*/
if (cable->attached && is_polling_required(cable->cm)) {
- if (work_pending(&setup_polling)) cancel_work_sync(&setup_polling);
+ cancel_work_sync(&setup_polling);
schedule_work(&setup_polling);
}
/*
* Setup work for controlling charger(regulator)
* according to charger cable.
*/
schedule_work(&cable->wq);
return NOTIFY_DONE;
}<sep>@@
expression e;
@@
- if (work_pending(e))
cancel_work_sync(e);
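// cancel_work_sync() is already a no-op for idle work, so the work_pending()
// guard is redundant (and racy, since the state can change between the test
// and the cancel).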
<|end_of_text|> | 7,198 |
--- initial
+++ final
@@ -1,28 +1,28 @@
static int charger_manager_remove(struct platform_device *pdev) {
struct charger_manager *cm = platform_get_drvdata(pdev);
struct charger_desc *desc = cm->desc;
int i = 0;
int j = 0;
/* Remove from the list */
mutex_lock(&cm_list_mtx);
list_del(&cm->entry);
mutex_unlock(&cm_list_mtx);
- if (work_pending(&setup_polling)) cancel_work_sync(&setup_polling);
- if (delayed_work_pending(&cm_monitor_work)) cancel_delayed_work_sync(&cm_monitor_work);
+ cancel_work_sync(&setup_polling);
+ cancel_delayed_work_sync(&cm_monitor_work);
for (i = 0; i < desc->num_charger_regulators; i++) {
struct charger_regulator *charger = &desc->charger_regulators[i];
for (j = 0; j < charger->num_cables; j++) {
struct charger_cable *cable = &charger->cables[j];
extcon_unregister_interest(&cable->extcon_dev);
}
}
for (i = 0; i < desc->num_charger_regulators; i++)
regulator_put(desc->charger_regulators[i].consumer);
power_supply_unregister(&cm->charger_psy);
try_charger_enable(cm, false);
kfree(cm->charger_psy.properties);
kfree(cm->charger_stat);
kfree(cm->desc);
kfree(cm);
return 0;
}<sep>@@
expression e;
@@
- if (delayed_work_pending(e))
cancel_delayed_work_sync(e);
@@
expression e;
@@
- if (work_pending(e))
cancel_work_sync(e);
<|end_of_text|> | 7,199 |
--- initial
+++ final
@@ -1,27 +1,27 @@
static int cm_suspend_prepare(struct device *dev) {
struct charger_manager *cm = dev_get_drvdata(dev);
if (!cm_suspended) {
if (rtc_dev) {
struct rtc_time tmp;
unsigned long now;
rtc_read_alarm(rtc_dev, &rtc_wkalarm_save);
rtc_read_time(rtc_dev, &tmp);
if (rtc_wkalarm_save.enabled) {
rtc_tm_to_time(&rtc_wkalarm_save.time, &rtc_wkalarm_save_time);
rtc_tm_to_time(&tmp, &now);
if (now > rtc_wkalarm_save_time) rtc_wkalarm_save_time = 0;
} else {
rtc_wkalarm_save_time = 0;
}
}
cm_suspended = true;
}
- if (delayed_work_pending(&cm->fullbatt_vchk_work)) cancel_delayed_work(&cm->fullbatt_vchk_work);
+ cancel_delayed_work(&cm->fullbatt_vchk_work);
cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
cm->status_save_batt = is_batt_present(cm);
if (!cm_rtc_set) {
cm_suspend_duration_ms = 0;
cm_rtc_set = cm_setup_timer();
}
return 0;
}<sep>@@
expression e;
@@
- if (delayed_work_pending(e))
cancel_delayed_work(e);
<|end_of_text|> | 7,200 |
--- initial
+++ final
@@ -1,23 +1,23 @@
static int ab8500_charger_resume(struct platform_device *pdev) {
int ret;
struct ab8500_charger *di = platform_get_drvdata(pdev);
/*
* For ABB revision 1.0 and 1.1 there is a bug in the watchdog
* logic. That means we have to continuously kick the charger
* watchdog even when no charger is connected. This is only
* valid once the AC charger has been enabled. This is
* a bug that is not handled by the algorithm and the
* watchdog has to be kicked by the charger driver
* when the AC charger is disabled
*/
if (di->ac_conn && is_ab8500_1p1_or_earlier(di->parent)) {
ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, AB8500_CHARG_WD_CTRL, CHARG_WD_KICK);
if (ret) dev_err(di->dev, "Failed to kick WD!\n");
/* If not already pending start a new timer */
- if (!delayed_work_pending(&di->kick_wd_work)) { queue_delayed_work(di->charger_wq, &di->kick_wd_work, round_jiffies(WD_KICK_INTERVAL)); }
+ queue_delayed_work(di->charger_wq, &di->kick_wd_work, round_jiffies(WD_KICK_INTERVAL));
}
/* If we still have a HW failure, schedule a new check */
if (di->flags.mainextchnotok || di->flags.vbus_ovv) { queue_delayed_work(di->charger_wq, &di->check_hw_failure_work, 0); }
if (di->flags.vbus_drop_end) queue_delayed_work(di->charger_wq, &di->vbus_drop_end_work, 0);
return 0;
}<sep>@@
expression e1,e2,e3;
@@
- if (!delayed_work_pending(e2))
- {
... queue_delayed_work(e1,e2,e3); ...
- }
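// queue_delayed_work() refuses to requeue work that is still pending, so the
// guard and its braces can be dropped around the call.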
<|end_of_text|> | 7,201 |
--- initial
+++ final
@@ -1,17 +1,17 @@
static int ab8500_charger_suspend(struct platform_device *pdev, pm_message_t state) {
struct ab8500_charger *di = platform_get_drvdata(pdev);
/* Cancel any pending HW failure check */
- if (delayed_work_pending(&di->check_hw_failure_work)) cancel_delayed_work(&di->check_hw_failure_work);
- if (delayed_work_pending(&di->vbus_drop_end_work)) cancel_delayed_work(&di->vbus_drop_end_work);
+ cancel_delayed_work(&di->check_hw_failure_work);
+ cancel_delayed_work(&di->vbus_drop_end_work);
flush_delayed_work(&di->attach_work);
flush_delayed_work(&di->usb_charger_attached_work);
flush_delayed_work(&di->ac_charger_attached_work);
flush_delayed_work(&di->check_usbchgnotok_work);
flush_delayed_work(&di->check_vbat_work);
flush_delayed_work(&di->kick_wd_work);
flush_work(&di->usb_link_status_work);
flush_work(&di->ac_work);
flush_work(&di->detect_usb_type_work);
if (atomic_read(&di->current_stepping_sessions)) return -EAGAIN;
return 0;
}<sep>@@
expression e;
@@
- if (delayed_work_pending(e))
cancel_delayed_work(e);
<|end_of_text|> | 7,202 |
--- initial
+++ final
@@ -1,110 +1,110 @@
static int ab8500_charger_usb_en(struct ux500_charger *charger, int enable, int vset, int ich_out) {
int ret;
int volt_index;
int curr_index;
u8 overshoot = 0;
struct ab8500_charger *di = to_ab8500_charger_usb_device_info(charger);
if (enable) {
/* Check if USB is connected */
if (!di->usb.charger_connected) {
dev_err(di->dev, "USB charger not connected\n");
return -ENXIO;
}
/*
* Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts
* will be triggered every time we enable the VDD ADC supply.
* This will turn off charging for a short while.
* It can be avoided by having the supply on when
* there is a charger enabled. Normally the VDD ADC supply
* is enabled every time a GPADC conversion is triggered. We will
* force it to be enabled from this driver to have
* the GPADC module independent of the AB8500 chargers
*/
if (!di->vddadc_en_usb) {
regulator_enable(di->regu);
di->vddadc_en_usb = true;
}
/* Enable USB charging */
dev_dbg(di->dev, "Enable USB: %dmV %dmA\n", vset, ich_out);
/* Check if the requested voltage or current is valid */
volt_index = ab8500_voltage_to_regval(vset);
curr_index = ab8500_current_to_regval(di, ich_out);
if (volt_index < 0 || curr_index < 0) {
dev_err(di->dev, "Charger voltage or current too high, "
"charging not started\n");
return -ENXIO;
}
/* ChVoltLevel: max voltage up to which the battery can be charged */
ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, AB8500_CH_VOLT_LVL_REG, (u8)volt_index);
if (ret) {
dev_err(di->dev, "%s write failed\n", __func__);
return ret;
}
/* Check if VBAT overshoot control should be enabled */
if (!di->bm->enable_overshoot) overshoot = USB_CHG_NO_OVERSHOOT_ENA_N;
/* Enable USB Charger */
dev_dbg(di->dev, "Enabling USB with write to AB8500_USBCH_CTRL1_REG\n");
ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, AB8500_USBCH_CTRL1_REG, USB_CH_ENA | overshoot);
if (ret) {
dev_err(di->dev, "%s write failed\n", __func__);
return ret;
}
/* If success power on charging LED indication */
ret = ab8500_charger_led_en(di, true);
if (ret < 0) dev_err(di->dev, "failed to enable LED\n");
di->usb.charger_online = 1;
/* USBChInputCurr: current that can be drawn from the usb */
ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr.usb_type_max);
if (ret) {
dev_err(di->dev, "setting USBChInputCurr failed\n");
return ret;
}
/* ChOutputCurentLevel: protected output current */
ret = ab8500_charger_set_output_curr(di, ich_out);
if (ret) {
dev_err(di->dev,
"%s "
"Failed to set ChOutputCurentLevel\n",
__func__);
return ret;
}
queue_delayed_work(di->charger_wq, &di->check_vbat_work, HZ);
} else {
/* Disable USB charging */
dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, AB8500_USBCH_CTRL1_REG, 0);
if (ret) {
dev_err(di->dev, "%s write failed\n", __func__);
return ret;
}
ret = ab8500_charger_led_en(di, false);
if (ret < 0) dev_err(di->dev, "failed to disable LED\n");
/* USBChInputCurr: current that can be drawn from the usb */
ret = ab8500_charger_set_vbus_in_curr(di, 0);
if (ret) {
dev_err(di->dev, "setting USBChInputCurr failed\n");
return ret;
}
/* ChOutputCurentLevel: protected output current */
ret = ab8500_charger_set_output_curr(di, 0);
if (ret) {
dev_err(di->dev,
"%s "
"Failed to reset ChOutputCurentLevel\n",
__func__);
return ret;
}
di->usb.charger_online = 0;
di->usb.wd_expired = false;
/* Disable regulator if enabled */
if (di->vddadc_en_usb) {
regulator_disable(di->regu);
di->vddadc_en_usb = false;
}
dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
/* Cancel any pending Vbat check work */
- if (delayed_work_pending(&di->check_vbat_work)) cancel_delayed_work(&di->check_vbat_work);
+ cancel_delayed_work(&di->check_vbat_work);
}
ab8500_power_supply_changed(di, &di->usb_chg.psy);
return ret;
}<sep>@@
expression e;
@@
- if (delayed_work_pending(e))
cancel_delayed_work(e);
<|end_of_text|> | 7,203 |
--- initial
+++ final
@@ -1,3 +1 @@
-static void peak_pciec_start_led_work(struct peak_pciec_card *card) {
- if (!delayed_work_pending(&card->led_work)) schedule_delayed_work(&card->led_work, HZ);
-}
+static void peak_pciec_start_led_work(struct peak_pciec_card *card) { schedule_delayed_work(&card->led_work, HZ); }<sep>@@
expression e1,e2;
@@
- if (!delayed_work_pending(e1))
schedule_delayed_work(e1,e2);
<|end_of_text|> | 7,204 |
--- initial
+++ final
@@ -1,26 +1,26 @@
int wl1251_ps_elp_wakeup(struct wl1251 *wl) {
unsigned long timeout, start;
u32 elp_reg;
- if (delayed_work_pending(&wl->elp_work)) cancel_delayed_work(&wl->elp_work);
+ cancel_delayed_work(&wl->elp_work);
if (!wl->elp) return 0;
wl1251_debug(DEBUG_PSM, "waking up chip from elp");
start = jiffies;
timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
/*
* FIXME: we should wait for irq from chip but, as a temporary
* solution to simplify locking, let's poll instead
*/
while (!(elp_reg & ELPCTRL_WLAN_READY)) {
if (time_after(jiffies, timeout)) {
wl1251_error("elp wakeup timeout");
return -ETIMEDOUT;
}
msleep(1);
elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
}
wl1251_debug(DEBUG_PSM, "wakeup time: %u ms", jiffies_to_msecs(jiffies - start));
wl->elp = false;
return 0;
}<sep>@@
expression e;
@@
- if (delayed_work_pending(e))
cancel_delayed_work(e);
<|end_of_text|> | 7,205 |
--- initial
+++ final
@@ -1,25 +1,25 @@
static int pga_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) {
struct snd_soc_codec *codec = w->codec;
struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
struct wm8350_output *out;
switch (w->shift) {
case 0:
case 1: out = &wm8350_data->out1; break;
case 2:
case 3: out = &wm8350_data->out2; break;
default: BUG(); return -1;
}
switch (event) {
case SND_SOC_DAPM_POST_PMU:
out->ramp = WM8350_RAMP_UP;
out->active = 1;
- if (!delayed_work_pending(&codec->dapm.delayed_work)) schedule_delayed_work(&codec->dapm.delayed_work, msecs_to_jiffies(1));
+ schedule_delayed_work(&codec->dapm.delayed_work, msecs_to_jiffies(1));
break;
case SND_SOC_DAPM_PRE_PMD:
out->ramp = WM8350_RAMP_DOWN;
out->active = 0;
- if (!delayed_work_pending(&codec->dapm.delayed_work)) schedule_delayed_work(&codec->dapm.delayed_work, msecs_to_jiffies(1));
+ schedule_delayed_work(&codec->dapm.delayed_work, msecs_to_jiffies(1));
break;
}
return 0;
}<sep>@@
expression e1,e2;
@@
- if (!delayed_work_pending(e1))
schedule_delayed_work(e1,e2);
<|end_of_text|> | 7,206 |
--- initial
+++ final
@@ -1,17 +1,17 @@
int mce_notify_irq(void) {
/* Not more than two messages every minute */
static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 2);
if (test_and_clear_bit(0, &mce_need_notify)) {
/* wake processes polling /dev/mcelog */
wake_up_interruptible(&mce_chrdev_wait);
/*
* There is no risk of missing notifications because
* work_pending is always cleared before the function is
* executed.
*/
- if (mce_helper[0] && !work_pending(&mce_trigger_work)) schedule_work(&mce_trigger_work);
+ if (mce_helper[0]) schedule_work(&mce_trigger_work);
if (__ratelimit(&ratelimit)) pr_info(HW_ERR "Machine check events logged\n");
return 1;
}
return 0;
}<sep>@@
expression e,e1;
@@
- if (e && !work_pending(e1))
+ if (e)
{ ... schedule_work(e1); ... }
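// schedule_work() ignores work that is already queued, so only the unrelated
// half of the condition needs to survive.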
<|end_of_text|> | 7,207 |
--- initial
+++ final
@@ -1,6 +1,6 @@
static void mce_schedule_work(void) {
if (!mce_ring_empty()) {
struct work_struct *work = &__get_cpu_var(mce_work);
- if (!work_pending(work)) schedule_work(work);
+ schedule_work(work);
}
}<sep>@@
expression e;
@@
- if (!work_pending(e))
schedule_work(e);
<|end_of_text|> | 7,208 |
--- initial
+++ final
@@ -1,9 +1,9 @@
static void at91_vbus_timer(unsigned long data) {
struct at91_udc *udc = (struct at91_udc *)data;
/*
* If we are polling vbus it is likely that the gpio is on a
* bus such as i2c or spi which may sleep, so schedule some work
* to read the vbus gpio
*/
- if (!work_pending(&udc->vbus_timer_work)) schedule_work(&udc->vbus_timer_work);
+ schedule_work(&udc->vbus_timer_work);
}<sep>@@
expression e;
@@
- if (!work_pending(e))
schedule_work(e);
<|end_of_text|> | 7,209 |
--- initial
+++ final
@@ -1,13 +1,13 @@
static int __devexit exynos_dp_remove(struct platform_device *pdev) {
struct exynos_dp_platdata *pdata = pdev->dev.platform_data;
struct exynos_dp_device *dp = platform_get_drvdata(pdev);
disable_irq(dp->irq);
- if (work_pending(&dp->hotplug_work)) flush_work(&dp->hotplug_work);
+ flush_work(&dp->hotplug_work);
if (pdev->dev.of_node) {
if (dp->phy_addr) exynos_dp_phy_exit(dp);
} else {
if (pdata->phy_exit) pdata->phy_exit();
}
clk_disable_unprepare(dp->clock);
return 0;
}<sep>@@
expression e;
@@
- if (work_pending(e))
flush_work(e);
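// flush_work() simply returns false when the work is idle, so the pending
// test adds nothing here either.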
<|end_of_text|> | 7,210 |
--- initial
+++ final
@@ -1,12 +1,12 @@
static int exynos_dp_suspend(struct device *dev) {
struct exynos_dp_platdata *pdata = dev->platform_data;
struct exynos_dp_device *dp = dev_get_drvdata(dev);
- if (work_pending(&dp->hotplug_work)) flush_work(&dp->hotplug_work);
+ flush_work(&dp->hotplug_work);
if (dev->of_node) {
if (dp->phy_addr) exynos_dp_phy_exit(dp);
} else {
if (pdata->phy_exit) pdata->phy_exit();
}
clk_disable_unprepare(dp->clock);
return 0;
}<sep>@@
expression e;
@@
- if (work_pending(e))
flush_work(e);
<|end_of_text|> | 7,211 |
--- initial
+++ final
@@ -1,3 +1 @@
-void genpd_queue_power_off_work(struct generic_pm_domain *genpd) {
- if (!work_pending(&genpd->power_off_work)) queue_work(pm_wq, &genpd->power_off_work);
-}
+void genpd_queue_power_off_work(struct generic_pm_domain *genpd) { queue_work(pm_wq, &genpd->power_off_work); }<sep>@@
expression e1,e2;
@@
- if (!work_pending(e2))
queue_work(e1,e2);
<|end_of_text|> | 7,212 |
--- initial
+++ final
@@ -1,3 +1,3 @@
void queue_up_suspend_work(void) {
- if (!work_pending(&suspend_work) && autosleep_state > PM_SUSPEND_ON) queue_work(autosleep_wq, &suspend_work);
+ if (autosleep_state > PM_SUSPEND_ON) queue_work(autosleep_wq, &suspend_work);
}<sep>@@
expression e,e1,e2;
@@
- if (!work_pending(e2) && e)
+ if (e)
{ ... queue_work(e1,e2); ... }
<|end_of_text|> | 7,213 |
--- initial
+++ final
@@ -1,12 +1,12 @@
void pm_qos_remove_request(struct pm_qos_request *req) {
if (!req) /*guard against callers passing in null */
return;
/* silent return to keep pcm code cleaner */
if (!pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
return;
}
- if (delayed_work_pending(&req->work)) cancel_delayed_work_sync(&req->work);
+ cancel_delayed_work_sync(&req->work);
pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints, &req->node, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}<sep>@@
expression e;
@@
- if (delayed_work_pending(e))
cancel_delayed_work_sync(e);
<|end_of_text|> | 7,214 |
--- initial
+++ final
@@ -1,10 +1,10 @@
void pm_qos_update_request(struct pm_qos_request *req, s32 new_value) {
if (!req) /*guard against callers passing in null */
return;
if (!pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
return;
}
- if (delayed_work_pending(&req->work)) cancel_delayed_work_sync(&req->work);
+ cancel_delayed_work_sync(&req->work);
if (new_value != req->node.prio) pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints, &req->node, PM_QOS_UPDATE_REQ, new_value);
}<sep>@@
expression e;
@@
- if (delayed_work_pending(e))
cancel_delayed_work_sync(e);
<|end_of_text|> | 7,215 |
--- initial
+++ final
@@ -1,7 +1,7 @@
void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value, unsigned long timeout_us) {
if (!req) return;
if (WARN(!pm_qos_request_active(req), "%s called for unknown object.", __func__)) return;
- if (delayed_work_pending(&req->work)) cancel_delayed_work_sync(&req->work);
+ cancel_delayed_work_sync(&req->work);
if (new_value != req->node.prio) pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints, &req->node, PM_QOS_UPDATE_REQ, new_value);
schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
}<sep>@@
expression e;
@@
- if (delayed_work_pending(e))
cancel_delayed_work_sync(e);
<|end_of_text|> | 7,216 |
--- initial
+++ final
@@ -1,11 +1,11 @@
static irqreturn_t ad7606_interrupt(int irq, void *dev_id) {
struct iio_dev *indio_dev = dev_id;
struct ad7606_state *st = iio_priv(indio_dev);
if (iio_buffer_enabled(indio_dev)) {
- if (!work_pending(&st->poll_work)) schedule_work(&st->poll_work);
+ schedule_work(&st->poll_work);
} else {
st->done = true;
wake_up_interruptible(&st->wq_data_avail);
}
return IRQ_HANDLED;
}<sep>@@
expression e;
@@
- if (!work_pending(e))
schedule_work(e);
<|end_of_text|> | 7,217 |
--- initial
+++ final
@@ -1,5 +1,5 @@
static void dcon_set_source(struct dcon_priv *dcon, int arg) {
if (dcon->pending_src == arg) return;
dcon->pending_src = arg;
- if ((dcon->curr_src != arg) && !work_pending(&dcon->switch_source)) schedule_work(&dcon->switch_source);
+ if ((dcon->curr_src != arg)) schedule_work(&dcon->switch_source);
}<sep>@@
expression e,e1;
@@
- if (e && !work_pending(e1))
+ if (e)
{ ... schedule_work(e1); ... }
<|end_of_text|> | 7,218 |
--- initial
+++ final
@@ -1,23 +1,23 @@
static int ipc_memory_callback(struct notifier_block *self, unsigned long action, void *arg) {
static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);
switch (action) {
case MEM_ONLINE: /* memory successfully brought online */
case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */
/*
* This is done by invoking the ipcns notifier chain with the
* IPC_MEMCHANGED event.
* In order not to keep the lock on the hotplug memory chain
* for too long, queue a work item that will, when woken up,
* activate the ipcns notification chain.
* No need to keep several ipc work items on the queue.
*/
- if (!work_pending(&ipc_memory_wq)) schedule_work(&ipc_memory_wq);
+ schedule_work(&ipc_memory_wq);
break;
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_ONLINE:
case MEM_CANCEL_OFFLINE:
default: break;
}
return NOTIFY_OK;
}<sep>@@
expression e;
@@
- if (!work_pending(e))
schedule_work(e);
<|end_of_text|> | 7,219 |
--- initial
+++ final
@@ -1,6 +1,6 @@
static void cyttsp4_watchdog_timer(unsigned long handle) {
struct cyttsp4 *cd = (struct cyttsp4 *)handle;
dev_vdbg(cd->dev, "%s: Watchdog timer triggered\n", __func__);
- if (!work_pending(&cd->watchdog_work)) schedule_work(&cd->watchdog_work);
+ schedule_work(&cd->watchdog_work);
return;
}<sep>@@
expression e;
@@
- if (!work_pending(e))
schedule_work(e);
<|end_of_text|> | 7,220 |
--- initial
+++ final
@@ -1,77 +1,76 @@
static int m41t80_probe(struct i2c_client *client, const struct i2c_device_id *id) {
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int rc = 0;
struct rtc_device *rtc = NULL;
struct rtc_time tm;
struct m41t80_data *m41t80_data = NULL;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev, "doesn't support I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK\n");
return -ENODEV;
}
m41t80_data = devm_kzalloc(&client->dev, sizeof(*m41t80_data), GFP_KERNEL);
if (!m41t80_data) return -ENOMEM;
m41t80_data->features = id->driver_data;
i2c_set_clientdata(client, m41t80_data);
if (client->irq > 0) {
rc = devm_request_threaded_irq(&client->dev, client->irq, NULL, m41t80_handle_irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "m41t80", client);
if (rc) {
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
client->irq = 0;
} else {
m41t80_rtc_ops.read_alarm = m41t80_read_alarm;
m41t80_rtc_ops.set_alarm = m41t80_set_alarm;
m41t80_rtc_ops.alarm_irq_enable = m41t80_alarm_irq_enable;
/* Enable the wakealarm */
device_init_wakeup(&client->dev, true);
}
}
rtc = devm_rtc_device_register(&client->dev, client->name, &m41t80_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) return PTR_ERR(rtc);
m41t80_data->rtc = rtc;
/* Make sure HT (Halt Update) bit is cleared */
rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
if (rc >= 0 && rc & M41T80_ALHOUR_HT) {
if (m41t80_data->features & M41T80_FEATURE_HT) {
m41t80_get_datetime(client, &tm);
dev_info(&client->dev, "HT bit was set!\n");
dev_info(&client->dev, "Power Down at %04i-%02i-%02i %02i:%02i:%02i\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
}
rc = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_HOUR, rc & ~M41T80_ALHOUR_HT);
}
if (rc < 0) {
dev_err(&client->dev, "Can't clear HT bit\n");
return rc;
}
/* Make sure ST (stop) bit is cleared */
rc = i2c_smbus_read_byte_data(client, M41T80_REG_SEC);
if (rc >= 0 && rc & M41T80_SEC_ST) rc = i2c_smbus_write_byte_data(client, M41T80_REG_SEC, rc & ~M41T80_SEC_ST);
if (rc < 0) {
dev_err(&client->dev, "Can't clear ST bit\n");
return rc;
}
/* Export sysfs entries */
rc = sysfs_create_group(&(&client->dev)->kobj, &attr_group);
if (rc) {
dev_err(&client->dev, "Failed to create sysfs group: %d\n", rc);
return rc;
}
- rc = devm_add_action(&client->dev, m41t80_remove_sysfs_group, &client->dev);
+ rc = devm_add_action_or_reset(&client->dev, m41t80_remove_sysfs_group, &client->dev);
if (rc) {
- m41t80_remove_sysfs_group(&client->dev);
dev_err(&client->dev, "Failed to add sysfs cleanup action: %d\n", rc);
return rc;
}
#ifdef CONFIG_RTC_DRV_M41T80_WDT
if (m41t80_data->features & M41T80_FEATURE_HT) {
save_client = client;
rc = misc_register(&wdt_dev);
if (rc) return rc;
rc = register_reboot_notifier(&wdt_notifier);
if (rc) {
misc_deregister(&wdt_dev);
return rc;
}
}
#endif
return 0;
}<sep>@@
expression e1,e2,e3,rc;
@@
rc =
- devm_add_action
+ devm_add_action_or_reset
(e1,e2,e3);
if (rc) {
...
- m41t80_remove_sysfs_group(e3);
...
}
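// devm_add_action_or_reset() runs the cleanup callback itself when
// registration fails, so the hand-rolled call in the error branch goes away.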
<|end_of_text|> | 7,221 |
--- initial
+++ final
@@ -1,77 +1,76 @@
static int m41t80_probe(struct i2c_client *client, const struct i2c_device_id *id) {
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int rc = 0;
struct rtc_device *rtc = NULL;
struct rtc_time tm;
struct m41t80_data *m41t80_data = NULL;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_err(&adapter->dev, "doesn't support I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK\n");
return -ENODEV;
}
m41t80_data = devm_kzalloc(&client->dev, sizeof(*m41t80_data), GFP_KERNEL);
if (!m41t80_data) return -ENOMEM;
m41t80_data->features = id->driver_data;
i2c_set_clientdata(client, m41t80_data);
if (client->irq > 0) {
rc = devm_request_threaded_irq(&client->dev, client->irq, NULL, m41t80_handle_irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "m41t80", client);
if (rc) {
dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
client->irq = 0;
} else {
m41t80_rtc_ops.read_alarm = m41t80_read_alarm;
m41t80_rtc_ops.set_alarm = m41t80_set_alarm;
m41t80_rtc_ops.alarm_irq_enable = m41t80_alarm_irq_enable;
/* Enable the wakealarm */
device_init_wakeup(&client->dev, true);
}
}
rtc = devm_rtc_device_register(&client->dev, client->name, &m41t80_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) return PTR_ERR(rtc);
m41t80_data->rtc = rtc;
/* Make sure HT (Halt Update) bit is cleared */
rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
if (rc >= 0 && rc & M41T80_ALHOUR_HT) {
if (m41t80_data->features & M41T80_FEATURE_HT) {
m41t80_get_datetime(client, &tm);
dev_info(&client->dev, "HT bit was set!\n");
dev_info(&client->dev, "Power Down at %04i-%02i-%02i %02i:%02i:%02i\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
}
rc = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_HOUR, rc & ~M41T80_ALHOUR_HT);
}
if (rc < 0) {
dev_err(&client->dev, "Can't clear HT bit\n");
return rc;
}
/* Make sure ST (stop) bit is cleared */
rc = i2c_smbus_read_byte_data(client, M41T80_REG_SEC);
if (rc >= 0 && rc & M41T80_SEC_ST) rc = i2c_smbus_write_byte_data(client, M41T80_REG_SEC, rc & ~M41T80_SEC_ST);
if (rc < 0) {
dev_err(&client->dev, "Can't clear ST bit\n");
return rc;
}
/* Export sysfs entries */
rc = sysfs_create_group(&(&client->dev)->kobj, &attr_group);
if (rc) {
dev_err(&client->dev, "Failed to create sysfs group: %d\n", rc);
return rc;
}
- rc = devm_add_action(&client->dev, m41t80_remove_sysfs_group, &client->dev);
+ rc = devm_add_action_or_reset(&client->dev, m41t80_remove_sysfs_group, &client->dev);
if (rc) {
- m41t80_remove_sysfs_group(&client->dev);
dev_err(&client->dev, "Failed to add sysfs cleanup action: %d\n", rc);
return rc;
}
#ifdef CONFIG_RTC_DRV_M41T80_WDT
if (m41t80_data->features & M41T80_FEATURE_HT) {
save_client = client;
rc = misc_register(&wdt_dev);
if (rc) return rc;
rc = register_reboot_notifier(&wdt_notifier);
if (rc) {
misc_deregister(&wdt_dev);
return rc;
}
}
#endif
return 0;
}<sep>@@
expression e1,e2,e3,rc;
@@
rc =
- devm_add_action
+ devm_add_action_or_reset
(e1,e2,e3);
if (rc) {
...
- m41t80_remove_sysfs_group(e3);
...
}
<|end_of_text|> | 7,222 |
--- initial
+++ final
@@ -1,32 +1,31 @@
static int lm90_init_client(struct i2c_client *client, struct lm90_data *data) {
int config, convrate;
convrate = lm90_read_reg(client, LM90_REG_R_CONVRATE);
if (convrate < 0) return convrate;
data->convrate_orig = convrate;
/*
* Start the conversions.
*/
lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
if (config < 0) return config;
data->config_orig = config;
/* Check Temperature Range Select */
if (data->kind == adt7461 || data->kind == tmp451) {
if (config & 0x04) data->flags |= LM90_FLAG_ADT7461_EXT;
}
/*
* Put MAX6680/MAX8881 into extended resolution (bit 0x10,
* 0.125 degree resolution) and range (0x08, extend range
* to -64 degree) mode for the remote temperature sensor.
*/
if (data->kind == max6680) config |= 0x18;
/*
* Select external channel 0 for max6695/96
*/
if (data->kind == max6696) config &= ~0x08;
config &= 0xBF; /* run */
if (config != data->config_orig) /* Only write if changed */
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
- devm_add_action(&client->dev, lm90_restore_conf, data);
- return 0;
+ return devm_add_action_or_reset(&client->dev, lm90_restore_conf, data);
}<sep>@@
expression e1,e2,e3;
@@
- devm_add_action(e1,e2,e3);
- return 0;
+ return devm_add_action_or_reset(e1,e2,e3);
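// Tail-call form: when devm_add_action() was immediately followed by
// "return 0", the _or_reset variant's return value can be returned directly.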
<|end_of_text|> | 7,223 |
--- initial
+++ final
@@ -1,65 +1,67 @@
static int lm90_probe(struct i2c_client *client, const struct i2c_device_id *id) {
struct device *dev = &client->dev;
struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
struct lm90_data *data;
struct regulator *regulator;
struct device *hwmon_dev;
int groups = 0;
int err;
regulator = devm_regulator_get(dev, "vcc");
if (IS_ERR(regulator)) return PTR_ERR(regulator);
err = regulator_enable(regulator);
if (err < 0) {
dev_err(dev, "Failed to enable regulator: %d\n", err);
return err;
}
- devm_add_action(dev, lm90_regulator_disable, regulator);
+ err = devm_add_action_or_reset(dev, lm90_regulator_disable, regulator);
+ if (err) return err;
data = devm_kzalloc(dev, sizeof(struct lm90_data), GFP_KERNEL);
if (!data) return -ENOMEM;
data->client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* Set the device type */
data->kind = id->driver_data;
if (data->kind == adm1032) {
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) client->flags &= ~I2C_CLIENT_PEC;
}
/*
* Different devices have different alarm bits triggering the
* ALERT# output
*/
data->alert_alarms = lm90_params[data->kind].alert_alarms;
/* Set chip capabilities */
data->flags = lm90_params[data->kind].flags;
data->reg_local_ext = lm90_params[data->kind].reg_local_ext;
/* Set maximum conversion rate */
data->max_convrate = lm90_params[data->kind].max_convrate;
/* Initialize the LM90 chip */
err = lm90_init_client(client, data);
if (err < 0) {
dev_err(dev, "Failed to initialize device\n");
return err;
}
/* Register sysfs hooks */
data->groups[groups++] = &lm90_group;
if (data->flags & LM90_HAVE_OFFSET) data->groups[groups++] = &lm90_temp2_offset_group;
if (data->flags & LM90_HAVE_EMERGENCY) data->groups[groups++] = &lm90_emergency_group;
if (data->flags & LM90_HAVE_EMERGENCY_ALARM) data->groups[groups++] = &lm90_emergency_alarm_group;
if (data->flags & LM90_HAVE_TEMP3) data->groups[groups++] = &lm90_temp3_group;
if (client->flags & I2C_CLIENT_PEC) {
err = device_create_file(dev, &dev_attr_pec);
if (err) return err;
- devm_add_action(dev, lm90_remove_pec, dev);
+ err = devm_add_action_or_reset(dev, lm90_remove_pec, dev);
+ if (err) return err;
}
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, data, data->groups);
if (IS_ERR(hwmon_dev)) return PTR_ERR(hwmon_dev);
if (client->irq) {
dev_dbg(dev, "IRQ: %d\n", client->irq);
err = devm_request_threaded_irq(dev, client->irq, NULL, lm90_irq_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "lm90", client);
if (err < 0) {
dev_err(dev, "cannot request IRQ %d\n", client->irq);
return err;
}
}
return 0;
}<sep>@@
expression e1,e2,e3;
int err;
@@
if (...) { ... return err; }
... when any
- devm_add_action(e1,e2,e3);
+ err = devm_add_action_or_reset(e1,e2,e3);
+ if (err)
+ return err;
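// This variant requires an existing "int err" already used for early error
// returns, and makes the caller propagate a failed registration instead of
// silently ignoring it.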
<|end_of_text|> | 7,224 |
--- initial
+++ final
@@ -1,32 +1,31 @@
static int lm90_init_client(struct i2c_client *client, struct lm90_data *data) {
int config, convrate;
convrate = lm90_read_reg(client, LM90_REG_R_CONVRATE);
if (convrate < 0) return convrate;
data->convrate_orig = convrate;
/*
* Start the conversions.
*/
lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
if (config < 0) return config;
data->config_orig = config;
/* Check Temperature Range Select */
if (data->kind == adt7461 || data->kind == tmp451) {
if (config & 0x04) data->flags |= LM90_FLAG_ADT7461_EXT;
}
/*
* Put MAX6680/MAX8881 into extended resolution (bit 0x10,
* 0.125 degree resolution) and range (0x08, extend range
* to -64 degree) mode for the remote temperature sensor.
*/
if (data->kind == max6680) config |= 0x18;
/*
* Select external channel 0 for max6695/96
*/
if (data->kind == max6696) config &= ~0x08;
config &= 0xBF; /* run */
if (config != data->config_orig) /* Only write if changed */
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
- devm_add_action(&client->dev, lm90_restore_conf, data);
- return 0;
+ return devm_add_action_or_reset(&client->dev, lm90_restore_conf, data);
}<sep>@@
expression e1,e2,e3;
@@
- devm_add_action(e1,e2,e3);
- return 0;
+ return devm_add_action_or_reset(e1,e2,e3);
<|end_of_text|> | 7,225 |
--- initial
+++ final
@@ -1,65 +1,67 @@
static int lm90_probe(struct i2c_client *client, const struct i2c_device_id *id) {
struct device *dev = &client->dev;
struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
struct lm90_data *data;
struct regulator *regulator;
struct device *hwmon_dev;
int groups = 0;
int err;
regulator = devm_regulator_get(dev, "vcc");
if (IS_ERR(regulator)) return PTR_ERR(regulator);
err = regulator_enable(regulator);
if (err < 0) {
dev_err(dev, "Failed to enable regulator: %d\n", err);
return err;
}
- devm_add_action(dev, lm90_regulator_disable, regulator);
+ err = devm_add_action_or_reset(dev, lm90_regulator_disable, regulator);
+ if (err) return err;
data = devm_kzalloc(dev, sizeof(struct lm90_data), GFP_KERNEL);
if (!data) return -ENOMEM;
data->client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* Set the device type */
data->kind = id->driver_data;
if (data->kind == adm1032) {
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE)) client->flags &= ~I2C_CLIENT_PEC;
}
/*
* Different devices have different alarm bits triggering the
* ALERT# output
*/
data->alert_alarms = lm90_params[data->kind].alert_alarms;
/* Set chip capabilities */
data->flags = lm90_params[data->kind].flags;
data->reg_local_ext = lm90_params[data->kind].reg_local_ext;
/* Set maximum conversion rate */
data->max_convrate = lm90_params[data->kind].max_convrate;
/* Initialize the LM90 chip */
err = lm90_init_client(client, data);
if (err < 0) {
dev_err(dev, "Failed to initialize device\n");
return err;
}
/* Register sysfs hooks */
data->groups[groups++] = &lm90_group;
if (data->flags & LM90_HAVE_OFFSET) data->groups[groups++] = &lm90_temp2_offset_group;
if (data->flags & LM90_HAVE_EMERGENCY) data->groups[groups++] = &lm90_emergency_group;
if (data->flags & LM90_HAVE_EMERGENCY_ALARM) data->groups[groups++] = &lm90_emergency_alarm_group;
if (data->flags & LM90_HAVE_TEMP3) data->groups[groups++] = &lm90_temp3_group;
if (client->flags & I2C_CLIENT_PEC) {
err = device_create_file(dev, &dev_attr_pec);
if (err) return err;
- devm_add_action(dev, lm90_remove_pec, dev);
+ err = devm_add_action_or_reset(dev, lm90_remove_pec, dev);
+ if (err) return err;
}
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name, data, data->groups);
if (IS_ERR(hwmon_dev)) return PTR_ERR(hwmon_dev);
if (client->irq) {
dev_dbg(dev, "IRQ: %d\n", client->irq);
err = devm_request_threaded_irq(dev, client->irq, NULL, lm90_irq_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT, "lm90", client);
if (err < 0) {
dev_err(dev, "cannot request IRQ %d\n", client->irq);
return err;
}
}
return 0;
}<sep>@@
expression e1,e2,e3;
int err;
@@
if (...) { ... return err; }
... when any
- devm_add_action(e1,e2,e3);
+ err = devm_add_action_or_reset(e1,e2,e3);
+ if (err)
+ return err;
<|end_of_text|> | 7,226 |
--- initial
+++ final
@@ -1,73 +1,73 @@
static int b44_init_one(struct ssb_device *sdev, const struct ssb_device_id *ent) {
struct net_device *dev;
struct b44 *bp;
int err;
instance++;
pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
dev = alloc_etherdev(sizeof(*bp));
if (!dev) {
err = -ENOMEM;
goto out;
}
SET_NETDEV_DEV(dev, sdev->dev);
/* No interesting netdevice features in this card... */
dev->features |= 0;
bp = netdev_priv(dev);
bp->sdev = sdev;
bp->dev = dev;
bp->force_copybreak = 0;
bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
spin_lock_init(&bp->lock);
bp->rx_pending = B44_DEF_RX_RING_PENDING;
bp->tx_pending = B44_DEF_TX_RING_PENDING;
dev->netdev_ops = &b44_netdev_ops;
netif_napi_add(dev, &bp->napi, b44_poll, 64);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->irq = sdev->irq;
SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
err = ssb_bus_powerup(sdev->bus, 0);
if (err) {
dev_err(sdev->dev, "Failed to powerup the bus\n");
goto err_out_free_dev;
}
- if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) || dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+ if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
dev_err(sdev->dev, "Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
}
err = b44_get_invariants(bp);
if (err) {
dev_err(sdev->dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_powerdown;
}
bp->mii_if.dev = dev;
bp->mii_if.mdio_read = b44_mii_read;
bp->mii_if.mdio_write = b44_mii_write;
bp->mii_if.phy_id = bp->phy_addr;
bp->mii_if.phy_id_mask = 0x1f;
bp->mii_if.reg_num_mask = 0x1f;
/* By default, advertise all speed/duplex settings. */
bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL | B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
/* By default, auto-negotiate PAUSE. */
bp->flags |= B44_FLAG_PAUSE_AUTO;
err = register_netdev(dev);
if (err) {
dev_err(sdev->dev, "Cannot register net device, aborting\n");
goto err_out_powerdown;
}
netif_carrier_off(dev);
ssb_set_drvdata(sdev, dev);
/* Chip reset provides power to the b44 MAC & PCI cores, which
* is necessary for MAC register access.
*/
b44_chip_reset(bp, B44_CHIP_RESET_FULL);
/* do a phy reset to test if there is an active phy */
if (b44_phy_reset(bp) < 0) bp->phy_addr = B44_PHY_ADDR_NO_PHY;
netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
return 0;
err_out_powerdown:
ssb_bus_may_powerdown(sdev->bus);
err_out_free_dev:
free_netdev(dev);
out:
return err;
}<sep>@@
expression e,v;
statement S;
@@
- if (dma_set_mask(e, v) || dma_set_coherent_mask(e, v))
+ if (dma_set_mask_and_coherent(e, v))
S<|end_of_text|> | 7,227 |
--- initial
+++ final
@@ -1,42 +1,42 @@
static int ssb_hcd_probe(struct ssb_device *dev, const struct ssb_device_id *id) {
int err, tmp;
int start, len;
u16 chipid_top;
u16 coreid = dev->id.coreid;
struct ssb_hcd_device *usb_dev;
/* USB cores are only connected on embedded devices. */
chipid_top = (dev->bus->chip_id & 0xFF00);
if (chipid_top != 0x4700 && chipid_top != 0x5300) return -ENODEV;
/* TODO: Probably need checks here; is the core connected? */
- if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) || dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32))) return -EOPNOTSUPP;
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32))) return -EOPNOTSUPP;
usb_dev = kzalloc(sizeof(struct ssb_hcd_device), GFP_KERNEL);
if (!usb_dev) return -ENOMEM;
/* We currently always attach SSB_DEV_USB11_HOSTDEV
* as HOST OHCI. If we want to attach it as Client device,
* we must branch here and call into the (yet to
* be written) Client mode driver. Same for remove(). */
usb_dev->enable_flags = ssb_hcd_init_chip(dev);
tmp = ssb_read32(dev, SSB_ADMATCH0);
start = ssb_admatch_base(tmp);
len = (coreid == SSB_DEV_USB20_HOST) ? 0x800 : ssb_admatch_size(tmp);
usb_dev->ohci_dev = ssb_hcd_create_pdev(dev, true, start, len);
if (IS_ERR(usb_dev->ohci_dev)) {
err = PTR_ERR(usb_dev->ohci_dev);
goto err_free_usb_dev;
}
if (coreid == SSB_DEV_USB20_HOST) {
start = ssb_admatch_base(tmp) + 0x800; /* ehci core offset */
usb_dev->ehci_dev = ssb_hcd_create_pdev(dev, false, start, len);
if (IS_ERR(usb_dev->ehci_dev)) {
err = PTR_ERR(usb_dev->ehci_dev);
goto err_unregister_ohci_dev;
}
}
ssb_set_drvdata(dev, usb_dev);
return 0;
err_unregister_ohci_dev:
platform_device_unregister(usb_dev->ohci_dev);
err_free_usb_dev:
kfree(usb_dev);
return err;
}<sep>@@
expression e,v;
statement S;
@@
- if (dma_set_mask(e, v) || dma_set_coherent_mask(e, v))
+ if (dma_set_mask_and_coherent(e, v))
S<|end_of_text|> | 7,228 |
--- initial
+++ final
@@ -1,98 +1,97 @@
struct vio_dev *vio_register_device_node(struct device_node *of_node) {
struct vio_dev *viodev;
struct device_node *parent_node;
const __be32 *prop;
enum vio_dev_family family;
const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
/*
* Determine if this node is under the /vdevice node or under the
* /ibm,platform-facilities node. This decides the device's family.
*/
parent_node = of_get_parent(of_node);
if (parent_node) {
if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
family = PFO;
else if (!strcmp(parent_node->full_name, "/vdevice"))
family = VDEVICE;
else {
pr_warn("%s: parent(%s) of %s not recognized.\n", __func__, parent_node->full_name, of_node_name);
of_node_put(parent_node);
return NULL;
}
of_node_put(parent_node);
} else {
pr_warn("%s: could not determine the parent of node %s.\n", __func__, of_node_name);
return NULL;
}
if (family == PFO) {
if (of_get_property(of_node, "interrupt-controller", NULL)) {
pr_debug("%s: Skipping the interrupt controller %s.\n", __func__, of_node_name);
return NULL;
}
}
/* allocate a vio_dev for this node */
viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
if (viodev == NULL) {
pr_warn("%s: allocation failure for VIO device.\n", __func__);
return NULL;
}
/* we need the 'device_type' property, in order to match with drivers */
viodev->family = family;
if (viodev->family == VDEVICE) {
unsigned int unit_address;
if (of_node->type != NULL)
viodev->type = of_node->type;
else {
pr_warn("%s: node %s is missing the 'device_type' "
"property.\n",
__func__, of_node_name);
goto out;
}
prop = of_get_property(of_node, "reg", NULL);
if (prop == NULL) {
pr_warn("%s: node %s missing 'reg'\n", __func__, of_node_name);
goto out;
}
unit_address = of_read_number(prop, 1);
dev_set_name(&viodev->dev, "%x", unit_address);
viodev->irq = irq_of_parse_and_map(of_node, 0);
viodev->unit_address = unit_address;
} else {
/* PFO devices need their resource_id for submitting COP_OPs.
* This is an optional field for devices, but is required when
* performing synchronous ops */
prop = of_get_property(of_node, "ibm,resource-id", NULL);
if (prop != NULL) viodev->resource_id = of_read_number(prop, 1);
dev_set_name(&viodev->dev, "%s", of_node_name);
viodev->type = of_node_name;
viodev->irq = 0;
}
viodev->name = of_node->name;
viodev->dev.of_node = of_node_get(of_node);
set_dev_node(&viodev->dev, of_node_to_nid(of_node));
/* init generic 'struct device' fields: */
viodev->dev.parent = &vio_bus_device.dev;
viodev->dev.bus = &vio_bus_type;
viodev->dev.release = vio_dev_release;
if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_set_dma_ops(viodev);
else
set_dma_ops(&viodev->dev, &dma_iommu_ops);
set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
/* needed to ensure proper operation of coherent allocations
* later, in case the driver doesn't set it explicitly */
- dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
- dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
+ dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
}
/* register with generic device framework */
if (device_register(&viodev->dev)) {
printk(KERN_ERR "%s: failed to register device %s\n", __func__, dev_name(&viodev->dev));
put_device(&viodev->dev);
return NULL;
}
return viodev;
out: /* Use this exit point for any return prior to device_register */
kfree(viodev);
return NULL;
}<sep>@@
expression e,v;
@@
- dma_set_mask(e, v);
- dma_set_coherent_mask(e, v);
+ dma_set_mask_and_coherent(e, v);
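// dma_set_mask_and_coherent() sets the streaming and coherent masks in one
// call; the same helper also replaces the open-coded "||" form matched in
// the probe functions above.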
<|end_of_text|> | 7,229 |
--- initial
+++ final
@@ -1,36 +1,36 @@
static int bcma_hcd_probe(struct bcma_device *dev) {
int err;
u16 chipid_top;
u32 ohci_addr;
struct bcma_hcd_device *usb_dev;
struct bcma_chipinfo *chipinfo;
chipinfo = &dev->bus->chipinfo;
/* USB cores are only connected on embedded devices. */
chipid_top = (chipinfo->id & 0xFF00);
if (chipid_top != 0x4700 && chipid_top != 0x5300) return -ENODEV;
/* TODO: Probably need checks here; is the core connected? */
- if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) || dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32))) return -EOPNOTSUPP;
+ if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32))) return -EOPNOTSUPP;
usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
if (!usb_dev) return -ENOMEM;
bcma_hcd_init_chip(dev);
/* In AI chips EHCI is addrspace 0, OHCI is 1 */
ohci_addr = dev->addr1;
if ((chipinfo->id == 0x5357 || chipinfo->id == 0x4749) && chipinfo->rev == 0) ohci_addr = 0x18009000;
usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, true, ohci_addr);
if (IS_ERR(usb_dev->ohci_dev)) {
err = PTR_ERR(usb_dev->ohci_dev);
goto err_free_usb_dev;
}
usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, false, dev->addr);
if (IS_ERR(usb_dev->ehci_dev)) {
err = PTR_ERR(usb_dev->ehci_dev);
goto err_unregister_ohci_dev;
}
bcma_set_drvdata(dev, usb_dev);
return 0;
err_unregister_ohci_dev:
platform_device_unregister(usb_dev->ohci_dev);
err_free_usb_dev:
kfree(usb_dev);
return err;
}<sep>@@
expression e,v;
statement S;
@@
- if (dma_set_mask(e, v) || dma_set_coherent_mask(e, v))
+ if (dma_set_mask_and_coherent(e, v))
S<|end_of_text|> | 7,231 |
--- initial
+++ final
@@ -1,70 +1,69 @@
static int ci_hdrc_imx_probe(struct platform_device *pdev) {
struct ci_hdrc_imx_data *data;
struct ci_hdrc_platform_data pdata = {
.name = "ci_hdrc_imx",
.capoffset = DEF_CAPOFFSET,
.flags = CI_HDRC_REQUIRE_TRANSCEIVER | CI_HDRC_DISABLE_STREAMING,
};
int ret;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
dev_err(&pdev->dev, "Failed to allocate ci_hdrc-imx data!\n");
return -ENOMEM;
}
data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
if (IS_ERR(data->usbmisc_data)) return PTR_ERR(data->usbmisc_data);
data->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(data->clk)) {
dev_err(&pdev->dev, "Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
return PTR_ERR(data->clk);
}
ret = clk_prepare_enable(data->clk);
if (ret) {
dev_err(&pdev->dev, "Failed to prepare or enable clock, err=%d\n", ret);
return ret;
}
data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
if (!IS_ERR(data->phy)) {
ret = usb_phy_init(data->phy);
if (ret) {
dev_err(&pdev->dev, "unable to init phy: %d\n", ret);
goto err_clk;
}
} else if (PTR_ERR(data->phy) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_clk;
}
pdata.phy = data->phy;
- if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) goto err_clk;
if (data->usbmisc_data) {
ret = imx_usbmisc_init(data->usbmisc_data);
if (ret) {
dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", ret);
goto err_clk;
}
}
data->ci_pdev = ci_hdrc_add_device(&pdev->dev, pdev->resource, pdev->num_resources, &pdata);
if (IS_ERR(data->ci_pdev)) {
ret = PTR_ERR(data->ci_pdev);
dev_err(&pdev->dev, "Can't register ci_hdrc platform device, err=%d\n", ret);
goto err_clk;
}
if (data->usbmisc_data) {
ret = imx_usbmisc_init_post(data->usbmisc_data);
if (ret) {
dev_err(&pdev->dev, "usbmisc post failed, ret=%d\n", ret);
goto disable_device;
}
}
platform_set_drvdata(pdev, data);
pm_runtime_no_callbacks(&pdev->dev);
pm_runtime_enable(&pdev->dev);
return 0;
disable_device:
ci_hdrc_remove_device(data->ci_pdev);
err_clk:
clk_disable_unprepare(data->clk);
return ret;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev.dma_mask)
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
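// This variant matches the "coerce" pattern: the removed lines forced a
// missing dev.dma_mask to point at coherent_dma_mask before setting the
// coherent mask, which is exactly what dma_coerce_mask_and_coherent()
// does in one call.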
<|end_of_text|> | 7,232 |
--- initial
+++ final
@@ -1,51 +1,50 @@
static int dwc3_exynos_probe(struct platform_device *pdev) {
struct dwc3_exynos *exynos;
struct clk *clk;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
int ret = -ENOMEM;
exynos = devm_kzalloc(dev, sizeof(*exynos), GFP_KERNEL);
if (!exynos) {
dev_err(dev, "not enough memory\n");
goto err1;
}
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!dev->dma_mask) dev->dma_mask = &dev->coherent_dma_mask;
- ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) goto err1;
platform_set_drvdata(pdev, exynos);
ret = dwc3_exynos_register_phys(exynos);
if (ret) {
dev_err(dev, "couldn't register PHYs\n");
goto err1;
}
clk = devm_clk_get(dev, "usbdrd30");
if (IS_ERR(clk)) {
dev_err(dev, "couldn't get clock\n");
ret = -EINVAL;
goto err1;
}
exynos->dev = dev;
exynos->clk = clk;
clk_prepare_enable(exynos->clk);
if (node) {
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to add dwc3 core\n");
goto err2;
}
} else {
dev_err(dev, "no device node, failed to add dwc3 core\n");
ret = -ENODEV;
goto err2;
}
return 0;
err2:
clk_disable_unprepare(clk);
err1:
return ret;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- ret = dma_set_coherent_mask(dev, v);
+ ret = dma_coerce_mask_and_coherent(dev, v);
<|end_of_text|> | 7,233 |
--- initial
+++ final
@@ -1,67 +1,66 @@
static int ehci_atmel_drv_probe(struct platform_device *pdev) {
struct usb_hcd *hcd;
const struct hc_driver *driver = &ehci_atmel_hc_driver;
struct resource *res;
struct ehci_hcd *ehci;
int irq;
int retval;
if (usb_disabled()) return -ENODEV;
pr_debug("Initializing Atmel-SoC USB Host Controller\n");
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev, "Found HC with no IRQ. Check %s setup!\n", dev_name(&pdev->dev));
retval = -ENODEV;
goto fail_create_hcd;
}
/* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) goto fail_create_hcd;
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto fail_create_hcd;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Found HC with no register addr. Check %s setup!\n", dev_name(&pdev->dev));
retval = -ENODEV;
goto fail_request_resource;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto fail_request_resource;
}
iclk = devm_clk_get(&pdev->dev, "ehci_clk");
if (IS_ERR(iclk)) {
dev_err(&pdev->dev, "Error getting interface clock\n");
retval = -ENOENT;
goto fail_request_resource;
}
fclk = devm_clk_get(&pdev->dev, "uhpck");
if (IS_ERR(fclk)) {
dev_err(&pdev->dev, "Error getting function clock\n");
retval = -ENOENT;
goto fail_request_resource;
}
ehci = hcd_to_ehci(hcd);
/* registers start at offset 0x0 */
ehci->caps = hcd->regs;
atmel_start_ehci(pdev);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval) goto fail_add_hcd;
return retval;
fail_add_hcd:
atmel_stop_ehci(pdev);
fail_request_resource:
usb_put_hcd(hcd);
fail_create_hcd:
dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), retval);
return retval;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev.dma_mask)
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
<|end_of_text|> | 7,234 |
--- initial
+++ final
@@ -1,114 +1,113 @@
static int ehci_hcd_omap_probe(struct platform_device *pdev) {
struct device *dev = &pdev->dev;
struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
struct resource *res;
struct usb_hcd *hcd;
void __iomem *regs;
int ret;
int irq;
int i;
struct omap_hcd *omap;
if (usb_disabled()) return -ENODEV;
if (!dev->parent) {
dev_err(dev, "Missing parent device\n");
return -ENODEV;
}
/* For DT boot, get platform data from parent. i.e. usbhshost */
if (dev->of_node) {
pdata = dev_get_platdata(dev->parent);
dev->platform_data = pdata;
}
if (!pdata) {
dev_err(dev, "Missing platform data\n");
return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "EHCI irq failed\n");
return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
if (IS_ERR(regs)) return PTR_ERR(regs);
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!dev->dma_mask) dev->dma_mask = &dev->coherent_dma_mask;
- ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) return ret;
ret = -ENODEV;
hcd = usb_create_hcd(&ehci_omap_hc_driver, dev, dev_name(dev));
if (!hcd) {
dev_err(dev, "Failed to create HCD\n");
return -ENOMEM;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
hcd_to_ehci(hcd)->caps = regs;
omap = (struct omap_hcd *)hcd_to_ehci(hcd)->priv;
omap->nports = pdata->nports;
platform_set_drvdata(pdev, hcd);
/* get the PHY devices if needed */
for (i = 0; i < omap->nports; i++) {
struct usb_phy *phy;
/* get the PHY device */
if (dev->of_node)
phy = devm_usb_get_phy_by_phandle(dev, "phys", i);
else
phy = devm_usb_get_phy_dev(dev, i);
if (IS_ERR(phy)) {
/* Don't bail out if PHY is not absolutely necessary */
if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY) continue;
ret = PTR_ERR(phy);
dev_err(dev, "Can't get PHY device for port %d: %d\n", i, ret);
goto err_phy;
}
omap->phy[i] = phy;
if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) {
usb_phy_init(omap->phy[i]);
/* bring PHY out of suspend */
usb_phy_set_suspend(omap->phy[i], 0);
}
}
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
/*
* An undocumented "feature" in the OMAP3 EHCI controller,
* causes suspended ports to be taken out of suspend when
* the USBCMD.Run/Stop bit is cleared (for example when
* we do ehci_bus_suspend).
* This breaks suspend-resume if the root-hub is allowed
* to suspend. Writing 1 to this undocumented register bit
* disables this feature and restores normal behavior.
*/
ehci_write(regs, EHCI_INSNREG04, EHCI_INSNREG04_DISABLE_UNSUSPEND);
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_err(dev, "failed to add hcd with err %d\n", ret);
goto err_pm_runtime;
}
/*
* Bring PHYs out of reset for non PHY modes.
* Even though HSIC mode is a PHY-less mode, the reset
* line exists between the chips and can be modelled
* as a PHY device for reset control.
*/
for (i = 0; i < omap->nports; i++) {
if (!omap->phy[i] || pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) continue;
usb_phy_init(omap->phy[i]);
/* bring PHY out of suspend */
usb_phy_set_suspend(omap->phy[i], 0);
}
return 0;
err_pm_runtime:
pm_runtime_put_sync(dev);
err_phy:
for (i = 0; i < omap->nports; i++) {
if (omap->phy[i]) usb_phy_shutdown(omap->phy[i]);
}
usb_put_hcd(hcd);
return ret;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- ret = dma_set_coherent_mask(dev, v);
+ ret = dma_coerce_mask_and_coherent(dev, v);
<|end_of_text|> | 7,235 |
--- initial
+++ final
@@ -1,100 +1,99 @@
static int ehci_orion_drv_probe(struct platform_device *pdev) {
struct orion_ehci_data *pd = dev_get_platdata(&pdev->dev);
const struct mbus_dram_target_info *dram;
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct clk *clk;
void __iomem *regs;
int irq, err;
enum orion_ehci_phy_ver phy_version;
if (usb_disabled()) return -ENODEV;
pr_debug("Initializing Orion-SoC USB Host Controller\n");
if (pdev->dev.of_node)
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
else
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev, "Found HC with no IRQ. Check %s setup!\n", dev_name(&pdev->dev));
err = -ENODEV;
goto err1;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Found HC with no register addr. Check %s setup!\n", dev_name(&pdev->dev));
err = -ENODEV;
goto err1;
}
/*
* Right now device-tree probed devices don't get dma_mask
* set. Since shared usb code relies on it, set it here for
* now. Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) goto err1;
if (!request_mem_region(res->start, resource_size(res), ehci_orion_hc_driver.description)) {
dev_dbg(&pdev->dev, "controller already in use\n");
err = -EBUSY;
goto err1;
}
regs = ioremap(res->start, resource_size(res));
if (regs == NULL) {
dev_dbg(&pdev->dev, "error mapping memory\n");
err = -EFAULT;
goto err2;
}
/* Not all platforms can gate the clock, so it is not
an error if the clock does not exists. */
clk = clk_get(&pdev->dev, NULL);
if (!IS_ERR(clk)) {
clk_prepare_enable(clk);
clk_put(clk);
}
hcd = usb_create_hcd(&ehci_orion_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
err = -ENOMEM;
goto err3;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs + 0x100;
hcd->has_tt = 1;
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram) ehci_orion_conf_mbus_windows(hcd, dram);
/*
* setup Orion USB controller.
*/
if (pdev->dev.of_node)
phy_version = EHCI_PHY_NA;
else
phy_version = pd->phy_version;
switch (phy_version) {
case EHCI_PHY_NA: /* dont change USB phy settings */ break;
case EHCI_PHY_ORION: orion_usb_phy_v1_setup(hcd); break;
case EHCI_PHY_DD:
case EHCI_PHY_KW:
default: printk(KERN_WARNING "Orion ehci -USB phy version isn't supported.\n");
}
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) goto err4;
return 0;
err4:
usb_put_hcd(hcd);
err3:
if (!IS_ERR(clk)) {
clk_disable_unprepare(clk);
clk_put(clk);
}
iounmap(regs);
err2:
release_mem_region(res->start, resource_size(res));
err1:
dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), err);
return err;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev.dma_mask)
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
<|end_of_text|> | 7,236 |
--- initial
+++ final
@@ -1,52 +1,51 @@
static int ehci_platform_probe(struct platform_device *dev) {
struct usb_hcd *hcd;
struct resource *res_mem;
struct usb_ehci_pdata *pdata;
int irq;
int err;
if (usb_disabled()) return -ENODEV;
/*
* use reasonable defaults so platforms don't have to provide these.
* with DT probing on ARM, none of these are set.
*/
if (!dev_get_platdata(&dev->dev)) dev->dev.platform_data = &ehci_platform_defaults;
- if (!dev->dev.dma_mask) dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
- err = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+ err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
if (err) return err;
pdata = dev_get_platdata(&dev->dev);
irq = platform_get_irq(dev, 0);
if (irq < 0) {
dev_err(&dev->dev, "no irq provided");
return irq;
}
res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res_mem) {
dev_err(&dev->dev, "no memory resource provided");
return -ENXIO;
}
if (pdata->power_on) {
err = pdata->power_on(dev);
if (err < 0) return err;
}
hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev, dev_name(&dev->dev));
if (!hcd) {
err = -ENOMEM;
goto err_power;
}
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_put_hcd;
}
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) goto err_put_hcd;
platform_set_drvdata(dev, hcd);
return err;
err_put_hcd:
usb_put_hcd(hcd);
err_power:
if (pdata->power_off) pdata->power_off(dev);
return err;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev.dma_mask)
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
<|end_of_text|> | 7,237 |
--- initial
+++ final
@@ -1,98 +1,97 @@
static int s5p_ehci_probe(struct platform_device *pdev) {
struct s5p_ehci_platdata *pdata = dev_get_platdata(&pdev->dev);
struct s5p_ehci_hcd *s5p_ehci;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct resource *res;
struct usb_phy *phy;
int irq;
int err;
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) return err;
s5p_setup_vbus_gpio(pdev);
hcd = usb_create_hcd(&s5p_ehci_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Unable to create HCD\n");
return -ENOMEM;
}
s5p_ehci = to_s5p_ehci(hcd);
if (of_device_is_compatible(pdev->dev.of_node, "samsung,exynos5440-ehci")) {
s5p_ehci->pdata = &empty_platdata;
goto skip_phy;
}
phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(phy)) {
/* Fallback to pdata */
if (!pdata) {
usb_put_hcd(hcd);
dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
return -EPROBE_DEFER;
} else {
s5p_ehci->pdata = pdata;
}
} else {
s5p_ehci->phy = phy;
s5p_ehci->otg = phy->otg;
}
skip_phy:
s5p_ehci->clk = devm_clk_get(&pdev->dev, "usbhost");
if (IS_ERR(s5p_ehci->clk)) {
dev_err(&pdev->dev, "Failed to get usbhost clock\n");
err = PTR_ERR(s5p_ehci->clk);
goto fail_clk;
}
err = clk_prepare_enable(s5p_ehci->clk);
if (err) goto fail_clk;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Failed to get I/O memory\n");
err = -ENXIO;
goto fail_io;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
hcd->regs = devm_ioremap(&pdev->dev, res->start, hcd->rsrc_len);
if (!hcd->regs) {
dev_err(&pdev->dev, "Failed to remap I/O memory\n");
err = -ENOMEM;
goto fail_io;
}
irq = platform_get_irq(pdev, 0);
if (!irq) {
dev_err(&pdev->dev, "Failed to get IRQ\n");
err = -ENODEV;
goto fail_io;
}
if (s5p_ehci->otg) s5p_ehci->otg->set_host(s5p_ehci->otg, &hcd->self);
if (s5p_ehci->phy)
usb_phy_init(s5p_ehci->phy);
else if (s5p_ehci->pdata->phy_init)
s5p_ehci->pdata->phy_init(pdev, USB_PHY_TYPE_HOST);
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
/* DMA burst Enable */
writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto fail_add_hcd;
}
platform_set_drvdata(pdev, hcd);
return 0;
fail_add_hcd:
if (s5p_ehci->phy)
usb_phy_shutdown(s5p_ehci->phy);
else if (s5p_ehci->pdata->phy_exit)
s5p_ehci->pdata->phy_exit(pdev, USB_PHY_TYPE_HOST);
fail_io:
clk_disable_unprepare(s5p_ehci->clk);
fail_clk:
usb_put_hcd(hcd);
return err;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev.dma_mask)
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
<|end_of_text|> | 7,238 |
--- initial
+++ final
@@ -1,65 +1,64 @@
static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) {
struct usb_hcd *hcd;
struct spear_ehci *sehci;
struct resource *res;
struct clk *usbh_clk;
const struct hc_driver *driver = &ehci_spear_hc_driver;
int irq, retval;
if (usb_disabled()) return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
retval = irq;
goto fail;
}
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) goto fail;
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
dev_err(&pdev->dev, "Error getting interface clock\n");
retval = PTR_ERR(usbh_clk);
goto fail;
}
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto fail;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
retval = -ENODEV;
goto err_put_hcd;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
if (!devm_request_mem_region(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len, driver->description)) {
retval = -EBUSY;
goto err_put_hcd;
}
hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
if (hcd->regs == NULL) {
dev_dbg(&pdev->dev, "error mapping memory\n");
retval = -ENOMEM;
goto err_put_hcd;
}
sehci = to_spear_ehci(hcd);
sehci->clk = usbh_clk;
/* registers start at offset 0x0 */
hcd_to_ehci(hcd)->caps = hcd->regs;
clk_prepare_enable(sehci->clk);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval) goto err_stop_ehci;
return retval;
err_stop_ehci:
clk_disable_unprepare(sehci->clk);
err_put_hcd:
usb_put_hcd(hcd);
fail:
dev_err(&pdev->dev, "init fail, %d\n", retval);
return retval;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev.dma_mask)
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
<|end_of_text|> | 7,239 |
--- initial
+++ final
@@ -1,108 +1,107 @@
static int tegra_ehci_probe(struct platform_device *pdev) {
const struct of_device_id *match;
const struct tegra_ehci_soc_config *soc_config;
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct tegra_ehci_hcd *tegra;
int err = 0;
int irq;
struct usb_phy *u_phy;
match = of_match_device(tegra_ehci_of_match, &pdev->dev);
if (!match) {
dev_err(&pdev->dev, "Error: No device match found\n");
return -ENODEV;
}
soc_config = match->data;
/* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) return err;
hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Unable to create HCD\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, hcd);
ehci = hcd_to_ehci(hcd);
tegra = (struct tegra_ehci_hcd *)ehci->priv;
hcd->has_tt = 1;
tegra->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(tegra->clk)) {
dev_err(&pdev->dev, "Can't get ehci clock\n");
err = PTR_ERR(tegra->clk);
goto cleanup_hcd_create;
}
err = clk_prepare_enable(tegra->clk);
if (err) goto cleanup_clk_get;
tegra_periph_reset_assert(tegra->clk);
udelay(1);
tegra_periph_reset_deassert(tegra->clk);
u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
if (IS_ERR(u_phy)) {
err = PTR_ERR(u_phy);
goto cleanup_clk_en;
}
hcd->phy = u_phy;
tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node, "nvidia,needs-double-reset");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Failed to get I/O memory\n");
err = -ENXIO;
goto cleanup_clk_en;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
hcd->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!hcd->regs) {
dev_err(&pdev->dev, "Failed to remap I/O memory\n");
err = -ENOMEM;
goto cleanup_clk_en;
}
ehci->caps = hcd->regs + 0x100;
ehci->has_hostpc = soc_config->has_hostpc;
err = usb_phy_init(hcd->phy);
if (err) {
dev_err(&pdev->dev, "Failed to initialize phy\n");
goto cleanup_clk_en;
}
u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg), GFP_KERNEL);
if (!u_phy->otg) {
dev_err(&pdev->dev, "Failed to alloc memory for otg\n");
err = -ENOMEM;
goto cleanup_phy;
}
u_phy->otg->host = hcd_to_bus(hcd);
err = usb_phy_set_suspend(hcd->phy, 0);
if (err) {
dev_err(&pdev->dev, "Failed to power on the phy\n");
goto cleanup_phy;
}
irq = platform_get_irq(pdev, 0);
if (!irq) {
dev_err(&pdev->dev, "Failed to get IRQ\n");
err = -ENODEV;
goto cleanup_phy;
}
otg_set_host(u_phy->otg, &hcd->self);
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto cleanup_otg_set_host;
}
return err;
cleanup_otg_set_host:
otg_set_host(u_phy->otg, NULL);
cleanup_phy:
usb_phy_shutdown(hcd->phy);
cleanup_clk_en:
clk_disable_unprepare(tegra->clk);
cleanup_clk_get:
clk_put(tegra->clk);
cleanup_hcd_create:
usb_put_hcd(hcd);
return err;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev.dma_mask)
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
<|end_of_text|> | 7,240 |
--- initial
+++ final
@@ -1,88 +1,87 @@
static int exynos_ohci_probe(struct platform_device *pdev) {
struct exynos4_ohci_platdata *pdata = dev_get_platdata(&pdev->dev);
struct exynos_ohci_hcd *exynos_ohci;
struct usb_hcd *hcd;
struct ohci_hcd *ohci;
struct resource *res;
struct usb_phy *phy;
int irq;
int err;
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we move to full device tree support this will vanish off.
*/
- if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) return err;
exynos_ohci = devm_kzalloc(&pdev->dev, sizeof(struct exynos_ohci_hcd), GFP_KERNEL);
if (!exynos_ohci) return -ENOMEM;
if (of_device_is_compatible(pdev->dev.of_node, "samsung,exynos5440-ohci")) goto skip_phy;
phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(phy)) {
/* Fallback to pdata */
if (!pdata) {
dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
return -EPROBE_DEFER;
} else {
exynos_ohci->pdata = pdata;
}
} else {
exynos_ohci->phy = phy;
exynos_ohci->otg = phy->otg;
}
skip_phy:
exynos_ohci->dev = &pdev->dev;
hcd = usb_create_hcd(&exynos_ohci_hc_driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Unable to create HCD\n");
return -ENOMEM;
}
exynos_ohci->hcd = hcd;
exynos_ohci->clk = devm_clk_get(&pdev->dev, "usbhost");
if (IS_ERR(exynos_ohci->clk)) {
dev_err(&pdev->dev, "Failed to get usbhost clock\n");
err = PTR_ERR(exynos_ohci->clk);
goto fail_clk;
}
err = clk_prepare_enable(exynos_ohci->clk);
if (err) goto fail_clk;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Failed to get I/O memory\n");
err = -ENXIO;
goto fail_io;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
hcd->regs = devm_ioremap(&pdev->dev, res->start, hcd->rsrc_len);
if (!hcd->regs) {
dev_err(&pdev->dev, "Failed to remap I/O memory\n");
err = -ENOMEM;
goto fail_io;
}
irq = platform_get_irq(pdev, 0);
if (!irq) {
dev_err(&pdev->dev, "Failed to get IRQ\n");
err = -ENODEV;
goto fail_io;
}
if (exynos_ohci->otg) exynos_ohci->otg->set_host(exynos_ohci->otg, &exynos_ohci->hcd->self);
exynos_ohci_phy_enable(exynos_ohci);
ohci = hcd_to_ohci(hcd);
ohci_hcd_init(ohci);
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
goto fail_add_hcd;
}
platform_set_drvdata(pdev, exynos_ohci);
return 0;
fail_add_hcd:
exynos_ohci_phy_disable(exynos_ohci);
fail_io:
clk_disable_unprepare(exynos_ohci->clk);
fail_clk:
usb_put_hcd(hcd);
return err;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev.dma_mask)
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
<|end_of_text|> | 7,241 |
--- initial
+++ final
@@ -1,115 +1,114 @@
static int usb_hcd_nxp_probe(struct platform_device *pdev) {
struct usb_hcd *hcd = 0;
struct ohci_hcd *ohci;
const struct hc_driver *driver = &ohci_nxp_hc_driver;
struct resource *res;
int ret = 0, irq;
struct device_node *isp1301_node;
if (pdev->dev.of_node) {
isp1301_node = of_parse_phandle(pdev->dev.of_node, "transceiver", 0);
} else {
isp1301_node = NULL;
}
isp1301_i2c_client = isp1301_get_client(isp1301_node);
if (!isp1301_i2c_client) { return -EPROBE_DEFER; }
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) goto fail_disable;
dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
if (usb_disabled()) {
dev_err(&pdev->dev, "USB is disabled\n");
ret = -ENODEV;
goto fail_disable;
}
/* Enable AHB slave USB clock, needed for further USB clock control */
__raw_writel(USB_SLAVE_HCLK_EN | PAD_CONTROL_LAST_DRIVEN, USB_CTRL);
/* Enable USB PLL */
usb_pll_clk = clk_get(&pdev->dev, "ck_pll5");
if (IS_ERR(usb_pll_clk)) {
dev_err(&pdev->dev, "failed to acquire USB PLL\n");
ret = PTR_ERR(usb_pll_clk);
goto fail_pll;
}
ret = clk_enable(usb_pll_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start USB PLL\n");
goto fail_pllen;
}
ret = clk_set_rate(usb_pll_clk, 48000);
if (ret < 0) {
dev_err(&pdev->dev, "failed to set USB clock rate\n");
goto fail_rate;
}
/* Enable USB device clock */
usb_dev_clk = clk_get(&pdev->dev, "ck_usbd");
if (IS_ERR(usb_dev_clk)) {
dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
ret = PTR_ERR(usb_dev_clk);
goto fail_dev;
}
ret = clk_enable(usb_dev_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
goto fail_deven;
}
/* Enable USB otg clocks */
usb_otg_clk = clk_get(&pdev->dev, "ck_usb_otg");
if (IS_ERR(usb_otg_clk)) {
dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
ret = PTR_ERR(usb_otg_clk);
goto fail_otg;
}
__raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL);
ret = clk_enable(usb_otg_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
goto fail_otgen;
}
isp1301_configure();
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
ret = -ENOMEM;
goto fail_hcd;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto fail_resource;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
goto fail_resource;
}
nxp_start_hc();
platform_set_drvdata(pdev, hcd);
ohci = hcd_to_ohci(hcd);
ohci_hcd_init(ohci);
dev_info(&pdev->dev, "at 0x%p, irq %d\n", hcd->regs, hcd->irq);
ret = usb_add_hcd(hcd, irq, 0);
if (ret == 0) return ret;
nxp_stop_hc();
fail_resource:
usb_put_hcd(hcd);
fail_hcd:
clk_disable(usb_otg_clk);
fail_otgen:
clk_put(usb_otg_clk);
fail_otg:
clk_disable(usb_dev_clk);
fail_deven:
clk_put(usb_dev_clk);
fail_dev:
fail_rate:
clk_disable(usb_pll_clk);
fail_pllen:
clk_put(usb_pll_clk);
fail_pll:
fail_disable:
isp1301_i2c_client = NULL;
return ret;
}<sep>@@
expression ret,dev,v;
@@
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
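// Same transformation as the earlier records, but for the unconditional
// form: here the original always overwrote dev.dma_mask, so the rule
// matches without the preceding if (!dev.dma_mask) test.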
<|end_of_text|> | 7,242 |
--- initial
+++ final
@@ -1,61 +1,60 @@
static int ohci_octeon_drv_probe(struct platform_device *pdev) {
struct usb_hcd *hcd;
struct ohci_hcd *ohci;
void *reg_base;
struct resource *res_mem;
int irq;
int ret;
if (usb_disabled()) return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "No irq assigned\n");
return -ENODEV;
}
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res_mem == NULL) {
dev_err(&pdev->dev, "No register space assigned\n");
return -ENODEV;
}
/* Ohci is a 32-bit device. */
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) return ret;
hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
if (!hcd) return -ENOMEM;
hcd->rsrc_start = res_mem->start;
hcd->rsrc_len = resource_size(res_mem);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, OCTEON_OHCI_HCD_NAME)) {
dev_err(&pdev->dev, "request_mem_region failed\n");
ret = -EBUSY;
goto err1;
}
reg_base = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!reg_base) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
goto err2;
}
ohci_octeon_hw_start();
hcd->regs = reg_base;
ohci = hcd_to_ohci(hcd);
/* Octeon OHCI matches CPU endianness. */
#ifdef __BIG_ENDIAN
ohci->flags |= OHCI_QUIRK_BE_MMIO;
#endif
ohci_hcd_init(ohci);
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
goto err3;
}
platform_set_drvdata(pdev, hcd);
return 0;
err3:
ohci_octeon_hw_stop();
iounmap(hcd->regs);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
return ret;
}<sep>@@
expression ret,dev,v;
@@
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
<|end_of_text|> | 7,243 |
--- initial
+++ final
@@ -1,60 +1,59 @@
static int ohci_hcd_omap3_probe(struct platform_device *pdev) {
struct device *dev = &pdev->dev;
struct usb_hcd *hcd = NULL;
void __iomem *regs = NULL;
struct resource *res;
int ret;
int irq;
if (usb_disabled()) return -ENODEV;
if (!dev->parent) {
dev_err(dev, "Missing parent device\n");
return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "OHCI irq failed\n");
return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "UHH OHCI get resource failed\n");
return -ENOMEM;
}
regs = ioremap(res->start, resource_size(res));
if (!regs) {
dev_err(dev, "UHH OHCI ioremap failed\n");
return -ENOMEM;
}
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!dev->dma_mask) dev->dma_mask = &dev->coherent_dma_mask;
- ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) goto err_io;
ret = -ENODEV;
hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev, dev_name(dev));
if (!hcd) {
dev_err(dev, "usb_create_hcd failed\n");
goto err_io;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
ohci_hcd_init(hcd_to_ohci(hcd));
ret = usb_add_hcd(hcd, irq, 0);
if (ret) {
dev_dbg(dev, "failed to add hcd with err %d\n", ret);
goto err_add_hcd;
}
return 0;
err_add_hcd:
pm_runtime_put_sync(dev);
usb_put_hcd(hcd);
err_io:
iounmap(regs);
return ret;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev->dma_mask)
- dev->dma_mask = &dev->coherent_dma_mask;
- ret = dma_set_coherent_mask(dev, v);
+ ret = dma_coerce_mask_and_coherent(dev, v);
<|end_of_text|> | 7,244 |
--- initial
+++ final
@@ -1,62 +1,61 @@
static int spear_ohci_hcd_drv_probe(struct platform_device *pdev) {
const struct hc_driver *driver = &ohci_spear_hc_driver;
struct usb_hcd *hcd = NULL;
struct clk *usbh_clk;
struct spear_ohci *ohci_p;
struct resource *res;
int retval, irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
retval = irq;
goto fail;
}
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) goto fail;
usbh_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(usbh_clk)) {
dev_err(&pdev->dev, "Error getting interface clock\n");
retval = PTR_ERR(usbh_clk);
goto fail;
}
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto fail;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
retval = -ENODEV;
goto err_put_hcd;
}
hcd->rsrc_start = pdev->resource[0].start;
hcd->rsrc_len = resource_size(res);
if (!devm_request_mem_region(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
retval = -EBUSY;
goto err_put_hcd;
}
hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
dev_dbg(&pdev->dev, "ioremap failed\n");
retval = -ENOMEM;
goto err_put_hcd;
}
ohci_p = (struct spear_ohci *)hcd_to_ohci(hcd);
ohci_p->clk = usbh_clk;
spear_start_ohci(ohci_p);
ohci_hcd_init(hcd_to_ohci(hcd));
retval = usb_add_hcd(hcd, platform_get_irq(pdev, 0), 0);
if (retval == 0) return retval;
spear_stop_ohci(ohci_p);
err_put_hcd:
usb_put_hcd(hcd);
fail:
dev_err(&pdev->dev, "init fail, %d\n", retval);
return retval;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev.dma_mask)
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
<|end_of_text|> | 7,245 |
--- initial
+++ final
@@ -1,43 +1,42 @@
static int uhci_hcd_platform_probe(struct platform_device *pdev) {
struct usb_hcd *hcd;
struct uhci_hcd *uhci;
struct resource *res;
int ret;
if (usb_disabled()) return -ENODEV;
/*
* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
* Once we have dma capability bindings this can go away.
*/
- if (!pdev->dev.dma_mask) pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) return ret;
hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev, pdev->name);
if (!hcd) return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
pr_err("%s: request_mem_region failed\n", __func__);
ret = -EBUSY;
goto err_rmr;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
pr_err("%s: ioremap failed\n", __func__);
ret = -ENOMEM;
goto err_irq;
}
uhci = hcd_to_uhci(hcd);
uhci->regs = hcd->regs;
ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED | IRQF_SHARED);
if (ret) goto err_uhci;
return 0;
err_uhci:
iounmap(hcd->regs);
err_irq:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err_rmr:
usb_put_hcd(hcd);
return ret;
}<sep>@@
expression ret,dev,v;
@@
- if (!dev.dma_mask)
- dev.dma_mask = &dev.coherent_dma_mask;
- ret = dma_set_coherent_mask(&dev, v);
+ ret = dma_coerce_mask_and_coherent(&dev, v);
<|end_of_text|> | 7,246 |
--- initial
+++ final
@@ -1,8 +1,7 @@
static void hash_sock_destruct(struct sock *sk) {
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
- memzero_explicit(ctx->result, crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
- sock_kfree_s(sk, ctx->result, crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+ sock_kzfree_s(sk, ctx->result, crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}<sep>@@
expression e1,e2,e3;
@@
- memzero_explicit(e2, e3);
- sock_kfree_s(e1, e2, e3);
+ sock_kzfree_s(e1, e2, e3);
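// sock_kzfree_s() clears the buffer before freeing it, so the explicit
// memzero_explicit() + sock_kfree_s() pair can be replaced by one call
// without risking key material lingering in freed memory.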
<|end_of_text|> | 8,350 |
--- initial
+++ final
@@ -1,10 +1,9 @@
static void skcipher_sock_destruct(struct sock *sk) {
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
skcipher_free_sgl(sk);
- memzero_explicit(ctx->iv, crypto_ablkcipher_ivsize(tfm));
- sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+ sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}<sep>@@
expression e1,e2,e3;
@@
- memzero_explicit(e2, e3);
- sock_kfree_s(e1, e2, e3);
+ sock_kzfree_s(e1, e2, e3);
<|end_of_text|> | 8,351 |
--- initial
+++ final
@@ -1,44 +1,44 @@
int lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, __u32 local_ip, __u32 peer_ip, int peer_port) {
struct lnet_acceptor_connreq cr;
struct socket *sock;
int rc;
int port;
int fatal;
- CLASSERT(sizeof(cr) <= 16); /* not too big to be on the stack */
+ BUILD_BUG_ON(sizeof(cr) > 16); /* not too big to be on the stack */
for (port = LNET_ACCEPTOR_MAX_RESERVED_PORT; port >= LNET_ACCEPTOR_MIN_RESERVED_PORT; --port) {
/* Iterate through reserved ports. */
rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip, peer_port);
if (rc) {
if (fatal) goto failed;
continue;
}
- CLASSERT(LNET_PROTO_ACCEPTOR_VERSION == 1);
+ BUILD_BUG_ON(LNET_PROTO_ACCEPTOR_VERSION != 1);
cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
cr.acr_nid = peer_nid;
if (the_lnet.ln_testprotocompat) {
/* single-shot proto check */
lnet_net_lock(LNET_LOCK_EX);
if (the_lnet.ln_testprotocompat & 4) {
cr.acr_version++;
the_lnet.ln_testprotocompat &= ~4;
}
if (the_lnet.ln_testprotocompat & 8) {
cr.acr_magic = LNET_PROTO_MAGIC;
the_lnet.ln_testprotocompat &= ~8;
}
lnet_net_unlock(LNET_LOCK_EX);
}
rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout);
if (rc) goto failed_sock;
*sockp = sock;
return 0;
}
rc = -EADDRINUSE;
goto failed;
failed_sock:
sock_release(sock);
failed:
lnet_connect_console_error(rc, peer_nid, peer_ip, peer_port);
return rc;
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
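// Note the inverted conditions: CLASSERT(cond) asserted at compile time
// that cond holds, while BUILD_BUG_ON(cond) breaks the build when cond is
// true, hence == becomes != and <= becomes >.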
<|end_of_text|> | 8,412 |
--- initial
+++ final
@@ -1,14 +1,14 @@
void ptlrpc_init_xid(void) {
time64_t now = ktime_get_real_seconds();
spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
ptlrpc_last_xid >>= 2;
ptlrpc_last_xid |= (1ULL << 61);
} else {
ptlrpc_last_xid = (__u64)now << 20;
}
/* Always need to be aligned to a power-of-two for multi-bulk BRW */
- CLASSERT(((PTLRPC_BULK_OPS_COUNT - 1) & PTLRPC_BULK_OPS_COUNT) == 0);
+ BUILD_BUG_ON(((PTLRPC_BULK_OPS_COUNT - 1) & PTLRPC_BULK_OPS_COUNT) != 0);
ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
<|end_of_text|> | 8,414 |