This is the 4.9.219 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl6UI7sACgkQONu9yGCS
aT5png//QOOP2eJhRXJ1fljGpHhS8TizKPG4HsvWYEvbJpiPgap4+K//MHHPQJBH
c7C03STaMoFEwb2kmWIWmQWwUlRIRaG0ubuCGYLboncts0jWOnbuN3u51Z31V2/w
LjJ3BKAt+EDGxltxV+KFE7Gp5DQ5bvN4PzlkNDpyF90QtjCZSDBpFR0c466lkUsy
26RYU3BG2RbuNDGPqgzhIsTvc4hpmiPM/pvkXbAHqjf7TDeDPZnQntfwvqjtSUVu
fDey9pBNSkfam++29t14bYVeRIp1i73ASFZe7IOajb3+U5B+mUGD1efurDAAMjiE
hJpw1p2SNidDntUfW53xF6xiSNDmxUpOQexWNgMQZZrLow0EObm4eRkXwJLgch18
BmtgHg2NKHfcosXP+yTzmwiyBOes0/q7EnIR6IeCWdYH+9TXwSy4BL+eMyJUZR7+
A1FEIt3+SuIee88N+b+rn4u94r4vvxzRBZ5flpX8TJDSkQ2FhAf4to7dXHA/BZkg
+IKNaW/zvcgTRGROOzODVenbXcGseOseoNMXzp4PciAtZvWfGXo0LU82gD/SivS4
tkA+C/bBu5WBzPr0Pzl+fhlHn5maLpIIukNPOGsSASBh/d11BVE4951GzlhEFjgT
km4bfo/85QtD43s68emrJUNoUE7RfKlulhlbN5Aa+Xg/N/DcTo8=
=/6B3
-----END PGP SIGNATURE-----

Merge 4.9.219 into android-4.9-q

Changes in 4.9.219
	l2tp: ensure sessions are freed after their PPPOL2TP socket
	l2tp: fix race between l2tp_session_delete() and l2tp_tunnel_closeall()
	drm/bochs: downgrade pci_request_region failure from error to warning
	ipv4: fix a RCU-list lock in fib_triestat_seq_show
	net, ip_tunnel: fix interface lookup with no key
	sctp: fix refcount bug in sctp_wfree
	sctp: fix possibly using a bad saddr with a given dst
	drm/etnaviv: replace MMU flush marker with flush sequence
	blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter
	blk-mq: Allow blocking queue tag iter callbacks
	coresight: do not use the BIT() macro in the UAPI header
	net: dsa: tag_brcm: Fix skb->fwd_offload_mark location
	padata: always acquire cpu_hotplug_lock before pinst->lock
	mm: mempolicy: require at least one nodeid for MPOL_PREFERRED
	net: dsa: bcm_sf2: Ensure correct sub-node is parsed
	net: stmmac: dwmac1000: fix out-of-bounds mac address reg setting
	slcan: Don't transmit uninitialized stack data in padding
	net: phy: micrel: kszphy_resume(): add delay after genphy_resume() before accessing PHY registers
	random: always use batched entropy for get_random_u{32,64}
	tools/accounting/getdelays.c: fix netlink attribute length
	ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
	IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
	IB/hfi1: Fix memory leaks in sysfs registration and unregistration
	ceph: remove the extra slashes in the server path
	ceph: canonicalize server path in place
	Bluetooth: RFCOMM: fix ODEBUG bug in rfcomm_dev_ioctl
	RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
	clk: qcom: rcg: Return failure for RCG update
	drm/msm: stop abusing dma_map/unmap for cache
	arm64: Fix size of __early_cpu_boot_status
	usb: dwc3: don't set gadget->is_otg flag
	drm_dp_mst_topology: fix broken drm_dp_sideband_parse_remote_dpcd_read()
	drm/msm: Use the correct dma_sync calls in msm_gem
	Linux 4.9.219

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ic787d6aa9374864ce928ac1ef802077ac55ca232
commit 6ee8be4d80
38 changed files with 232 additions and 107 deletions
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 218
+SUBLEVEL = 219
 EXTRAVERSION =
 NAME = Roaring Lionus
 
arch/arm64/kernel/head.S
@@ -651,7 +651,7 @@ ENTRY(__boot_cpu_mode)
	 * with MMU turned off.
	 */
ENTRY(__early_cpu_boot_status)
-	.long 0
+	.quad 0

	.popsection

block/blk-mq-tag.c
@@ -336,6 +336,13 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
+	/*
+	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
+	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+	 * to avoid race with it.
+	 */
+	if (!percpu_ref_tryget(&q->q_usage_counter))
+		return;
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
 
@@ -351,7 +358,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
-
+	blk_queue_exit(q);
 }
 
 static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
block/blk-mq.c
@@ -2346,6 +2346,10 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
+	/*
+	 * Sync with blk_mq_queue_tag_busy_iter.
+	 */
+	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
drivers/char/random.c
@@ -2119,8 +2119,8 @@ struct batched_entropy {
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy.
+ * number is good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
 unsigned long get_random_long(void)
@@ -2128,9 +2128,6 @@ unsigned long get_random_long(void)
 	unsigned long ret;
 	struct batched_entropy *batch;
 
-	if (arch_get_random_long(&ret))
-		return ret;
-
 	batch = &get_cpu_var(batched_entropy_long);
 	if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
 		extract_crng((u8 *)batch->entropy_long);
@@ -2154,9 +2151,6 @@ unsigned int get_random_int(void)
 	unsigned int ret;
 	struct batched_entropy *batch;
 
-	if (arch_get_random_int(&ret))
-		return ret;
-
 	batch = &get_cpu_var(batched_entropy_int);
 	if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
 		extract_crng((u8 *)batch->entropy_int);
drivers/clk/qcom/clk-rcg2.c
@@ -107,7 +107,7 @@ static int update_config(struct clk_rcg2 *rcg)
 	}
 
 	WARN(1, "%s: rcg didn't update its configuration.", name);
-	return 0;
+	return -EBUSY;
 }
 
 static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
drivers/gpu/drm/bochs/bochs_hw.c
@@ -97,10 +97,8 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
 		size = min(size, mem);
 	}
 
-	if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
-		DRM_ERROR("Cannot request framebuffer\n");
-		return -EBUSY;
-	}
+	if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+		DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
 
 	bochs->fb_map = ioremap(addr, size);
 	if (bochs->fb_map == NULL) {
drivers/gpu/drm/drm_dp_mst_topology.c
@@ -431,6 +431,7 @@ static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx
 	if (idx > raw->curlen)
 		goto fail_len;
 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+	idx++;
 	if (idx > raw->curlen)
 		goto fail_len;
 
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -257,6 +257,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	unsigned int waitlink_offset = buffer->user_size - 16;
 	u32 return_target, return_dwords;
 	u32 link_target, link_dwords;
+	unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
+	bool need_flush = gpu->flush_seq != new_flush_seq;
 
 	if (drm_debug & DRM_UT_DRIVER)
 		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
@@ -269,14 +271,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	 * need to append a mmu flush load state, followed by a new
 	 * link to this buffer - a total of four additional words.
 	 */
-	if (gpu->mmu->need_flush || gpu->switch_context) {
+	if (need_flush || gpu->switch_context) {
 		u32 target, extra_dwords;
 
 		/* link command */
 		extra_dwords = 1;
 
 		/* flush command */
-		if (gpu->mmu->need_flush) {
+		if (need_flush) {
 			if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
 				extra_dwords += 1;
 			else
@@ -289,7 +291,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 
 		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
 
-		if (gpu->mmu->need_flush) {
+		if (need_flush) {
 			/* Add the MMU flush */
 			if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
 				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
@@ -309,7 +311,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 					SYNC_RECIPIENT_PE);
 		}
 
-		gpu->mmu->need_flush = false;
+		gpu->flush_seq = new_flush_seq;
 	}
 
 	if (gpu->switch_context) {
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1313,7 +1313,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	gpu->active_fence = submit->fence;
 
 	if (gpu->lastctx != cmdbuf->ctx) {
-		gpu->mmu->need_flush = true;
+		gpu->mmu->flush_seq++;
 		gpu->switch_context = true;
 		gpu->lastctx = cmdbuf->ctx;
 	}
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -135,6 +135,7 @@ struct etnaviv_gpu {
 	int irq;
 
 	struct etnaviv_iommu *mmu;
+	unsigned int flush_seq;
 
 	/* Power Control: */
 	struct clk *clk_bus;
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -134,7 +134,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		 */
 		if (mmu->last_iova) {
 			mmu->last_iova = 0;
-			mmu->need_flush = true;
+			mmu->flush_seq++;
 			continue;
 		}
 
@@ -197,7 +197,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
 		 * associated commit requesting this mapping, and retry the
 		 * allocation one more time.
 		 */
-		mmu->need_flush = true;
+		mmu->flush_seq++;
 	}
 
 	return ret;
@@ -354,7 +354,7 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
 		 * that the FE MMU prefetch won't load invalid entries.
 		 */
 		mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
-		gpu->mmu->need_flush = true;
+		mmu->flush_seq++;
 		mutex_unlock(&mmu->lock);
 
 		return (u32)buf->vram_node.start;
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
@@ -44,7 +44,7 @@ struct etnaviv_iommu {
 	struct list_head mappings;
 	struct drm_mm mm;
 	u32 last_iova;
-	bool need_flush;
+	unsigned int flush_seq;
 };
 
 struct etnaviv_gem_object;
drivers/gpu/drm/msm/msm_gem.c
@@ -40,6 +40,46 @@ static bool use_pages(struct drm_gem_object *obj)
 	return !msm_obj->vram_node;
 }
 
+/*
+ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+ * API. Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tail of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_map_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+	struct device *dev = msm_obj->base.dev->dev;
+
+	if (get_dma_ops(dev)) {
+		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	} else {
+		dma_unmap_sg(dev, msm_obj->sgt->sgl,
+			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+	}
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj,
 		int npages)
@@ -106,8 +146,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 		 * because display controller, GPU, etc. are not coherent:
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			sync_for_device(msm_obj);
 	}
 
 	return msm_obj->pages;
@@ -124,9 +163,7 @@ static void put_pages(struct drm_gem_object *obj)
 			 * GPU, etc. are not coherent:
 			 */
 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
-						msm_obj->sgt->nents,
-						DMA_BIDIRECTIONAL);
+				sync_for_cpu(msm_obj);
 
 			sg_free_table(msm_obj->sgt);
 			kfree(msm_obj->sgt);
drivers/infiniband/core/cma.c
@@ -2568,6 +2568,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 err2:
 	kfree(route->path_rec);
 	route->path_rec = NULL;
+	route->num_paths = 0;
 err1:
 	kfree(work);
 	return ret;
drivers/infiniband/hw/hfi1/sysfs.c
@@ -670,7 +670,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
 		dd_dev_err(dd,
 			   "Skipping sc2vl sysfs info, (err %d) port %u\n",
 			   ret, port_num);
-		goto bail;
+		/*
+		 * Based on the documentation for kobject_init_and_add(), the
+		 * caller should call kobject_put even if this call fails.
+		 */
+		goto bail_sc2vl;
 	}
 	kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
 
@@ -680,7 +684,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
 		dd_dev_err(dd,
 			   "Skipping sl2sc sysfs info, (err %d) port %u\n",
 			   ret, port_num);
-		goto bail_sc2vl;
+		goto bail_sl2sc;
 	}
 	kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
 
@@ -690,7 +694,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
 		dd_dev_err(dd,
 			   "Skipping vl2mtu sysfs info, (err %d) port %u\n",
 			   ret, port_num);
-		goto bail_sl2sc;
+		goto bail_vl2mtu;
 	}
 	kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
 
@@ -700,7 +704,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
 		dd_dev_err(dd,
 			   "Skipping Congestion Control sysfs info, (err %d) port %u\n",
 			   ret, port_num);
-		goto bail_vl2mtu;
+		goto bail_cc;
 	}
 
 	kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -738,7 +742,6 @@ bail_sl2sc:
 	kobject_put(&ppd->sl2sc_kobj);
bail_sc2vl:
 	kobject_put(&ppd->sc2vl_kobj);
-bail:
 	return ret;
 }
 
@@ -858,8 +861,13 @@ bail:
 	for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i)
 		device_remove_file(&dev->dev, hfi1_attributes[i]);
 
-	for (i = 0; i < dd->num_sdma; i++)
-		kobject_del(&dd->per_sdma[i].kobj);
+	/*
+	 * The function kobject_put() will call kobject_del() if the kobject
+	 * has been added successfully. The sysfs files created under the
+	 * kobject directory will also be removed during the process.
+	 */
+	for (; i >= 0; i--)
+		kobject_put(&dd->per_sdma[i].kobj);
 
 	return ret;
 }
@@ -872,6 +880,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
 	struct hfi1_pportdata *ppd;
 	int i;
 
+	/* Unwind operations in hfi1_verbs_register_sysfs() */
+	for (i = 0; i < dd->num_sdma; i++)
+		kobject_put(&dd->per_sdma[i].kobj);
+
 	for (i = 0; i < dd->num_pports; i++) {
 		ppd = &dd->pport[i];
 
drivers/net/can/slcan.c
@@ -147,7 +147,7 @@ static void slc_bump(struct slcan *sl)
 	u32 tmpid;
 	char *cmd = sl->rbuff;
 
-	cf.can_id = 0;
+	memset(&cf, 0, sizeof(cf));
 
 	switch (*cmd) {
 	case 'r':
@@ -186,8 +186,6 @@ static void slc_bump(struct slcan *sl)
 	else
 		return;
 
-	*(u64 *) (&cf.data) = 0; /* clear payload */
-
 	/* RTR frames may have a dlc > 0 but they never have any data bytes */
 	if (!(cf.can_id & CAN_RTR_FLAG)) {
 		for (i = 0; i < cf.can_dlc; i++) {
drivers/net/dsa/bcm_sf2.c
@@ -976,6 +976,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
 	struct device_node *dn = pdev->dev.of_node;
 	struct b53_platform_data *pdata;
 	struct dsa_switch_ops *ops;
+	struct device_node *ports;
 	struct bcm_sf2_priv *priv;
 	struct b53_device *dev;
 	struct dsa_switch *ds;
@@ -1038,7 +1039,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
 	spin_lock_init(&priv->indir_lock);
 	mutex_init(&priv->stats_mutex);
 
-	bcm_sf2_identify_ports(priv, dn->child);
+	ports = of_find_node_by_name(dn, "ports");
+	if (ports) {
+		bcm_sf2_identify_ports(priv, ports);
+		of_node_put(ports);
+	}
 
 	priv->irq0 = irq_of_parse_and_map(dn, 0);
 	priv->irq1 = irq_of_parse_and_map(dn, 1);
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -214,7 +214,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
 			reg++;
 		}
 
-		while (reg <= perfect_addr_number) {
+		while (reg < perfect_addr_number) {
 			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
 			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
 			reg++;
drivers/net/phy/micrel.c
@@ -28,6 +28,7 @@
 #include <linux/micrel_phy.h>
 #include <linux/of.h>
 #include <linux/clk.h>
+#include <linux/delay.h>
 
 /* Operation Mode Strap Override */
 #define MII_KSZPHY_OMSO			0x16
@@ -728,6 +729,12 @@ static int kszphy_resume(struct phy_device *phydev)
 {
 	genphy_resume(phydev);
 
+	/* After switching from power-down to normal mode, an internal global
+	 * reset is automatically generated. Wait a minimum of 1 ms before
+	 * read/write access to the PHY registers.
+	 */
+	usleep_range(1000, 2000);
+
 	/* Enable PHY Interrupts */
 	if (phy_interrupt_is_valid(phydev)) {
 		phydev->interrupts = PHY_INTERRUPT_ENABLED;
drivers/usb/dwc3/gadget.c
@@ -2996,7 +2996,6 @@ int dwc3_gadget_init(struct dwc3 *dwc)
 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
 	dwc->gadget.sg_supported	= true;
 	dwc->gadget.name		= "dwc3-gadget";
-	dwc->gadget.is_otg		= dwc->dr_mode == USB_DR_MODE_OTG;
 
 	/*
 	 * FIXME We might be setting max_speed to <SUPER, however versions
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int ceph_sync_fs(struct super_block *sb, int wait)
|
||||
{
|
||||
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
|
||||
|
@ -178,6 +177,26 @@ static match_table_t fsopt_tokens = {
|
|||
{-1, NULL}
|
||||
};
|
||||
|
||||
/*
|
||||
* Remove adjacent slashes and then the trailing slash, unless it is
|
||||
* the only remaining character.
|
||||
*
|
||||
* E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
|
||||
*/
|
||||
static void canonicalize_path(char *path)
|
||||
{
|
||||
int i, j = 0;
|
||||
|
||||
for (i = 0; path[i] != '\0'; i++) {
|
||||
if (path[i] != '/' || j < 1 || path[j - 1] != '/')
|
||||
path[j++] = path[i];
|
||||
}
|
||||
|
||||
if (j > 1 && path[j - 1] == '/')
|
||||
j--;
|
||||
path[j] = '\0';
|
||||
}
|
||||
|
||||
static int parse_fsopt_token(char *c, void *private)
|
||||
{
|
||||
struct ceph_mount_options *fsopt = private;
|
||||
|
@ -337,6 +356,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
|
|||
ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -396,13 +416,17 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
|
|||
*/
|
||||
dev_name_end = strchr(dev_name, '/');
|
||||
if (dev_name_end) {
|
||||
if (strlen(dev_name_end) > 1) {
|
||||
fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
|
||||
if (!fsopt->server_path) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
/*
|
||||
* The server_path will include the whole chars from userland
|
||||
* including the leading '/'.
|
||||
*/
|
||||
fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
|
||||
if (!fsopt->server_path) {
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
canonicalize_path(fsopt->server_path);
|
||||
} else {
|
||||
dev_name_end = dev_name + strlen(dev_name);
|
||||
}
|
||||
|
@ -725,7 +749,6 @@ static void destroy_caches(void)
|
|||
ceph_fscache_unregister();
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* ceph_umount_begin - initiate forced umount. Tear down down the
|
||||
* mount, skipping steps that may hang while waiting for server(s).
|
||||
|
@ -812,9 +835,6 @@ out:
|
|||
return root;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* mount: join the ceph cluster, and open root directory.
|
||||
*/
|
||||
|
@ -828,18 +848,14 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
|
|||
mutex_lock(&fsc->client->mount_mutex);
|
||||
|
||||
if (!fsc->sb->s_root) {
|
||||
const char *path;
|
||||
const char *path = fsc->mount_options->server_path ?
|
||||
fsc->mount_options->server_path + 1 : "";
|
||||
|
||||
err = __ceph_open_session(fsc->client, started);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
if (!fsc->mount_options->server_path) {
|
||||
path = "";
|
||||
dout("mount opening path \\t\n");
|
||||
} else {
|
||||
path = fsc->mount_options->server_path + 1;
|
||||
dout("mount opening path %s\n", path);
|
||||
}
|
||||
dout("mount opening path '%s'\n", path);
|
||||
|
||||
err = ceph_fs_debugfs_init(fsc);
|
||||
if (err < 0)
|
||||
|
|
|
@ -70,7 +70,7 @@ struct ceph_mount_options {
|
|||
|
||||
char *snapdir_name; /* default ".snap" */
|
||||
char *mds_namespace; /* default NULL */
|
||||
char *server_path; /* default "/" */
|
||||
char *server_path; /* default NULL (means "/") */
|
||||
};
|
||||
|
||||
struct ceph_fs_client {
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
#ifndef __UAPI_CORESIGHT_STM_H_
|
||||
#define __UAPI_CORESIGHT_STM_H_
|
||||
|
||||
#define STM_FLAG_TIMESTAMPED BIT(3)
|
||||
#define STM_FLAG_GUARANTEED BIT(7)
|
||||
#include <linux/const.h>
|
||||
|
||||
#define STM_FLAG_TIMESTAMPED _BITUL(3)
|
||||
#define STM_FLAG_GUARANTEED _BITUL(7)
|
||||
|
||||
/*
|
||||
* The CoreSight STM supports guaranteed and invariant timing
|
||||
|
|
|
@ -614,8 +614,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
|
|||
struct cpumask *serial_mask, *parallel_mask;
|
||||
int err = -EINVAL;
|
||||
|
||||
mutex_lock(&pinst->lock);
|
||||
get_online_cpus();
|
||||
mutex_lock(&pinst->lock);
|
||||
|
||||
switch (cpumask_type) {
|
||||
case PADATA_CPU_PARALLEL:
|
||||
|
@ -633,8 +633,8 @@ int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
|
|||
err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
|
||||
|
||||
out:
|
||||
put_online_cpus();
|
||||
mutex_unlock(&pinst->lock);
|
||||
put_online_cpus();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -2769,7 +2769,9 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
|
|||
switch (mode) {
|
||||
case MPOL_PREFERRED:
|
||||
/*
|
||||
* Insist on a nodelist of one node only
|
||||
* Insist on a nodelist of one node only, although later
|
||||
* we use first_node(nodes) to grab a single node, so here
|
||||
* nodelist (or nodes) cannot be empty.
|
||||
*/
|
||||
if (nodelist) {
|
||||
char *rest = nodelist;
|
||||
|
@ -2777,6 +2779,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
|
|||
rest++;
|
||||
if (*rest)
|
||||
goto out;
|
||||
if (nodes_empty(nodes))
|
||||
goto out;
|
||||
}
|
||||
break;
|
||||
case MPOL_INTERLEAVE:
|
||||
|
|
|
@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
|
|||
dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
|
||||
if (IS_ERR(dlc))
|
||||
return PTR_ERR(dlc);
|
||||
else if (dlc) {
|
||||
rfcomm_dlc_put(dlc);
|
||||
if (dlc)
|
||||
return -EBUSY;
|
||||
}
|
||||
dlc = rfcomm_dlc_alloc(GFP_KERNEL);
|
||||
if (!dlc)
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -84,8 +84,6 @@ static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev
|
|||
brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
|
||||
brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;
|
||||
|
||||
skb->offload_fwd_mark = 1;
|
||||
|
||||
return skb;
|
||||
|
||||
out_free:
|
||||
|
@ -148,6 +146,8 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
|
|||
skb->dev->stats.rx_packets++;
|
||||
skb->dev->stats.rx_bytes += skb->len;
|
||||
|
||||
skb->offload_fwd_mark = 1;
|
||||
|
||||
netif_receive_skb(skb);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -2256,6 +2256,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
|
|||
" %Zd bytes, size of tnode: %Zd bytes.\n",
|
||||
LEAF_SIZE, TNODE_SIZE(0));
|
||||
|
||||
rcu_read_lock();
|
||||
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
|
||||
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
|
||||
struct fib_table *tb;
|
||||
|
@ -2275,7 +2276,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
|
|||
trie_show_usage(seq, t->stats);
|
||||
#endif
|
||||
}
|
||||
cond_resched_rcu();
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
net/ipv4/ip_tunnel.c
@@ -155,11 +155,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 			cand = t;
 	}
 
-	if (flags & TUNNEL_NO_KEY)
-		goto skip_key_lookup;
-
 	hlist_for_each_entry_rcu(t, head, hash_node) {
-		if (t->parms.i_key != key ||
+		if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
 		    t->parms.iph.saddr != 0 ||
 		    t->parms.iph.daddr != 0 ||
 		    !(t->dev->flags & IFF_UP))
@@ -171,7 +168,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 			cand = t;
 	}
 
-skip_key_lookup:
 	if (cand)
 		return cand;
 
net/l2tp/l2tp_core.c
@@ -1343,6 +1343,9 @@ again:
 
 			hlist_del_init(&session->hlist);
 
+			if (test_and_set_bit(0, &session->dead))
+				goto again;
+
 			if (session->ref != NULL)
 				(*session->ref)(session);
 
@@ -1791,6 +1794,9 @@ EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
 */
int l2tp_session_delete(struct l2tp_session *session)
{
+	if (test_and_set_bit(0, &session->dead))
+		return 0;
+
	if (session->ref)
		(*session->ref)(session);
	__l2tp_session_unhash(session);
net/l2tp/l2tp_core.h
@@ -84,6 +84,7 @@ struct l2tp_session_cfg {
 struct l2tp_session {
 	int magic;		/* should be
				 * L2TP_SESSION_MAGIC */
+	long dead;
 
 	struct l2tp_tunnel *tunnel;	/* back pointer to tunnel
					 * context */
net/l2tp/l2tp_ppp.c
@@ -429,11 +429,11 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 
 	BUG_ON(session->magic != L2TP_SESSION_MAGIC);
 
-	if (sock) {
+	if (sock)
 		inet_shutdown(sock, SEND_SHUTDOWN);
-		/* Don't let the session go away before our socket does */
-		l2tp_session_inc_refcount(session);
-	}
+
+	/* Don't let the session go away before our socket does */
+	l2tp_session_inc_refcount(session);
 }
 
 /* Really kill the session socket. (Called from sock_put() if
net/sctp/ipv6.c
@@ -235,7 +235,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 {
 	struct sctp_association *asoc = t->asoc;
 	struct dst_entry *dst = NULL;
-	struct flowi6 *fl6 = &fl->u.ip6;
+	struct flowi _fl;
+	struct flowi6 *fl6 = &_fl.u.ip6;
 	struct sctp_bind_addr *bp;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sctp_sockaddr_entry *laddr;
@@ -245,7 +246,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	__u8 matchlen = 0;
 	sctp_scope_t scope;
 
-	memset(fl6, 0, sizeof(struct flowi6));
+	memset(&_fl, 0, sizeof(_fl));
 	fl6->daddr = daddr->v6.sin6_addr;
 	fl6->fl6_dport = daddr->v6.sin6_port;
 	fl6->flowi6_proto = IPPROTO_SCTP;
@@ -269,8 +270,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	rcu_read_unlock();
 
 	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
-	if (!asoc || saddr)
+	if (!asoc || saddr) {
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 		goto out;
+	}
 
 	bp = &asoc->base.bind_addr;
 	scope = sctp_scope(daddr);
@@ -293,6 +297,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		if ((laddr->a.sa.sa_family == AF_INET6) &&
 		    (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
 			rcu_read_unlock();
+			t->dst = dst;
+			memcpy(fl, &_fl, sizeof(_fl));
 			goto out;
 		}
 	}
@@ -331,6 +337,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 			if (!IS_ERR_OR_NULL(dst))
 				dst_release(dst);
 			dst = bdst;
+			t->dst = dst;
+			memcpy(fl, &_fl, sizeof(_fl));
 			break;
 		}
 
@@ -344,6 +352,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 			dst_release(dst);
 		dst = bdst;
 		matchlen = bmatchlen;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 	}
 	rcu_read_unlock();
 
@@ -352,14 +362,12 @@ out:
 		struct rt6_info *rt;
 
 		rt = (struct rt6_info *)dst;
-		t->dst = dst;
 		t->dst_cookie = rt6_get_cookie(rt);
 		pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
 			 &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
-			 &fl6->saddr);
+			 &fl->u.ip6.saddr);
 	} else {
 		t->dst = NULL;
-
 		pr_debug("no route\n");
 	}
 }
net/sctp/protocol.c
@@ -430,14 +430,15 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 {
 	struct sctp_association *asoc = t->asoc;
 	struct rtable *rt;
-	struct flowi4 *fl4 = &fl->u.ip4;
+	struct flowi _fl;
+	struct flowi4 *fl4 = &_fl.u.ip4;
 	struct sctp_bind_addr *bp;
 	struct sctp_sockaddr_entry *laddr;
 	struct dst_entry *dst = NULL;
 	union sctp_addr *daddr = &t->ipaddr;
 	union sctp_addr dst_saddr;
 
-	memset(fl4, 0x0, sizeof(struct flowi4));
+	memset(&_fl, 0x0, sizeof(_fl));
 	fl4->daddr = daddr->v4.sin_addr.s_addr;
 	fl4->fl4_dport = daddr->v4.sin_port;
 	fl4->flowi4_proto = IPPROTO_SCTP;
@@ -455,8 +456,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		   &fl4->saddr);
 
 	rt = ip_route_output_key(sock_net(sk), fl4);
-	if (!IS_ERR(rt))
+	if (!IS_ERR(rt)) {
 		dst = &rt->dst;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
+	}
 
 	/* If there is no association or if a source address is passed, no
 	 * more validation is required.
@@ -519,27 +523,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
 				     false);
 		if (!odev || odev->ifindex != fl4->flowi4_oif) {
-			if (!dst)
+			if (!dst) {
 				dst = &rt->dst;
-			else
+				t->dst = dst;
+				memcpy(fl, &_fl, sizeof(_fl));
+			} else {
 				dst_release(&rt->dst);
+			}
 			continue;
 		}
 
 		dst_release(dst);
 		dst = &rt->dst;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 		break;
 	}
 
 out_unlock:
 	rcu_read_unlock();
 out:
-	t->dst = dst;
-	if (dst)
+	if (dst) {
 		pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
-			 &fl4->daddr, &fl4->saddr);
-	else
+			 &fl->u.ip4.daddr, &fl->u.ip4.saddr);
+	} else {
+		t->dst = NULL;
 		pr_debug("no route\n");
+	}
 }
 
 /* For v4, the source address is cached in the route entry(dst). So no need
net/sctp/socket.c
@@ -173,29 +173,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk)
 	skb_orphan(chunk->skb);
 }
 
+#define traverse_and_process()	\
+do {				\
+	msg = chunk->msg;	\
+	if (msg == prev_msg)	\
+		continue;	\
+	list_for_each_entry(c, &msg->chunks, frag_list) {	\
+		if ((clear && asoc->base.sk == c->skb->sk) ||	\
+		    (!clear && asoc->base.sk != c->skb->sk))	\
+			cb(c);	\
+	}			\
+	prev_msg = msg;		\
+} while (0)
+
 static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+				       bool clear,
 				       void (*cb)(struct sctp_chunk *))
 
 {
+	struct sctp_datamsg *msg, *prev_msg = NULL;
 	struct sctp_outq *q = &asoc->outqueue;
+	struct sctp_chunk *chunk, *c;
 	struct sctp_transport *t;
-	struct sctp_chunk *chunk;
 
 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
 		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
-			cb(chunk);
+			traverse_and_process();
 
 	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->sacked, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->out_chunk_list, list)
-		cb(chunk);
+		traverse_and_process();
 }
 
 /* Verify that this is a valid address. */
@@ -7878,9 +7893,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 * paths won't try to lock it and then oldsk.
 	 */
 	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
-	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
+	sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
 	sctp_assoc_migrate(assoc, newsk);
-	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
+	sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);
 
 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.
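The traverse_and_process() macro above relies on a C subtlety: a `continue` inside do { ... } while (0) jumps to the (false) loop condition, so it leaves the macro body and advances the enclosing list_for_each_entry loop. A minimal stand-alone sketch of the same pattern, illustrative only:

#include <stdio.h>

/* Stand-in for the traverse_and_process() idiom: inside do { } while (0),
 * `continue` jumps to the false condition, exiting the macro body and
 * resuming the *outer* for loop with its next iteration. */
#define process()				\
do {						\
	if (v[i] == skip)			\
		continue; /* next i */		\
	printf("%d\n", v[i]);			\
} while (0)

int main(void)
{
	int v[] = { 1, 2, 3, 2, 4 };
	int skip = 2, i;

	for (i = 0; i < 5; i++)
		process();	/* prints 1, 3, 4 */
	return 0;
}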
sound/soc/jz4740/jz4740-i2s.c
@@ -92,7 +92,9 @@
 #define JZ_AIC_I2S_STATUS_BUSY BIT(2)
 
 #define JZ_AIC_CLK_DIV_MASK 0xf
-#define I2SDIV_DV_SHIFT 8
+#define I2SDIV_DV_SHIFT 0
 #define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
+#define I2SDIV_IDV_SHIFT 8
+#define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
 
|
|||
msg.g.version = 0x1;
|
||||
na = (struct nlattr *) GENLMSG_DATA(&msg);
|
||||
na->nla_type = nla_type;
|
||||
na->nla_len = nla_len + 1 + NLA_HDRLEN;
|
||||
na->nla_len = nla_len + NLA_HDRLEN;
|
||||
memcpy(NLA_DATA(na), nla_data, nla_len);
|
||||
msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
|
||||
|
||||
|
|