This is the 4.9.38 stable release
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAllp69gACgkQONu9yGCS
aT6WLQ/8CVbMTorw1Xb4x9vsIP7v/ozsPYDN41EbpsH19/3xxEyhR0LsN056GxA4
HPweVL7ridF6/IMzGg+RoBJsjsXR/+bxkhgS/7dcT37BKqAlUOSNV+AdVz7z+NgM
UOm+s6OjRSRz2A01jqd/k0emGWx0+3Cd2C/637RBgv+vo4D5lDTLhNTFbEM/45oL
QG0fz/Ba0C9dvrPMwMvMwLzS/Bi+h9PeUazjVgAQHCXUkeSvJYJTk8TmYVUGIJIc
E4CNaKl3qUEMph/7YujPpdXYaWR/SYnoBv0UYZovksxdmU0DegF4V3BVO8VKt0ma
K0gf8k3YRAXiEOW9D7yfffnZxbqA0Cwe4hau1gY+IVQYxwP6YaygBMHLbssTKbY1
WJ36WzQMmqiZ7YD7Yf7cHjDG+d1bl9UYACr0BE8rlWpHa1bJme6H8BdLVqnP7OM8
KpBgmRAZbQ5TBX/fCrd+PzgfS66393qMPiXzPrpTMyo4HfBAOs8FVCeyxzX+3Qwm
Qd35CH0wIcHSJ48d+3FCuxYQ654IoLIZieStnFYyXKGhFe5Cgd+6F91oJiRAvY59
BssBfl2RqnZB9l+tBMliYbCHTOzZKZv70Z9/+vw9ogNqiv7ZlTW7maobLzk7AQOf
HiCC6GpnxM2FzDSjD/vfQZM9LTSUhXw5owebA+vwCc3KZgpNrn0=
=zHg+
-----END PGP SIGNATURE-----

Merge 4.9.38 into android-4.9

Changes in 4.9.38
	mqueue: fix a use-after-free in sys_mq_notify()
	Add "shutdown" to "struct class".
	tpm: Issue a TPM2_Shutdown for TPM2 devices.
	tools include: Add a __fallthrough statement
	tools string: Use __fallthrough in perf_atoll()
	tools strfilter: Use __fallthrough
	perf top: Use __fallthrough
	perf thread_map: Correctly size buffer used with dirent->dt_name
	perf intel-pt: Use __fallthrough
	perf tests: Avoid possible truncation with dirent->d_name + snprintf
	perf bench numa: Avoid possible truncation when using snprintf()
	perf header: Fix handling of PERF_EVENT_UPDATE__SCALE
	perf scripting perl: Fix compile error with some perl5 versions
	perf probe: Fix to probe on gcc generated symbols for offline kernel
	perf probe: Add error checks to offline probe post-processing
	md: fix incorrect use of lexx_to_cpu in does_sb_need_changing
	md: fix super_offset endianness in super_1_rdev_size_change
	locking/rwsem-spinlock: Fix EINTR branch in __down_write_common()
	staging: vt6556: vnt_start Fix missing call to vnt_key_init_table.
	staging: comedi: fix clean-up of comedi_class in comedi_init()
	crypto: caam - fix gfp allocation flags (part I)
	crypto: rsa-pkcs1pad - use constant time memory comparison for MACs
	ext4: check return value of kstrtoull correctly in reserved_clusters_store
	x86/mm/pat: Don't report PAT on CPUs that don't support it
	saa7134: fix warm Medion 7134 EEPROM read
	Linux 4.9.38

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 3f353c3ed4
29 changed files with 206 additions and 39 deletions
Makefile

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 37
+SUBLEVEL = 38
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -7,6 +7,7 @@
 bool pat_enabled(void);
 void pat_disable(const char *reason);
 extern void pat_init(void);
+extern void init_cache_modes(void);
 
 extern int reserve_memtype(u64 start, u64 end,
                 enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
@@ -1053,6 +1053,13 @@ void __init setup_arch(char **cmdline_p)
 
         max_possible_pfn = max_pfn;
 
+        /*
+         * This call is required when the CPU does not support PAT. If
+         * mtrr_bp_init() invoked it already via pat_init() the call has no
+         * effect.
+         */
+        init_cache_modes();
+
         /*
          * Define random base addresses for memory sections after max_pfn is
          * defined and before each memory section base is used.
@@ -36,14 +36,14 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "" fmt
 
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
 
 void pat_disable(const char *reason)
 {
-        if (!__pat_enabled)
+        if (pat_disabled)
                 return;
 
         if (boot_cpu_done) {
@@ -51,10 +51,8 @@ void pat_disable(const char *reason)
                 return;
         }
 
-        __pat_enabled = 0;
+        pat_disabled = true;
         pr_info("x86/PAT: %s\n", reason);
-
-        init_cache_modes();
 }
 
 static int __init nopat(char *str)
@@ -66,7 +64,7 @@ early_param("nopat", nopat);
 
 bool pat_enabled(void)
 {
-        return !!__pat_enabled;
+        return pat_initialized;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -204,6 +202,8 @@ static void __init_cache_modes(u64 pat)
                 update_cache_mode_entry(i, cache);
         }
         pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+        init_cm_done = true;
 }
 
 #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -224,6 +224,7 @@ static void pat_bsp_init(u64 pat)
         }
 
         wrmsrl(MSR_IA32_CR_PAT, pat);
+        pat_initialized = true;
 
         __init_cache_modes(pat);
 }
@@ -241,10 +242,9 @@ static void pat_ap_init(u64 pat)
         wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-static void init_cache_modes(void)
+void init_cache_modes(void)
 {
         u64 pat = 0;
-        static int init_cm_done;
 
         if (init_cm_done)
                 return;
@@ -286,8 +286,6 @@ static void init_cache_modes(void)
         }
 
         __init_cache_modes(pat);
-
-        init_cm_done = 1;
 }
 
 /**
@@ -305,10 +303,8 @@ void pat_init(void)
         u64 pat;
         struct cpuinfo_x86 *c = &boot_cpu_data;
 
-        if (!pat_enabled()) {
-                init_cache_modes();
+        if (pat_disabled)
                 return;
-        }
 
         if ((c->x86_vendor == X86_VENDOR_INTEL) &&
             (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
@@ -496,7 +496,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
                 goto done;
         pos++;
 
-        if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
+        if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size))
                 goto done;
 
         pos += digest_info->size;
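The rsa-pkcs1pad hunk above swaps memcmp() for crypto_memneq() because memcmp() may return as soon as the first differing byte is found, leaking through timing how many leading bytes of an attacker-supplied value matched. A minimal userspace sketch of a constant-time comparison (illustrative only, not the kernel's crypto_memneq() implementation):

#include <stddef.h>

/* Constant-time comparison sketch: XOR every byte pair and OR the results
 * together, so the loop always runs to the end regardless of where the
 * first mismatch is. Returns 0 only if the buffers are equal. */
static int ct_memneq(const void *a, const void *b, size_t len)
{
        const unsigned char *pa = a, *pb = b;
        unsigned char diff = 0;
        size_t i;

        for (i = 0; i < len; i++)
                diff |= pa[i] ^ pb[i];

        return diff;    /* non-zero means "not equal" */
}

Because every byte is touched, the running time does not depend on how much of the forged value was correct.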
@@ -2095,7 +2095,11 @@ void device_shutdown(void)
                 pm_runtime_get_noresume(dev);
                 pm_runtime_barrier(dev);
 
-                if (dev->bus && dev->bus->shutdown) {
+                if (dev->class && dev->class->shutdown) {
+                        if (initcall_debug)
+                                dev_info(dev, "shutdown\n");
+                        dev->class->shutdown(dev);
+                } else if (dev->bus && dev->bus->shutdown) {
                         if (initcall_debug)
                                 dev_info(dev, "shutdown\n");
                         dev->bus->shutdown(dev);
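The device_shutdown() hunk above makes the driver core prefer the new class-level shutdown callback (the Add "shutdown" to "struct class" change) over the bus one. A hypothetical sketch of a subsystem wiring that hook; example_class and example_class_shutdown are illustrative names, not from this patch:

/* Hypothetical sketch: a subsystem using the new class->shutdown hook. */
static int example_class_shutdown(struct device *dev)
{
        dev_info(dev, "quiescing before power-off\n");
        /* subsystem-specific quiesce work goes here */
        return 0;
}

static struct class example_class = {
        .name     = "example",
        .shutdown = example_class_shutdown,
};

With this in place, device_shutdown() calls example_class_shutdown() for every device of that class before falling back to bus- or driver-level shutdown.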
@@ -130,6 +130,41 @@ static void tpm_dev_release(struct device *dev)
         kfree(chip);
 }
 
+
+/**
+ * tpm_class_shutdown() - prepare the TPM device for loss of power.
+ * @dev: device to which the chip is associated.
+ *
+ * Issues a TPM2_Shutdown command prior to loss of power, as required by the
+ * TPM 2.0 spec.
+ * Then, calls bus- and device- specific shutdown code.
+ *
+ * XXX: This codepath relies on the fact that sysfs is not enabled for
+ * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
+ * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ */
+static int tpm_class_shutdown(struct device *dev)
+{
+        struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+        if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+                down_write(&chip->ops_sem);
+                tpm2_shutdown(chip, TPM2_SU_CLEAR);
+                chip->ops = NULL;
+                up_write(&chip->ops_sem);
+        }
+        /* Allow bus- and device-specific code to run. Note: since chip->ops
+         * is NULL, more-specific shutdown code will not be able to issue TPM
+         * commands.
+         */
+        if (dev->bus && dev->bus->shutdown)
+                dev->bus->shutdown(dev);
+        else if (dev->driver && dev->driver->shutdown)
+                dev->driver->shutdown(dev);
+        return 0;
+}
+
+
 /**
  * tpm_chip_alloc() - allocate a new struct tpm_chip instance
  * @pdev: device to which the chip is associated
@@ -168,6 +203,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
         device_initialize(&chip->dev);
 
         chip->dev.class = tpm_class;
+        chip->dev.class->shutdown = tpm_class_shutdown;
         chip->dev.release = tpm_dev_release;
         chip->dev.parent = pdev;
         chip->dev.groups = chip->groups;
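tpm_class_shutdown() above clears chip->ops under ops_sem so that nothing can issue TPM commands after TPM2_Shutdown has been sent. A hypothetical sketch of the accessor pattern this relies on (illustrative name and shape, not the kernel's exact helper):

/* Command paths take ops_sem for reading and bail out once chip->ops has
 * been cleared by the shutdown handler. */
static bool example_tpm_try_get_ops(struct tpm_chip *chip)
{
        down_read(&chip->ops_sem);
        if (!chip->ops) {
                up_read(&chip->ops_sem);
                return false;   /* chip already shut down */
        }
        return true;            /* caller does up_read() when finished */
}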
@@ -285,6 +285,11 @@ static const struct attribute_group tpm_dev_group = {
 
 void tpm_sysfs_add_device(struct tpm_chip *chip)
 {
+        /* XXX: If you wish to remove this restriction, you must first update
+         * tpm_sysfs to explicitly lock chip->ops.
+         */
+        if (chip->flags & TPM_CHIP_FLAG_TPM2)
+                return;
         /* The sysfs routines rely on an implicit tpm_try_get_ops, device_del
          * is called before ops is null'd and the sysfs core synchronizes this
          * removal so that no callbacks are running or can run again
@@ -2601,8 +2601,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
         struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
         struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
         struct device *jrdev = ctx->jrdev;
-        gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-                       CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC;
         int src_nents, dst_nents = 0, sec4_sg_bytes;
         struct ablkcipher_edesc *edesc;
@@ -1861,7 +1861,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
         }
         sb = page_address(rdev->sb_page);
         sb->data_size = cpu_to_le64(num_sectors);
-        sb->super_offset = rdev->sb_start;
+        sb->super_offset = cpu_to_le64(rdev->sb_start);
         sb->sb_csum = calc_sb_1_csum(sb);
         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                        rdev->sb_page);
@@ -2270,7 +2270,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
         /* Check if any mddev parameters have changed */
         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
-            (mddev->layout != le64_to_cpu(sb->layout)) ||
+            (mddev->layout != le32_to_cpu(sb->layout)) ||
             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
                 return true;
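Both md hunks above are endianness fixes: the superblock lives on disk in little-endian byte order, so a 64-bit field such as super_offset must be written with cpu_to_le64(), and a 32-bit field such as layout must be read with le32_to_cpu(), not the 64-bit helper. A short sketch of the convention, using a hypothetical on-disk structure:

/* Sketch: little-endian on-disk fields keep their __le32/__le64 types and
 * are converted at every access with the helper matching the field width. */
struct example_ondisk_sb {
        __le64 super_offset;    /* 64-bit little-endian on disk */
        __le32 layout;          /* 32-bit little-endian on disk */
};

static void example_set_offset(struct example_ondisk_sb *sb, u64 offset)
{
        sb->super_offset = cpu_to_le64(offset);         /* CPU -> disk order */
}

static bool example_layout_changed(const struct example_ondisk_sb *sb, u32 layout)
{
        return layout != le32_to_cpu(sb->layout);       /* disk -> CPU order */
}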
@@ -355,12 +355,43 @@ static struct i2c_client saa7134_client_template = {
 
 /* ----------------------------------------------------------- */
 
+/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+{
+        u8 subaddr = 0x7, dmdregval;
+        u8 data[2];
+        int ret;
+        struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
+                                           .buf = &subaddr, .len = 1},
+                                          {.addr = 0x08,
+                                           .flags = I2C_M_RD,
+                                           .buf = &dmdregval, .len = 1}
+                                        };
+        struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
+                                           .buf = data, .len = 2} };
+
+        ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
+        if ((ret == 2) && (dmdregval & 0x2)) {
+                pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+                         dev->name);
+
+                data[0] = subaddr;
+                data[1] = (dmdregval & ~0x2);
+                if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
+                        pr_err("%s: EEPROM i2c gate open failure\n",
+                               dev->name);
+        }
+}
+
 static int
 saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
 {
         unsigned char buf;
         int i,err;
 
+        if (dev->board == SAA7134_BOARD_MD7134)
+                saa7134_i2c_eeprom_md7134_gate(dev);
+
         dev->i2c_client.addr = 0xa0 >> 1;
         buf = 0;
         if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {
@@ -2908,6 +2908,7 @@ static int __init comedi_init(void)
         dev = comedi_alloc_board_minor(NULL);
         if (IS_ERR(dev)) {
                 comedi_cleanup_board_minors();
+                class_destroy(comedi_class);
                 cdev_del(&comedi_cdev);
                 unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
                                          COMEDI_NUM_MINORS);
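The comedi_init() hunk above adds the missing class_destroy() to the error path so that everything created before the failure point is torn down in reverse order. A generic sketch of that init/unwind pattern (hypothetical names, not comedi's actual code):

static struct class *example_class;
static dev_t example_devt;

static int __init example_init(void)
{
        struct device *dev;
        int ret;

        ret = alloc_chrdev_region(&example_devt, 0, 1, "example");
        if (ret)
                return ret;

        example_class = class_create(THIS_MODULE, "example");
        if (IS_ERR(example_class)) {
                ret = PTR_ERR(example_class);
                goto out_chrdev;
        }

        dev = device_create(example_class, NULL, example_devt, NULL, "example0");
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto out_class;                 /* undo class_create() */
        }
        return 0;

out_class:
        class_destroy(example_class);           /* the step the fix restores */
out_chrdev:
        unregister_chrdev_region(example_devt, 1);
        return ret;
}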
@@ -522,6 +522,9 @@ static int vnt_start(struct ieee80211_hw *hw)
                 goto free_all;
         }
 
+        if (vnt_key_init_table(priv))
+                goto free_all;
+
         priv->int_interval = 1;  /* bInterval is set to 1 */
 
         vnt_int_start_interrupt(priv);
@@ -100,7 +100,7 @@ static ssize_t reserved_clusters_store(struct ext4_attr *a,
         int ret;
 
         ret = kstrtoull(skip_spaces(buf), 0, &val);
-        if (!ret || val >= clusters)
+        if (ret || val >= clusters)
                 return -EINVAL;
 
         atomic64_set(&sbi->s_resv_clusters, val);
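The ext4 hunk above fixes an inverted test: kstrtoull() returns 0 on success and a negative errno on failure, so "if (!ret || ...)" rejected every value that parsed correctly and let parse failures through. An illustrative sketch of the intended pattern (hypothetical helper name):

/* kstrtoull() contract: non-zero return is an error code, zero means
 * "val" now holds the parsed number. */
static int example_parse_limit(const char *buf, unsigned long long max,
                               unsigned long long *out)
{
        unsigned long long val;
        int ret;

        ret = kstrtoull(buf, 0, &val);
        if (ret)                /* not a number: propagate the error */
                return ret;
        if (val >= max)         /* parsed, but outside the allowed range */
                return -EINVAL;
        *out = val;
        return 0;
}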
@@ -373,6 +373,7 @@ int subsys_virtual_register(struct bus_type *subsys,
  * @suspend:    Used to put the device to sleep mode, usually to a low power
  *              state.
  * @resume:     Used to bring the device from the sleep mode.
+ * @shutdown:   Called at shut-down time to quiesce the device.
  * @ns_type:    Callbacks so sysfs can detemine namespaces.
  * @namespace:  Namespace of the device belongs to this class.
  * @pm:         The default device power management operations of this class.
@@ -401,6 +402,7 @@ struct class {
 
         int (*suspend)(struct device *dev, pm_message_t state);
         int (*resume)(struct device *dev);
+        int (*shutdown)(struct device *dev);
 
         const struct kobj_ns_type_operations *ns_type;
         const void *(*namespace)(struct device *dev);
@@ -1249,8 +1249,10 @@ retry:
 
                         timeo = MAX_SCHEDULE_TIMEOUT;
                         ret = netlink_attachskb(sock, nc, &timeo, NULL);
-                        if (ret == 1)
+                        if (ret == 1) {
+                                sock = NULL;
                                 goto retry;
+                        }
                         if (ret) {
                                 sock = NULL;
                                 nc = NULL;
@@ -233,8 +233,8 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
 
  out_nolock:
         list_del(&waiter.list);
-        if (!list_empty(&sem->wait_list))
-                __rwsem_do_wake(sem, 1);
+        if (!list_empty(&sem->wait_list) && sem->count >= 0)
+                __rwsem_do_wake(sem, 0);
         raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
         return -EINTR;
@@ -126,4 +126,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 #define WRITE_ONCE(x, val) \
         ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
+
+#ifndef __fallthrough
+# if defined(__GNUC__) && __GNUC__ >= 7
+#  define __fallthrough __attribute__ ((fallthrough))
+# else
+#  define __fallthrough
+# endif
+#endif
+
 #endif /* _TOOLS_LINUX_COMPILER_H */
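The tools/include hunk above defines __fallthrough, used by the several perf fixes in this release, so that an intentional fall-through can be annotated for GCC 7's -Wimplicit-fallthrough while older compilers see an empty macro. A minimal usage sketch (hypothetical function, assuming the macro above is in scope):

static long example_parse_suffix(char suffix, long value)
{
        switch (suffix) {
        case 'k':
                value *= 1024;
                __fallthrough;  /* intentional: 'k' then finishes like '\0' */
        case '\0':
                return value;
        default:
                return -1;      /* unknown suffix */
        }
}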
@@ -1573,13 +1573,13 @@ static int __bench_numa(const char *name)
                         "GB/sec,", "total-speed", "GB/sec total speed");
 
                 if (g->p.show_details >= 2) {
-                        char tname[32];
+                        char tname[14 + 2 * 10 + 1];
                         struct thread_data *td;
                         for (p = 0; p < g->p.nr_proc; p++) {
                                 for (t = 0; t < g->p.nr_threads; t++) {
-                                        memset(tname, 0, 32);
+                                        memset(tname, 0, sizeof(tname));
                                         td = g->threads + p*g->p.nr_threads + t;
-                                        snprintf(tname, 32, "process%d:thread%d", p, t);
+                                        snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
                                         print_res(tname, td->speed_gbs,
                                                 "GB/sec", "thread-speed", "GB/sec/thread speed");
                                         print_res(tname, td->system_time_ns / NSEC_PER_SEC,
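This hunk, like the perf tests and thread_map changes further below, replaces magic buffer sizes with worst-case sizes and lets sizeof() keep the snprintf() bound in sync with the buffer, which also silences GCC's truncation warnings. A standalone sketch of the pattern:

#include <stdio.h>

/* Size the buffer for the worst case ("process" + ":thread" = 14 chars,
 * two ints of up to 10 digits, plus the NUL) and always pass sizeof(). */
void example_name(int process, int thread)
{
        char tname[14 + 2 * 10 + 1];

        snprintf(tname, sizeof(tname), "process%d:thread%d", process, thread);
        printf("%s\n", tname);
}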
@@ -643,7 +643,7 @@ repeat:
                 case -1:
                         if (errno == EINTR)
                                 continue;
-                        /* Fall trhu */
+                        __fallthrough;
                 default:
                         c = getc(stdin);
                         tcsetattr(0, TCSAFLUSH, &save);
@@ -1779,15 +1779,14 @@ static int test_pmu_events(void)
         }
 
         while (!ret && (ent = readdir(dir))) {
-#define MAX_NAME 100
                 struct evlist_test e;
-                char name[MAX_NAME];
+                char name[2 * NAME_MAX + 1 + 12 + 3];
 
                 /* Names containing . are special and cannot be used directly */
                 if (strchr(ent->d_name, '.'))
                         continue;
 
-                snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name);
+                snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name);
 
                 e.name = name;
                 e.check = test__checkevent_pmu_events;
@@ -1795,11 +1794,10 @@ static int test_pmu_events(void)
                 ret = test_event(&e);
                 if (ret)
                         break;
-                snprintf(name, MAX_NAME, "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
+                snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
                 e.name = name;
                 e.check = test__checkevent_pmu_events_mix;
                 ret = test_event(&e);
-#undef MAX_NAME
         }
 
         closedir(dir);
@@ -3184,6 +3184,7 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
         case PERF_EVENT_UPDATE__SCALE:
                 ev_scale = (struct event_update_event_scale *) ev->data;
                 evsel->scale = ev_scale->scale;
+                break;
         case PERF_EVENT_UPDATE__CPUS:
                 ev_cpus = (struct event_update_event_cpus *) ev->data;
 
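The perf header hunk above adds the break that was missing after the PERF_EVENT_UPDATE__SCALE case; without it, execution fell through into PERF_EVENT_UPDATE__CPUS and reinterpreted the scale payload as a cpu map. A trimmed sketch of the corrected shape (fragment, types as in the surrounding code):

switch (ev->type) {
case PERF_EVENT_UPDATE__SCALE:
        evsel->scale = ((struct event_update_event_scale *)ev->data)->scale;
        break;          /* the previously missing statement */
case PERF_EVENT_UPDATE__CPUS:
        /* only reached for real CPU-map updates now */
        break;
}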
@@ -22,6 +22,7 @@
 #include <errno.h>
 #include <stdint.h>
 #include <inttypes.h>
+#include <linux/compiler.h>
 
 #include "../cache.h"
 #include "../util.h"
@@ -1744,6 +1745,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
                 switch (decoder->packet.type) {
                 case INTEL_PT_TIP_PGD:
                         decoder->continuous_period = false;
+                        __fallthrough;
                 case INTEL_PT_TIP_PGE:
                 case INTEL_PT_TIP:
                         intel_pt_log("ERROR: Unexpected packet\n");
@@ -1797,6 +1799,8 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
                         decoder->pge = false;
                         decoder->continuous_period = false;
                         intel_pt_clear_tx_flags(decoder);
+                        __fallthrough;
+
                 case INTEL_PT_TNT:
                         decoder->have_tma = false;
                         intel_pt_log("ERROR: Unexpected packet\n");
@@ -1837,6 +1841,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
                 switch (decoder->packet.type) {
                 case INTEL_PT_TIP_PGD:
                         decoder->continuous_period = false;
+                        __fallthrough;
                 case INTEL_PT_TIP_PGE:
                 case INTEL_PT_TIP:
                         decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
@@ -17,6 +17,7 @@
 #include <string.h>
 #include <endian.h>
 #include <byteswap.h>
+#include <linux/compiler.h>
 
 #include "intel-pt-pkt-decoder.h"
 
@@ -498,6 +499,7 @@ int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf,
         case INTEL_PT_FUP:
                 if (!(packet->count))
                         return snprintf(buf, buf_len, "%s no ip", name);
+                __fallthrough;
         case INTEL_PT_CYC:
         case INTEL_PT_VMCS:
         case INTEL_PT_MTC:
@@ -618,6 +618,67 @@ error:
         return ret ? : -ENOENT;
 }
 
+/* Adjust symbol name and address */
+static int post_process_probe_trace_point(struct probe_trace_point *tp,
+                                          struct map *map, unsigned long offs)
+{
+        struct symbol *sym;
+        u64 addr = tp->address + tp->offset - offs;
+
+        sym = map__find_symbol(map, addr);
+        if (!sym)
+                return -ENOENT;
+
+        if (strcmp(sym->name, tp->symbol)) {
+                /* If we have no realname, use symbol for it */
+                if (!tp->realname)
+                        tp->realname = tp->symbol;
+                else
+                        free(tp->symbol);
+                tp->symbol = strdup(sym->name);
+                if (!tp->symbol)
+                        return -ENOMEM;
+        }
+        tp->offset = addr - sym->start;
+        tp->address -= offs;
+
+        return 0;
+}
+
+/*
+ * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
+ * and generate new symbols with suffixes such as .constprop.N or .isra.N
+ * etc. Since those symbols are not recorded in DWARF, we have to find
+ * correct generated symbols from offline ELF binary.
+ * For online kernel or uprobes we don't need this because those are
+ * rebased on _text, or already a section relative address.
+ */
+static int
+post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
+                                        int ntevs, const char *pathname)
+{
+        struct map *map;
+        unsigned long stext = 0;
+        int i, ret = 0;
+
+        /* Prepare a map for offline binary */
+        map = dso__new_map(pathname);
+        if (!map || get_text_start_address(pathname, &stext) < 0) {
+                pr_warning("Failed to get ELF symbols for %s\n", pathname);
+                return -EINVAL;
+        }
+
+        for (i = 0; i < ntevs; i++) {
+                ret = post_process_probe_trace_point(&tevs[i].point,
+                                                     map, stext);
+                if (ret < 0)
+                        break;
+        }
+        map__put(map);
+
+        return ret;
+}
+
 static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
                                           int ntevs, const char *exec)
 {
@@ -694,7 +755,8 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
 
         /* Skip post process if the target is an offline kernel */
         if (symbol_conf.ignore_vmlinux_buildid)
-                return 0;
+                return post_process_offline_probe_trace_events(tevs, ntevs,
+                                                symbol_conf.vmlinux_name);
 
         reloc_sym = kernel_get_ref_reloc_sym();
         if (!reloc_sym) {
@@ -1,6 +1,6 @@
 libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
 libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
 
-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default
+CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
 
 CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
@@ -269,6 +269,7 @@ static int strfilter_node__sprint(struct strfilter_node *node, char *buf)
                 len = strfilter_node__sprint_pt(node->l, buf);
                 if (len < 0)
                         return len;
+                __fallthrough;
         case '!':
                 if (buf) {
                         *(buf + len++) = *node->p;
@@ -21,6 +21,8 @@ s64 perf_atoll(const char *str)
                 case 'b': case 'B':
                         if (*p)
                                 goto out_err;
+
+                        __fallthrough;
                 case '\0':
                         return length;
                 default:
@@ -93,7 +93,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
 {
         DIR *proc;
         int max_threads = 32, items, i;
-        char path[256];
+        char path[NAME_MAX + 1 + 6];
         struct dirent *dirent, **namelist = NULL;
         struct thread_map *threads = thread_map__alloc(max_threads);
 