import G96XFXXUGFUG4 OSRC

parent 4f1cca8e24
commit baaf28bf91
49 changed files with 2166 additions and 485 deletions

README.md  0  Normal file → Executable file
@@ -171,6 +171,11 @@
#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1)

/**
 * Maximum number of GPU memory region zones
 */
#define KBASE_REG_ZONE_MAX 4ul

#include "mali_kbase_js_defs.h"
#include "mali_kbase_hwaccess_defs.h"

@@ -1931,6 +1936,21 @@ struct kbase_sub_alloc {
DECLARE_BITMAP(sub_pages, SZ_2M / SZ_4K);
};

/**
 * struct kbase_reg_zone - Information about GPU memory region zones
 * @base_pfn: Page Frame Number in GPU virtual address space for the start of
 * the Zone
 * @va_size_pages: Size of the Zone in pages
 *
 * Track information about a zone KBASE_REG_ZONE() and related macros.
 * In future, this could also store the &rb_root that are currently in
 * &kbase_context
 */
struct kbase_reg_zone {
u64 base_pfn;
u64 va_size_pages;
};

/**
 * struct kbase_context - Kernel base context
 *
@@ -1981,6 +2001,7 @@ struct kbase_sub_alloc {
 * @reg_rbtree_exec: RB tree of the memory regions allocated from the EXEC_VA
 * zone of the GPU virtual address space. Used for GPU-executable
 * allocations which don't need the SAME_VA property.
 * @reg_zone: Zone information for the reg_rbtree_<...> members.
 * @cookies: Bitmask containing of BITS_PER_LONG bits, used mainly for
 * SAME_VA allocations to defer the reservation of memory region
 * (from the GPU virtual address space) from base_mem_alloc
@@ -2055,9 +2076,6 @@ struct kbase_sub_alloc {
 * created the context. Used for accounting the physical
 * pages used for GPU allocations, done for the context,
 * to the memory consumed by the process.
 * @same_va_end: End address of the SAME_VA zone (in 4KB page units)
 * @exec_va_start: Start address of the EXEC_VA zone (in 4KB page units)
 * or U64_MAX if the EXEC_VA zone is uninitialized.
 * @gpu_va_end: End address of the GPU va space (in 4KB page units)
 * @jit_va: Indicates if a JIT_VA zone has been created.
 * @mem_profile_data: Buffer containing the profiling information provided by
@@ -2189,6 +2207,7 @@ struct kbase_context {
struct rb_root reg_rbtree_same;
struct rb_root reg_rbtree_custom;
struct rb_root reg_rbtree_exec;
struct kbase_reg_zone reg_zone[KBASE_REG_ZONE_MAX];

unsigned long cookies;
@@ -2223,8 +2242,6 @@ struct kbase_context {

spinlock_t mm_update_lock;
struct mm_struct __rcu *process_mm;
u64 same_va_end;
u64 exec_va_start;
u64 gpu_va_end;
bool jit_va;

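For reference, the new per-context zone bookkeeping above is just the pair base_pfn/va_size_pages indexed by zone. The following is a minimal illustrative sketch, not part of this commit; the helper names zone_end_pfn and zone_contains_pfn are hypothetical and assume only the struct and KBASE_REG_ZONE_MAX shown above.

/* Sketch only: derive the end of a zone and test PFN membership. */
static inline u64 zone_end_pfn(const struct kbase_reg_zone *zone)
{
	/* One past the last PFN covered by the zone */
	return zone->base_pfn + zone->va_size_pages;
}

static inline bool zone_contains_pfn(const struct kbase_reg_zone *zone, u64 pfn)
{
	return pfn >= zone->base_pfn && pfn < zone_end_pfn(zone);
}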
@@ -84,22 +84,28 @@ static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx,
u64 gpu_pfn)
{
struct rb_root *rbtree = NULL;
struct kbase_reg_zone *exec_va_zone =
kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_EXEC_VA);

/* The gpu_pfn can only be greater than the starting pfn of the EXEC_VA
 * zone if this has been initialized.
 */
if (gpu_pfn >= kctx->exec_va_start)
if (gpu_pfn >= exec_va_zone->base_pfn)
rbtree = &kctx->reg_rbtree_exec;
else {
u64 same_va_end;

#ifdef CONFIG_64BIT
if (kbase_ctx_flag(kctx, KCTX_COMPAT))
if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
#endif /* CONFIG_64BIT */
same_va_end = KBASE_REG_ZONE_CUSTOM_VA_BASE;
#ifdef CONFIG_64BIT
else
same_va_end = kctx->same_va_end;
} else {
struct kbase_reg_zone *same_va_zone =
kbase_ctx_reg_zone_get(kctx,
KBASE_REG_ZONE_SAME_VA);
same_va_end = kbase_reg_zone_end_pfn(same_va_zone);
}
#endif /* CONFIG_64BIT */

if (gpu_pfn >= same_va_end)
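The hunk above swaps the kctx->exec_va_start and kctx->same_va_end comparisons for zone lookups. A hedged restatement of the resulting routing decision, with a hypothetical helper name (pick_rbtree) and the bounds passed in explicitly; it is an illustration, not the driver code verbatim:

/* Sketch: choose the region rbtree for a GPU PFN. exec_base stays U64_MAX
 * while the EXEC_VA zone is uninitialized, so the first test can only pass
 * once that zone has been created.
 */
static struct rb_root *pick_rbtree(struct kbase_context *kctx, u64 gpu_pfn,
				   u64 exec_base, u64 same_va_end)
{
	if (gpu_pfn >= exec_base)
		return &kctx->reg_rbtree_exec;
	if (gpu_pfn >= same_va_end)
		return &kctx->reg_rbtree_custom;
	return &kctx->reg_rbtree_same;
}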
@@ -699,21 +705,24 @@ int kbase_region_tracker_init(struct kbase_context *kctx)
u64 custom_va_size = KBASE_REG_ZONE_CUSTOM_VA_SIZE;
u64 gpu_va_limit = (1ULL << kctx->kbdev->gpu_props.mmu.va_bits) >> PAGE_SHIFT;
u64 same_va_pages;
u64 same_va_base = 1u;
int err;

/* Take the lock as kbase_free_alloced_region requires it */
kbase_gpu_vm_lock(kctx);

same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - same_va_base;
/* all have SAME_VA */
same_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 1,
same_va_pages,
KBASE_REG_ZONE_SAME_VA);
same_va_reg =
kbase_alloc_free_region(&kctx->reg_rbtree_same, same_va_base,
same_va_pages, KBASE_REG_ZONE_SAME_VA);

if (!same_va_reg) {
err = -ENOMEM;
goto fail_unlock;
}
kbase_ctx_reg_zone_init(kctx, KBASE_REG_ZONE_SAME_VA, same_va_base,
same_va_pages);

#ifdef CONFIG_64BIT
/* 32-bit clients have custom VA zones */

@@ -739,17 +748,23 @@ int kbase_region_tracker_init(struct kbase_context *kctx)
err = -ENOMEM;
goto fail_free_same_va;
}
kbase_ctx_reg_zone_init(kctx, KBASE_REG_ZONE_CUSTOM_VA,
KBASE_REG_ZONE_CUSTOM_VA_BASE,
custom_va_size);
#ifdef CONFIG_64BIT
} else {
custom_va_size = 0;
}
#endif
/* EXEC_VA zone's codepaths are slightly easier when its base_pfn is
 * initially U64_MAX
 */
kbase_ctx_reg_zone_init(kctx, KBASE_REG_ZONE_EXEC_VA, U64_MAX, 0u);
/* Other zones are 0: kbase_create_context() uses vzalloc */

kbase_region_tracker_ds_init(kctx, same_va_reg, custom_va_reg);

kctx->same_va_end = same_va_pages + 1;
kctx->gpu_va_end = kctx->same_va_end + custom_va_size;
kctx->exec_va_start = U64_MAX;
kctx->gpu_va_end = same_va_base + same_va_pages + custom_va_size;
kctx->jit_va = false;

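After this initialization the context zones satisfy a simple layout relationship. The check below is an illustrative sketch (not in the commit; check_initial_layout is a hypothetical name) of the invariant the code above establishes: SAME_VA starts at PFN 1, CUSTOM_VA (when present) follows it, and EXEC_VA stays uninitialized at U64_MAX with size 0.

/* Sketch: layout established by kbase_region_tracker_init(). */
static void check_initial_layout(struct kbase_context *kctx, u64 same_va_base,
				 u64 same_va_pages, u64 custom_va_size)
{
	WARN_ON(kctx->gpu_va_end !=
		same_va_base + same_va_pages + custom_va_size);
}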
@ -763,44 +778,147 @@ fail_unlock:
|
|||
return err;
|
||||
}
|
||||
|
||||
static bool kbase_has_exec_va_zone_locked(struct kbase_context *kctx)
|
||||
{
|
||||
struct kbase_reg_zone *exec_va_zone;
|
||||
|
||||
lockdep_assert_held(&kctx->reg_lock);
|
||||
exec_va_zone = kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_EXEC_VA);
|
||||
|
||||
return (exec_va_zone->base_pfn != U64_MAX);
|
||||
}
|
||||
|
||||
bool kbase_has_exec_va_zone(struct kbase_context *kctx)
|
||||
{
|
||||
bool has_exec_va_zone;
|
||||
|
||||
kbase_gpu_vm_lock(kctx);
|
||||
has_exec_va_zone = kbase_has_exec_va_zone_locked(kctx);
|
||||
kbase_gpu_vm_unlock(kctx);
|
||||
|
||||
return has_exec_va_zone;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if any allocations have been made on a context's region tracker
|
||||
* @kctx: KBase context
|
||||
*
|
||||
* Check the context to determine if any allocations have been made yet from
|
||||
* any of its zones. This check should be done before resizing a zone, e.g. to
|
||||
* make space to add a second zone.
|
||||
*
|
||||
* Whilst a zone without allocations can be resized whilst other zones have
|
||||
* allocations, we still check all of @kctx 's zones anyway: this is a stronger
|
||||
* guarantee and should be adhered to when creating new zones anyway.
|
||||
*
|
||||
* Allocations from kbdev zones are not counted.
|
||||
*
|
||||
* Return: true if any allocs exist on any zone, false otherwise
|
||||
*/
|
||||
bool kbase_region_tracker_has_allocs(struct kbase_context *kctx)
|
||||
{
|
||||
unsigned int zone_idx;
|
||||
|
||||
lockdep_assert_held(&kctx->reg_lock);
|
||||
|
||||
for (zone_idx = 0; zone_idx < KBASE_REG_ZONE_MAX; ++zone_idx) {
|
||||
struct kbase_reg_zone *zone;
|
||||
struct kbase_va_region *reg;
|
||||
u64 zone_base_addr;
|
||||
unsigned long zone_bits = KBASE_REG_ZONE(zone_idx);
|
||||
unsigned long reg_zone;
|
||||
|
||||
zone = kbase_ctx_reg_zone_get(kctx, zone_bits);
|
||||
zone_base_addr = zone->base_pfn << PAGE_SHIFT;
|
||||
|
||||
reg = kbase_region_tracker_find_region_base_address(
|
||||
kctx, zone_base_addr);
|
||||
|
||||
if (!zone->va_size_pages) {
|
||||
WARN(reg,
|
||||
"Should not have found a region that starts at 0x%.16llx for zone 0x%lx",
|
||||
(unsigned long long)zone_base_addr, zone_bits);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (WARN(!reg,
|
||||
"There should always be a region that starts at 0x%.16llx for zone 0x%lx, couldn't find it",
|
||||
(unsigned long long)zone_base_addr, zone_bits))
|
||||
return true; /* Safest return value */
|
||||
|
||||
reg_zone = reg->flags & KBASE_REG_ZONE_MASK;
|
||||
if (WARN(reg_zone != zone_bits,
|
||||
"The region that starts at 0x%.16llx should be in zone 0x%lx but was found in the wrong zone 0x%lx",
|
||||
(unsigned long long)zone_base_addr, zone_bits,
|
||||
reg_zone))
|
||||
return true; /* Safest return value */
|
||||
|
||||
/* Unless the region is completely free, of the same size as
|
||||
* the original zone, then it has allocs
|
||||
*/
|
||||
if ((!(reg->flags & KBASE_REG_FREE)) ||
|
||||
(reg->nr_pages != zone->va_size_pages))
|
||||
return true;
|
||||
}
|
||||
|
||||
/* All zones are the same size as originally made, so there are no
|
||||
* allocs
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
static int kbase_region_tracker_init_jit_64(struct kbase_context *kctx,
|
||||
u64 jit_va_pages)
|
||||
{
|
||||
struct kbase_va_region *same_va;
|
||||
struct kbase_va_region *same_va_reg;
|
||||
struct kbase_reg_zone *same_va_zone;
|
||||
u64 same_va_zone_base_addr;
|
||||
const unsigned long same_va_zone_bits = KBASE_REG_ZONE_SAME_VA;
|
||||
struct kbase_va_region *custom_va_reg;
|
||||
u64 jit_va_start;
|
||||
|
||||
lockdep_assert_held(&kctx->reg_lock);
|
||||
|
||||
/* First verify that a JIT_VA zone has not been created already. */
|
||||
if (kctx->jit_va)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Modify the same VA free region after creation. Be careful to ensure
|
||||
* that allocations haven't been made as they could cause an overlap
|
||||
* to happen with existing same VA allocations and the custom VA zone.
|
||||
* Modify the same VA free region after creation. The caller has
|
||||
* ensured that allocations haven't been made, as any allocations could
|
||||
* cause an overlap to happen with existing same VA allocations and the
|
||||
* custom VA zone.
|
||||
*/
|
||||
same_va = kbase_region_tracker_find_region_base_address(kctx,
|
||||
PAGE_SIZE);
|
||||
if (!same_va)
|
||||
same_va_zone = kbase_ctx_reg_zone_get(kctx, same_va_zone_bits);
|
||||
same_va_zone_base_addr = same_va_zone->base_pfn << PAGE_SHIFT;
|
||||
|
||||
same_va_reg = kbase_region_tracker_find_region_base_address(
|
||||
kctx, same_va_zone_base_addr);
|
||||
if (WARN(!same_va_reg,
|
||||
"Already found a free region at the start of every zone, but now cannot find any region for zone base 0x%.16llx zone 0x%lx",
|
||||
(unsigned long long)same_va_zone_base_addr, same_va_zone_bits))
|
||||
return -ENOMEM;
|
||||
|
||||
if (same_va->nr_pages < jit_va_pages || kctx->same_va_end < jit_va_pages)
|
||||
/* kbase_region_tracker_has_allocs() in the caller has already ensured
|
||||
* that all of the zones have no allocs, so no need to check that again
|
||||
* on same_va_reg
|
||||
*/
|
||||
WARN_ON((!(same_va_reg->flags & KBASE_REG_FREE)) ||
|
||||
same_va_reg->nr_pages != same_va_zone->va_size_pages);
|
||||
|
||||
if (same_va_reg->nr_pages < jit_va_pages ||
|
||||
same_va_zone->va_size_pages < jit_va_pages)
|
||||
return -ENOMEM;
|
||||
|
||||
/* It's safe to adjust the same VA zone now */
|
||||
same_va->nr_pages -= jit_va_pages;
|
||||
kctx->same_va_end -= jit_va_pages;
|
||||
same_va_reg->nr_pages -= jit_va_pages;
|
||||
same_va_zone->va_size_pages -= jit_va_pages;
|
||||
jit_va_start = kbase_reg_zone_end_pfn(same_va_zone);
|
||||
|
||||
/*
|
||||
* Create a custom VA zone at the end of the VA for allocations which
|
||||
* JIT can use so it doesn't have to allocate VA from the kernel.
|
||||
*/
|
||||
custom_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
|
||||
kctx->same_va_end,
|
||||
jit_va_pages,
|
||||
KBASE_REG_ZONE_CUSTOM_VA);
|
||||
custom_va_reg =
|
||||
kbase_alloc_free_region(&kctx->reg_rbtree_custom, jit_va_start,
|
||||
jit_va_pages, KBASE_REG_ZONE_CUSTOM_VA);
|
||||
|
||||
/*
|
||||
* The context will be destroyed if we fail here so no point
|
||||
|
@ -808,6 +926,11 @@ static int kbase_region_tracker_init_jit_64(struct kbase_context *kctx,
|
|||
*/
|
||||
if (!custom_va_reg)
|
||||
return -ENOMEM;
|
||||
/* Since this is 64-bit, the custom zone will not have been
|
||||
* initialized, so initialize it now
|
||||
*/
|
||||
kbase_ctx_reg_zone_init(kctx, KBASE_REG_ZONE_CUSTOM_VA, jit_va_start,
|
||||
jit_va_pages);
|
||||
|
||||
kbase_region_tracker_insert(custom_va_reg);
|
||||
return 0;
|
||||
|
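The JIT path above shrinks the SAME_VA zone and places the new CUSTOM_VA (JIT) zone immediately after its new end. A hedged worked sketch of that arithmetic, assuming only the zone fields introduced by this commit:

/* Sketch: carving jit_va_pages out of the end of SAME_VA.
 * Before: SAME_VA covers [base, base + size)
 * After:  SAME_VA covers [base, base + size - jit_va_pages)
 *         CUSTOM_VA (JIT) covers [base + size - jit_va_pages, base + size)
 */
same_va_zone->va_size_pages -= jit_va_pages;
jit_va_start = same_va_zone->base_pfn + same_va_zone->va_size_pages;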
@ -828,6 +951,23 @@ int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
|
|||
|
||||
kbase_gpu_vm_lock(kctx);
|
||||
|
||||
/* Verify that a JIT_VA zone has not been created already. */
|
||||
if (kctx->jit_va) {
|
||||
err = -EINVAL;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
/* If in 64-bit, we always lookup the SAME_VA zone. To ensure it has no
|
||||
* allocs, we can ensure there are no allocs anywhere.
|
||||
*
|
||||
* This check is also useful in 32-bit, just to make sure init of the
|
||||
* zone is always done before any allocs.
|
||||
*/
|
||||
if (kbase_region_tracker_has_allocs(kctx)) {
|
||||
err = -ENOMEM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
|
||||
err = kbase_region_tracker_init_jit_64(kctx, jit_va_pages);
|
||||
|
@ -844,6 +984,7 @@ int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
|
|||
kctx->jit_group_id = group_id;
|
||||
}
|
||||
|
||||
exit_unlock:
|
||||
kbase_gpu_vm_unlock(kctx);
|
||||
|
||||
return err;
|
||||
|
@ -851,24 +992,33 @@ int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
|
|||
|
||||
int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages)
|
||||
{
|
||||
struct kbase_va_region *shrinking_va_reg;
|
||||
struct kbase_va_region *exec_va_reg;
|
||||
u64 exec_va_start, exec_va_base_addr;
|
||||
struct kbase_reg_zone *exec_va_zone;
|
||||
struct kbase_reg_zone *target_zone;
|
||||
struct kbase_va_region *target_reg;
|
||||
u64 target_zone_base_addr;
|
||||
unsigned long target_zone_bits;
|
||||
u64 exec_va_start;
|
||||
int err;
|
||||
|
||||
/* The EXEC_VA zone shall be created by making space at the end of the
|
||||
* address space. Firstly, verify that the number of EXEC_VA pages
|
||||
* requested by the client is reasonable and then make sure that it is
|
||||
* not greater than the address space itself before calculating the base
|
||||
* address of the new zone.
|
||||
/* The EXEC_VA zone shall be created by making space either:
|
||||
* - for 64-bit clients, at the end of the process's address space
|
||||
* - for 32-bit clients, in the CUSTOM zone
|
||||
*
|
||||
* Firstly, verify that the number of EXEC_VA pages requested by the
|
||||
* client is reasonable and then make sure that it is not greater than
|
||||
* the address space itself before calculating the base address of the
|
||||
* new zone.
|
||||
*/
|
||||
if (exec_va_pages == 0 || exec_va_pages > KBASE_REG_ZONE_EXEC_VA_MAX_PAGES)
|
||||
return -EINVAL;
|
||||
|
||||
kbase_gpu_vm_lock(kctx);
|
||||
|
||||
/* First verify that a JIT_VA zone has not been created already. */
|
||||
if (kctx->jit_va) {
|
||||
/* Verify that we've not already created a EXEC_VA zone, and that the
|
||||
* EXEC_VA zone must come before JIT's CUSTOM_VA.
|
||||
*/
|
||||
if (kbase_has_exec_va_zone_locked(kctx) || kctx->jit_va) {
|
||||
err = -EPERM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
@ -878,27 +1028,49 @@ int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages
|
|||
goto exit_unlock;
|
||||
}
|
||||
|
||||
exec_va_start = kctx->gpu_va_end - exec_va_pages;
|
||||
exec_va_base_addr = exec_va_start << PAGE_SHIFT;
|
||||
|
||||
shrinking_va_reg = kbase_region_tracker_find_region_enclosing_address(kctx,
|
||||
exec_va_base_addr);
|
||||
if (!shrinking_va_reg) {
|
||||
/* Verify no allocations have already been made */
|
||||
if (kbase_region_tracker_has_allocs(kctx)) {
|
||||
err = -ENOMEM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
/* Make sure that the EXEC_VA region is still uninitialized */
|
||||
if ((shrinking_va_reg->flags & KBASE_REG_ZONE_MASK) ==
|
||||
KBASE_REG_ZONE_EXEC_VA) {
|
||||
err = -EPERM;
|
||||
goto exit_unlock;
|
||||
#ifdef CONFIG_64BIT
|
||||
if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
|
||||
#endif
|
||||
/* 32-bit client: take from CUSTOM_VA zone */
|
||||
target_zone_bits = KBASE_REG_ZONE_CUSTOM_VA;
|
||||
#ifdef CONFIG_64BIT
|
||||
} else {
|
||||
/* 64-bit client: take from SAME_VA zone */
|
||||
target_zone_bits = KBASE_REG_ZONE_SAME_VA;
|
||||
}
|
||||
#endif
|
||||
target_zone = kbase_ctx_reg_zone_get(kctx, target_zone_bits);
|
||||
target_zone_base_addr = target_zone->base_pfn << PAGE_SHIFT;
|
||||
|
||||
if (shrinking_va_reg->nr_pages <= exec_va_pages) {
|
||||
target_reg = kbase_region_tracker_find_region_base_address(
|
||||
kctx, target_zone_base_addr);
|
||||
if (WARN(!target_reg,
|
||||
"Already found a free region at the start of every zone, but now cannot find any region for zone base 0x%.16llx zone 0x%lx",
|
||||
(unsigned long long)target_zone_base_addr, target_zone_bits)) {
|
||||
err = -ENOMEM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
/* kbase_region_tracker_has_allocs() above has already ensured that all
|
||||
* of the zones have no allocs, so no need to check that again on
|
||||
* target_reg
|
||||
*/
|
||||
WARN_ON((!(target_reg->flags & KBASE_REG_FREE)) ||
|
||||
target_reg->nr_pages != target_zone->va_size_pages);
|
||||
|
||||
if (target_reg->nr_pages <= exec_va_pages ||
|
||||
target_zone->va_size_pages <= exec_va_pages) {
|
||||
err = -ENOMEM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
/* Taken from the end of the target zone */
|
||||
exec_va_start = kbase_reg_zone_end_pfn(target_zone) - exec_va_pages;
|
||||
|
||||
exec_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_exec,
|
||||
exec_va_start,
|
||||
|
@ -908,13 +1080,17 @@ int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages
|
|||
err = -ENOMEM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
/* Update EXEC_VA zone
|
||||
*
|
||||
* not using kbase_ctx_reg_zone_init() - it was already initialized
|
||||
*/
|
||||
exec_va_zone = kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_EXEC_VA);
|
||||
exec_va_zone->base_pfn = exec_va_start;
|
||||
exec_va_zone->va_size_pages = exec_va_pages;
|
||||
|
||||
shrinking_va_reg->nr_pages -= exec_va_pages;
|
||||
#ifdef CONFIG_64BIT
|
||||
if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
|
||||
kctx->same_va_end -= exec_va_pages;
|
||||
#endif
|
||||
kctx->exec_va_start = exec_va_start;
|
||||
/* Update target zone and corresponding region */
|
||||
target_reg->nr_pages -= exec_va_pages;
|
||||
target_zone->va_size_pages -= exec_va_pages;
|
||||
|
||||
kbase_region_tracker_insert(exec_va_reg);
|
||||
err = 0;
|
||||
|
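To summarize the EXEC_VA creation above: the zone is taken from the tail of a target zone (SAME_VA for 64-bit clients, CUSTOM_VA for 32-bit ones) that was verified to be completely free. A hedged sketch of the resulting updates, using the helpers this commit adds:

/* Sketch: EXEC_VA occupies the last exec_va_pages of the target zone. */
exec_va_start = kbase_reg_zone_end_pfn(target_zone) - exec_va_pages;
target_zone->va_size_pages -= exec_va_pages;
/* EXEC_VA was pre-initialized to U64_MAX/0, so it is updated in place */
exec_va_zone->base_pfn = exec_va_start;
exec_va_zone->va_size_pages = exec_va_pages;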
@ -1219,7 +1395,9 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64
|
|||
if (err)
|
||||
goto bad_insert;
|
||||
|
||||
kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc);
|
||||
/* Note: mapping count is tracked at alias
|
||||
* creation time
|
||||
*/
|
||||
} else {
|
||||
err = kbase_mmu_insert_single_page(kctx,
|
||||
reg->start_pfn + i * stride,
|
||||
|
@ -1278,13 +1456,6 @@ bad_insert:
|
|||
reg->start_pfn, reg->nr_pages,
|
||||
kctx->as_nr);
|
||||
|
||||
if (alloc->type == KBASE_MEM_TYPE_ALIAS) {
|
||||
KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased);
|
||||
while (i--)
|
||||
if (alloc->imported.alias.aliased[i].alloc)
|
||||
kbase_mem_phy_alloc_gpu_unmapped(alloc->imported.alias.aliased[i].alloc);
|
||||
}
|
||||
|
||||
kbase_remove_va_region(reg);
|
||||
|
||||
return err;
|
||||
|
@ -1298,7 +1469,6 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
|
|||
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
|
||||
{
|
||||
int err = 0;
|
||||
size_t i;
|
||||
|
||||
if (reg->start_pfn == 0)
|
||||
return 0;
|
||||
|
@ -1323,10 +1493,9 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
|
|||
/* Update tracking, and other cleanup, depending on memory type. */
|
||||
switch (reg->gpu_alloc->type) {
|
||||
case KBASE_MEM_TYPE_ALIAS:
|
||||
KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
|
||||
for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
|
||||
if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
|
||||
kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
|
||||
/* We mark the source allocs as unmapped from the GPU when
|
||||
* putting reg's allocs
|
||||
*/
|
||||
break;
|
||||
case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
|
||||
struct kbase_alloc_import_user_buf *user_buf =
|
||||
|
@ -2495,8 +2664,10 @@ void kbase_mem_kref_free(struct kref *kref)
|
|||
aliased = alloc->imported.alias.aliased;
|
||||
if (aliased) {
|
||||
for (i = 0; i < alloc->imported.alias.nents; i++)
|
||||
if (aliased[i].alloc)
|
||||
if (aliased[i].alloc) {
|
||||
kbase_mem_phy_alloc_gpu_unmapped(aliased[i].alloc);
|
||||
kbase_mem_phy_alloc_put(aliased[i].alloc);
|
||||
}
|
||||
vfree(aliased);
|
||||
}
|
||||
break;
|
||||
|
@ -3523,18 +3694,6 @@ void kbase_jit_term(struct kbase_context *kctx)
|
|||
cancel_work_sync(&kctx->jit_work);
|
||||
}
|
||||
|
||||
bool kbase_has_exec_va_zone(struct kbase_context *kctx)
|
||||
{
|
||||
bool has_exec_va_zone;
|
||||
|
||||
kbase_gpu_vm_lock(kctx);
|
||||
has_exec_va_zone = (kctx->exec_va_start != U64_MAX);
|
||||
kbase_gpu_vm_unlock(kctx);
|
||||
|
||||
return has_exec_va_zone;
|
||||
}
|
||||
|
||||
|
||||
int kbase_jd_user_buf_pin_pages(struct kbase_context *kctx,
|
||||
struct kbase_va_region *reg)
|
||||
{
|
||||
|
|
|
@@ -105,7 +105,9 @@ struct kbase_aliased {
 * updated as part of the change.
 *
 * @kref: number of users of this alloc
 * @gpu_mappings: count number of times mapped on the GPU
 * @gpu_mappings: count number of times mapped on the GPU. Indicates the number
 * of references there are to the physical pages from different
 * GPU VA regions.
 * @nents: 0..N
 * @pages: N elements, only 0..nents are valid
 * @mappings: List of CPU mappings of this physical memory allocation.

@@ -279,8 +281,13 @@ struct kbase_va_region {
#define KBASE_REG_SHARE_BOTH (1ul << 10)

/* Space for 4 different zones */
#define KBASE_REG_ZONE_MASK (3ul << 11)
#define KBASE_REG_ZONE(x) (((x) & 3) << 11)
#define KBASE_REG_ZONE_MASK ((KBASE_REG_ZONE_MAX - 1ul) << 11)
#define KBASE_REG_ZONE(x) (((x) & (KBASE_REG_ZONE_MAX - 1ul)) << 11)
#define KBASE_REG_ZONE_IDX(x) (((x) & KBASE_REG_ZONE_MASK) >> 11)

#if ((KBASE_REG_ZONE_MAX - 1) & 0x3) != (KBASE_REG_ZONE_MAX - 1)
#error KBASE_REG_ZONE_MAX too large for allocation of KBASE_REG_<...> bits
#endif

/* GPU read access */
#define KBASE_REG_GPU_RD (1ul<<13)

@@ -1685,4 +1692,76 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
struct kbase_va_region *reg, enum kbase_sync_type sync_fn);
#endif /* CONFIG_DMA_SHARED_BUFFER */

/**
 * kbase_ctx_reg_zone_end_pfn - return the end Page Frame Number of @zone
 * @zone: zone to query
 *
 * Return: The end of the zone corresponding to @zone
 */
static inline u64 kbase_reg_zone_end_pfn(struct kbase_reg_zone *zone)
{
return zone->base_pfn + zone->va_size_pages;
}

/**
 * kbase_ctx_reg_zone_init - initialize a zone in @kctx
 * @kctx: Pointer to kbase context
 * @zone_bits: A KBASE_REG_ZONE_<...> to initialize
 * @base_pfn: Page Frame Number in GPU virtual address space for the start of
 * the Zone
 * @va_size_pages: Size of the Zone in pages
 */
static inline void kbase_ctx_reg_zone_init(struct kbase_context *kctx,
unsigned long zone_bits,
u64 base_pfn, u64 va_size_pages)
{
struct kbase_reg_zone *zone;

lockdep_assert_held(&kctx->reg_lock);
WARN_ON((zone_bits & KBASE_REG_ZONE_MASK) != zone_bits);

zone = &kctx->reg_zone[KBASE_REG_ZONE_IDX(zone_bits)];
*zone = (struct kbase_reg_zone){
.base_pfn = base_pfn, .va_size_pages = va_size_pages,
};
}

/**
 * kbase_ctx_reg_zone_get_nolock - get a zone from @kctx where the caller does
 * not have @kctx 's region lock
 * @kctx: Pointer to kbase context
 * @zone_bits: A KBASE_REG_ZONE_<...> to retrieve
 *
 * This should only be used in performance-critical paths where the code is
 * resilient to a race with the zone changing.
 *
 * Return: The zone corresponding to @zone_bits
 */
static inline struct kbase_reg_zone *
kbase_ctx_reg_zone_get_nolock(struct kbase_context *kctx,
unsigned long zone_bits)
{
WARN_ON((zone_bits & KBASE_REG_ZONE_MASK) != zone_bits);

return &kctx->reg_zone[KBASE_REG_ZONE_IDX(zone_bits)];
}

/**
 * kbase_ctx_reg_zone_get - get a zone from @kctx
 * @kctx: Pointer to kbase context
 * @zone_bits: A KBASE_REG_ZONE_<...> to retrieve
 *
 * The get is not refcounted - there is no corresponding 'put' operation
 *
 * Return: The zone corresponding to @zone_bits
 */
static inline struct kbase_reg_zone *
kbase_ctx_reg_zone_get(struct kbase_context *kctx, unsigned long zone_bits)
{
lockdep_assert_held(&kctx->reg_lock);
WARN_ON((zone_bits & KBASE_REG_ZONE_MASK) != zone_bits);

return &kctx->reg_zone[KBASE_REG_ZONE_IDX(zone_bits)];
}

#endif /* _KBASE_MEM_H_ */
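Taken together, the helpers above give a small lifecycle for a zone entry: initialize once under the region lock, then look it up and derive its end. The following is a minimal usage sketch, not part of the header; it assumes kctx->reg_lock is already held and that base_pfn and nr_pages are placeholder values.

/* Sketch: initialize a zone, then read it back under kctx->reg_lock. */
kbase_ctx_reg_zone_init(kctx, KBASE_REG_ZONE_SAME_VA, base_pfn, nr_pages);

{
	struct kbase_reg_zone *zone =
		kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_SAME_VA);
	u64 end_pfn = kbase_reg_zone_end_pfn(zone); /* base_pfn + nr_pages */

	WARN_ON(end_pfn < zone->base_pfn); /* would indicate overflow */
}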
|
|
@ -881,7 +881,12 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
|
|||
prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED;
|
||||
new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED;
|
||||
if (prev_needed != new_needed) {
|
||||
/* Aliased allocations can't be made ephemeral */
|
||||
/* Aliased allocations can't be shrunk as the code doesn't
|
||||
* support looking up:
|
||||
* - all physical pages assigned to different GPU VAs
|
||||
* - CPU mappings for the physical pages at different vm_pgoff
|
||||
* (==GPU VA) locations.
|
||||
*/
|
||||
if (atomic_read(&reg->cpu_alloc->gpu_mappings) > 1)
|
||||
goto out_unlock;
|
||||
|
||||
|
@ -1487,6 +1492,7 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
|
|||
u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx->kbdev);
|
||||
struct kbase_alloc_import_user_buf *user_buf;
|
||||
struct page **pages = NULL;
|
||||
int write;
|
||||
|
||||
if ((address & (cache_line_alignment - 1)) != 0 ||
|
||||
(size & (cache_line_alignment - 1)) != 0) {
|
||||
|
@ -1596,22 +1602,22 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
|
|||
|
||||
down_read(&current->mm->mmap_sem);
|
||||
|
||||
write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
|
||||
faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
|
||||
#if KERNEL_VERSION(4, 4, 168) <= LINUX_VERSION_CODE && \
|
||||
KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
|
||||
reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
|
||||
pages, NULL);
|
||||
write ? FOLL_WRITE : 0, pages, NULL);
|
||||
#else
|
||||
reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
|
||||
write, 0, pages, NULL);
|
||||
#endif
|
||||
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
|
||||
faulted_pages = get_user_pages(address, *va_pages,
|
||||
reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
|
||||
write, 0, pages, NULL);
|
||||
#else
|
||||
faulted_pages = get_user_pages(address, *va_pages,
|
||||
reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
|
||||
pages, NULL);
|
||||
write ? FOLL_WRITE : 0, pages, NULL);
|
||||
#endif
|
||||
|
||||
up_read(&current->mm->mmap_sem);
|
||||
|
@ -1783,6 +1789,15 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
|
|||
goto bad_handle; /* Not found/already free */
|
||||
if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
|
||||
goto bad_handle; /* Ephemeral region */
|
||||
if (aliasing_reg->flags & KBASE_REG_NO_USER_FREE)
|
||||
goto bad_handle; /* JIT regions can't be
|
||||
* aliased. NO_USER_FREE flag
|
||||
* covers the entire lifetime
|
||||
* of JIT regions. The other
|
||||
* types of regions covered
|
||||
* by this flag also shall
|
||||
* not be aliased.
|
||||
*/
|
||||
if (!(aliasing_reg->flags & KBASE_REG_GPU_CACHED))
|
||||
goto bad_handle; /* GPU uncached memory */
|
||||
if (!aliasing_reg->gpu_alloc)
|
||||
|
@ -1812,6 +1827,18 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
|
|||
reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
|
||||
reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
|
||||
reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset;
|
||||
|
||||
/* Ensure the underlying alloc is marked as being
|
||||
* mapped at >1 different GPU VA immediately, even
|
||||
* though mapping might not happen until later.
|
||||
*
|
||||
* Otherwise, we would (incorrectly) allow shrinking of
|
||||
* the source region (aliasing_reg) and so freeing the
|
||||
* physical pages (without freeing the entire alloc)
|
||||
* whilst we still hold an implicit reference on those
|
||||
* physical pages.
|
||||
*/
|
||||
kbase_mem_phy_alloc_gpu_mapped(alloc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1855,6 +1882,10 @@ no_cookie:
|
|||
#endif
|
||||
no_mmap:
|
||||
bad_handle:
|
||||
/* Marking the source allocs as not being mapped on the GPU and putting
|
||||
* them is handled by putting reg's allocs, so no rollback of those
|
||||
* actions is done here.
|
||||
*/
|
||||
kbase_gpu_vm_unlock(kctx);
|
||||
no_aliased_array:
|
||||
invalid_flags:
|
||||
|
@ -2094,7 +2125,15 @@ int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
|
|||
if (new_pages > reg->nr_pages)
|
||||
goto out_unlock;
|
||||
|
||||
/* can't be mapped more than once on the GPU */
|
||||
/* Can't shrink when physical pages are mapped to different GPU
|
||||
* VAs. The code doesn't support looking up:
|
||||
* - all physical pages assigned to different GPU VAs
|
||||
* - CPU mappings for the physical pages at different vm_pgoff
|
||||
* (==GPU VA) locations.
|
||||
*
|
||||
* Note that for Native allocs mapped at multiple GPU VAs, growth of
|
||||
* such allocs is not a supported use-case.
|
||||
*/
|
||||
if (atomic_read(®->gpu_alloc->gpu_mappings) > 1)
|
||||
goto out_unlock;
|
||||
/* can't grow regions which are ephemeral */
|
||||
|
|
|
@ -270,6 +270,26 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
|
|||
bool is_same_4gb_page = false;
|
||||
unsigned long ret;
|
||||
|
||||
/* the 'nolock' form is used here:
|
||||
* - the base_pfn of the SAME_VA zone does not change
|
||||
* - in normal use, va_size_pages is constant once the first allocation
|
||||
* begins
|
||||
*
|
||||
* However, in abnormal use this function could be processing whilst
|
||||
* another new zone is being setup in a different thread (e.g. to
|
||||
* borrow part of the SAME_VA zone). In the worst case, this path may
|
||||
* witness a higher SAME_VA end_pfn than the code setting up the new
|
||||
* zone.
|
||||
*
|
||||
* This is safe because once we reach the main allocation functions,
|
||||
* we'll see the updated SAME_VA end_pfn and will determine that there
|
||||
* is no free region at the address found originally by too large a
|
||||
* same_va_end_addr here, and will fail the allocation gracefully.
|
||||
*/
|
||||
struct kbase_reg_zone *zone =
|
||||
kbase_ctx_reg_zone_get_nolock(kctx, KBASE_REG_ZONE_SAME_VA);
|
||||
u64 same_va_end_addr = kbase_reg_zone_end_pfn(zone) << PAGE_SHIFT;
|
||||
|
||||
/* err on fixed address */
|
||||
if ((flags & MAP_FIXED) || addr)
|
||||
return -EINVAL;
|
||||
|
@ -280,9 +300,8 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
|
|||
return -ENOMEM;
|
||||
|
||||
if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
|
||||
|
||||
high_limit = min_t(unsigned long, mm->mmap_base,
|
||||
(kctx->same_va_end << PAGE_SHIFT));
|
||||
high_limit =
|
||||
min_t(unsigned long, mm->mmap_base, same_va_end_addr);
|
||||
|
||||
/* If there's enough (> 33 bits) of GPU VA space, align
|
||||
* to 2MB boundaries.
|
||||
|
@ -352,11 +371,10 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
|
|||
is_same_4gb_page);
|
||||
|
||||
if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base &&
|
||||
high_limit < (kctx->same_va_end << PAGE_SHIFT)) {
|
||||
high_limit < same_va_end_addr) {
|
||||
/* Retry above mmap_base */
|
||||
info.low_limit = mm->mmap_base;
|
||||
info.high_limit = min_t(u64, TASK_SIZE,
|
||||
(kctx->same_va_end << PAGE_SHIFT));
|
||||
info.high_limit = min_t(u64, TASK_SIZE, same_va_end_addr);
|
||||
|
||||
ret = kbase_unmapped_area_topdown(&info, is_shader_code,
|
||||
is_same_4gb_page);
|
||||
|
|
|
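The get_unmapped_area hunks above replace kctx->same_va_end with a zone lookup on the CPU-side mmap path. A hedged sketch of the replacement calculation, using the names as they appear in the diff; the nolock accessor is used because, as the diff's comment explains, a stale read is tolerated here and the allocation fails gracefully later:

/* Sketch: SAME_VA end address read without the region lock. */
struct kbase_reg_zone *zone =
	kbase_ctx_reg_zone_get_nolock(kctx, KBASE_REG_ZONE_SAME_VA);
u64 same_va_end_addr = kbase_reg_zone_end_pfn(zone) << PAGE_SHIFT;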
@ -171,6 +171,11 @@
|
|||
#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
|
||||
#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1)
|
||||
|
||||
/**
|
||||
* Maximum number of GPU memory region zones
|
||||
*/
|
||||
#define KBASE_REG_ZONE_MAX 4ul
|
||||
|
||||
#include "mali_kbase_js_defs.h"
|
||||
#include "mali_kbase_hwaccess_defs.h"
|
||||
|
||||
|
@ -1210,6 +1215,21 @@ struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
|
|||
#define DEVNAME_SIZE 16
|
||||
|
||||
|
||||
/**
|
||||
* struct kbase_reg_zone - Information about GPU memory region zones
|
||||
* @base_pfn: Page Frame Number in GPU virtual address space for the start of
|
||||
* the Zone
|
||||
* @va_size_pages: Size of the Zone in pages
|
||||
*
|
||||
* Track information about a zone KBASE_REG_ZONE() and related macros.
|
||||
* In future, this could also store the &rb_root that are currently in
|
||||
* &kbase_context
|
||||
*/
|
||||
struct kbase_reg_zone {
|
||||
u64 base_pfn;
|
||||
u64 va_size_pages;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct kbase_device - Object representing an instance of GPU platform device,
|
||||
* allocated from the probe method of mali driver.
|
||||
|
@ -1844,6 +1864,7 @@ struct kbase_sub_alloc {
|
|||
* @reg_rbtree_exec: RB tree of the memory regions allocated from the EXEC_VA
|
||||
* zone of the GPU virtual address space. Used for GPU-executable
|
||||
* allocations which don't need the SAME_VA property.
|
||||
* @reg_zone: Zone information for the reg_rbtree_<...> members.
|
||||
* @cookies: Bitmask containing of BITS_PER_LONG bits, used mainly for
|
||||
* SAME_VA allocations to defer the reservation of memory region
|
||||
* (from the GPU virtual address space) from base_mem_alloc
|
||||
|
@ -1915,9 +1936,6 @@ struct kbase_sub_alloc {
|
|||
* created the context. Used for accounting the physical
|
||||
* pages used for GPU allocations, done for the context,
|
||||
* to the memory consumed by the process.
|
||||
* @same_va_end: End address of the SAME_VA zone (in 4KB page units)
|
||||
* @exec_va_start: Start address of the EXEC_VA zone (in 4KB page units)
|
||||
* or U64_MAX if the EXEC_VA zone is uninitialized.
|
||||
* @gpu_va_end: End address of the GPU va space (in 4KB page units)
|
||||
* @jit_va: Indicates if a JIT_VA zone has been created.
|
||||
* @timeline: Object tracking the number of atoms currently in flight for
|
||||
|
@ -2044,6 +2062,7 @@ struct kbase_context {
|
|||
struct rb_root reg_rbtree_same;
|
||||
struct rb_root reg_rbtree_custom;
|
||||
struct rb_root reg_rbtree_exec;
|
||||
struct kbase_reg_zone reg_zone[KBASE_REG_ZONE_MAX];
|
||||
|
||||
|
||||
unsigned long cookies;
|
||||
|
@ -2086,8 +2105,6 @@ struct kbase_context {
|
|||
* All other flags must be added there */
|
||||
spinlock_t mm_update_lock;
|
||||
struct mm_struct __rcu *process_mm;
|
||||
u64 same_va_end;
|
||||
u64 exec_va_start;
|
||||
u64 gpu_va_end;
|
||||
bool jit_va;
|
||||
|
||||
|
|
|
@ -78,22 +78,28 @@ static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx,
|
|||
u64 gpu_pfn)
|
||||
{
|
||||
struct rb_root *rbtree = NULL;
|
||||
struct kbase_reg_zone *exec_va_zone =
|
||||
kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_EXEC_VA);
|
||||
|
||||
/* The gpu_pfn can only be greater than the starting pfn of the EXEC_VA
|
||||
* zone if this has been initialized.
|
||||
*/
|
||||
if (gpu_pfn >= kctx->exec_va_start)
|
||||
if (gpu_pfn >= exec_va_zone->base_pfn)
|
||||
rbtree = &kctx->reg_rbtree_exec;
|
||||
else {
|
||||
u64 same_va_end;
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
if (kbase_ctx_flag(kctx, KCTX_COMPAT))
|
||||
if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
|
||||
#endif /* CONFIG_64BIT */
|
||||
same_va_end = KBASE_REG_ZONE_CUSTOM_VA_BASE;
|
||||
#ifdef CONFIG_64BIT
|
||||
else
|
||||
same_va_end = kctx->same_va_end;
|
||||
} else {
|
||||
struct kbase_reg_zone *same_va_zone =
|
||||
kbase_ctx_reg_zone_get(kctx,
|
||||
KBASE_REG_ZONE_SAME_VA);
|
||||
same_va_end = kbase_reg_zone_end_pfn(same_va_zone);
|
||||
}
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
if (gpu_pfn >= same_va_end)
|
||||
|
@ -685,21 +691,24 @@ int kbase_region_tracker_init(struct kbase_context *kctx)
|
|||
u64 custom_va_size = KBASE_REG_ZONE_CUSTOM_VA_SIZE;
|
||||
u64 gpu_va_limit = (1ULL << kctx->kbdev->gpu_props.mmu.va_bits) >> PAGE_SHIFT;
|
||||
u64 same_va_pages;
|
||||
u64 same_va_base = 1u;
|
||||
int err;
|
||||
|
||||
/* Take the lock as kbase_free_alloced_region requires it */
|
||||
kbase_gpu_vm_lock(kctx);
|
||||
|
||||
same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
|
||||
same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - same_va_base;
|
||||
/* all have SAME_VA */
|
||||
same_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 1,
|
||||
same_va_pages,
|
||||
KBASE_REG_ZONE_SAME_VA);
|
||||
same_va_reg =
|
||||
kbase_alloc_free_region(&kctx->reg_rbtree_same, same_va_base,
|
||||
same_va_pages, KBASE_REG_ZONE_SAME_VA);
|
||||
|
||||
if (!same_va_reg) {
|
||||
err = -ENOMEM;
|
||||
goto fail_unlock;
|
||||
}
|
||||
kbase_ctx_reg_zone_init(kctx, KBASE_REG_ZONE_SAME_VA, same_va_base,
|
||||
same_va_pages);
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
/* 32-bit clients have custom VA zones */
|
||||
|
@ -725,17 +734,23 @@ int kbase_region_tracker_init(struct kbase_context *kctx)
|
|||
err = -ENOMEM;
|
||||
goto fail_free_same_va;
|
||||
}
|
||||
kbase_ctx_reg_zone_init(kctx, KBASE_REG_ZONE_CUSTOM_VA,
|
||||
KBASE_REG_ZONE_CUSTOM_VA_BASE,
|
||||
custom_va_size);
|
||||
#ifdef CONFIG_64BIT
|
||||
} else {
|
||||
custom_va_size = 0;
|
||||
}
|
||||
#endif
|
||||
/* EXEC_VA zone's codepaths are slightly easier when its base_pfn is
|
||||
* initially U64_MAX
|
||||
*/
|
||||
kbase_ctx_reg_zone_init(kctx, KBASE_REG_ZONE_EXEC_VA, U64_MAX, 0u);
|
||||
/* Other zones are 0: kbase_create_context() uses vzalloc */
|
||||
|
||||
kbase_region_tracker_ds_init(kctx, same_va_reg, custom_va_reg);
|
||||
|
||||
kctx->same_va_end = same_va_pages + 1;
|
||||
kctx->gpu_va_end = kctx->same_va_end + custom_va_size;
|
||||
kctx->exec_va_start = U64_MAX;
|
||||
kctx->gpu_va_end = same_va_base + same_va_pages + custom_va_size;
|
||||
kctx->jit_va = false;
|
||||
|
||||
|
||||
|
@ -749,48 +764,147 @@ fail_unlock:
|
|||
return err;
|
||||
}
|
||||
|
||||
static bool kbase_has_exec_va_zone_locked(struct kbase_context *kctx)
|
||||
{
|
||||
struct kbase_reg_zone *exec_va_zone;
|
||||
|
||||
lockdep_assert_held(&kctx->reg_lock);
|
||||
exec_va_zone = kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_EXEC_VA);
|
||||
|
||||
return (exec_va_zone->base_pfn != U64_MAX);
|
||||
}
|
||||
|
||||
bool kbase_has_exec_va_zone(struct kbase_context *kctx)
|
||||
{
|
||||
bool has_exec_va_zone;
|
||||
|
||||
kbase_gpu_vm_lock(kctx);
|
||||
has_exec_va_zone = kbase_has_exec_va_zone_locked(kctx);
|
||||
kbase_gpu_vm_unlock(kctx);
|
||||
|
||||
return has_exec_va_zone;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if any allocations have been made on a context's region tracker
|
||||
* @kctx: KBase context
|
||||
*
|
||||
* Check the context to determine if any allocations have been made yet from
|
||||
* any of its zones. This check should be done before resizing a zone, e.g. to
|
||||
* make space to add a second zone.
|
||||
*
|
||||
* Whilst a zone without allocations can be resized whilst other zones have
|
||||
* allocations, we still check all of @kctx 's zones anyway: this is a stronger
|
||||
* guarantee and should be adhered to when creating new zones anyway.
|
||||
*
|
||||
* Allocations from kbdev zones are not counted.
|
||||
*
|
||||
* Return: true if any allocs exist on any zone, false otherwise
|
||||
*/
|
||||
bool kbase_region_tracker_has_allocs(struct kbase_context *kctx)
|
||||
{
|
||||
unsigned int zone_idx;
|
||||
|
||||
lockdep_assert_held(&kctx->reg_lock);
|
||||
|
||||
for (zone_idx = 0; zone_idx < KBASE_REG_ZONE_MAX; ++zone_idx) {
|
||||
struct kbase_reg_zone *zone;
|
||||
struct kbase_va_region *reg;
|
||||
u64 zone_base_addr;
|
||||
unsigned long zone_bits = KBASE_REG_ZONE(zone_idx);
|
||||
unsigned long reg_zone;
|
||||
|
||||
zone = kbase_ctx_reg_zone_get(kctx, zone_bits);
|
||||
zone_base_addr = zone->base_pfn << PAGE_SHIFT;
|
||||
|
||||
reg = kbase_region_tracker_find_region_base_address(
|
||||
kctx, zone_base_addr);
|
||||
|
||||
if (!zone->va_size_pages) {
|
||||
WARN(reg,
|
||||
"Should not have found a region that starts at 0x%.16llx for zone 0x%lx",
|
||||
(unsigned long long)zone_base_addr, zone_bits);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (WARN(!reg,
|
||||
"There should always be a region that starts at 0x%.16llx for zone 0x%lx, couldn't find it",
|
||||
(unsigned long long)zone_base_addr, zone_bits))
|
||||
return true; /* Safest return value */
|
||||
|
||||
reg_zone = reg->flags & KBASE_REG_ZONE_MASK;
|
||||
if (WARN(reg_zone != zone_bits,
|
||||
"The region that starts at 0x%.16llx should be in zone 0x%lx but was found in the wrong zone 0x%lx",
|
||||
(unsigned long long)zone_base_addr, zone_bits,
|
||||
reg_zone))
|
||||
return true; /* Safest return value */
|
||||
|
||||
/* Unless the region is completely free, of the same size as
|
||||
* the original zone, then it has allocs
|
||||
*/
|
||||
if ((!(reg->flags & KBASE_REG_FREE)) ||
|
||||
(reg->nr_pages != zone->va_size_pages))
|
||||
return true;
|
||||
}
|
||||
|
||||
/* All zones are the same size as originally made, so there are no
|
||||
* allocs
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
static int kbase_region_tracker_init_jit_64(struct kbase_context *kctx,
|
||||
u64 jit_va_pages)
|
||||
{
|
||||
struct kbase_va_region *same_va;
|
||||
struct kbase_va_region *same_va_reg;
|
||||
struct kbase_reg_zone *same_va_zone;
|
||||
u64 same_va_zone_base_addr;
|
||||
const unsigned long same_va_zone_bits = KBASE_REG_ZONE_SAME_VA;
|
||||
struct kbase_va_region *custom_va_reg;
|
||||
u64 same_va_bits = kbase_get_same_va_bits(kctx);
|
||||
u64 total_va_size;
|
||||
u64 jit_va_start;
|
||||
|
||||
lockdep_assert_held(&kctx->reg_lock);
|
||||
|
||||
total_va_size = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
|
||||
|
||||
/* First verify that a JIT_VA zone has not been created already. */
|
||||
if (kctx->jit_va)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Modify the same VA free region after creation. Be careful to ensure
|
||||
* that allocations haven't been made as they could cause an overlap
|
||||
* to happen with existing same VA allocations and the custom VA zone.
|
||||
* Modify the same VA free region after creation. The caller has
|
||||
* ensured that allocations haven't been made, as any allocations could
|
||||
* cause an overlap to happen with existing same VA allocations and the
|
||||
* custom VA zone.
|
||||
*/
|
||||
same_va = kbase_region_tracker_find_region_base_address(kctx,
|
||||
PAGE_SIZE);
|
||||
if (!same_va)
|
||||
same_va_zone = kbase_ctx_reg_zone_get(kctx, same_va_zone_bits);
|
||||
same_va_zone_base_addr = same_va_zone->base_pfn << PAGE_SHIFT;
|
||||
|
||||
same_va_reg = kbase_region_tracker_find_region_base_address(
|
||||
kctx, same_va_zone_base_addr);
|
||||
if (WARN(!same_va_reg,
|
||||
"Already found a free region at the start of every zone, but now cannot find any region for zone base 0x%.16llx zone 0x%lx",
|
||||
(unsigned long long)same_va_zone_base_addr, same_va_zone_bits))
|
||||
return -ENOMEM;
|
||||
|
||||
if (same_va->nr_pages < jit_va_pages || kctx->same_va_end < jit_va_pages)
|
||||
/* kbase_region_tracker_has_allocs() in the caller has already ensured
|
||||
* that all of the zones have no allocs, so no need to check that again
|
||||
* on same_va_reg
|
||||
*/
|
||||
WARN_ON((!(same_va_reg->flags & KBASE_REG_FREE)) ||
|
||||
same_va_reg->nr_pages != same_va_zone->va_size_pages);
|
||||
|
||||
if (same_va_reg->nr_pages < jit_va_pages ||
|
||||
same_va_zone->va_size_pages < jit_va_pages)
|
||||
return -ENOMEM;
|
||||
|
||||
/* It's safe to adjust the same VA zone now */
|
||||
same_va->nr_pages -= jit_va_pages;
|
||||
kctx->same_va_end -= jit_va_pages;
|
||||
same_va_reg->nr_pages -= jit_va_pages;
|
||||
same_va_zone->va_size_pages -= jit_va_pages;
|
||||
jit_va_start = kbase_reg_zone_end_pfn(same_va_zone);
|
||||
|
||||
/*
|
||||
* Create a custom VA zone at the end of the VA for allocations which
|
||||
* JIT can use so it doesn't have to allocate VA from the kernel.
|
||||
*/
|
||||
custom_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
|
||||
kctx->same_va_end,
|
||||
jit_va_pages,
|
||||
KBASE_REG_ZONE_CUSTOM_VA);
|
||||
custom_va_reg =
|
||||
kbase_alloc_free_region(&kctx->reg_rbtree_custom, jit_va_start,
|
||||
jit_va_pages, KBASE_REG_ZONE_CUSTOM_VA);
|
||||
|
||||
/*
|
||||
* The context will be destroyed if we fail here so no point
|
||||
|
@ -798,6 +912,11 @@ static int kbase_region_tracker_init_jit_64(struct kbase_context *kctx,
|
|||
*/
|
||||
if (!custom_va_reg)
|
||||
return -ENOMEM;
|
||||
/* Since this is 64-bit, the custom zone will not have been
|
||||
* initialized, so initialize it now
|
||||
*/
|
||||
kbase_ctx_reg_zone_init(kctx, KBASE_REG_ZONE_CUSTOM_VA, jit_va_start,
|
||||
jit_va_pages);
|
||||
|
||||
kbase_region_tracker_insert(custom_va_reg);
|
||||
return 0;
|
||||
|
@ -814,6 +933,23 @@ int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
|
|||
|
||||
kbase_gpu_vm_lock(kctx);
|
||||
|
||||
/* Verify that a JIT_VA zone has not been created already. */
|
||||
if (kctx->jit_va) {
|
||||
err = -EINVAL;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
/* If in 64-bit, we always lookup the SAME_VA zone. To ensure it has no
|
||||
* allocs, we can ensure there are no allocs anywhere.
|
||||
*
|
||||
* This check is also useful in 32-bit, just to make sure init of the
|
||||
* zone is always done before any allocs.
|
||||
*/
|
||||
if (kbase_region_tracker_has_allocs(kctx)) {
|
||||
err = -ENOMEM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
|
||||
err = kbase_region_tracker_init_jit_64(kctx, jit_va_pages);
|
||||
|
@ -829,6 +965,7 @@ int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
|
|||
kctx->jit_va = true;
|
||||
}
|
||||
|
||||
exit_unlock:
|
||||
kbase_gpu_vm_unlock(kctx);
|
||||
|
||||
return err;
|
||||
|
@ -836,24 +973,33 @@ int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
|
|||
|
||||
int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages)
|
||||
{
|
||||
struct kbase_va_region *shrinking_va_reg;
|
||||
struct kbase_va_region *exec_va_reg;
|
||||
u64 exec_va_start, exec_va_base_addr;
|
||||
struct kbase_reg_zone *exec_va_zone;
|
||||
struct kbase_reg_zone *target_zone;
|
||||
struct kbase_va_region *target_reg;
|
||||
u64 target_zone_base_addr;
|
||||
unsigned long target_zone_bits;
|
||||
u64 exec_va_start;
|
||||
int err;
|
||||
|
||||
/* The EXEC_VA zone shall be created by making space at the end of the
|
||||
* address space. Firstly, verify that the number of EXEC_VA pages
|
||||
* requested by the client is reasonable and then make sure that it is
|
||||
* not greater than the address space itself before calculating the base
|
||||
* address of the new zone.
|
||||
/* The EXEC_VA zone shall be created by making space either:
|
||||
* - for 64-bit clients, at the end of the process's address space
|
||||
* - for 32-bit clients, in the CUSTOM zone
|
||||
*
|
||||
* Firstly, verify that the number of EXEC_VA pages requested by the
|
||||
* client is reasonable and then make sure that it is not greater than
|
||||
* the address space itself before calculating the base address of the
|
||||
* new zone.
|
||||
*/
|
||||
if (exec_va_pages == 0 || exec_va_pages > KBASE_REG_ZONE_EXEC_VA_MAX_PAGES)
|
||||
return -EINVAL;
|
||||
|
||||
kbase_gpu_vm_lock(kctx);
|
||||
|
||||
/* First verify that a JIT_VA zone has not been created already. */
|
||||
if (kctx->jit_va) {
|
||||
/* Verify that we've not already created a EXEC_VA zone, and that the
|
||||
* EXEC_VA zone must come before JIT's CUSTOM_VA.
|
||||
*/
|
||||
if (kbase_has_exec_va_zone_locked(kctx) || kctx->jit_va) {
|
||||
err = -EPERM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
@ -863,27 +1009,49 @@ int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages
|
|||
goto exit_unlock;
|
||||
}
|
||||
|
||||
exec_va_start = kctx->gpu_va_end - exec_va_pages;
|
||||
exec_va_base_addr = exec_va_start << PAGE_SHIFT;
|
||||
|
||||
shrinking_va_reg = kbase_region_tracker_find_region_enclosing_address(kctx,
|
||||
exec_va_base_addr);
|
||||
if (!shrinking_va_reg) {
|
||||
/* Verify no allocations have already been made */
|
||||
if (kbase_region_tracker_has_allocs(kctx)) {
|
||||
err = -ENOMEM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
/* Make sure that the EXEC_VA region is still uninitialized */
|
||||
if ((shrinking_va_reg->flags & KBASE_REG_ZONE_MASK) ==
|
||||
KBASE_REG_ZONE_EXEC_VA) {
|
||||
err = -EPERM;
|
||||
goto exit_unlock;
|
||||
#ifdef CONFIG_64BIT
|
||||
if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
|
||||
#endif
|
||||
/* 32-bit client: take from CUSTOM_VA zone */
|
||||
target_zone_bits = KBASE_REG_ZONE_CUSTOM_VA;
|
||||
#ifdef CONFIG_64BIT
|
||||
} else {
|
||||
/* 64-bit client: take from SAME_VA zone */
|
||||
target_zone_bits = KBASE_REG_ZONE_SAME_VA;
|
||||
}
|
||||
#endif
|
||||
target_zone = kbase_ctx_reg_zone_get(kctx, target_zone_bits);
|
||||
target_zone_base_addr = target_zone->base_pfn << PAGE_SHIFT;
|
||||
|
||||
if (shrinking_va_reg->nr_pages <= exec_va_pages) {
|
||||
target_reg = kbase_region_tracker_find_region_base_address(
|
||||
kctx, target_zone_base_addr);
|
||||
if (WARN(!target_reg,
|
||||
"Already found a free region at the start of every zone, but now cannot find any region for zone base 0x%.16llx zone 0x%lx",
|
||||
(unsigned long long)target_zone_base_addr, target_zone_bits)) {
|
||||
err = -ENOMEM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
/* kbase_region_tracker_has_allocs() above has already ensured that all
|
||||
* of the zones have no allocs, so no need to check that again on
|
||||
* target_reg
|
||||
*/
|
||||
WARN_ON((!(target_reg->flags & KBASE_REG_FREE)) ||
|
||||
target_reg->nr_pages != target_zone->va_size_pages);
|
||||
|
||||
if (target_reg->nr_pages <= exec_va_pages ||
|
||||
target_zone->va_size_pages <= exec_va_pages) {
|
||||
err = -ENOMEM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
/* Taken from the end of the target zone */
|
||||
exec_va_start = kbase_reg_zone_end_pfn(target_zone) - exec_va_pages;
|
||||
|
||||
exec_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_exec,
|
||||
exec_va_start,
|
||||
|
@ -893,13 +1061,17 @@ int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages
|
|||
err = -ENOMEM;
|
||||
goto exit_unlock;
|
||||
}
|
||||
/* Update EXEC_VA zone
|
||||
*
|
||||
* not using kbase_ctx_reg_zone_init() - it was already initialized
|
||||
*/
|
||||
exec_va_zone = kbase_ctx_reg_zone_get(kctx, KBASE_REG_ZONE_EXEC_VA);
|
||||
exec_va_zone->base_pfn = exec_va_start;
|
||||
exec_va_zone->va_size_pages = exec_va_pages;
|
||||
|
||||
shrinking_va_reg->nr_pages -= exec_va_pages;
|
||||
#ifdef CONFIG_64BIT
|
||||
if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
|
||||
kctx->same_va_end -= exec_va_pages;
|
||||
#endif
|
||||
kctx->exec_va_start = exec_va_start;
|
||||
/* Update target zone and corresponding region */
|
||||
target_reg->nr_pages -= exec_va_pages;
|
||||
target_zone->va_size_pages -= exec_va_pages;
|
||||
|
||||
kbase_region_tracker_insert(exec_va_reg);
|
||||
err = 0;
|
||||
|
@ -1164,7 +1336,9 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64
|
|||
if (err)
|
||||
goto bad_insert;
|
||||
|
||||
kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc);
|
||||
/* Note: mapping count is tracked at alias
|
||||
* creation time
|
||||
*/
|
||||
} else {
|
||||
err = kbase_mmu_insert_single_page(kctx,
|
||||
reg->start_pfn + i * stride,
|
||||
|
@ -1204,7 +1378,6 @@ bad_insert:
|
|||
reg->start_pfn + (i * stride),
|
||||
reg->gpu_alloc->imported.alias.aliased[i].length,
|
||||
kctx->as_nr);
|
||||
kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1226,14 +1399,11 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
|
|||
return 0;
|
||||
|
||||
if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
|
||||
size_t i;
|
||||
|
||||
err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
|
||||
reg->start_pfn, reg->nr_pages, kctx->as_nr);
|
||||
KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
|
||||
for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
|
||||
if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
|
||||
kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
|
||||
/* We mark the source allocs as unmapped from the GPU when
|
||||
* putting reg's allocs
|
||||
*/
|
||||
} else if (reg->gpu_alloc) {
|
||||
err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
|
||||
reg->start_pfn, kbase_reg_current_backed_size(reg),
|
||||
|
@ -2360,8 +2530,10 @@ void kbase_mem_kref_free(struct kref *kref)
|
|||
aliased = alloc->imported.alias.aliased;
|
||||
if (aliased) {
|
||||
for (i = 0; i < alloc->imported.alias.nents; i++)
|
||||
if (aliased[i].alloc)
|
||||
if (aliased[i].alloc) {
|
||||
kbase_mem_phy_alloc_gpu_unmapped(aliased[i].alloc);
|
||||
kbase_mem_phy_alloc_put(aliased[i].alloc);
|
||||
}
|
||||
vfree(aliased);
|
||||
}
|
||||
break;
|
||||
|
@ -3351,17 +3523,6 @@ void kbase_jit_term(struct kbase_context *kctx)
|
|||
cancel_work_sync(&kctx->jit_work);
|
||||
}
|
||||
|
||||
bool kbase_has_exec_va_zone(struct kbase_context *kctx)
|
||||
{
|
||||
bool has_exec_va_zone;
|
||||
|
||||
kbase_gpu_vm_lock(kctx);
|
||||
has_exec_va_zone = (kctx->exec_va_start != U64_MAX);
|
||||
kbase_gpu_vm_unlock(kctx);
|
||||
|
||||
return has_exec_va_zone;
|
||||
}
|
||||
|
||||
static int kbase_jd_user_buf_map(struct kbase_context *kctx,
|
||||
struct kbase_va_region *reg)
|
||||
{
|
||||
|
|
|
@ -277,8 +277,13 @@ struct kbase_va_region {
|
|||
#define KBASE_REG_SHARE_BOTH (1ul << 10)
|
||||
|
||||
/* Space for 4 different zones */
|
||||
#define KBASE_REG_ZONE_MASK (3ul << 11)
|
||||
#define KBASE_REG_ZONE(x) (((x) & 3) << 11)
|
||||
#define KBASE_REG_ZONE_MASK ((KBASE_REG_ZONE_MAX - 1ul) << 11)
|
||||
#define KBASE_REG_ZONE(x) (((x) & (KBASE_REG_ZONE_MAX - 1ul)) << 11)
|
||||
#define KBASE_REG_ZONE_IDX(x) (((x) & KBASE_REG_ZONE_MASK) >> 11)
|
||||
|
||||
#if ((KBASE_REG_ZONE_MAX - 1) & 0x3) != (KBASE_REG_ZONE_MAX - 1)
|
||||
#error KBASE_REG_ZONE_MAX too large for allocation of KBASE_REG_<...> bits
|
||||
#endif
|
||||
|
||||
/* GPU read access */
|
||||
#define KBASE_REG_GPU_RD (1ul<<13)
|
||||
|
@ -1572,4 +1577,76 @@ static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc);

/**
 * kbase_ctx_reg_zone_end_pfn - return the end Page Frame Number of @zone
 * @zone: zone to query
 *
 * Return: The end of the zone corresponding to @zone
 */
static inline u64 kbase_reg_zone_end_pfn(struct kbase_reg_zone *zone)
{
	return zone->base_pfn + zone->va_size_pages;
}

/**
 * kbase_ctx_reg_zone_init - initialize a zone in @kctx
 * @kctx: Pointer to kbase context
 * @zone_bits: A KBASE_REG_ZONE_<...> to initialize
 * @base_pfn: Page Frame Number in GPU virtual address space for the start of
 *            the Zone
 * @va_size_pages: Size of the Zone in pages
 */
static inline void kbase_ctx_reg_zone_init(struct kbase_context *kctx,
					   unsigned long zone_bits,
					   u64 base_pfn, u64 va_size_pages)
{
	struct kbase_reg_zone *zone;

	lockdep_assert_held(&kctx->reg_lock);
	WARN_ON((zone_bits & KBASE_REG_ZONE_MASK) != zone_bits);

	zone = &kctx->reg_zone[KBASE_REG_ZONE_IDX(zone_bits)];
	*zone = (struct kbase_reg_zone){
		.base_pfn = base_pfn, .va_size_pages = va_size_pages,
	};
}

/**
 * kbase_ctx_reg_zone_get_nolock - get a zone from @kctx where the caller does
 *                                 not have @kctx 's region lock
 * @kctx: Pointer to kbase context
 * @zone_bits: A KBASE_REG_ZONE_<...> to retrieve
 *
 * This should only be used in performance-critical paths where the code is
 * resilient to a race with the zone changing.
 *
 * Return: The zone corresponding to @zone_bits
 */
static inline struct kbase_reg_zone *
kbase_ctx_reg_zone_get_nolock(struct kbase_context *kctx,
			      unsigned long zone_bits)
{
	WARN_ON((zone_bits & KBASE_REG_ZONE_MASK) != zone_bits);

	return &kctx->reg_zone[KBASE_REG_ZONE_IDX(zone_bits)];
}

/**
 * kbase_ctx_reg_zone_get - get a zone from @kctx
 * @kctx: Pointer to kbase context
 * @zone_bits: A KBASE_REG_ZONE_<...> to retrieve
 *
 * The get is not refcounted - there is no corresponding 'put' operation
 *
 * Return: The zone corresponding to @zone_bits
 */
static inline struct kbase_reg_zone *
kbase_ctx_reg_zone_get(struct kbase_context *kctx, unsigned long zone_bits)
{
	lockdep_assert_held(&kctx->reg_lock);
	WARN_ON((zone_bits & KBASE_REG_ZONE_MASK) != zone_bits);

	return &kctx->reg_zone[KBASE_REG_ZONE_IDX(zone_bits)];
}

#endif /* _KBASE_MEM_H_ */
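These helpers replace the old per-zone fields (same_va_end, exec_va_start) with an array indexed by KBASE_REG_ZONE_IDX(). A self-contained userspace model of the bookkeeping, with locking omitted; zone index 0 corresponds to SAME_VA per the commit, everything else is assumed for illustration:

#include <assert.h>
#include <stdint.h>

#define ZONE_MAX 4u

struct reg_zone { uint64_t base_pfn; uint64_t va_size_pages; };

static struct reg_zone zones[ZONE_MAX];

/* Record where a zone starts and how many pages of GPU VA it covers. */
static void zone_init(unsigned idx, uint64_t base_pfn, uint64_t va_size_pages)
{
	zones[idx] = (struct reg_zone){ .base_pfn = base_pfn,
					.va_size_pages = va_size_pages };
}

/* End PFN is simply start plus size, exactly as kbase_reg_zone_end_pfn() does. */
static uint64_t zone_end_pfn(const struct reg_zone *z)
{
	return z->base_pfn + z->va_size_pages;
}

int main(void)
{
	zone_init(0 /* SAME_VA */, 1u << 12, 1u << 20);
	assert(zone_end_pfn(&zones[0]) == (1u << 12) + (1u << 20));
	return 0;
}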
@ -811,7 +811,12 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
|
|||
prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED;
|
||||
new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED;
|
||||
if (prev_needed != new_needed) {
|
||||
/* Aliased allocations can't be made ephemeral */
|
||||
/* Aliased allocations can't be shrunk as the code doesn't
|
||||
* support looking up:
|
||||
* - all physical pages assigned to different GPU VAs
|
||||
* - CPU mappings for the physical pages at different vm_pgoff
|
||||
* (==GPU VA) locations.
|
||||
*/
|
||||
if (atomic_read(®->cpu_alloc->gpu_mappings) > 1)
|
||||
goto out_unlock;
|
||||
|
||||
|
@ -1006,6 +1011,7 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
|
|||
u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx->kbdev);
|
||||
struct kbase_alloc_import_user_buf *user_buf;
|
||||
struct page **pages = NULL;
|
||||
int write;
|
||||
|
||||
if ((address & (cache_line_alignment - 1)) != 0 ||
|
||||
(size & (cache_line_alignment - 1)) != 0) {
|
||||
|
@ -1114,15 +1120,17 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
|
|||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
|
||||
write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
|
||||
faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
|
||||
reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
|
||||
write, 0, pages, NULL);
|
||||
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
|
||||
faulted_pages = get_user_pages(address, *va_pages,
|
||||
reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
|
||||
write, 0, pages, NULL);
|
||||
#else
|
||||
faulted_pages = get_user_pages(address, *va_pages,
|
||||
reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
|
||||
write ? FOLL_WRITE : 0,
|
||||
pages, NULL);
|
||||
#endif
|
||||
|
||||
|
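The #if ladder above selects a get_user_pages() signature by kernel version; KERNEL_VERSION() packs major/minor/patch into a single comparable integer. A small standalone illustration of that comparison (the build number is an example, not taken from this tree):

#include <assert.h>

#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
	int linux_version_code = KERNEL_VERSION(4, 14, 113); /* example build */

	assert(linux_version_code >= KERNEL_VERSION(4, 9, 0));
	assert(linux_version_code < KERNEL_VERSION(5, 2, 0));
	/* Such a build would take the FOLL_WRITE branch of the ladder above. */
	return 0;
}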
@ -1294,6 +1302,15 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
|
|||
goto bad_handle; /* Not found/already free */
|
||||
if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
|
||||
goto bad_handle; /* Ephemeral region */
|
||||
if (aliasing_reg->flags & KBASE_REG_JIT)
|
||||
goto bad_handle; /* JIT regions can't be
|
||||
* aliased. NO_USER_FREE flag
|
||||
* covers the entire lifetime
|
||||
* of JIT regions. The other
|
||||
* types of regions covered
|
||||
* by this flag also shall
|
||||
* not be aliased.
|
||||
*/
|
||||
if (!(aliasing_reg->flags & KBASE_REG_GPU_CACHED))
|
||||
goto bad_handle; /* GPU uncached memory */
|
||||
if (!aliasing_reg->gpu_alloc)
|
||||
|
@ -1323,6 +1340,18 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
|
|||
reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
|
||||
reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
|
||||
reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset;
|
||||
|
||||
/* Ensure the underlying alloc is marked as being
|
||||
* mapped at >1 different GPU VA immediately, even
|
||||
* though mapping might not happen until later.
|
||||
*
|
||||
* Otherwise, we would (incorrectly) allow shrinking of
|
||||
* the source region (aliasing_reg) and so freeing the
|
||||
* physical pages (without freeing the entire alloc)
|
||||
* whilst we still hold an implicit reference on those
|
||||
* physical pages.
|
||||
*/
|
||||
kbase_mem_phy_alloc_gpu_mapped(alloc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1366,6 +1395,10 @@ no_cookie:
|
|||
#endif
|
||||
no_mmap:
|
||||
bad_handle:
|
||||
/* Marking the source allocs as not being mapped on the GPU and putting
|
||||
* them is handled by putting reg's allocs, so no rollback of those
|
||||
* actions is done here.
|
||||
*/
|
||||
kbase_gpu_vm_unlock(kctx);
|
||||
no_aliased_array:
|
||||
invalid_flags:
|
||||
|
@ -1604,7 +1637,15 @@ int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
|
|||
if (new_pages > reg->nr_pages)
|
||||
goto out_unlock;
|
||||
|
||||
/* can't be mapped more than once on the GPU */
|
||||
/* Can't shrink when physical pages are mapped to different GPU
|
||||
* VAs. The code doesn't support looking up:
|
||||
* - all physical pages assigned to different GPU VAs
|
||||
* - CPU mappings for the physical pages at different vm_pgoff
|
||||
* (==GPU VA) locations.
|
||||
*
|
||||
* Note that for Native allocs mapped at multiple GPU VAs, growth of
|
||||
* such allocs is not a supported use-case.
|
||||
*/
|
||||
if (atomic_read(®->gpu_alloc->gpu_mappings) > 1)
|
||||
goto out_unlock;
|
||||
/* can't grow regions which are ephemeral */
|
||||
|
|
|
@ -271,6 +271,26 @@ unsigned long kbase_get_unmapped_area(struct file *filp,
bool is_same_4gb_page = false;
unsigned long ret;

/* the 'nolock' form is used here:
 * - the base_pfn of the SAME_VA zone does not change
 * - in normal use, va_size_pages is constant once the first allocation
 *   begins
 *
 * However, in abnormal use this function could be processing whilst
 * another new zone is being setup in a different thread (e.g. to
 * borrow part of the SAME_VA zone). In the worst case, this path may
 * witness a higher SAME_VA end_pfn than the code setting up the new
 * zone.
 *
 * This is safe because once we reach the main allocation functions,
 * we'll see the updated SAME_VA end_pfn and will determine that there
 * is no free region at the address found originally by too large a
 * same_va_end_addr here, and will fail the allocation gracefully.
 */
struct kbase_reg_zone *zone =
	kbase_ctx_reg_zone_get_nolock(kctx, KBASE_REG_ZONE_SAME_VA);
u64 same_va_end_addr = kbase_reg_zone_end_pfn(zone) << PAGE_SHIFT;

/* err on fixed address */
if ((flags & MAP_FIXED) || addr)
	return -EINVAL;
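same_va_end_addr above is a page frame number shifted into a byte address. A tiny standalone check of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); the zone numbers are examples only:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t base_pfn = 0x1000;        /* zone starts at 16 MiB        */
	uint64_t va_size_pages = 0x10000;  /* 64 Ki pages = 256 MiB of VA  */
	uint64_t end_pfn = base_pfn + va_size_pages;
	uint64_t same_va_end_addr = end_pfn << PAGE_SHIFT;

	assert(same_va_end_addr == 0x11000000ull); /* byte address of the zone end */
	return 0;
}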
@ -281,9 +301,8 @@ unsigned long kbase_get_unmapped_area(struct file *filp,
|
|||
return -ENOMEM;
|
||||
|
||||
if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
|
||||
|
||||
high_limit = min_t(unsigned long, mm->mmap_base,
|
||||
(kctx->same_va_end << PAGE_SHIFT));
|
||||
high_limit =
|
||||
min_t(unsigned long, mm->mmap_base, same_va_end_addr);
|
||||
|
||||
/* If there's enough (> 33 bits) of GPU VA space, align
|
||||
* to 2MB boundaries.
|
||||
|
@ -353,11 +372,10 @@ unsigned long kbase_get_unmapped_area(struct file *filp,
|
|||
is_same_4gb_page);
|
||||
|
||||
if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base &&
|
||||
high_limit < (kctx->same_va_end << PAGE_SHIFT)) {
|
||||
high_limit < same_va_end_addr) {
|
||||
/* Retry above mmap_base */
|
||||
info.low_limit = mm->mmap_base;
|
||||
info.high_limit = min_t(u64, TASK_SIZE,
|
||||
(kctx->same_va_end << PAGE_SHIFT));
|
||||
info.high_limit = min_t(u64, TASK_SIZE, same_va_end_addr);
|
||||
|
||||
ret = kbase_unmapped_area_topdown(&info, is_shader_code,
|
||||
is_same_4gb_page);
|
||||
|
|
|
@ -139,9 +139,14 @@ typedef u32 base_mem_alloc_flags;
|
|||
*/
|
||||
#define BASE_MEM_RESERVED_BIT_5 ((base_mem_alloc_flags)1 << 5)
|
||||
#define BASE_MEM_RESERVED_BIT_6 ((base_mem_alloc_flags)1 << 6)
|
||||
#define BASE_MEM_RESERVED_BIT_7 ((base_mem_alloc_flags)1 << 7)
|
||||
#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8)
|
||||
|
||||
/*
|
||||
* Userspace is not allowed to free this memory.
|
||||
* Flag is only allowed on allocations originating from kbase.
|
||||
*/
|
||||
#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7)
|
||||
|
||||
/* Grow backing store on GPU Page Fault
|
||||
*/
|
||||
#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9)
|
||||
|
@ -227,7 +232,7 @@ typedef u32 base_mem_alloc_flags;
|
|||
*/
|
||||
#define BASE_MEM_FLAGS_RESERVED \
|
||||
(BASE_MEM_RESERVED_BIT_5 | BASE_MEM_RESERVED_BIT_6 | \
|
||||
BASE_MEM_RESERVED_BIT_7 | BASE_MEM_RESERVED_BIT_8 | \
|
||||
BASE_MEM_RESERVED_BIT_8 | \
|
||||
BASE_MEM_RESERVED_BIT_19)
|
||||
|
||||
/* A mask of all the flags that can be returned via the base_mem_get_flags()
|
||||
|
|
|
@ -979,7 +979,9 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64
|
|||
if (err)
|
||||
goto bad_insert;
|
||||
|
||||
kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc);
|
||||
/* Note: mapping count is tracked at alias
|
||||
* creation time
|
||||
*/
|
||||
} else {
|
||||
err = kbase_mmu_insert_single_page(kctx,
|
||||
reg->start_pfn + i * stride,
|
||||
|
@ -1012,7 +1014,6 @@ bad_insert:
|
|||
while (i--)
|
||||
if (reg->gpu_alloc->imported.alias.aliased[i].alloc) {
|
||||
kbase_mmu_teardown_pages(kctx, reg->start_pfn + (i * stride), reg->gpu_alloc->imported.alias.aliased[i].length);
|
||||
kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1034,13 +1035,10 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
|
|||
return 0;
|
||||
|
||||
if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
|
||||
size_t i;
|
||||
|
||||
err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_pages);
|
||||
KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
|
||||
for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
|
||||
if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
|
||||
kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
|
||||
/* We mark the source allocs as unmapped from the GPU when
|
||||
* putting reg's allocs
|
||||
*/
|
||||
} else {
|
||||
err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, kbase_reg_current_backed_size(reg));
|
||||
/* MALI_SEC_INTEGRATION */
|
||||
|
@ -1346,8 +1344,8 @@ int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *re
|
|||
KBASE_DEBUG_ASSERT(NULL != reg);
|
||||
lockdep_assert_held(&kctx->reg_lock);
|
||||
|
||||
if (reg->flags & KBASE_REG_JIT) {
|
||||
dev_warn(reg->kctx->kbdev->dev, "Attempt to free JIT memory!\n");
|
||||
if (reg->flags & KBASE_REG_NO_USER_FREE) {
|
||||
dev_warn(reg->kctx->kbdev->dev, "Attempt to free GPU memory whose freeing by user space is forbidden!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -1509,6 +1507,9 @@ int kbase_update_region_flags(struct kbase_context *kctx,
|
|||
KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT);
|
||||
}
|
||||
|
||||
if (flags & BASEP_MEM_NO_USER_FREE)
|
||||
reg->flags |= KBASE_REG_NO_USER_FREE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2115,8 +2116,10 @@ void kbase_mem_kref_free(struct kref *kref)
|
|||
aliased = alloc->imported.alias.aliased;
|
||||
if (aliased) {
|
||||
for (i = 0; i < alloc->imported.alias.nents; i++)
|
||||
if (aliased[i].alloc)
|
||||
if (aliased[i].alloc) {
|
||||
kbase_mem_phy_alloc_gpu_unmapped(aliased[i].alloc);
|
||||
kbase_mem_phy_alloc_put(aliased[i].alloc);
|
||||
}
|
||||
vfree(aliased);
|
||||
}
|
||||
break;
|
||||
|
@ -2585,7 +2588,7 @@ static void kbase_jit_destroy_worker(struct work_struct *work)
|
|||
mutex_unlock(&kctx->jit_evict_lock);
|
||||
|
||||
kbase_gpu_vm_lock(kctx);
|
||||
reg->flags &= ~KBASE_REG_JIT;
|
||||
reg->flags &= ~KBASE_REG_NO_USER_FREE;
|
||||
kbase_mem_free_region(kctx, reg);
|
||||
kbase_gpu_vm_unlock(kctx);
|
||||
} while (1);
|
||||
|
@ -2894,7 +2897,7 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
|
|||
/* No suitable JIT allocation was found so create a new one */
|
||||
u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
|
||||
BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
|
||||
BASE_MEM_COHERENT_LOCAL;
|
||||
BASE_MEM_COHERENT_LOCAL | BASEP_MEM_NO_USER_FREE;
|
||||
u64 gpu_addr;
|
||||
|
||||
mutex_unlock(&kctx->jit_evict_lock);
|
||||
|
@ -2907,8 +2910,6 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
|
|||
if (!reg)
|
||||
goto out_unlocked;
|
||||
|
||||
reg->flags |= KBASE_REG_JIT;
|
||||
|
||||
mutex_lock(&kctx->jit_evict_lock);
|
||||
list_add(®->jit_node, &kctx->jit_active_head);
|
||||
mutex_unlock(&kctx->jit_evict_lock);
|
||||
|
@ -3022,7 +3023,7 @@ bool kbase_jit_evict(struct kbase_context *kctx)
|
|||
mutex_unlock(&kctx->jit_evict_lock);
|
||||
|
||||
if (reg) {
|
||||
reg->flags &= ~KBASE_REG_JIT;
|
||||
reg->flags &= ~KBASE_REG_NO_USER_FREE;
|
||||
kbase_mem_free_region(kctx, reg);
|
||||
}
|
||||
|
||||
|
@ -3044,7 +3045,7 @@ void kbase_jit_term(struct kbase_context *kctx)
|
|||
list_del(&walker->jit_node);
|
||||
list_del_init(&walker->gpu_alloc->evict_node);
|
||||
mutex_unlock(&kctx->jit_evict_lock);
|
||||
walker->flags &= ~KBASE_REG_JIT;
|
||||
walker->flags &= ~KBASE_REG_NO_USER_FREE;
|
||||
kbase_mem_free_region(kctx, walker);
|
||||
mutex_lock(&kctx->jit_evict_lock);
|
||||
}
|
||||
|
@ -3056,7 +3057,7 @@ void kbase_jit_term(struct kbase_context *kctx)
|
|||
list_del(&walker->jit_node);
|
||||
list_del_init(&walker->gpu_alloc->evict_node);
|
||||
mutex_unlock(&kctx->jit_evict_lock);
|
||||
walker->flags &= ~KBASE_REG_JIT;
|
||||
walker->flags &= ~KBASE_REG_NO_USER_FREE;
|
||||
kbase_mem_free_region(kctx, walker);
|
||||
mutex_lock(&kctx->jit_evict_lock);
|
||||
}
|
||||
|
|
|
@ -289,7 +289,7 @@ struct kbase_va_region {
|
|||
#define KBASE_REG_TILER_ALIGN_TOP (1ul << 23)
|
||||
|
||||
/* Memory is handled by JIT - user space should not be able to free it */
|
||||
#define KBASE_REG_JIT (1ul << 24)
|
||||
#define KBASE_REG_NO_USER_FREE (1ul << 24)
|
||||
|
||||
#define KBASE_REG_ZONE_SAME_VA KBASE_REG_ZONE(0)
|
||||
|
||||
|
|
|
@ -603,7 +603,12 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
|
|||
prev_needed = (KBASE_REG_DONT_NEED & reg->flags) == KBASE_REG_DONT_NEED;
|
||||
new_needed = (BASE_MEM_DONT_NEED & flags) == BASE_MEM_DONT_NEED;
|
||||
if (prev_needed != new_needed) {
|
||||
/* Aliased allocations can't be made ephemeral */
|
||||
/* Aliased allocations can't be shrunk as the code doesn't
|
||||
* support looking up:
|
||||
* - all physical pages assigned to different GPU VAs
|
||||
* - CPU mappings for the physical pages at different vm_pgoff
|
||||
* (==GPU VA) locations.
|
||||
*/
|
||||
if (atomic_read(®->cpu_alloc->gpu_mappings) > 1)
|
||||
goto out_unlock;
|
||||
|
||||
|
@ -791,6 +796,7 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
|
|||
u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx);
|
||||
struct kbase_alloc_import_user_buf *user_buf;
|
||||
struct page **pages = NULL;
|
||||
int write;
|
||||
|
||||
if ((address & (cache_line_alignment - 1)) != 0 ||
|
||||
(size & (cache_line_alignment - 1)) != 0) {
|
||||
|
@ -888,15 +894,17 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
|
|||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
|
||||
write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
|
||||
faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
|
||||
reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
|
||||
write, 0, pages, NULL);
|
||||
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
|
||||
faulted_pages = get_user_pages(address, *va_pages,
|
||||
reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
|
||||
write, 0, pages, NULL);
|
||||
#else
|
||||
faulted_pages = get_user_pages(address, *va_pages,
|
||||
reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
|
||||
write ? FOLL_WRITE : 0,
|
||||
pages, NULL);
|
||||
#endif
|
||||
|
||||
|
@ -1068,6 +1076,15 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
|
|||
goto bad_handle; /* Free region */
|
||||
if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
|
||||
goto bad_handle; /* Ephemeral region */
|
||||
if (aliasing_reg->flags & KBASE_REG_NO_USER_FREE)
|
||||
goto bad_handle; /* JIT regions can't be
|
||||
* aliased. NO_USER_FREE flag
|
||||
* covers the entire lifetime
|
||||
* of JIT regions. The other
|
||||
* types of regions covered
|
||||
* by this flag also shall
|
||||
* not be aliased.
|
||||
*/
|
||||
if (!aliasing_reg->gpu_alloc)
|
||||
goto bad_handle; /* No alloc */
|
||||
if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
|
||||
|
@ -1095,6 +1112,18 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
|
|||
reg->gpu_alloc->imported.alias.aliased[i].alloc = kbase_mem_phy_alloc_get(alloc);
|
||||
reg->gpu_alloc->imported.alias.aliased[i].length = ai[i].length;
|
||||
reg->gpu_alloc->imported.alias.aliased[i].offset = ai[i].offset;
|
||||
|
||||
/* Ensure the underlying alloc is marked as being
|
||||
* mapped at >1 different GPU VA immediately, even
|
||||
* though mapping might not happen until later.
|
||||
*
|
||||
* Otherwise, we would (incorrectly) allow shrinking of
|
||||
* the source region (aliasing_reg) and so freeing the
|
||||
* physical pages (without freeing the entire alloc)
|
||||
* whilst we still hold an implicit reference on those
|
||||
* physical pages.
|
||||
*/
|
||||
kbase_mem_phy_alloc_gpu_mapped(alloc);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1138,6 +1167,10 @@ no_cookie:
|
|||
#endif
|
||||
no_mmap:
|
||||
bad_handle:
|
||||
/* Marking the source allocs as not being mapped on the GPU and putting
|
||||
* them is handled by putting reg's allocs, so no rollback of those
|
||||
* actions is done here.
|
||||
*/
|
||||
kbase_gpu_vm_unlock(kctx);
|
||||
no_aliased_array:
|
||||
invalid_flags:
|
||||
|
@ -1371,7 +1404,15 @@ int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
|
|||
if (new_pages > reg->nr_pages)
|
||||
goto out_unlock;
|
||||
|
||||
/* can't be mapped more than once on the GPU */
|
||||
/* Can't shrink when physical pages are mapped to different GPU
|
||||
* VAs. The code doesn't support looking up:
|
||||
* - all physical pages assigned to different GPU VAs
|
||||
* - CPU mappings for the physical pages at different vm_pgoff
|
||||
* (==GPU VA) locations.
|
||||
*
|
||||
* Note that for Native allocs mapped at multiple GPU VAs, growth of
|
||||
* such allocs is not a supported use-case.
|
||||
*/
|
||||
if (atomic_read(®->gpu_alloc->gpu_mappings) > 1)
|
||||
goto out_unlock;
|
||||
/* can't grow regions which are ephemeral */
|
||||
|
|
|
@ -1179,7 +1179,7 @@ typedef struct dhd_pub {
|
|||
void *flowid_lock; /* per os lock for flowid info protection */
|
||||
void *flowring_list_lock; /* per os lock for flowring list protection */
|
||||
uint8 max_multi_client_flow_rings;
|
||||
uint8 multi_client_flow_rings;
|
||||
osl_atomic_t multi_client_flow_rings;
|
||||
uint32 num_flow_rings;
|
||||
cumm_ctr_t cumm_ctr; /* cumm queue length placeholder */
|
||||
cumm_ctr_t l2cumm_ctr; /* level 2 cumm queue length placeholder */
|
||||
|
|
|
@ -393,7 +393,8 @@ dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_flow_rings)
|
|||
bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
|
||||
|
||||
dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
|
||||
dhdp->multi_client_flow_rings = 0U;
|
||||
|
||||
OSL_ATOMIC_INIT(dhdp->osh, &dhdp->multi_client_flow_rings);
|
||||
|
||||
#ifdef DHD_LOSSLESS_ROAMING
|
||||
dhdp->dequeue_prec_map = ALLPRIO;
|
||||
|
@ -495,7 +496,8 @@ void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
|
|||
bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
|
||||
|
||||
dhdp->max_multi_client_flow_rings = 0U;
|
||||
dhdp->multi_client_flow_rings = 0U;
|
||||
|
||||
OSL_ATOMIC_INIT(dhdp->osh, &dhdp->multi_client_flow_rings);
|
||||
|
||||
lock = dhdp->flowid_lock;
|
||||
dhdp->flowid_lock = NULL;
|
||||
|
@ -735,9 +737,11 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
|
|||
/* Abort Flowring creation if multi client flowrings crossed the threshold */
|
||||
#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
|
||||
if (if_role_multi_client &&
|
||||
(dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) {
|
||||
(OSL_ATOMIC_READ(dhdp->osh, &dhdp->multi_client_flow_rings) >=
|
||||
dhdp->max_multi_client_flow_rings)) {
|
||||
DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
|
||||
__FUNCTION__, dhdp->multi_client_flow_rings,
|
||||
__FUNCTION__,
|
||||
OSL_ATOMIC_READ(dhdp->osh, &dhdp->multi_client_flow_rings),
|
||||
dhdp->max_multi_client_flow_rings));
|
||||
return BCME_ERROR;
|
||||
}
|
||||
|
@ -763,7 +767,7 @@ dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,

/* Only after flowid alloc, increment multi_client_flow_rings */
if (if_role_multi_client) {
	dhdp->multi_client_flow_rings++;
	OSL_ATOMIC_INC(dhdp->osh, &dhdp->multi_client_flow_rings);
}

/* register this flowid in dhd_pub */

@ -980,7 +984,11 @@ dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)

/* Decrement multi_client_flow_rings */
if (if_role_multi_client) {
	dhdp->multi_client_flow_rings--;
	if (OSL_ATOMIC_READ(dhdp->osh,
		&dhdp->multi_client_flow_rings)) {
		OSL_ATOMIC_DEC(dhdp->osh,
			&dhdp->multi_client_flow_rings);
	}
}

/* deregister flowid from dhd_pub. */
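The conversion above makes multi_client_flow_rings an atomic counter that is only incremented while below the limit and only decremented while non-zero. OSL_ATOMIC_* is Broadcom's OS-abstraction wrapper; the portable C11 sketch below only illustrates the same bounded-counter idea and is not the driver's implementation:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint rings;

/* Increment only if the current value is below 'limit'. */
static bool ring_get(unsigned limit)
{
	unsigned cur = atomic_load(&rings);

	while (cur < limit) {
		if (atomic_compare_exchange_weak(&rings, &cur, cur + 1))
			return true;
	}
	return false;
}

/* Decrement only while non-zero, mirroring the guarded OSL_ATOMIC_DEC above. */
static void ring_put(void)
{
	unsigned cur = atomic_load(&rings);

	while (cur > 0) {
		if (atomic_compare_exchange_weak(&rings, &cur, cur - 1))
			return;
	}
}

int main(void)
{
	assert(ring_get(1));
	assert(!ring_get(1));   /* limit reached, creation refused      */
	ring_put();
	ring_put();             /* extra put ignored, never goes negative */
	assert(atomic_load(&rings) == 0);
	return 0;
}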
@ -1030,6 +1038,53 @@ dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
|
|||
}
|
||||
}
|
||||
|
||||
void
|
||||
dhd_update_multicilent_flow_rings(dhd_pub_t *dhdp, uint8 ifindex, bool increment)
|
||||
{
|
||||
uint32 id;
|
||||
flow_ring_table_t *flow_ring_table;
|
||||
uint32 max_h2d_rings;
|
||||
|
||||
DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));
|
||||
|
||||
ASSERT(ifindex < DHD_MAX_IFS);
|
||||
if (ifindex >= DHD_MAX_IFS)
|
||||
return;
|
||||
|
||||
if (!dhdp->flow_ring_table)
|
||||
return;
|
||||
|
||||
max_h2d_rings = dhd_bus_max_h2d_queues(dhdp->bus);
|
||||
flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
|
||||
for (id = 0; id < max_h2d_rings; id++) {
|
||||
if (flow_ring_table[id].active &&
|
||||
(flow_ring_table[id].flow_info.ifindex == ifindex) &&
|
||||
(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
|
||||
if (increment) {
|
||||
if (OSL_ATOMIC_READ(dhdp->osh, &dhdp->multi_client_flow_rings) <
|
||||
dhdp->max_multi_client_flow_rings) {
|
||||
OSL_ATOMIC_INC(dhdp->osh, &dhdp->multi_client_flow_rings);
|
||||
} else {
|
||||
DHD_ERROR(("%s: multi_client_flow_rings:%u"
|
||||
" reached max:%d\n", __FUNCTION__,
|
||||
OSL_ATOMIC_READ(dhdp->osh,
|
||||
&dhdp->multi_client_flow_rings),
|
||||
dhdp->max_multi_client_flow_rings));
|
||||
}
|
||||
} else {
|
||||
if (OSL_ATOMIC_READ(dhdp->osh, &dhdp->multi_client_flow_rings)) {
|
||||
OSL_ATOMIC_DEC(dhdp->osh, &dhdp->multi_client_flow_rings);
|
||||
} else {
|
||||
DHD_ERROR(("%s: multi_client_flow_rings:%u"
|
||||
" reached ZERO\n", __FUNCTION__,
|
||||
OSL_ATOMIC_READ(dhdp->osh,
|
||||
&dhdp->multi_client_flow_rings)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
|
||||
{
|
||||
|
@ -1119,8 +1174,16 @@ dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
|
|||
if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
|
||||
|
||||
if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {
|
||||
|
||||
DHD_ERROR(("%s: ifindex:%d previous role:%d new role:%d\n",
|
||||
__FUNCTION__, ifindex, if_flow_lkup[ifindex].role, role));
|
||||
|
||||
if_flow_lkup[ifindex].role = role;
|
||||
#ifdef PCIE_FULL_DONGLE
|
||||
if (op == WLC_E_IF_CHANGE) {
|
||||
bool increment = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);
|
||||
dhd_update_multicilent_flow_rings(dhdp, ifindex, increment);
|
||||
}
|
||||
#endif /* PCIE_FULL_DONGLE */
|
||||
|
||||
if (role == WLC_E_IF_ROLE_WDS) {
|
||||
/**
|
||||
|
|
|
@ -269,6 +269,8 @@ extern int dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifidex, uint16 flowid
|
|||
extern void dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid);
|
||||
|
||||
extern void dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex);
|
||||
extern void dhd_update_multicilent_flow_rings(dhd_pub_t *dhdp, uint8 ifindex,
|
||||
bool increment);
|
||||
extern void dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex);
|
||||
|
||||
extern void dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex,
|
||||
|
|
|
@ -10599,8 +10599,9 @@ dhd_prot_debug_info_print(dhd_pub_t *dhd)
|
|||
DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
|
||||
__FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
|
||||
#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
|
||||
DHD_ERROR(("%s: multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
|
||||
__FUNCTION__, dhd->multi_client_flow_rings, dhd->max_multi_client_flow_rings));
|
||||
DHD_ERROR(("%s: multi_client_flow_rings:%u max_multi_client_flow_rings:%d\n",
|
||||
__FUNCTION__, OSL_ATOMIC_READ(dhd->osh, &dhd->multi_client_flow_rings),
|
||||
dhd->max_multi_client_flow_rings));
|
||||
#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
|
||||
|
||||
DHD_ERROR(("pktid_txq_start_cnt: %d\n", prot->pktid_txq_start_cnt));
|
||||
|
|
|
@ -7451,8 +7451,9 @@ void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
|
|||
dhdp->bus->h2d_mb_data_ptr_addr, dhdp->bus->d2h_mb_data_ptr_addr);
|
||||
bcm_bprintf(strbuf, "dhd cumm_ctr %d\n", DHD_CUMM_CTR_READ(&dhdp->cumm_ctr));
|
||||
#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
|
||||
bcm_bprintf(strbuf, "multi_client_flow_rings:%d max_multi_client_flow_rings:%d\n",
|
||||
dhdp->multi_client_flow_rings, dhdp->max_multi_client_flow_rings);
|
||||
bcm_bprintf(strbuf, "multi_client_flow_rings:%u max_multi_client_flow_rings:%d\n",
|
||||
OSL_ATOMIC_READ(dhdp->osh, &dhdp->multi_client_flow_rings),
|
||||
dhdp->max_multi_client_flow_rings);
|
||||
#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */
|
||||
bcm_bprintf(strbuf,
|
||||
"%4s %4s %2s %4s %17s %4s %4s %6s %10s %4s %4s ",
|
||||
|
|
|
@ -213,6 +213,12 @@ static int ion_rbin_heap_allocate(struct ion_heap *heap,
|
|||
if (align > PAGE_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
if (!!(flags & ION_FLAG_PROTECTED)) {
|
||||
pr_err("ION_FLAG_PROTECTED is set to non-secure heap %s",
|
||||
heap->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
size_remaining = size = PAGE_ALIGN(size_org);
|
||||
nr_total = rbin_heap->count << PAGE_SHIFT;
|
||||
|
||||
|
|
|
@ -59,6 +59,7 @@
|
|||
#include <linux/file.h>
|
||||
#include <linux/configfs.h>
|
||||
#include <linux/usb/composite.h>
|
||||
#include <linux/kref.h>
|
||||
|
||||
/* platform specific definitions */
|
||||
/* ex) #define __ANDROID__ */
|
||||
|
@ -124,6 +125,8 @@ struct conn_gadget_dev {
|
|||
/* flag variable that save flush call status
|
||||
* to check wakeup reason */
|
||||
atomic_t flush;
|
||||
|
||||
struct kref kref;
|
||||
};
|
||||
|
||||
static struct usb_interface_descriptor conn_gadget_interface_desc = {
|
||||
|
@ -231,6 +234,7 @@ struct conn_gadget_instance {
|
|||
const char *name;
|
||||
};
|
||||
|
||||
static void conn_gadget_cleanup(struct kref *kref);
|
||||
|
||||
static inline struct conn_gadget_dev *func_to_conn_gadget(struct usb_function *f)
|
||||
{
|
||||
|
@ -702,6 +706,11 @@ static int conn_gadget_open(struct inode *ip, struct file *fp)
|
|||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!kref_get_unless_zero(&_conn_gadget_dev->kref)) {
|
||||
CONN_GADGET_ERR("already device removed\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
fp->private_data = _conn_gadget_dev;
|
||||
|
||||
/* clear the error latch */
|
||||
|
@ -748,6 +757,8 @@ static int conn_gadget_release(struct inode *ip, struct file *fp)
|
|||
atomic_set(&_conn_gadget_dev->flush, 0);
|
||||
|
||||
conn_gadget_unlock(&_conn_gadget_dev->open_excl);
|
||||
|
||||
kref_put(&_conn_gadget_dev->kref, conn_gadget_cleanup);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1235,6 +1246,8 @@ static int conn_gadget_setup(struct conn_gadget_instance *fi_conn_gadget)
|
|||
atomic_set(&dev->flush, 0);
|
||||
atomic_set(&dev->ep_out_excl, 0);
|
||||
|
||||
kref_init(&dev->kref);
|
||||
|
||||
INIT_LIST_HEAD(&dev->tx_idle);
|
||||
INIT_LIST_HEAD(&dev->rx_idle);
|
||||
INIT_LIST_HEAD(&dev->rx_busy);
|
||||
|
@ -1284,7 +1297,7 @@ err_:
return ret;
}

static void conn_gadget_cleanup(void)
static void conn_gadget_cleanup(struct kref *kref)
{
printk(KERN_INFO "conn_gadget_cleanup\n");

@ -1360,8 +1373,8 @@ static void conn_gadget_free_inst(struct usb_function_instance *fi)

fi_conn_gadget = to_fi_conn_gadget(fi);
kfree(fi_conn_gadget->name);
conn_gadget_cleanup();
kfree(fi_conn_gadget);
kref_put(&_conn_gadget_dev->kref, conn_gadget_cleanup);
}

struct usb_function_instance *alloc_inst_conn_gadget(void)
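The conn_gadget change above ties cleanup to a kref: open() takes a reference with kref_get_unless_zero(), release() and free_inst() drop theirs, and the final kref_put() invokes conn_gadget_cleanup(). A userspace sketch of that lifetime rule; the struct and names below are illustrative, not the driver's:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct gadget_dev {
	atomic_int refcount;
	void (*release)(struct gadget_dev *dev);
};

static void dev_get(struct gadget_dev *dev)
{
	atomic_fetch_add(&dev->refcount, 1);
}

static void dev_put(struct gadget_dev *dev)
{
	/* Last reference gone: run the release callback exactly once. */
	if (atomic_fetch_sub(&dev->refcount, 1) == 1)
		dev->release(dev);
}

static void dev_release(struct gadget_dev *dev)
{
	printf("cleanup\n");
	free(dev);
}

int main(void)
{
	struct gadget_dev *dev = malloc(sizeof(*dev));

	atomic_init(&dev->refcount, 1);   /* reference held by the instance    */
	dev->release = dev_release;

	dev_get(dev);   /* open()                                      */
	dev_put(dev);   /* release()                                   */
	dev_put(dev);   /* free_inst(): last reference, cleanup runs   */
	return 0;
}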
BIN init/uh.8g.elf.h (Binary file not shown.)
BIN init/uh.elf.h (Binary file not shown.)
|
@ -730,8 +730,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
|
|||
|
||||
for (i = 0; i < arg.nr; ) {
|
||||
siginfo_t info;
|
||||
unsigned long off = arg.off + i;
|
||||
bool found = false;
|
||||
unsigned long off = arg.off + i;
|
||||
bool found = false;
|
||||
|
||||
spin_lock_irq(&child->sighand->siglock);
|
||||
list_for_each_entry(q, &pending->list, list) {
|
||||
|
|
|
@ -46,12 +46,14 @@ static int set_migratetype_isolate(struct page *page,
|
|||
notifier_ret = notifier_to_errno(notifier_ret);
|
||||
if (notifier_ret)
|
||||
goto out;
|
||||
#if !defined(CONFIG_HPA)
|
||||
/*
|
||||
* FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
|
||||
* We just check MOVABLE pages.
|
||||
*/
|
||||
if (!has_unmovable_pages(zone, page, arg.pages_found,
|
||||
skip_hwpoisoned_pages))
|
||||
#endif
|
||||
ret = 0;
|
||||
|
||||
/*
|
||||
|
|
|
@ -1,24 +1,29 @@
|
|||
#
|
||||
# Makefile for the DSMS
|
||||
#
|
||||
|
||||
obj-y := dsms_kernel_api.o
|
||||
obj-y += dsms_access_control.o
|
||||
obj-y += dsms_rate_limit.o
|
||||
obj-y += dsms_policy.o
|
||||
obj-y += dsms_init.o
|
||||
obj-y += dsms_debug.o
|
||||
|
||||
ccflags-y := -Wformat
|
||||
ccflags-y += -DDSMS_ALLOWLIST_IGNORE_NAME_SUFFIXES_ENABLE
|
||||
ccflags-y += -I$(srctree)/$(src)
|
||||
|
||||
EXTRA_CFLAGS += -DDSMS_WHITELIST_IGNORE_NAME_SUFFIXES_ENABLE
|
||||
obj-$(CONFIG_SECURITY_DSMS) := dsms_access_control.o
|
||||
obj-$(CONFIG_SECURITY_DSMS) += dsms_init.o
|
||||
obj-$(CONFIG_SECURITY_DSMS) += dsms_kernel_api.o
|
||||
obj-$(CONFIG_SECURITY_DSMS) += dsms_policy.o
|
||||
obj-$(CONFIG_SECURITY_DSMS) += dsms_rate_limit.o
|
||||
obj-$(CONFIG_SECURITY_DSMS) += dsms_netlink.o
|
||||
obj-$(CONFIG_SECURITY_DSMS) += dsms_message_list.o
|
||||
|
||||
# Debugging options:
|
||||
|
||||
ifneq (,$(filter userdebug eng, $(TARGET_BUILD_VARIANT)))
|
||||
# CFLAGS_dsms_debug.o := -DDEBUG
|
||||
endif
|
||||
|
||||
ifneq ($(CONFIG_KUNIT), y)
|
||||
EXTRA_CFLAGS += -D__visible_for_testing=static
|
||||
# kunit tests options:
|
||||
GCOV_PROFILE := $(CONFIG_KUNIT)
|
||||
ifeq ($(CONFIG_KUNIT), y)
|
||||
ifneq ($(wildcard $(srctree)/$(src)/test),)
|
||||
ccflags-y += -DDSMS_KUNIT_ENABLED
|
||||
obj-y += test/security_dsms_access_control_test.o
|
||||
obj-y += test/security_dsms_debug_test.o
|
||||
obj-y += test/security_dsms_init_test.o
|
||||
obj-y += test/security_dsms_kernel_api_test.o
|
||||
obj-y += test/security_dsms_policy_test.o
|
||||
obj-y += test/security_dsms_rate_limit_test.o
|
||||
obj-y += test/security_dsms_test_utils.o
|
||||
endif
|
||||
endif
|
||||
|
|
|
@ -10,9 +10,9 @@
|
|||
#include <linux/dsms.h>
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "dsms_access_control.h"
|
||||
#include "dsms_debug.h"
|
||||
#include "dsms_kernel_api.h"
|
||||
#include "dsms_test.h"
|
||||
|
||||
typedef int (*cmp_fn_t)(const void *key, const void *element);
|
||||
|
||||
|
@ -24,10 +24,10 @@ typedef int (*cmp_fn_t)(const void *key, const void *element);
 *
 * Returns lexicographic order of the two compared function names
 */
__visible_for_testing int compare_policy_entries(const char *function_name,
		const struct dsms_policy_entry *entry)
__visible_for_testing int compare_policy_entries(const void *function_name, const void *entry)
{
	return strncmp(function_name, entry->function_name, KSYM_NAME_LEN);
	return strncmp((const char *)function_name,
		((const struct dsms_policy_entry *)entry)->function_name, KSYM_NAME_LEN);
}

/*

@ -45,7 +45,7 @@ __visible_for_testing struct dsms_policy_entry *find_policy_entry(const char *fu
	entry = bsearch(function_name,
		dsms_policy,
		dsms_policy_size(),
		(sizeof *dsms_policy),
		sizeof(*dsms_policy),
		(cmp_fn_t) compare_policy_entries);

	return (struct dsms_policy_entry *)entry;
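The comparator is rewritten because bsearch() expects int (*)(const void *, const void *); funnelling a differently-typed function through the cmp_fn_t cast is undefined behaviour, so the casts now live inside the comparator. A runnable illustration over a sorted policy table (the table contents are invented):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

struct policy_entry { const char *function_name; };

/* Sorted by function_name, as bsearch() requires. */
static const struct policy_entry policy[] = {
	{ "defex_report" }, { "dsms_test_hook" }, { "five_audit" },
};

static int compare_policy_entries(const void *key, const void *element)
{
	return strcmp((const char *)key,
		      ((const struct policy_entry *)element)->function_name);
}

int main(void)
{
	const struct policy_entry *hit =
		bsearch("five_audit", policy,
			sizeof(policy) / sizeof(policy[0]),
			sizeof(policy[0]), compare_policy_entries);

	assert(hit && strcmp(hit->function_name, "five_audit") == 0);
	assert(!bsearch("not_allowed", policy,
			sizeof(policy) / sizeof(policy[0]),
			sizeof(policy[0]), compare_policy_entries));
	return 0;
}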
@ -60,32 +60,32 @@ int dsms_verify_access(const void *address)
|
|||
char function_name[KSYM_NAME_LEN+1];
|
||||
int index;
|
||||
|
||||
dsms_log_write(LOG_DEBUG, "dsms_verify_access: "
|
||||
"Caller function is %pS (%pF)", address, address);
|
||||
DSMS_LOG_DEBUG("%s: Caller function is %pS (%pF)", __func__,
|
||||
address, address);
|
||||
|
||||
if (!address) {
|
||||
dsms_log_write(LOG_ERROR, "DENY: invalid caller address.");
|
||||
DSMS_LOG_ERROR("DENY: invalid caller address.");
|
||||
return DSMS_DENY;
|
||||
}
|
||||
|
||||
symname = kallsyms_lookup((unsigned long)address,
|
||||
&symsize, &offset, &modname, function_name);
|
||||
if (!symname) {
|
||||
dsms_log_write(LOG_ERROR, "DENY: caller address not in kallsyms.");
|
||||
DSMS_LOG_ERROR("DENY: caller address not in kallsyms.");
|
||||
return DSMS_DENY;
|
||||
}
|
||||
|
||||
function_name[KSYM_NAME_LEN] = 0;
|
||||
dsms_log_write(LOG_DEBUG, "kallsyms caller modname = %s, function_name = '%s',"
|
||||
" offset = 0x%lx", modname, function_name, offset);
|
||||
DSMS_LOG_DEBUG("%s: kallsyms caller modname = %s, function_name = '%s', offset = 0x%lx",
|
||||
__func__, modname, function_name, offset);
|
||||
|
||||
if (modname != NULL) {
|
||||
dsms_log_write(LOG_ERROR, "DENY: function '%s' is "
|
||||
"not a kernel symbol", function_name);
|
||||
DSMS_LOG_ERROR("DENY: function '%s' is not a kernel symbol",
|
||||
function_name);
|
||||
return DSMS_DENY; // not a kernel symbol
|
||||
}
|
||||
|
||||
if (should_ignore_whitelist_suffix()) {
|
||||
if (should_ignore_allowlist_suffix()) {
|
||||
for (index = 0; index < KSYM_NAME_LEN; index++)
|
||||
if ((function_name[index] == '.') || (function_name[index] == 0))
|
||||
break;
|
||||
|
@ -93,8 +93,8 @@ int dsms_verify_access(const void *address)
|
|||
}
|
||||
|
||||
if (find_policy_entry(function_name) == NULL) {
|
||||
dsms_log_write(LOG_ERROR, "DENY: function '%s': is "
|
||||
"not allowed by policy", function_name);
|
||||
DSMS_LOG_ERROR("DENY: function '%s': is not allowed by policy",
|
||||
function_name);
|
||||
return DSMS_DENY;
|
||||
}
|
||||
|
||||
|
|
|
@ -9,22 +9,19 @@
|
|||
#ifndef _DSMS_ACCESS_CONTROL_H
|
||||
#define _DSMS_ACCESS_CONTROL_H
|
||||
|
||||
#ifdef CONFIG_KUNIT
|
||||
#include <kunit/mock.h>
|
||||
#endif
|
||||
#include <linux/types.h>
|
||||
|
||||
#define CALLER_FRAME (0)
|
||||
|
||||
#ifdef DSMS_WHITELIST_IGNORE_NAME_SUFFIXES_ENABLE
|
||||
# define WHITELIST_IGNORE_SUFFIX (1)
|
||||
#ifdef DSMS_ALLOWLIST_IGNORE_NAME_SUFFIXES_ENABLE
|
||||
# define ALLOWLIST_IGNORE_SUFFIX (1)
|
||||
#else
|
||||
# define WHITELIST_IGNORE_SUFFIX (0)
|
||||
# define ALLOWLIST_IGNORE_SUFFIX (0)
|
||||
#endif
|
||||
|
||||
static inline char should_ignore_whitelist_suffix(void)
|
||||
static inline char should_ignore_allowlist_suffix(void)
|
||||
{
|
||||
return WHITELIST_IGNORE_SUFFIX;
|
||||
return ALLOWLIST_IGNORE_SUFFIX;
|
||||
}
|
||||
|
||||
struct dsms_policy_entry {
|
||||
|
@ -37,10 +34,4 @@ extern struct dsms_policy_entry dsms_policy[];
|
|||
extern size_t dsms_policy_size(void);
|
||||
extern int dsms_verify_access(const void *address);
|
||||
|
||||
#ifdef CONFIG_KUNIT
|
||||
extern int compare_policy_entries(const char *function_name,
|
||||
const struct dsms_policy_entry *entry);
|
||||
extern struct dsms_policy_entry *find_policy_entry(const char *function_name);
|
||||
#endif
|
||||
|
||||
#endif /* _DSMS_ACCESS_CONTROL_H */
|
||||
|
|
|
@ -6,21 +6,14 @@
|
|||
* as published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include <linux/dsms.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kmod.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "dsms_debug.h"
|
||||
#include "dsms_init.h"
|
||||
#include "dsms_kernel_api.h"
|
||||
#include "dsms_netlink.h"
|
||||
#include "dsms_message_list.h"
|
||||
#include "dsms_rate_limit.h"
|
||||
#include "dsms_test.h"
|
||||
|
||||
static int is_dsms_initialized_flag = false;
|
||||
|
||||
|
@ -29,12 +22,33 @@ int dsms_is_initialized(void)
|
|||
return is_dsms_initialized_flag;
|
||||
}
|
||||
|
||||
static __init int dsms_init(void)
|
||||
kunit_init_module(dsms_init)
|
||||
{
|
||||
dsms_log_write(LOG_INFO, "Started.");
|
||||
dsms_rate_limit_init();
|
||||
is_dsms_initialized_flag = true;
|
||||
return 0;
|
||||
int ret = 0;
|
||||
DSMS_LOG_INFO("Started.");
|
||||
|
||||
if (is_dsms_initialized_flag != true) {
|
||||
ret = prepare_userspace_communication();
|
||||
if (ret != 0) {
|
||||
DSMS_LOG_ERROR("It was not possible to prepare the userspace communication: %d.", ret);
|
||||
return ret;
|
||||
}
|
||||
init_semaphore_list();
|
||||
dsms_rate_limit_init();
|
||||
is_dsms_initialized_flag = true;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit dsms_exit(void)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
DSMS_LOG_INFO("Exited.");
|
||||
ret = remove_userspace_communication();
|
||||
if (ret != 0)
|
||||
DSMS_LOG_ERROR("It was not possible to remove the userspace communication: %d.", ret);
|
||||
}
|
||||
|
||||
module_init(dsms_init);
|
||||
module_exit(dsms_exit);
|
||||
|
|
|
@ -6,8 +6,6 @@
|
|||
* as published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include <linux/dsms.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
|
@ -16,165 +14,83 @@
|
|||
#include <linux/proc_fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include "dsms_access_control.h"
|
||||
#include "dsms_debug.h"
|
||||
#include "dsms_init.h"
|
||||
#include "dsms_rate_limit.h"
|
||||
#include "dsms_kernel_api.h"
|
||||
#include "dsms_message_list.h"
|
||||
#include "dsms_rate_limit.h"
|
||||
#include "dsms_test.h"
|
||||
|
||||
#define MAX_ALLOWED_DETAIL_LENGTH (1024)
|
||||
#define VALUE_STRLEN (22)
|
||||
|
||||
// Command: <<DSMS_BINARY>> <<feature_code>> <<detail>> <<value>>
|
||||
#define DSMS_BINARY "/system/bin/umh/dsms"
|
||||
static const char *dsms_command[] = {
|
||||
DSMS_BINARY,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL,
|
||||
NULL
|
||||
};
|
||||
#define FEATURE_INDEX (1)
|
||||
#define EXTRA_INDEX (2)
|
||||
#define VALUE_INDEX (3)
|
||||
|
||||
#define MESSAGE_COUNT_LIMIT (50)
|
||||
|
||||
static const char *dsms_environ[] = {
|
||||
"HOME=/",
|
||||
"PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin",
|
||||
"ANDROID_DATA=/data",
|
||||
NULL
|
||||
};
|
||||
|
||||
static atomic_t message_counter = ATOMIC_INIT(0);
|
||||
|
||||
__visible_for_testing char *dsms_alloc_user_string(const char *string)
|
||||
{
|
||||
size_t size;
|
||||
char *string_cpy;
|
||||
if (string == NULL || *string == 0)
|
||||
return "";
|
||||
|
||||
size = strnlen(string, PAGE_SIZE - 1) + 1;
|
||||
string_cpy = (char *) kmalloc(size * sizeof(string[0]),
|
||||
GFP_USER);
|
||||
if (string_cpy) {
|
||||
memcpy(string_cpy, string, size);
|
||||
string_cpy[size - 1] = '\0';
|
||||
}
|
||||
|
||||
return string_cpy;
|
||||
}
|
||||
|
||||
__visible_for_testing char *dsms_alloc_user_value(int64_t value)
|
||||
{
|
||||
char *string = (char *) kmalloc(VALUE_STRLEN, GFP_USER);
|
||||
if (string) {
|
||||
snprintf(string, VALUE_STRLEN, "%lld", value);
|
||||
string[VALUE_STRLEN-1] = 0;
|
||||
}
|
||||
return string;
|
||||
}
|
||||
|
||||
__visible_for_testing void dsms_free_user_string(const char *string)
|
||||
{
|
||||
if (string == NULL || *string == 0)
|
||||
return;
|
||||
kfree(string);
|
||||
}
|
||||
|
||||
__visible_for_testing void dsms_message_cleanup(struct subprocess_info *info)
|
||||
{
|
||||
if (info && info->argv) {
|
||||
dsms_free_user_string(info->argv[FEATURE_INDEX]);
|
||||
dsms_free_user_string(info->argv[EXTRA_INDEX]);
|
||||
dsms_free_user_string(info->argv[VALUE_INDEX]);
|
||||
kfree(info->argv);
|
||||
}
|
||||
atomic_dec(&message_counter);
|
||||
}
|
||||
|
||||
__visible_for_testing inline int dsms_send_allowed_message(const char *feature_code,
|
||||
__visible_for_testing struct dsms_message *create_message(const char *feature_code,
|
||||
const char *detail,
|
||||
int64_t value)
|
||||
{
|
||||
char **argv;
|
||||
struct subprocess_info *info;
|
||||
int ret = DSMS_DENY;
|
||||
int len_detail = 0;
|
||||
struct dsms_message *message;
|
||||
|
||||
// limit number of message to prevent message's bursts
|
||||
if (atomic_add_unless(&message_counter, 1, MESSAGE_COUNT_LIMIT) == 0) {
|
||||
dsms_log_write(LOG_ERROR, "Message counter has reached its limit.");
|
||||
ret = -EBUSY;
|
||||
goto limit_error;
|
||||
}
|
||||
// allocate argv, envp, necessary data
|
||||
argv = (char**) kmalloc(sizeof(dsms_command), GFP_USER);
|
||||
if (!argv) {
|
||||
dsms_log_write(LOG_ERROR, "Failed memory allocation for argv.");
|
||||
ret = -ENOMEM;
|
||||
goto no_mem_error;
|
||||
message = kmalloc(sizeof(struct dsms_message), GFP_KERNEL);
|
||||
if (!message) {
|
||||
DSMS_LOG_ERROR("It was not possible to allocate memory for message.");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memcpy(argv, dsms_command, sizeof(dsms_command));
|
||||
|
||||
argv[FEATURE_INDEX] = dsms_alloc_user_string(feature_code);
|
||||
argv[EXTRA_INDEX] = dsms_alloc_user_string(detail);
|
||||
argv[VALUE_INDEX] = dsms_alloc_user_value(value);
|
||||
if (!argv[FEATURE_INDEX] || !argv[EXTRA_INDEX] ||
|
||||
!argv[VALUE_INDEX]) {
|
||||
dsms_log_write(LOG_ERROR, "Failed memory allocation for user string.");
|
||||
ret = -ENOMEM;
|
||||
goto no_mem_error;
|
||||
message->feature_code = kmalloc_array(FEATURE_CODE_LENGTH + 1,
|
||||
sizeof(char), GFP_KERNEL);
|
||||
if (!message->feature_code) {
|
||||
DSMS_LOG_ERROR("It was not possible to allocate memory for feature code.");
|
||||
kfree(message);
|
||||
return NULL;
|
||||
}
|
||||
strncpy(message->feature_code, feature_code, sizeof(char) *
|
||||
FEATURE_CODE_LENGTH);
|
||||
message->feature_code[FEATURE_CODE_LENGTH] = '\0';
|
||||
|
||||
// call_usermodehelper with wait_proc and callback function to cleanup data after execution
|
||||
info = call_usermodehelper_setup(DSMS_BINARY, argv,
|
||||
(char**) dsms_environ,
|
||||
GFP_ATOMIC, NULL,
|
||||
&dsms_message_cleanup, NULL);
|
||||
if (!info) {
|
||||
dsms_log_write(LOG_ERROR, "Failed memory allocation for"
|
||||
"call_usermodehelper_setup.");
|
||||
ret = -ENOMEM;
|
||||
goto no_mem_error;
|
||||
len_detail = strnlen(detail, MAX_ALLOWED_DETAIL_LENGTH) + 1;
|
||||
message->detail = kmalloc_array(len_detail, sizeof(char), GFP_KERNEL);
|
||||
if (!message->detail) {
|
||||
DSMS_LOG_ERROR("It was not possible to allocate memory for detail.");
|
||||
kfree(message->feature_code);
|
||||
kfree(message);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return call_usermodehelper_exec(info, UMH_NO_WAIT);
|
||||
|
||||
no_mem_error:
|
||||
if (argv) {
|
||||
dsms_free_user_string(argv[FEATURE_INDEX]);
|
||||
dsms_free_user_string(argv[EXTRA_INDEX]);
|
||||
dsms_free_user_string(argv[VALUE_INDEX]);
|
||||
kfree(argv);
|
||||
}
|
||||
atomic_dec(&message_counter);
|
||||
|
||||
limit_error:
|
||||
return ret;
|
||||
strncpy(message->detail, detail, len_detail);
|
||||
message->detail[len_detail - 1] = '\0';
|
||||
message->value = value;
|
||||
return message;
|
||||
}
|
||||
|
||||
int noinline dsms_send_message(const char *feature_code,
|
||||
noinline int dsms_send_message(const char *feature_code,
|
||||
const char *detail,
|
||||
int64_t value)
|
||||
{
|
||||
void *address;
|
||||
int ret = DSMS_DENY;
|
||||
size_t len;
|
||||
struct dsms_message *message;
|
||||
|
||||
if (!feature_code) {
|
||||
DSMS_LOG_ERROR("Invalid feature code.");
|
||||
ret = -EINVAL;
|
||||
goto exit_send;
|
||||
}
|
||||
|
||||
if (!detail)
|
||||
detail = "";
|
||||
|
||||
len = strnlen(detail, MAX_ALLOWED_DETAIL_LENGTH);
|
||||
dsms_log_write(LOG_DEBUG, "{'%s', '%s' (%zu bytes), %lld}",
|
||||
feature_code, detail, len, value);
|
||||
DSMS_LOG_DEBUG("{'%s', '%s' (%zu bytes), %lld}",
|
||||
feature_code, detail, len, value);
|
||||
|
||||
if (!dsms_is_initialized()) {
|
||||
dsms_log_write(LOG_ERROR, "DSMS not initialized yet.");
|
||||
DSMS_LOG_ERROR("DSMS not initialized yet.");
|
||||
ret = -EACCES;
|
||||
goto exit_send;
|
||||
}
|
||||
|
||||
ret = dsms_check_message_list_limit();
|
||||
if (ret != DSMS_SUCCESS)
|
||||
goto exit_send;
|
||||
|
||||
ret = dsms_check_message_rate_limit();
|
||||
if (ret != DSMS_SUCCESS)
|
||||
goto exit_send;
|
||||
|
@ -184,7 +100,17 @@ int noinline dsms_send_message(const char *feature_code,
|
|||
if (ret != DSMS_SUCCESS)
|
||||
goto exit_send;
|
||||
|
||||
ret = dsms_send_allowed_message(feature_code, detail, value);
|
||||
message = create_message(feature_code, detail, value);
|
||||
if (message == NULL)
|
||||
goto exit_send;
|
||||
|
||||
ret = process_dsms_message(message);
|
||||
|
||||
if (ret != 0) {
|
||||
kfree(message->detail);
|
||||
kfree(message->feature_code);
|
||||
kfree(message);
|
||||
}
|
||||
|
||||
exit_send:
|
||||
return ret;
|
||||
|
|
|
@ -1,15 +1,27 @@
|
|||
#ifdef CONFIG_KUNIT
|
||||
#include <kunit/mock.h>
|
||||
#endif
|
||||
#include <linux/dsms.h>
|
||||
#include <linux/kmod.h>
|
||||
/*
|
||||
* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_KUNIT
|
||||
extern char *dsms_alloc_user_string(const char *string);
|
||||
extern char *dsms_alloc_user_value(int64_t value);
|
||||
extern void dsms_free_user_string(const char *string);
|
||||
extern void dsms_message_cleanup(struct subprocess_info *info);
|
||||
extern inline int dsms_send_allowed_message(const char *feature_code,
|
||||
const char *detail,
|
||||
int64_t value);
|
||||
#endif
|
||||
#ifndef _DSMS_KERNEL_API_H
|
||||
#define _DSMS_KERNEL_API_H
|
||||
|
||||
#define FEATURE_CODE_LENGTH (4)
|
||||
#define MAX_ALLOWED_DETAIL_LENGTH (1024)
|
||||
|
||||
#define DSMS_TAG "[DSMS-KERNEL] "
|
||||
|
||||
#define DSMS_LOG_INFO(format, ...) pr_info(DSMS_TAG format, ##__VA_ARGS__)
|
||||
#define DSMS_LOG_ERROR(format, ...) pr_err(DSMS_TAG format, ##__VA_ARGS__)
|
||||
#define DSMS_LOG_DEBUG(format, ...) pr_debug(DSMS_TAG format, ##__VA_ARGS__)
|
||||
|
||||
struct dsms_message {
|
||||
char *feature_code;
|
||||
char *detail;
|
||||
int64_t value;
|
||||
};
|
||||
|
||||
#endif /* _DSMS_KERNEL_API_H */
|
||||
|
|
91
security/samsung/dsms/dsms_message_list.c
Executable file
|
@ -0,0 +1,91 @@
|
|||
/*
|
||||
* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/dsms.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/llist.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/semaphore.h>
|
||||
#include <linux/string.h>
|
||||
#include "dsms_kernel_api.h"
|
||||
#include "dsms_message_list.h"
|
||||
#include "dsms_netlink.h"
|
||||
#include "dsms_test.h"
|
||||
|
||||
struct dsms_message_node {
|
||||
struct dsms_message *message;
|
||||
struct llist_node llist;
|
||||
};
|
||||
|
||||
__visible_for_testing atomic_t list_counter = ATOMIC_INIT(0);
|
||||
|
||||
static struct llist_head dsms_linked_messages = LLIST_HEAD_INIT(dsms_linked_messages);
|
||||
static struct semaphore sem_count_message;
|
||||
|
||||
static struct dsms_message_node *create_node(struct dsms_message *message)
|
||||
{
|
||||
struct dsms_message_node *node;
|
||||
|
||||
node = kmalloc(sizeof(struct dsms_message_node), GFP_KERNEL);
|
||||
if (!node) {
|
||||
DSMS_LOG_ERROR("It was not possible to allocate memory for node.");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
node->message = message;
|
||||
return node;
|
||||
}
|
||||
|
||||
struct dsms_message *get_dsms_message(void)
|
||||
{
|
||||
struct dsms_message_node *node;
|
||||
struct dsms_message *message;
|
||||
|
||||
down(&sem_count_message);
|
||||
if (atomic_read(&list_counter) == 0)
|
||||
return NULL;
|
||||
node = llist_entry(llist_del_first(&dsms_linked_messages),
|
||||
struct dsms_message_node, llist);
|
||||
atomic_dec(&list_counter);
|
||||
message = node->message;
|
||||
kfree(node);
|
||||
|
||||
return message;
|
||||
}
|
||||
|
||||
int process_dsms_message(struct dsms_message *message)
|
||||
{
|
||||
struct dsms_message_node *node;
|
||||
|
||||
if (atomic_add_unless(&list_counter, 1, LIST_COUNT_LIMIT) == 0) {
|
||||
DSMS_LOG_ERROR("List counter has reached its limit.");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
node = create_node(message);
|
||||
if (node == NULL) {
|
||||
atomic_dec(&list_counter);
|
||||
return -ENOMEM;
|
||||
}
|
||||
DSMS_LOG_DEBUG("Processing message {'%s', '%s' (%zu bytes), %lld}.",
|
||||
message->feature_code, message->detail,
|
||||
strnlen(message->detail, MAX_ALLOWED_DETAIL_LENGTH), message->value);
|
||||
llist_add(&node->llist, &dsms_linked_messages);
|
||||
up(&sem_count_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
noinline void init_semaphore_list(void)
|
||||
{
|
||||
sema_init(&sem_count_message, 0);
|
||||
}
|
||||
|
||||
noinline int dsms_check_message_list_limit(void)
|
||||
{
|
||||
return atomic_read(&list_counter) < LIST_COUNT_LIMIT ? DSMS_SUCCESS : DSMS_DENY;
|
||||
}
|
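dsms_message_list.c above pairs a lock-free llist with a counting semaphore: producers add a node and up() the semaphore, the consumer thread down()s before taking a node, and list_counter caps the backlog at LIST_COUNT_LIMIT. A single-threaded POSIX sketch of the same bookkeeping; everything here is illustrative rather than the kernel implementation:

#include <assert.h>
#include <semaphore.h>
#include <stdlib.h>

#define LIST_COUNT_LIMIT 50

struct node { int value; struct node *next; };

static struct node *head;
static sem_t count_sem;
static int counter;

static int push(int value)
{
	struct node *n;

	if (counter >= LIST_COUNT_LIMIT)
		return -1;              /* backlog full, drop the message */
	n = malloc(sizeof(*n));
	if (!n)
		return -1;
	n->value = value;
	n->next = head;
	head = n;
	counter++;
	sem_post(&count_sem);           /* wake the consumer */
	return 0;
}

static int pop(int *value)
{
	struct node *n;

	sem_wait(&count_sem);           /* blocks until a message exists */
	n = head;
	head = n->next;
	counter--;
	*value = n->value;
	free(n);
	return 0;
}

int main(void)
{
	int v;

	sem_init(&count_sem, 0, 0);
	assert(push(42) == 0);
	assert(pop(&v) == 0 && v == 42);
	return 0;
}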
19
security/samsung/dsms/dsms_message_list.h
Executable file
|
@ -0,0 +1,19 @@
|
|||
/*
|
||||
* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef _DSMS_MESSAGE_LIST_H
|
||||
#define _DSMS_MESSAGE_LIST_H
|
||||
|
||||
#define LIST_COUNT_LIMIT (50)
|
||||
|
||||
extern void init_semaphore_list(void);
|
||||
extern struct dsms_message *get_dsms_message(void);
|
||||
extern int process_dsms_message(struct dsms_message *message);
|
||||
extern int dsms_check_message_list_limit(void);
|
||||
|
||||
#endif /* _DSMS_MESSAGE_LIST_H */
|
184
security/samsung/dsms/dsms_netlink.c
Executable file
|
@ -0,0 +1,184 @@
|
|||
/*
|
||||
* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/string.h>
|
||||
#include "dsms_kernel_api.h"
|
||||
#include "dsms_message_list.h"
|
||||
#include "dsms_netlink.h"
|
||||
|
||||
static struct dsms_message *current_message;
|
||||
static struct task_struct *dsms_sender_thread;
|
||||
|
||||
static DEFINE_MUTEX(dsms_wait_daemon_mutex);
|
||||
|
||||
static int dsms_start_sender_thread(struct sk_buff *skb, struct genl_info *info);
|
||||
|
||||
/*
|
||||
* DSMS netlink policy creation for the possible fields for the communication.
|
||||
*/
|
||||
static struct nla_policy dsms_netlink_policy[DSMS_ATTR_COUNT + 1] = {
|
||||
[DSMS_VALUE] = { .type = NLA_U64 },
|
||||
[DSMS_FEATURE_CODE] = { .type = NLA_STRING, .len = FEATURE_CODE_LENGTH + 1},
|
||||
[DSMS_DETAIL] = { .type = NLA_STRING, .len = MAX_ALLOWED_DETAIL_LENGTH + 1},
|
||||
[DSMS_DAEMON_READY] = { .type = NLA_U32 },
|
||||
};
|
||||
|
||||
/*
|
||||
* Definition of the netlink operations handled by the dsms kernel and
|
||||
* the daemon of dsms, for this case the DSMS_MSG_CMD operation will be handled
|
||||
* dsms_start_sender_thread function.
|
||||
*/
|
||||
static const struct genl_ops dsms_kernel_ops[] = {
|
||||
{
|
||||
.cmd = DSMS_MSG_CMD,
|
||||
.doit = dsms_start_sender_thread,
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
|
||||
.policy = dsms_netlink_policy,
|
||||
#endif
|
||||
},
|
||||
};
|
||||
|
||||
struct genl_multicast_group dsms_group[] = {
    {
        .name = DSMS_GROUP,
    },
};

/*
 * Descriptor of the DSMS Generic Netlink family
 */
static struct genl_family dsms_family = {
    .name = DSMS_FAMILY,
    .version = 1,
    .maxattr = DSMS_ATTR_MAX,
    .module = THIS_MODULE,
    .ops = dsms_kernel_ops,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)
    .policy = dsms_netlink_policy,
#endif
    .mcgrps = dsms_group,
    .n_mcgrps = ARRAY_SIZE(dsms_group),
    .n_ops = ARRAY_SIZE(dsms_kernel_ops),
};

int prepare_userspace_communication(void)
{
    current_message = NULL;
    dsms_sender_thread = NULL;
    return genl_register_family(&dsms_family);
}

int remove_userspace_communication(void)
{
    if (mutex_is_locked(&dsms_wait_daemon_mutex))
        mutex_unlock(&dsms_wait_daemon_mutex);
    /* The sender thread is only started once the daemon signals readiness,
     * so it may never have been created. */
    if (dsms_sender_thread)
        kthread_stop(dsms_sender_thread);
    return genl_unregister_family(&dsms_family);
}

noinline int dsms_send_netlink_message(struct dsms_message *message)
{
    struct sk_buff *skb;
    void *msg_head;
    int ret = 0;
    int detail_len;

    skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
    if (skb == NULL) {
        DSMS_LOG_DEBUG("It was not possible to allocate memory for the message.");
        return -ENOMEM;
    }

    // Creation of the message header
    msg_head = genlmsg_put(skb, 0, 0,
                           &dsms_family, 0, DSMS_MSG_CMD);
    if (msg_head == NULL) {
        DSMS_LOG_DEBUG("It was not possible to create the message head.");
        nlmsg_free(skb);
        return -ENOMEM;
    }

    ret = nla_put(skb, DSMS_VALUE, sizeof(message->value), &message->value);
    if (ret) {
        DSMS_LOG_DEBUG("It was not possible to add field DSMS_VALUE to the message.");
        nlmsg_free(skb);
        return ret;
    }

    ret = nla_put(skb, DSMS_FEATURE_CODE,
                  FEATURE_CODE_LENGTH + 1, message->feature_code);
    if (ret) {
        DSMS_LOG_DEBUG("It was not possible to add field DSMS_FEATURE_CODE to the message.");
        nlmsg_free(skb);
        return ret;
    }

    detail_len = strnlen(message->detail, MAX_ALLOWED_DETAIL_LENGTH);
    ret = nla_put(skb, DSMS_DETAIL, detail_len + 1, message->detail);
    if (ret) {
        DSMS_LOG_DEBUG("It was not possible to add field DSMS_DETAIL to the message.");
        nlmsg_free(skb);
        return ret;
    }

    genlmsg_end(skb, msg_head);
    ret = genlmsg_multicast(&dsms_family, skb, 0, 0, GFP_KERNEL);
    if (ret) {
        DSMS_LOG_DEBUG("It was not possible to send the message.");
        return ret;
    }

    return 0;
}

static int dsms_send_messages_thread(void *unused)
{
    int ret;

    msleep(2000);
    while (1) {
        if (!current_message && !kthread_should_stop())
            current_message = get_dsms_message();

        if (kthread_should_stop())
            do_exit(0);

        if (!current_message) {
            DSMS_LOG_DEBUG("There is no message in the list.");
            continue;
        }

        ret = dsms_send_netlink_message(current_message);
        if (ret < 0) {
            DSMS_LOG_ERROR("Error while sending a message: %d.", ret);
            mutex_lock(&dsms_wait_daemon_mutex);
            msleep(2000);
            continue;
        }

        kfree(current_message->feature_code);
        kfree(current_message->detail);
        kfree(current_message);
        current_message = NULL;
    }
    return 0;
}

static int dsms_start_sender_thread(struct sk_buff *skb, struct genl_info *info)
{
    if (!dsms_sender_thread) {
        dsms_sender_thread = kthread_run(dsms_send_messages_thread, NULL, "dsms_kthread");
        /* kthread_run() returns ERR_PTR() on failure, never NULL */
        if (IS_ERR(dsms_sender_thread)) {
            DSMS_LOG_ERROR("It was not possible to create the dsms thread.");
            dsms_sender_thread = NULL;
        }
    } else {
        if (mutex_is_locked(&dsms_wait_daemon_mutex))
            mutex_unlock(&dsms_wait_daemon_mutex);
    }
    return 0;
}
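
The family, multicast group, and DSMS_MSG_CMD handler registered above imply a small userspace counterpart. The following libnl-3 sketch is not part of this patch; the function name dsms_daemon_attach and its error handling are illustrative assumptions, while the family/group strings and the command and attribute ids are expected to mirror dsms_netlink_protocol.h. It shows how a daemon could join the DSMS multicast group and signal readiness, which is what triggers dsms_start_sender_thread() in the kernel.

/* Illustrative libnl-3 client sketch, not part of this patch. */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int dsms_daemon_attach(void)
{
    struct nl_sock *sk = nl_socket_alloc();
    struct nl_msg *msg;
    int family, group, err;

    if (!sk)
        return -1;
    if (genl_connect(sk))
        goto fail;

    /* Resolve the ids registered by prepare_userspace_communication() */
    family = genl_ctrl_resolve(sk, "DSMS Family");
    group = genl_ctrl_resolve_grp(sk, "DSMS Family", "DSMS Group");
    if (family < 0 || group < 0)
        goto fail;

    /* Join the multicast group so dsms_send_netlink_message() reaches us */
    if (nl_socket_add_membership(sk, group))
        goto fail;

    /* Tell the kernel we are ready; this starts the sender thread */
    msg = nlmsg_alloc();
    if (!msg)
        goto fail;
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0, DSMS_MSG_CMD, 1);
    nla_put_u32(msg, DSMS_DAEMON_READY, 1);
    err = nl_send_auto(sk, msg);
    nlmsg_free(msg);
    return err < 0 ? -1 : 0;

fail:
    nl_socket_free(sk);
    return -1;
}

On success the socket is kept open so the daemon keeps receiving the multicast reports sent by dsms_send_netlink_message().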
20
security/samsung/dsms/dsms_netlink.h
Executable file
@@ -0,0 +1,20 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#ifndef _DSMS_NETLINK_H
#define _DSMS_NETLINK_H

#include <linux/kernel.h>
#include <linux/version.h>
#include <net/genetlink.h>
#include "dsms_netlink_protocol.h"

extern int prepare_userspace_communication(void);
extern int remove_userspace_communication(void);

#endif /* _DSMS_NETLINK_H */
31
security/samsung/dsms/dsms_netlink_protocol.h
Executable file
@@ -0,0 +1,31 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#ifndef _DSMS_NETLINK_PROTOCOL_H
#define _DSMS_NETLINK_PROTOCOL_H

#define DSMS_FAMILY "DSMS Family"
#define DSMS_GROUP "DSMS Group"

// DSMS operations for the generic netlink communication
enum dsms_operations {
    DSMS_MSG_CMD,
};

// DSMS attribute ids for the dsms netlink policy
enum dsms_attribute_ids {
    /* Numbering must start from 1 */
    DSMS_VALUE = 1,
    DSMS_FEATURE_CODE,
    DSMS_DETAIL,
    DSMS_DAEMON_READY,
    DSMS_ATTR_COUNT,
#define DSMS_ATTR_MAX (DSMS_ATTR_COUNT - 1)
};

#endif /* _DSMS_NETLINK_PROTOCOL_H */
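
A multicast sent with DSMS_MSG_CMD carries the three payload attributes defined above. As a reference for how a daemon might unpack them, here is a minimal libnl-3 receive callback; it is a sketch only, the callback name is an assumption, and a userspace build is assumed to mirror these enums.

/* Illustrative libnl-3 receive callback, not part of this patch. */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/attr.h>

static int dsms_daemon_recv_cb(struct nl_msg *msg, void *arg)
{
    struct nlattr *attrs[DSMS_ATTR_MAX + 1] = { NULL };

    /* hdrlen is 0 because dsms_send_netlink_message() adds no family header */
    if (genlmsg_parse(nlmsg_hdr(msg), 0, attrs, DSMS_ATTR_MAX, NULL) < 0)
        return NL_SKIP;

    if (attrs[DSMS_FEATURE_CODE] && attrs[DSMS_DETAIL] && attrs[DSMS_VALUE])
        printf("%s: %s (%lld)\n",
               nla_get_string(attrs[DSMS_FEATURE_CODE]),
               nla_get_string(attrs[DSMS_DETAIL]),
               (long long)nla_get_u64(attrs[DSMS_VALUE]));
    return NL_OK;
}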
@@ -7,7 +7,6 @@
 */

#include <linux/dsms.h>

#include "dsms_access_control.h"

// Policy entries *MUST BE* ordered by function_name field, as the find

@@ -15,7 +14,7 @@

// vvvvv DO NOT CHANGE THESE LINES! vvvvv
struct dsms_policy_entry dsms_policy[] = {
    { "security/samsung/defex_lsm/defex_procs.c", "defex_report_violation" },
    { "security/samsung/defex_lsm/core/defex_main.c", "defex_report_violation" },
    { "security/samsung/five/five_audit.c", "five_audit_sign_err" },
    { "security/samsung/five/five_dsms.c", "five_dsms_msg" },
}; // dsms_policy

@@ -23,5 +22,5 @@ struct dsms_policy_entry dsms_policy[] = {

size_t dsms_policy_size(void)
{
    return (sizeof dsms_policy)/(sizeof *dsms_policy);
    return sizeof(dsms_policy)/sizeof(*dsms_policy);
}
@@ -8,35 +8,35 @@

#include <linux/dsms.h>
#include <linux/ktime.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>

#include <linux/timekeeping.h>
#include "dsms_kernel_api.h"
#include "dsms_rate_limit.h"
#include "dsms_debug.h"
#include "dsms_test.h"

#define ROUND_DURATION_MS ((u64)(1000L))
#define MAX_MESSAGES_PER_ROUND (50)

static int dsms_message_count;
static u64 dsms_round_start_ms;
__visible_for_testing int dsms_message_count;
__visible_for_testing u64 dsms_round_start_ms;

#if defined(CONFIG_KUNIT) && IS_ENABLED(CONFIG_KUNIT)
int dsms_max_messages_per_round(void)
__visible_for_testing int dsms_get_max_messages_per_round(void)
{
    return MAX_MESSAGES_PER_ROUND;
}
#endif

__visible_for_testing __always_inline u64 round_end_ms(u64 round_start_ms) {
__visible_for_testing u64 round_end_ms(u64 round_start_ms)
{
    return round_start_ms + ROUND_DURATION_MS;
}

__visible_for_testing __always_inline int is_new_round(u64 now_ms, u64 last_round_start_ms)
__visible_for_testing int is_new_round(u64 now_ms, u64 last_round_start_ms)
{
    return now_ms >= round_end_ms(last_round_start_ms);
}

__visible_for_testing __always_inline u64 dsms_get_time_ms(void) {
__visible_for_testing u64 dsms_get_time_ms(void)
{
    return div_u64(ktime_get_ns(), NSEC_PER_MSEC);
}

@@ -44,7 +44,7 @@ void dsms_rate_limit_init(void)
{
    dsms_message_count = 0;
    dsms_round_start_ms = dsms_get_time_ms();
    dsms_log_write(LOG_DEBUG, "[rate limit] INIT dsms_round_start_ms=%lu dsms_message_count=%d",
    DSMS_LOG_DEBUG("[rate limit] INIT dsms_round_start_ms=%llu dsms_message_count=%d",
                   dsms_round_start_ms, dsms_message_count);
}

@@ -55,22 +55,22 @@ int dsms_check_message_rate_limit(void)

    current_time_ms = dsms_get_time_ms();
    if (current_time_ms < dsms_round_start_ms) {
        dsms_log_write(LOG_DEBUG, "[rate limit] RESET current_time_ms=%lu dsms_round_start_ms=%lu dsms_message_count=%d",
        DSMS_LOG_DEBUG("[rate limit] RESET current_time_ms=%llu dsms_round_start_ms=%llu dsms_message_count=%d",
                       current_time_ms, dsms_round_start_ms, dsms_message_count);
        dsms_round_start_ms = current_time_ms;
        dsms_message_count = 0;
    }

    if (is_new_round(current_time_ms, dsms_round_start_ms)) {
        dropped_messages = dsms_message_count - MAX_MESSAGES_PER_ROUND;
        dropped_messages = dsms_message_count - dsms_get_max_messages_per_round();
        if (dropped_messages > 0)
            dsms_log_write(LOG_ERROR, "[rate limit] %d of %d messages dropped", dropped_messages, dsms_message_count);
            DSMS_LOG_ERROR("[rate limit] %d of %d messages dropped", dropped_messages, dsms_message_count);
        dsms_round_start_ms = current_time_ms;
        dsms_message_count = 0;
        return DSMS_SUCCESS;
    }

    if (++dsms_message_count > MAX_MESSAGES_PER_ROUND)
    if (++dsms_message_count > dsms_get_max_messages_per_round())
        return DSMS_DENY;
    return DSMS_SUCCESS;
}
@@ -9,18 +9,7 @@
#ifndef _DSMS_RATE_LIMIT_H
#define _DSMS_RATE_LIMIT_H

#ifdef CONFIG_KUNIT
#include <kunit/mock.h>
#endif

extern void dsms_rate_limit_init(void);
extern int dsms_check_message_rate_limit(void);

#ifdef CONFIG_KUNIT
extern int dsms_max_messages_per_round(void);
extern __always_inline u64 round_end_ms(u64 round_start_ms);
extern __always_inline int is_new_round(u64 now_ms, u64 last_round_start_ms);
extern __always_inline u64 dsms_get_time_ms(void);
#endif

#endif /* _DSMS_RATE_LIMIT_H */
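
Taken together, the rate limiter implements a fixed 1000 ms window with a cap of 50 messages: the 51st call within a round returns DSMS_DENY, and the first call of a new round logs how many messages were dropped and resets the counter. A caller is expected to chain it with the message list roughly as sketched below; dsms_try_report() is a hypothetical helper name, while DSMS_SUCCESS and DSMS_DENY come from <linux/dsms.h>.

/* Hypothetical helper showing the intended call order; not part of this patch. */
#include <linux/errno.h>
#include <linux/dsms.h>
#include "dsms_message_list.h"
#include "dsms_rate_limit.h"

static int dsms_try_report(struct dsms_message *message)
{
    /* Over 50 messages in the current 1000 ms round: drop the report. */
    if (dsms_check_message_rate_limit() == DSMS_DENY)
        return -EBUSY;
    /* Queue the message for the sender thread in dsms_netlink.c. */
    return process_dsms_message(message);
}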
99
security/samsung/dsms/dsms_test.h
Executable file
@@ -0,0 +1,99 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#ifndef _DSMS_TEST_H
#define _DSMS_TEST_H

/* -------------------------------------------------------------------------- */
/* macros to allow testing initialization code */
/* -------------------------------------------------------------------------- */

#if defined(DSMS_KUNIT_ENABLED)

#define declare_kunit_init_module(init_function_name) \
    extern int init_function_name##_kunit_helper(void)

#define kunit_init_module(init_function_name) \
    declare_kunit_init_module(init_function_name); \
    static __init int init_function_name(void) \
    { \
        return init_function_name##_kunit_helper(); \
    } \
    int init_function_name##_kunit_helper(void)

#else

#define declare_kunit_init_module(init_function_name)

#define kunit_init_module(init_function_name) \
    static __init int init_function_name(void)

#endif /* defined(DSMS_KUNIT_ENABLED) */

/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
/* Functions exported for each module (using __visible_for_testing) */
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */

#if !defined(DSMS_KUNIT_ENABLED)

#define __visible_for_testing static

#else

#include <kunit/mock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/umh.h>

/* -------------------------------------------------------------------------- */
/* kmalloc mock */
/* -------------------------------------------------------------------------- */

extern void *security_dsms_test_kmalloc_mock(size_t size, gfp_t flags);
#define kmalloc security_dsms_test_kmalloc_mock

/* -------------------------------------------------------------------------- */
/* dsms_access_control */
/* -------------------------------------------------------------------------- */

struct dsms_policy_entry;

extern int compare_policy_entries(const void *function_name, const void *entry);
extern struct dsms_policy_entry *find_policy_entry(const char *function_name);

/* -------------------------------------------------------------------------- */
/* dsms_init */
/* -------------------------------------------------------------------------- */

declare_kunit_init_module(dsms_init);

/* -------------------------------------------------------------------------- */
/* dsms_kernel_api */
/* -------------------------------------------------------------------------- */

extern atomic_t list_counter;
extern struct dsms_message *create_message(const char *feature_code,
                                            const char *detail,
                                            int64_t value);

/* -------------------------------------------------------------------------- */
/* dsms_rate_limit */
/* -------------------------------------------------------------------------- */

extern int dsms_message_count;
extern u64 dsms_round_start_ms;

extern int dsms_get_max_messages_per_round(void);
extern u64 round_end_ms(u64 round_start_ms);
extern int is_new_round(u64 now_ms, u64 last_round_start_ms);
extern u64 dsms_get_time_ms(void);

#endif /* !defined(DSMS_KUNIT_ENABLED) */
#endif /* _DSMS_TEST_H */
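
With DSMS_KUNIT_ENABLED set, kunit_init_module() keeps a single definition of the module's __init function while also emitting a *_kunit_helper() wrapper that the tests can call directly; without it, the macro collapses to a plain static __init definition. A minimal usage sketch follows; the function body is a placeholder.

#include "dsms_test.h"

/* Defines dsms_init() and, when DSMS_KUNIT_ENABLED is set, also
 * dsms_init_kunit_helper(), which security_dsms_init_test.c calls. */
kunit_init_module(dsms_init)
{
    /* real initialization work goes here */
    return 0;
}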
86
security/samsung/dsms/test/security_dsms_access_control_test.c
Executable file
@@ -0,0 +1,86 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#include <kunit/mock.h>
#include <kunit/test.h>
#include <linux/dsms.h>
#include "dsms_access_control.h"
#include "dsms_test.h"

/* -------------------------------------------------------------------------- */
/* Module test functions */
/* -------------------------------------------------------------------------- */

static void security_find_policy_entry_test(struct test *test)
{
    EXPECT_EQ(test, NULL, find_policy_entry("test"));
}

static void security_compare_policy_entries_test(struct test *test)
{
    struct dsms_policy_entry entry;

    entry.file_path = "/path/test";
    entry.function_name = "myfunction";
    EXPECT_GT(test, compare_policy_entries("myfunction1", &entry), 0);
    EXPECT_EQ(test, compare_policy_entries("myfunction", &entry), 0);
    EXPECT_LT(test, compare_policy_entries("myfunct", &entry), 0);
    entry.function_name = "myfunction1";
    EXPECT_EQ(test, compare_policy_entries("myfunction1", &entry), 0);
    EXPECT_LT(test, compare_policy_entries("Myfunction", &entry), 0);
}

static void security_should_ignore_allowlist_suffix_test(struct test *test)
{
    EXPECT_EQ(test, 1, should_ignore_allowlist_suffix());
}

static void security_dsms_policy_size_test(struct test *test)
{
    EXPECT_EQ(test, 3, dsms_policy_size());
    EXPECT_LT(test, 0, dsms_policy_size());
    EXPECT_GT(test, 4, dsms_policy_size());
}

static void security_dsms_verify_access_test(struct test *test)
{
    EXPECT_EQ(test, DSMS_DENY, dsms_verify_access(NULL));
}

/*
 * dsms_verify_access_address_not_in_kallsyms_test - caller address not in
 * kallsyms test case
 * @test - struct test pointer to the running test instance context.
 *
 * Test the case where the address passed to dsms_verify_access is not null and
 * is not in kallsyms. It is expected to return DSMS_DENY.
 */
static void security_dsms_verify_access_address_not_in_kallsyms_test(struct test *test)
{
    EXPECT_EQ(test, DSMS_DENY, dsms_verify_access((const void *)0x1));
}

/* -------------------------------------------------------------------------- */
/* Module definition */
/* -------------------------------------------------------------------------- */

static struct test_case security_dsms_access_control_test_cases[] = {
    TEST_CASE(security_compare_policy_entries_test),
    TEST_CASE(security_should_ignore_allowlist_suffix_test),
    TEST_CASE(security_dsms_policy_size_test),
    TEST_CASE(security_dsms_verify_access_test),
    TEST_CASE(security_find_policy_entry_test),
    TEST_CASE(security_dsms_verify_access_address_not_in_kallsyms_test),
    {},
};

static struct test_module security_dsms_access_control_test_module = {
    .name = "security-dsms-access-control-test",
    .test_cases = security_dsms_access_control_test_cases,
};
module_test(security_dsms_access_control_test_module);
36
security/samsung/dsms/test/security_dsms_debug_test.c
Executable file
@@ -0,0 +1,36 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#include <kunit/test.h>
#include <linux/dsms.h>
#include "dsms_kernel_api.h"
#include "dsms_test.h"

/* -------------------------------------------------------------------------- */
/* Module test functions */
/* -------------------------------------------------------------------------- */

static void security_dsms_debug_test(struct test *test)
{
    DSMS_LOG_INFO("DSMS Debug unit test %x\n", 0xdeadbeef);
}

/* -------------------------------------------------------------------------- */
/* Module definition */
/* -------------------------------------------------------------------------- */

static struct test_case security_dsms_debug_test_cases[] = {
    TEST_CASE(security_dsms_debug_test),
    {},
};

static struct test_module security_dsms_debug_test_module = {
    .name = "security-dsms-debug-test",
    .test_cases = security_dsms_debug_test_cases,
};
module_test(security_dsms_debug_test_module);
42
security/samsung/dsms/test/security_dsms_init_test.c
Executable file
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#include <kunit/mock.h>
#include <kunit/test.h>
#include "dsms_init.h"
#include "dsms_test.h"

/* -------------------------------------------------------------------------- */
/* Module test functions */
/* -------------------------------------------------------------------------- */

static void security_dsms_is_initialized_test(struct test *test)
{
    EXPECT_TRUE(test, dsms_is_initialized());
}

static void security_dsms_init_test(struct test *test)
{
    EXPECT_EQ(test, dsms_init_kunit_helper(), 0);
}

/* -------------------------------------------------------------------------- */
/* Module definition */
/* -------------------------------------------------------------------------- */

static struct test_case security_dsms_init_test_cases[] = {
    TEST_CASE(security_dsms_is_initialized_test),
    TEST_CASE(security_dsms_init_test),
    {},
};

static struct test_module security_dsms_init_test_module = {
    .name = "security-dsms-init-test",
    .test_cases = security_dsms_init_test_cases,
};
module_test(security_dsms_init_test_module);
100
security/samsung/dsms/test/security_dsms_kernel_api_test.c
Executable file
@@ -0,0 +1,100 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#include <kunit/mock.h>
#include <kunit/test.h>
#include <linux/dsms.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/umh.h>
#include "dsms_kernel_api.h"
#include "dsms_message_list.h"
#include "dsms_test.h"
#include "security_dsms_test_utils.h"

/* -------------------------------------------------------------------------- */
/* Module test functions */
/* -------------------------------------------------------------------------- */

static void security_dsms_send_message_test(struct test *test)
{
    // Should fail: "KUNIT" is not yet in the policy. TODO: add a success test.
    EXPECT_EQ(test, -1, dsms_send_message("KUNIT", "kunit test", 0));
}

/*
 * dsms_process_dsms_message_test - exercise three cases of the
 * process_dsms_message function
 * @test - struct test pointer to the running test instance context.
 *
 * Test 1 - The error-free call to the function
 * Test 2 - Trigger the limit error case
 * Test 3 - Trigger the memory error case
 */
static void security_dsms_process_dsms_message_test(struct test *test)
{
    struct dsms_message *message;
    int test1;
    int test2;
    int test3;

    message = create_message("KNIT", "kunit test", 0);
    EXPECT_NE(test, NULL, message);

    // Test 1 - The error-free call to the function
    test1 = process_dsms_message(message);
    EXPECT_NE(test, -EBUSY, test1);

    // If the message was processed successfully we need to create another message.
    if (test1 == 0) {
        message = create_message("KNIT", "kunit test", 0);
        EXPECT_NE(test, NULL, message);
    }

    // Test 2 - Trigger the limit error case
    atomic_set(&list_counter, LIST_COUNT_LIMIT);
    test2 = process_dsms_message(message);
    EXPECT_EQ(test, -EBUSY, test2);
    atomic_set(&list_counter, 0);

    // If the message was processed successfully we need to create another message.
    if (test2 == 0) {
        message = create_message("KNIT", "kunit test", 0);
        EXPECT_NE(test, NULL, message);
    }

    // Test 3 - Trigger the memory error case
    security_dsms_test_request_kmalloc_fail_at(1);
    test3 = process_dsms_message(message);
    EXPECT_EQ(test, -ENOMEM, test3);
    security_dsms_test_cancel_kmalloc_fail_requests();

    // If the message was not processed successfully we need to free the memory
    // at the end of the test.
    if (test3 != 0) {
        kfree(message->feature_code);
        kfree(message->detail);
        kfree(message);
    }
}

/* -------------------------------------------------------------------------- */
/* Module definition */
/* -------------------------------------------------------------------------- */

static struct test_case security_dsms_kernel_api_test_cases[] = {
    TEST_CASE(security_dsms_send_message_test),
    TEST_CASE(security_dsms_process_dsms_message_test),
    {},
};

static struct test_module security_dsms_kernel_api_module = {
    .name = "security-dsms-kernel-api",
    .test_cases = security_dsms_kernel_api_test_cases,
};
module_test(security_dsms_kernel_api_module);
45
security/samsung/dsms/test/security_dsms_policy_test.c
Executable file
@@ -0,0 +1,45 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#include <kunit/test.h>
#include <linux/dsms.h>
#include <linux/kallsyms.h>
#include <linux/string.h>
#include "dsms_access_control.h"
#include "dsms_test.h"

/* -------------------------------------------------------------------------- */
/* Module test functions */
/* -------------------------------------------------------------------------- */

static void security_dsms_policy_test(struct test *test)
{
    size_t i;

    // Check whether policy entries are sorted by function_name
    for (i = dsms_policy_size(); i > 1; --i)
        EXPECT_TRUE(test,
                    strncmp(dsms_policy[i - 2].function_name,
                            dsms_policy[i - 1].function_name,
                            KSYM_NAME_LEN) <= 0);
}

/* -------------------------------------------------------------------------- */
/* Module definition */
/* -------------------------------------------------------------------------- */

static struct test_case security_dsms_policy_test_cases[] = {
    TEST_CASE(security_dsms_policy_test),
    {},
};

static struct test_module security_dsms_policy_test_module = {
    .name = "security-dsms-policy-test",
    .test_cases = security_dsms_policy_test_cases,
};
module_test(security_dsms_policy_test_module);
110
security/samsung/dsms/test/security_dsms_rate_limit_test.c
Executable file
@@ -0,0 +1,110 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#include <kunit/mock.h>
#include <kunit/test.h>
#include <linux/dsms.h>
#include <linux/errno.h>
#include "dsms_rate_limit.h"
#include "dsms_test.h"

static u64 start_ms;

/* -------------------------------------------------------------------------- */
/* Module test functions */
/* -------------------------------------------------------------------------- */

static void security_round_end_ms_test(struct test *test)
{
    EXPECT_EQ(test, start_ms + ((u64)(1000L)), round_end_ms(start_ms));
    EXPECT_NE(test, start_ms + ((u64)(1001L)), round_end_ms(start_ms));
}

static void security_is_new_round_test(struct test *test)
{
    u64 now_ms = dsms_get_time_ms();

    EXPECT_EQ(test, 0, is_new_round(now_ms, start_ms));
}

static void security_dsms_check_message_rate_limit_deny_test(struct test *test)
{
    int failed = 0, i;

    for (i = dsms_get_max_messages_per_round(); i >= 0; --i)
        if (dsms_check_message_rate_limit() == DSMS_DENY)
            failed = 1;
    EXPECT_TRUE(test, failed);
}

static void security_dsms_check_message_rate_limit_success_test(struct test *test)
{
    EXPECT_EQ(test, DSMS_SUCCESS, dsms_check_message_rate_limit());
}

/* Test boundary cases (simulate clock wrapped, too many messages) */
static void security_dsms_check_message_rate_limit_boundary_test(struct test *test)
{
    int old_count;

    dsms_round_start_ms -= 10;
    EXPECT_EQ(test, DSMS_SUCCESS, dsms_check_message_rate_limit());
    old_count = dsms_message_count;
    dsms_round_start_ms = 0;
    dsms_message_count = dsms_get_max_messages_per_round() + 1;
    EXPECT_EQ(test, DSMS_SUCCESS, dsms_check_message_rate_limit());
    EXPECT_EQ(test, dsms_message_count, 0);
    dsms_message_count = old_count;
}

/**
 * dsms_check_message_rate_limit_reset_test
 *
 * This test sets the "dsms_round_start_ms" variable to the maximum value
 * of an unsigned 64 bit type (2^64 - 1). This triggers the
 * "[rate limit] RESET" case in the dsms_check_message_rate_limit() function.
 *
 * @param test - struct test pointer to the running test instance context.
 */
static void security_dsms_check_message_rate_limit_reset_test(struct test *test)
{
    dsms_round_start_ms = -1;
    EXPECT_EQ(test, DSMS_SUCCESS, dsms_check_message_rate_limit());
}

/* -------------------------------------------------------------------------- */
/* Module initialization and exit functions */
/* -------------------------------------------------------------------------- */

static int security_dsms_rate_test_init(struct test *test)
{
    dsms_rate_limit_init();
    start_ms = dsms_get_time_ms();
    return 0;
}

/* -------------------------------------------------------------------------- */
/* Module definition */
/* -------------------------------------------------------------------------- */

static struct test_case security_dsms_rate_test_cases[] = {
    TEST_CASE(security_round_end_ms_test),
    TEST_CASE(security_is_new_round_test),
    TEST_CASE(security_dsms_check_message_rate_limit_deny_test),
    TEST_CASE(security_dsms_check_message_rate_limit_success_test),
    TEST_CASE(security_dsms_check_message_rate_limit_boundary_test),
    TEST_CASE(security_dsms_check_message_rate_limit_reset_test),
    {},
};

static struct test_module security_dsms_rate_test_module = {
    .name = "security-dsms-rate-limit-test",
    .init = security_dsms_rate_test_init,
    .test_cases = security_dsms_rate_test_cases,
};
module_test(security_dsms_rate_test_module);
97
security/samsung/dsms/test/security_dsms_test_utils.c
Executable file
@@ -0,0 +1,97 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#include <kunit/test.h>
#include <kunit/mock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "security_dsms_test_utils.h"

/* the test utils themselves must see the real kmalloc */
#undef kmalloc

/* -------------------------------------------------------------------------- */
/* General test functions: kmalloc mock function */
/* -------------------------------------------------------------------------- */

/* each bit indicates whether the kmalloc mock should fail (return NULL) */
static uint64_t dsms_test_kmalloc_fail_requests;

void *security_dsms_test_kmalloc_mock(size_t size, gfp_t flags)
{
    bool fail;

    fail = dsms_test_kmalloc_fail_requests & 1ul;
    dsms_test_kmalloc_fail_requests >>= 1;
    return fail ? NULL : kmalloc(size, flags);
}

/* Requests that kmalloc fails in the attempt given by argument (1 for next) */
void security_dsms_test_request_kmalloc_fail_at(int attempt_no)
{
    if (attempt_no > 0)
        dsms_test_kmalloc_fail_requests |= (1ul << (attempt_no-1));
}

/* Cancels all kmalloc fail requests */
void security_dsms_test_cancel_kmalloc_fail_requests(void)
{
    dsms_test_kmalloc_fail_requests = 0;
}

/* -------------------------------------------------------------------------- */
/* Module test functions */
/* -------------------------------------------------------------------------- */

static void security_dsms_test_kmalloc_mock_test(struct test *test)
{
    void *p;

    security_dsms_test_request_kmalloc_fail_at(1);
    security_dsms_test_request_kmalloc_fail_at(3);
    EXPECT_EQ(test, p = security_dsms_test_kmalloc_mock(1, GFP_KERNEL), NULL);
    kfree(p);
    EXPECT_NE(test, p = security_dsms_test_kmalloc_mock(1, GFP_KERNEL), NULL);
    kfree(p);
    EXPECT_EQ(test, p = security_dsms_test_kmalloc_mock(1, GFP_KERNEL), NULL);
    kfree(p);
    EXPECT_NE(test, p = security_dsms_test_kmalloc_mock(1, GFP_KERNEL), NULL);
    kfree(p);
}

/* -------------------------------------------------------------------------- */
/* Module initialization and exit functions */
/* -------------------------------------------------------------------------- */

static int security_dsms_test_utils_init(struct test *test)
{
    security_dsms_test_cancel_kmalloc_fail_requests();
    return 0;
}

static void security_dsms_test_utils_exit(struct test *test)
{
    security_dsms_test_cancel_kmalloc_fail_requests();
}

/* -------------------------------------------------------------------------- */
/* Module definition */
/* -------------------------------------------------------------------------- */

static struct test_case security_dsms_test_utils_test_cases[] = {
    TEST_CASE(security_dsms_test_kmalloc_mock_test),
    {},
};

static struct test_module security_dsms_test_utils_module = {
    .name = "security-dsms-test-utils-test",
    .init = security_dsms_test_utils_init,
    .exit = security_dsms_test_utils_exit,
    .test_cases = security_dsms_test_utils_test_cases,
};
module_test(security_dsms_test_utils_module);
24
security/samsung/dsms/test/security_dsms_test_utils.h
Executable file
@@ -0,0 +1,24 @@
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 */

#ifndef _SECURITY_DSMS_TEST_UTILS_H
#define _SECURITY_DSMS_TEST_UTILS_H

#include "dsms_test.h"

/* -------------------------------------------------------------------------- */
/* General test functions: kmalloc mock function */
/* -------------------------------------------------------------------------- */

/* Requests that kmalloc fails in the attempt given by argument (1 for next) */
void security_dsms_test_request_kmalloc_fail_at(int attempt_no);

/* Cancels all kmalloc fail requests */
void security_dsms_test_cancel_kmalloc_fail_requests(void);

#endif /* _SECURITY_DSMS_TEST_UTILS_H */
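
In a test, the two helpers above are typically paired around a single call into the code under test, as security_dsms_kernel_api_test.c does for its memory-error case. A minimal sketch follows; the test name is an assumption, and a real test would also free the message on failure.

/* Illustrative KUnit test body using the fail-injection helpers. */
static void security_dsms_example_alloc_failure_test(struct test *test)
{
    struct dsms_message *message = create_message("KNIT", "kunit test", 0);

    EXPECT_NE(test, NULL, message);
    security_dsms_test_request_kmalloc_fail_at(1);     /* next kmalloc fails */
    EXPECT_EQ(test, -ENOMEM, process_dsms_message(message));
    security_dsms_test_cancel_kmalloc_fail_requests(); /* reset for later tests */
}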