http://ijcsit.com/docs/Volume%205/vol5issue04/ijcsit20140504225.pdf
Let's check how the memory-protection side of attack surface reduction is implemented in the actual kernel source code, mainly on ARM.
KCONFIG
These are the relevant config keywords. They are defined in arch/Kconfig. Yeah, it's a place I don't usually touch.
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/arch/Kconfig
arch/Kconfig
config ARCH_OPTIONAL_KERNEL_RWX
    def_bool n

config ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
    def_bool n

config ARCH_HAS_STRICT_KERNEL_RWX
    def_bool n

config STRICT_KERNEL_RWX
    bool "Make kernel text and rodata read-only" if ARCH_OPTIONAL_KERNEL_RWX
    depends on ARCH_HAS_STRICT_KERNEL_RWX
    default !ARCH_OPTIONAL_KERNEL_RWX || ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
    help
      If this is set, kernel text and rodata memory will be made read-only,
      and non-text memory will be made non-executable. This provides
      protection against certain security exploits (e.g. executing the heap
      or modifying text)

      These features are considered standard security practice these days.
      You should say Y here in almost all cases.

config ARCH_HAS_STRICT_MODULE_RWX
    def_bool n

config STRICT_MODULE_RWX
    bool "Set loadable kernel module data as NX and text as RO" if ARCH_OPTIONAL_KERNEL_RWX
    depends on ARCH_HAS_STRICT_MODULE_RWX && MODULES
    default !ARCH_OPTIONAL_KERNEL_RWX || ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
    help
      If this is set, module text and rodata memory will be made read-only,
      and non-text memory will be made non-executable. This provides
      protection against certain security exploits (e.g. writing to text)
So that's the setup. `STRICT_KERNEL_RWX` depends on `ARCH_HAS_STRICT_KERNEL_RWX`, and without it the option cannot be enabled at all.
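On ARM, these gate symbols are selected in arch/arm/Kconfig. From memory (so the exact conditions may differ between kernel versions), the relevant lines look roughly like this:

config ARM
    ...
    select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
    select ARCH_HAS_STRICT_MODULE_RWX if MMU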
First of all, the linker script used at link time. It's long, so here is only the relevant part.
arch/arm/kernel/vmlinux.lds.S
<Omitted>
#ifdef CONFIG_STRICT_KERNEL_RWX
    . = ALIGN(1<<SECTION_SHIFT);
#endif

#ifdef CONFIG_ARM_MPU
    . = ALIGN(PMSAv8_MINALIGN);
#endif
    .text : {               /* Real text segment          */
        _stext = .;         /* Text and read-only data    */
        ARM_TEXT
    }

#ifdef CONFIG_DEBUG_ALIGN_RODATA
    . = ALIGN(1<<SECTION_SHIFT);
#endif
    _etext = .;             /* End of text section */

    RO_DATA(PAGE_SIZE)
<Omitted>
Here is the definition of SECTION_SHIFT. (This is the 2-level page table header used without LPAE; with LPAE the 3-level equivalent applies.)
arch/arm/include/asm/pgtable-2level.h
/*
* section address mask and size definitions.
*/
#define SECTION_SHIFT 20
#define SECTION_SIZE (1UL << SECTION_SHIFT)
#define SECTION_MASK (~(SECTION_SIZE-1))
1 << 20 is 1,048,576 bytes, so this is a 1MB boundary. In other words, regions such as text / ro / rw / bss are expected to start on 1MB boundaries.
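As a throwaway user-space sketch (hypothetical code, just to illustrate the mask arithmetic, not kernel code):

#include <stdio.h>

#define SECTION_SHIFT 20
#define SECTION_SIZE  (1UL << SECTION_SHIFT)
#define SECTION_MASK  (~(SECTION_SIZE - 1))

int main(void)
{
    unsigned long addr = 0xC0123456UL;  /* some kernel virtual address */

    /* Round down to the containing 1MB section: 0xC0100000 */
    printf("section base: 0x%08lx\n", addr & SECTION_MASK);
    /* Round up to the next 1MB boundary: 0xC0200000 */
    printf("aligned up:   0x%08lx\n", (addr + SECTION_SIZE - 1) & SECTION_MASK);
    return 0;
}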
The following code defines each set of attributes: `nx_perms` holds the non-executable (NX) permissions, and `ro_perms` holds the read-only (RO) permissions.
arch/arm/mm/init.c
static struct section_perm nx_perms[] = {
    /* Make pages tables, etc before _stext RW (set NX). */
    {
        .name   = "pre-text NX",
        .start  = PAGE_OFFSET,
        .end    = (unsigned long)_stext,
        .mask   = ~PMD_SECT_XN,
        .prot   = PMD_SECT_XN,
    },
    /* Make init RW (set NX). */
    {
        .name   = "init NX",
        .start  = (unsigned long)__init_begin,
        .end    = (unsigned long)_sdata,
        .mask   = ~PMD_SECT_XN,
        .prot   = PMD_SECT_XN,
    },
    /* Make rodata NX (set RO in ro_perms below). */
    {
        .name   = "rodata NX",
        .start  = (unsigned long)__start_rodata_section_aligned,
        .end    = (unsigned long)__init_begin,
        .mask   = ~PMD_SECT_XN,
        .prot   = PMD_SECT_XN,
    },
};

static struct section_perm ro_perms[] = {
    /* Make kernel code and rodata RX (set RO). */
    {
        .name   = "text/rodata RO",
        .start  = (unsigned long)_stext,
        .end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
        .mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
        .prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
        .mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
        .prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
        .clear  = PMD_SECT_AP_WRITE,
#endif
    },
};
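For reference, `struct section_perm` is defined in the same file, roughly like this (from memory, so the field order may vary by version):

struct section_perm {
    const char *name;
    unsigned long start;
    unsigned long end;
    pmdval_t mask;
    pmdval_t prot;
    pmdval_t clear;
};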
arch/arm/include/asm/pgtable-2level-hwdef.h
/*
* - section
*/
#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_AP_READ (_AT(pmdval_t, 1) << 11)
#define PMD_SECT_TEX(x) (_AT(pmdval_t, (x)) << 12) /* v5 */
#define PMD_SECT_APX (_AT(pmdval_t, 1) << 15) /* v6 */
#define PMD_SECT_S (_AT(pmdval_t, 1) << 16) /* v6 */
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
#define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
#define PMD_SECT_AF (_AT(pmdval_t, 0))
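To sanity-check the non-LPAE `ro_perms` entry against the short-descriptor AP encoding (this is my reading of the ARM ARM, and the sketch below is hypothetical user-space code, not kernel code): APX=1 with AP=01 means privileged read-only, while APX=0 with AP=01 means privileged read/write, which matches `.prot` and `.clear` above.

#include <stdio.h>

/* Short-descriptor section AP bits (non-LPAE), same positions as above. */
#define PMD_SECT_AP_WRITE (1UL << 10)
#define PMD_SECT_APX      (1UL << 15)

int main(void)
{
    unsigned long ro = PMD_SECT_APX | PMD_SECT_AP_WRITE; /* .prot : privileged RO */
    unsigned long rw = PMD_SECT_AP_WRITE;                /* .clear: privileged RW */

    printf("RO bits: 0x%05lx, RW bits: 0x%05lx\n", ro, rw); /* 0x08400, 0x00400 */
    return 0;
}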
Putting the above together, the regions end up like this:

| Start | End | NX/RO/RW | Purpose |
|---|---|---|---|
| PAGE_OFFSET | _stext | NX | pre-text |
| _stext | _etext | RO | text |
| __start_rodata_section_aligned | __init_begin | RO + NX | rodata |
| __init_begin | _sdata | NX | init |
Note that for rodata, the `__start_rodata_section_aligned` symbol is built in the linker script so that the boundary falls on a 1MB section boundary.
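If I recall the linker script correctly, that symbol is produced by something like this in arch/arm/kernel/vmlinux.lds.S (version-dependent, so treat it as a sketch):

#ifdef CONFIG_STRICT_KERNEL_RWX
__start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
#endif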
The call sequence that reaches the section update looks like the following. (I'm leaving the kexec-related path aside for now.)
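For context, the entry point looks roughly like this (a sketch from memory of the same file; helper names may differ across kernel versions). A similar stop_machine() wrapper applies nx_perms when init memory is freed.

static int __mark_rodata_ro(void *unused)
{
    update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
    return 0;
}

void mark_rodata_ro(void)
{
    /* One CPU updates while all the others spin, per the comment below. */
    stop_machine(__mark_rodata_ro, NULL, NULL);
}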
update_sections_early()
This requests the section permission change for every thread of every process (excluding kernel threads), plus the current `active_mm` and `init_mm`.
arch/arm/mm/init.c::update_sections_early()
/**
 * update_sections_early intended to be called only through stop_machine
 * framework and executed by only one CPU while all other CPUs will spin and
 * wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
    struct task_struct *t, *s;

    for_each_process(t) {
        if (t->flags & PF_KTHREAD)
            continue;
        for_each_thread(t, s)
            if (s->mm)
                set_section_perms(perms, n, true, s->mm);
    }
    set_section_perms(perms, n, true, current->active_mm);
    set_section_perms(perms, n, true, &init_mm);
}
set_section_perms()
This walks each region and requests the attribute change in 1MB units. Depending on the `set` flag, either `prot` or `clear` is applied as the new permission bits.
arch/arm/mm/init.c::set_section_perms()
static void set_section_perms(struct section_perm *perms, int n, bool set,
                              struct mm_struct *mm)
{
    size_t i;
    unsigned long addr;

    if (!arch_has_strict_perms())
        return;

    for (i = 0; i < n; i++) {
        if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
            !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
            pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
                   perms[i].name, perms[i].start, perms[i].end,
                   SECTION_SIZE);
            continue;
        }

        for (addr = perms[i].start;
             addr < perms[i].end;
             addr += SECTION_SIZE)
            section_update(addr, perms[i].mask,
                           set ? perms[i].prot : perms[i].clear, mm);
    }
}
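The `arch_has_strict_perms()` guard at the top bails out on CPUs that can't support this. From memory it looks roughly like this (it requires ARMv6 or later and the XP bit set in the control register; again, a version-dependent sketch):

static inline bool arch_has_strict_perms(void)
{
    if (cpu_architecture() < CPU_ARCH_ARMv6)
        return false;

    return !!(get_cr() & CR_XP);
}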
section_update()
Starting from the address, this resolves the pgd, then the pud, then the pmd, and updates the pmd entry by AND-ing with `mask` and OR-ing in `prot` (or `clear`). Note that on non-LPAE ARM, Linux treats each pmd entry as a pair covering 2MB (two 1MB hardware sections), which is why the odd section (when `addr & SECTION_SIZE` is set) updates `pmd[1]`.
arch/arm/mm/init.c::section_update()
/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
                                  pmdval_t prot, struct mm_struct *mm)
{
    pmd_t *pmd;

    pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
    pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
    if (addr & SECTION_SIZE)
        pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
    else
        pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
    flush_pmd_entry(pmd);
    local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
flush_pmd_entry()
Since the pmd entry has just been rewritten, it is cleaned out to RAM here so that the MMU's table walk sees the update.
arch/arm/include/asm/tlbflush.h
/*
 *  flush_pmd_entry
 *
 *  Flush a PMD entry (word aligned, or double-word aligned) to
 *  RAM if the TLB for the CPU we are running on requires this.
 *  This is typically used when we are creating PMD entries.
 *
 *  clean_pmd_entry
 *
 *  Clean (but don't drain the write buffer) if the CPU requires
 *  these operations. This is typically used when we are removing
 *  PMD entries.
 */
static inline void flush_pmd_entry(void *pmd)
{
    const unsigned int __tlb_flag = __cpu_tlb_flags;

    tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
    tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);

    if (tlb_flag(TLB_WB))
        dsb(ishst);
}
#define tlb_op(f, regs, arg)    __tlb_op(f, "p15, 0, %0, " regs, arg)
#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)

#define __tlb_op(f, insnarg, arg)                                   \
    do {                                                            \
        if (always_tlb_flags & (f))                                 \
            asm("mcr " insnarg                                      \
                : : "r" (arg) : "cc");                              \
        else if (possible_tlb_flags & (f))                          \
            asm("tst %1, %2\n\t"                                    \
                "mcrne " insnarg                                    \
                : : "r" (arg), "r" (__tlb_flag), "Ir" (f)           \
                : "cc");                                            \
    } while (0)
So, it's been over four hours of writing at this point, and I can't dig much deeper from here. I've read the TOPPERS project documentation before, so I roughly follow this area, but... well, I'll leave it at that. For the TLB_DCLEAN case above, the macro expands to:

asm("mcr p15, 0, %0, c7, c10, 1" : : "r"(pmd) : "cc");
https://www.aps-web.jp/academy/ca/229/
The MCR syntax is:

MCR{cond} coproc, #opcode1, Rd, CRn, CRm{, #opcode2}

opcode1 is a 3-bit coprocessor-specific opcode, and CRn / CRm are coprocessor registers.
→ coproc = p15, opcode1 = 0, CRn = c7, CRm = c10, opcode2 = 1
http://infocenter.arm.com/help/topic/com.arm.doc.ddi0388fj/CIHGJFEH.html
In the table there, c7, c10 with opcode2 = 1 is listed as DCCMVAC (write-only): clean data cache line by MVA. For the full details of this instruction, see the ARM Architecture Reference Manual; that's as far as I'll take it here.
Presumably, by applying the AP-bit changes at the section level and flushing the result like this, permissions end up being set in 1MB units.
That's it.