http://ijcsit.com/docs/Volume%205/vol5issue04/ijcsit20140504225.pdf
Concernant la protection mémoire évoquée dans l'article ci-dessus, vérifions la réduction de la surface d'attaque (Attack Surface Reduction) dans le code source réel, principalement pour ARM.
KCONFIG
Ce sont les mots-clés ...
Ceux-ci sont définis dans arch/Kconfig. Ouais, c'est dans un endroit que je ne touche pas d'habitude.
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/arch/Kconfig
arch/Kconfig
# Architecture opt-in switches: an arch selects these to advertise support
# for strict RWX mappings, or to make them user-configurable.
config ARCH_OPTIONAL_KERNEL_RWX
def_bool n
config ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
def_bool n
config ARCH_HAS_STRICT_KERNEL_RWX
def_bool n
# User-visible prompt only when the arch marks RWX as optional; otherwise the
# default expression forces it on wherever ARCH_HAS_STRICT_KERNEL_RWX is set.
config STRICT_KERNEL_RWX
bool "Make kernel text and rodata read-only" if ARCH_OPTIONAL_KERNEL_RWX
depends on ARCH_HAS_STRICT_KERNEL_RWX
default !ARCH_OPTIONAL_KERNEL_RWX || ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
help
If this is set, kernel text and rodata memory will be made read-only,
and non-text memory will be made non-executable. This provides
protection against certain security exploits (e.g. executing the heap
or modifying text)
These features are considered standard security practice these days.
You should say Y here in almost all cases.
config ARCH_HAS_STRICT_MODULE_RWX
def_bool n
# Same idea as STRICT_KERNEL_RWX, but for loadable modules; additionally
# requires MODULES to be enabled.
config STRICT_MODULE_RWX
bool "Set loadable kernel module data as NX and text as RO" if ARCH_OPTIONAL_KERNEL_RWX
depends on ARCH_HAS_STRICT_MODULE_RWX && MODULES
default !ARCH_OPTIONAL_KERNEL_RWX || ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
help
If this is set, module text and rodata memory will be made read-only,
and non-text memory will be made non-executable. This provides
protection against certain security exploits (e.g. writing to text)
Alors c'est tout. `ARCH_HAS_STRICT_KERNEL_RWX` est une dépendance : sans elle, l'option STRICT_KERNEL_RWX reste désactivée.
Tout d'abord, le scatter utilisé lors de la liaison. C'est long, donc seulement une partie.
c:arch/arm/kernel/vmlinux.lds.S
<Omis>
/* With STRICT_KERNEL_RWX, pad to a section boundary (1 << SECTION_SHIFT)
 * so permissions can later be applied at section granularity. */
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#endif
#ifdef CONFIG_ARM_MPU
. = ALIGN(PMSAv8_MINALIGN);
#endif
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
ARM_TEXT
}
/* Optionally align the end of text the same way, so rodata starts on its
 * own section boundary. */
#ifdef CONFIG_DEBUG_ALIGN_RODATA
. = ALIGN(1<<SECTION_SHIFT);
#endif
_etext = .; /* End of text section */
RO_DATA(PAGE_SIZE)
<Omis>
Voici la définition de SECTION_SHIFT (table de pages à 2 niveaux sans LPAE, à 3 niveaux avec LPAE).
arch/arm/include/asm/pgtable-2level.h
/*
* section address mask and size definitions.
*/
#define SECTION_SHIFT 20
#define SECTION_SIZE (1UL << SECTION_SHIFT)
#define SECTION_MASK (~(SECTION_SIZE-1))
1 << 20, donc 1 048 576 = limite de 1 Mo.
Désormais, les zones telles que text / ro / rw / bss devraient commencer par incréments de 1 Mo.
Le code suivant gère chaque attribut.
arch/arm/mm/init.c
/* Regions to mark non-executable (XN) at section granularity.
 * Each entry is a [start, end) range plus the PMD descriptor bits to
 * mask out (.mask) and set (.prot). */
static struct section_perm nx_perms[] = {
/* Make pages tables, etc before _stext RW (set NX). */
{
.name = "pre-text NX",
.start = PAGE_OFFSET,
.end = (unsigned long)_stext,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
},
/* Make init RW (set NX). */
{
.name = "init NX",
.start = (unsigned long)__init_begin,
.end = (unsigned long)_sdata,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
},
/* Make rodata NX (set RO in ro_perms below). */
{
.name = "rodata NX",
.start = (unsigned long)__start_rodata_section_aligned,
.end = (unsigned long)__init_begin,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
},
};
/* Regions to make read-only. The permission encoding differs between the
 * LPAE (long-descriptor) and classic (short-descriptor) formats, hence
 * the #ifdef. */
static struct section_perm ro_perms[] = {
/* Make kernel code and rodata RX (set RO). */
{
.name = "text/rodata RO",
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
.mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
.prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
/* Bits applied instead of .prot when the permissions are reverted
 * (set == false in set_section_perms()). */
.clear = PMD_SECT_AP_WRITE,
#endif
},
};
arch/arm/include/asm/pgtable-2level-hwdef.h
/*
* - section
*/
/* Short-descriptor (2-level) section descriptor bit positions.
 * AP (bits 10-11) together with APX (bit 15) encode the access
 * permissions; XN (bit 4) marks the section non-executable. */
#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_AP_READ (_AT(pmdval_t, 1) << 11)
#define PMD_SECT_TEX(x) (_AT(pmdval_t, (x)) << 12) /* v5 */
#define PMD_SECT_APX (_AT(pmdval_t, 1) << 15) /* v6 */
#define PMD_SECT_S (_AT(pmdval_t, 1) << 16) /* v6 */
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
#define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
/* Defined as 0: no dedicated Access Flag bit in this format. */
#define PMD_SECT_AF (_AT(pmdval_t, 0))
Par exemple, dans l'exemple précédent
Start | End | NX/RO/RW | Objectif |
---|---|---|---|
PAGE_OFFSET | _stext | NX | pre-text |
_stext | _etext | RO | text |
__start_rodata_section_aligned | __init_begin | RO + NX | rodata |
__init_begin | _sdata | NX | init |
Cependant, pour rodata, la macro __start_rodata_section_aligned est construite de manière à ce que le début de la zone tombe sur une limite de 1 Mo.
La séquence d'appel qui atteint la mise à jour de section ressemble à ceci. (Je n'ai pas vu la relation kexec pour le moment).
update_sections_early()
Ici, on demande de modifier les paramètres d'autorisation de section pour tous les threads de tous les processus (à l'exclusion des kthreads), ainsi que pour l'active_mm courant et pour init_mm.
c:arch/arm/mm/init.c::update_sections_early()
/**
 * update_sections_early intended to be called only through stop_machine
 * framework and executed by only one CPU while all other CPUs will spin and
 * wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
struct task_struct *t, *s;
/* Walk every user process; kernel threads are skipped here. */
for_each_process(t) {
if (t->flags & PF_KTHREAD)
continue;
/* Apply the permission table to each thread that owns an mm. */
for_each_thread(t, s)
if (s->mm)
set_section_perms(perms, n, true, s->mm);
}
/* Also update the mm we are currently borrowing and the kernel's own. */
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);
}
set_section_perms()
Ici, le changement d'attribut de section est appliqué par unités de 1 Mo. Selon l'indicateur `set`, on applique la protection (.prot) ou on la retire (.clear).
c:arch/arm/mm/init.c::set_section_perms()
/* Apply (set == true) or revert (set == false) each entry of a section
 * permission table to the given mm, one 1 MiB section at a time. */
static void set_section_perms(struct section_perm *perms, int n, bool set,
struct mm_struct *mm)
{
size_t i;
unsigned long addr;
if (!arch_has_strict_perms())
return;
for (i = 0; i < n; i++) {
/* Permissions can only be changed at section granularity, so any
 * region whose bounds are not SECTION_SIZE-aligned is rejected. */
if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
!IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
perms[i].name, perms[i].start, perms[i].end,
SECTION_SIZE);
continue;
}
/* Rewrite each section descriptor in the range. */
for (addr = perms[i].start;
addr < perms[i].end;
addr += SECTION_SIZE)
section_update(addr, perms[i].mask,
set ? perms[i].prot : perms[i].clear, mm);
}
}
section_update()
À partir de l'adresse mémoire, convertissez-le en pgd, convertissez-le en pud, convertissez-le en pmd, appliquez-lui un masque et un prot (ou effacez) et mettez-le à jour.
c:arch/arm/mm/init.c::section_update()
/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
pmdval_t prot, struct mm_struct *mm)
{
pmd_t *pmd;
/* Walk pgd -> pud -> pmd down to the first-level section entry. */
pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
#ifdef CONFIG_ARM_LPAE
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
/* Without LPAE a pmd_t covers a pair of 1 MiB section entries; bit 20
 * of the address (SECTION_SIZE) selects which half to rewrite. */
if (addr & SECTION_SIZE)
pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
else
pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
/* Push the modified descriptor to RAM and invalidate stale TLB entries
 * covering this section. */
flush_pmd_entry(pmd);
local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
flush_pmd_entry()
Le contenu du pmd ayant été réécrit, on nettoie (flush) ici l'entrée pmd correspondante.
arch/arm/include/asm/tlbflush.h
/*
 * flush_pmd_entry
 *
 * Flush a PMD entry (word aligned, or double-word aligned) to
 * RAM if the TLB for the CPU we are running on requires this.
 * This is typically used when we are creating PMD entries.
 *
 * clean_pmd_entry
 *
 * Clean (but don't drain the write buffer) if the CPU requires
 * these operations. This is typically used when we are removing
 * PMD entries.
 */
static inline void flush_pmd_entry(void *pmd)
{
const unsigned int __tlb_flag = __cpu_tlb_flags;
/* CP15 c7/c10/1: clean the D-cache line holding the descriptor so the
 * hardware table walker sees the update. */
tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
/* Same for an outer L2 cache, on CPUs that need it. */
tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
/* Barrier so the clean completes before any subsequent table walk. */
if (tlb_flag(TLB_WB))
dsb(ishst);
}
/* tlb_op/tlb_l2_op: wrap a CP15 maintenance op, with coprocessor
 * opcode1 0 (core) or 1 (outer L2) respectively. */
#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg)
#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
/* Emit the MCR unconditionally when flag f is required on every CPU this
 * kernel supports (always_tlb_flags), guard it with a runtime test of
 * __tlb_flag when it is merely possible, and emit nothing otherwise —
 * all decided at compile time. */
#define __tlb_op(f, insnarg, arg) \
do { \
if (always_tlb_flags & (f)) \
asm("mcr " insnarg \
: : "r" (arg) : "cc"); \
else if (possible_tlb_flags & (f)) \
asm("tst %1, %2\n\t" \
"mcrne " insnarg \
: : "r" (arg), "r" (__tlb_flag), "Ir" (f) \
: "cc"); \
} while (0)
Donc, cela fait plus de 4 heures que je l'ai écrit jusqu'à présent, mais je ne peux pas continuer à partir d'ici. J'ai lu la documentation du projet de topper et l'ai comprise, mais ... eh bien. Je vais m'en tenir à ce point.
asm("mcr p15, 0, %0, c7, c10, 1" : : "r"(pmd) : "cc" );
https://www.aps-web.jp/academy/ca/229/
MCR{cond} coproc, #opcode1, Rd, CRn, CRm{, #opcode2} — opcode1 : opcode 3 bits spécifique au coprocesseur ; CRn et CRm : registres du coprocesseur.
→ CRn = c7, CRm = c10, opcode1 = 0, opcode2 = 1
http://infocenter.arm.com/help/topic/com.arm.doc.ddi0388fj/CIHGJFEH.html
1 DCCVAC WO
Donc, pour les détails de cette instruction, voir le manuel de référence ARM, donc pour le moment, c'est tout.
En résumé : en manipulant les bits AP/APX (et XN) des descripteurs au niveau SECTION, il est possible de définir des autorisations mémoire par unités de 1 Mo.
C'est tout.
Recommended Posts