#ifdef LOSCFG_KERNEL_MMU
#define TRY_MAX_TIMES 10 /* retry budget for map/unmap rounds that make no progress */
__attribute__((aligned(MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS))) \
    __attribute__((section(".bss.prebss.translation_table"))) UINT8 \
    g_firstPageTable[MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS];
#ifdef LOSCFG_KERNEL_SMP
__attribute__((aligned(MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS))) \
    __attribute__((section(".bss.prebss.translation_table"))) UINT8 \
    g_tempPageTable[MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS];
UINT8 *g_mmuJumpPageTable = g_tempPageTable;
#else
extern CHAR __mmu_ttlb_begin; /* defined in the linker script */
UINT8 *g_mmuJumpPageTable = (UINT8 *)&__mmu_ttlb_begin; /* temporary table, used only during power-up */
#endif
#ifdef LOSCFG_PAGE_TABLE_FINE_LOCK
    if (vmPage == NULL) {
        return NULL;
    }
    lock = &vmPage->lock;  /* fine-grained: the lock lives in the page holding the table */
#else
    lock = &archMmu->lock; /* coarse: one lock for the whole address space */
#endif

    return OsGetPteLock(archMmu, paddr, intSave);
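A typical acquire/release pairing, as a sketch (the NULL check only matters under LOSCFG_PAGE_TABLE_FINE_LOCK, where the backing page may be absent):

    UINT32 intSave;
    SPIN_LOCK_S *lock = OsGetPte1Lock(archMmu, archMmu->physTtb, &intSave);
    if (lock != NULL) {
        /* read or rewrite the L1 entry while the PTE spinlock is held */
        OsUnlockPte1(lock, intSave);
    }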
#ifdef LOSCFG_PAGE_TABLE_FINE_LOCK
    spinLock = OsGetPteLock(archMmu, paddr, intSave);

#ifdef LOSCFG_PAGE_TABLE_FINE_LOCK

    PADDR_T pa = MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(pte1);
    return OsGetPteLock(archMmu, pa, intSave);
    PADDR_T pa = MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(pte1);

    return (VADDR_T *)g_firstPageTable;
    unmapCount = MIN2((MMU_DESCRIPTOR_L1_SMALL_SIZE - (*vaddr % MMU_DESCRIPTOR_L1_SMALL_SIZE)) >>
                      MMU_DESCRIPTOR_L2_SMALL_SHIFT, *count);
    *vaddr += unmapCount << MMU_DESCRIPTOR_L2_SMALL_SHIFT;
    *count -= unmapCount;
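A worked example of this arithmetic, assuming the standard short-descriptor sizes (MMU_DESCRIPTOR_L1_SMALL_SIZE = 1MB, MMU_DESCRIPTOR_L2_SMALL_SHIFT = 12, i.e. 4KB pages):

    /*
     * *vaddr = 0x40080000, *count = 1024
     * pages left in this 1MB section: (0x100000 - 0x80000) >> 12 = 128
     * unmapCount = MIN2(128, 1024) = 128
     * *vaddr advances to the section boundary 0x40100000, *count drops to 896
     */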
#if !WITH_ARCH_MMU_PICK_SPOT
    if (flags & VM_MAP_REGION_FLAG_NS) {

    if (!MMU_DESCRIPTOR_IS_L2_SIZE_ALIGNED(vaddr) || !MMU_DESCRIPTOR_IS_L2_SIZE_ALIGNED(paddr)) {
        return LOS_ERRNO_VM_INVALID_ARGS;
    }

    if (l1Entry & MMU_DESCRIPTOR_L1_PAGETABLE_NON_SECURE) {
        *flags |= VM_MAP_REGION_FLAG_NS;
    }
    switch (l2Entry & MMU_DESCRIPTOR_L2_TEX_TYPE_MASK) {
        case MMU_DESCRIPTOR_L2_TYPE_STRONGLY_ORDERED:
            *flags |= VM_MAP_REGION_FLAG_STRONGLY_ORDERED;
            break;
        case MMU_DESCRIPTOR_L2_TYPE_NORMAL_NOCACHE:
            *flags |= VM_MAP_REGION_FLAG_UNCACHED;
            break;
        case MMU_DESCRIPTOR_L2_TYPE_DEVICE_SHARED:
        case MMU_DESCRIPTOR_L2_TYPE_DEVICE_NON_SHARED:
            *flags |= VM_MAP_REGION_FLAG_UNCACHED_DEVICE;
            break;
    }

    *flags |= VM_MAP_REGION_FLAG_PERM_READ;
    switch (l2Entry & MMU_DESCRIPTOR_L2_AP_MASK) {
        case MMU_DESCRIPTOR_L2_AP_P_RO_U_NA:
            break;
        case MMU_DESCRIPTOR_L2_AP_P_RW_U_NA:
            *flags |= VM_MAP_REGION_FLAG_PERM_WRITE;
            break;
        case MMU_DESCRIPTOR_L2_AP_P_RO_U_RO:
            *flags |= VM_MAP_REGION_FLAG_PERM_USER;
            break;
        case MMU_DESCRIPTOR_L2_AP_P_RW_U_RW:
            *flags |= VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_WRITE;
            break;
    }

    if ((l2Entry & MMU_DESCRIPTOR_L2_TYPE_MASK) != MMU_DESCRIPTOR_L2_TYPE_SMALL_PAGE_XN) {
        *flags |= VM_MAP_REGION_FLAG_PERM_EXECUTE;
    }
    for (index = 0; index < MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE; index++) {
        ttEntry = archMmu->virtTtb[ROUNDDOWN(l1Index, MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE) + index];
        if ((ttEntry & MMU_DESCRIPTOR_L1_TYPE_MASK) == MMU_DESCRIPTOR_L1_TYPE_PAGE_TABLE) {

#ifdef LOSCFG_KERNEL_VM
    if (vmPage == NULL) {
        LOS_Panic("bad page table paddr %#x\n", l2Paddr);
    }
    PTE_T *pte2BasePtr = NULL;

    if (pte2Lock == NULL) {
    if (pte2BasePtr == NULL) {
    if (scanIndex == MMU_DESCRIPTOR_L2_NUMBERS_PER_L1) { /* wrap the scan index */
    if (pte2BasePtr[scanIndex++]) { /* a live PTE remains: keep the L2 table */

    OsPutL2Table(archMmu, l1Index, MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(pte1Val)); /* all entries clear: release the table */
    switch (flags & VM_MAP_REGION_FLAG_CACHE_MASK) {
        case VM_MAP_REGION_FLAG_CACHED:
            mmuFlags |= MMU_DESCRIPTOR_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
#ifdef LOSCFG_KERNEL_SMP
            mmuFlags |= MMU_DESCRIPTOR_L1_SECTION_SHAREABLE;
#endif
            break;
        case VM_MAP_REGION_FLAG_STRONGLY_ORDERED:
            mmuFlags |= MMU_DESCRIPTOR_L1_TYPE_STRONGLY_ORDERED;
            break;
        case VM_MAP_REGION_FLAG_UNCACHED:
            mmuFlags |= MMU_DESCRIPTOR_L1_TYPE_NORMAL_NOCACHE;
            break;
        case VM_MAP_REGION_FLAG_UNCACHED_DEVICE:
            mmuFlags |= MMU_DESCRIPTOR_L1_TYPE_DEVICE_SHARED;
            break;
        default:
            return LOS_ERRNO_VM_INVALID_ARGS;
    }
    switch (flags & (VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE)) {
        case 0:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_NA_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_READ:
        case VM_MAP_REGION_FLAG_PERM_USER:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_RO_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_RO_U_RO;
            break;
        case VM_MAP_REGION_FLAG_PERM_WRITE:
        case VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_RW_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_WRITE:
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_RW_U_RW;
            break;
    }
    if (mmuFlags == LOS_ERRNO_VM_INVALID_ARGS) {
        return mmuFlags;
    }

    mmuFlags |= MMU_DESCRIPTOR_L1_SMALL_DOMAIN_CLIENT;

    if (!(flags & VM_MAP_REGION_FLAG_PERM_EXECUTE)) {
        mmuFlags |= MMU_DESCRIPTOR_L1_SECTION_XN;
    }

    if (flags & VM_MAP_REGION_FLAG_NS) {
        mmuFlags |= MMU_DESCRIPTOR_L1_SECTION_NON_SECURE;
    }

    if (flags & VM_MAP_REGION_FLAG_PERM_USER) {
        mmuFlags |= MMU_DESCRIPTOR_L1_SECTION_NON_GLOBAL;
    }
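As a worked composition through the paths above, consider a cached kernel-text style mapping with flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_EXECUTE | VM_MAP_REGION_FLAG_CACHED (a sketch, not a dump of real descriptor values):

    /*
     * cache bits:  CACHED    -> MMU_DESCRIPTOR_L1_TYPE_NORMAL_WRITE_BACK_ALLOCATE (+ SHAREABLE on SMP)
     * access bits: PERM_READ -> MMU_DESCRIPTOR_L1_AP_P_RO_U_NA
     * domain:                   MMU_DESCRIPTOR_L1_SMALL_DOMAIN_CLIENT
     * PERM_EXECUTE present   -> no MMU_DESCRIPTOR_L1_SECTION_XN bit
     * no PERM_USER           -> entry stays global (no NON_GLOBAL bit)
     */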
    if (l1Entry & MMU_DESCRIPTOR_L1_SECTION_NON_SECURE) {
        *flags |= VM_MAP_REGION_FLAG_NS;
    }

    switch (l1Entry & MMU_DESCRIPTOR_L1_TEX_TYPE_MASK) {
        case MMU_DESCRIPTOR_L1_TYPE_STRONGLY_ORDERED:
            *flags |= VM_MAP_REGION_FLAG_STRONGLY_ORDERED;
            break;
        case MMU_DESCRIPTOR_L1_TYPE_NORMAL_NOCACHE:
            *flags |= VM_MAP_REGION_FLAG_UNCACHED;
            break;
        case MMU_DESCRIPTOR_L1_TYPE_DEVICE_SHARED:
        case MMU_DESCRIPTOR_L1_TYPE_DEVICE_NON_SHARED:
            *flags |= VM_MAP_REGION_FLAG_UNCACHED_DEVICE;
            break;
    }

    *flags |= VM_MAP_REGION_FLAG_PERM_READ;

    switch (l1Entry & MMU_DESCRIPTOR_L1_AP_MASK) {
        case MMU_DESCRIPTOR_L1_AP_P_RO_U_NA:
            break;
        case MMU_DESCRIPTOR_L1_AP_P_RW_U_NA:
            *flags |= VM_MAP_REGION_FLAG_PERM_WRITE;
            break;
        case MMU_DESCRIPTOR_L1_AP_P_RO_U_RO:
            *flags |= VM_MAP_REGION_FLAG_PERM_USER;
            break;
        case MMU_DESCRIPTOR_L1_AP_P_RW_U_RW:
            *flags |= VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_WRITE;
            break;
    }

    if (!(l1Entry & MMU_DESCRIPTOR_L1_SECTION_XN)) {
        *flags |= VM_MAP_REGION_FLAG_PERM_EXECUTE;
    }
    PTE_T *pte2BasePtr = NULL;

    unmapCount = MIN2(MMU_DESCRIPTOR_L2_NUMBERS_PER_L1 - pte2Index, *count);
    if (pte2BasePtr == NULL) {
    *count -= unmapCount;

    *vaddr += MMU_DESCRIPTOR_L1_SMALL_SIZE;
    *count -= MMU_DESCRIPTOR_L2_NUMBERS_PER_L1;
    return MMU_DESCRIPTOR_L2_NUMBERS_PER_L1;
#ifdef LOSCFG_KERNEL_VM
    VM_ERR("alloc arch mmu asid failed");

#ifndef LOSCFG_PAGE_TABLE_FINE_LOCK
    PTE_T *l2Base = NULL;

        return LOS_ERRNO_VM_NOT_FOUND;

    /* section entry: the 1MB base plus the offset within the section */
    *paddr = MMU_DESCRIPTOR_L1_SECTION_ADDR(l1Entry) + (vaddr & (MMU_DESCRIPTOR_L1_SMALL_SIZE - 1));

    if (l2Base == NULL) {
        return LOS_ERRNO_VM_NOT_FOUND;
    }

    /* small-page entry: the 4KB base plus the offset within the page */
    *paddr = MMU_DESCRIPTOR_L2_SMALL_PAGE_ADDR(l2Entry) + (vaddr & (MMU_DESCRIPTOR_L2_SMALL_SIZE - 1));

    LOS_Panic("%s %d, large page unimplemented\n", __FUNCTION__, __LINE__);

    return LOS_ERRNO_VM_NOT_FOUND;
    PTE_T *l1Entry = NULL;
    INT32 tryTime = TRY_MAX_TIMES;

    if (MMU_DESCRIPTOR_IS_L1_SIZE_ALIGNED(vaddr) && count >= MMU_DESCRIPTOR_L2_NUMBERS_PER_L1) {

    LOS_Panic("%s %d, unimplemented\n", __FUNCTION__, __LINE__);

    unmapCount = OsUnmapL2PTE(archMmu, l1Entry, vaddr, &count);
    vaddr += unmapCount << MMU_DESCRIPTOR_L2_SMALL_SHIFT;

    LOS_Panic("%s %d, unimplemented\n", __FUNCTION__, __LINE__);

    tryTime = (unmapCount == 0) ? (tryTime - 1) : tryTime;
    if (tryTime == 0) { /* no progress for TRY_MAX_TIMES rounds: give up */
        return LOS_ERRNO_VM_FAULT;
    }
    unmapped += unmapCount;
    OsSavePte1(OsGetPte1Ptr(archMmu->virtTtb, *mmuMapInfo->vaddr),
               OsTruncPte1(*mmuMapInfo->paddr) | mmuFlags | MMU_DESCRIPTOR_L1_TYPE_SECTION);
    *count -= MMU_DESCRIPTOR_L2_NUMBERS_PER_L1;
    *mmuMapInfo->vaddr += MMU_DESCRIPTOR_L1_SMALL_SIZE;
    *mmuMapInfo->paddr += MMU_DESCRIPTOR_L1_SMALL_SIZE;

    return MMU_DESCRIPTOR_L2_NUMBERS_PER_L1;
    UINT32 l2Offset = (MMU_DESCRIPTOR_L2_SMALL_SIZE / MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE) *
                      (l1Index & (MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE - 1));

    for (index = 0; index < MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE; index++) {
        ttEntry = archMmu->virtTtb[ROUNDDOWN(l1Index, MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE) + index];
        if ((ttEntry & MMU_DESCRIPTOR_L1_TYPE_MASK) == MMU_DESCRIPTOR_L1_TYPE_PAGE_TABLE) {
            *ppa = (PADDR_T)ROUNDDOWN(MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(ttEntry), MMU_DESCRIPTOR_L2_SMALL_SIZE) +
                   l2Offset;

#ifdef LOSCFG_KERNEL_VM
    if (vmPage == NULL) {
        VM_ERR("have no memory to save l2 page");
        return LOS_ERRNO_VM_NO_MEMORY;
    }
#else
    kvaddr = LOS_MemAlloc(OS_SYS_MEM_ADDR, MMU_DESCRIPTOR_L2_SMALL_SIZE);
    if (kvaddr == NULL) {
        VM_ERR("have no memory to save l2 page");
        return LOS_ERRNO_VM_NO_MEMORY;
    }
#endif

    (VOID)memset_s(kvaddr, MMU_DESCRIPTOR_L2_SMALL_SIZE, 0, MMU_DESCRIPTOR_L2_SMALL_SIZE);
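The l2Offset arithmetic above packs four 1KB L2 tables into each 4KB page (assuming MMU_DESCRIPTOR_L2_SMALL_SIZE = 0x1000 and MMU_DESCRIPTOR_L1_SMALL_L2_TABLES_PER_PAGE = 4). For example:

    /*
     * l1Index = 0x402: 0x402 & 3 = 2, so
     * l2Offset = (0x1000 / 4) * 2 = 0x800
     * i.e. this L1 entry's L2 table occupies the third 1KB slot of its page
     */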
    switch (flags & VM_MAP_REGION_FLAG_CACHE_MASK) {
        case VM_MAP_REGION_FLAG_CACHED:
#ifdef LOSCFG_KERNEL_SMP
            mmuFlags |= MMU_DESCRIPTOR_L2_SHAREABLE;
#endif
            mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_NORMAL_WRITE_BACK_ALLOCATE;
            break;
        case VM_MAP_REGION_FLAG_STRONGLY_ORDERED:
            mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_STRONGLY_ORDERED;
            break;
        case VM_MAP_REGION_FLAG_UNCACHED:
            mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_NORMAL_NOCACHE;
            break;
        case VM_MAP_REGION_FLAG_UNCACHED_DEVICE:
            mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_DEVICE_SHARED;
            break;
        default:
            return LOS_ERRNO_VM_INVALID_ARGS;
    }
    switch (flags & (VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE)) {
        case 0:
            mmuFlags |= MMU_DESCRIPTOR_L1_AP_P_NA_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_READ:
        case VM_MAP_REGION_FLAG_PERM_USER:
            mmuFlags |= MMU_DESCRIPTOR_L2_AP_P_RO_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ:
            mmuFlags |= MMU_DESCRIPTOR_L2_AP_P_RO_U_RO;
            break;
        case VM_MAP_REGION_FLAG_PERM_WRITE:
        case VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE:
            mmuFlags |= MMU_DESCRIPTOR_L2_AP_P_RW_U_NA;
            break;
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_WRITE:
        case VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE:
            mmuFlags |= MMU_DESCRIPTOR_L2_AP_P_RW_U_RW;
            break;
    }
    if (mmuFlags == LOS_ERRNO_VM_INVALID_ARGS) {
        return mmuFlags;
    }

    if (!(flags & VM_MAP_REGION_FLAG_PERM_EXECUTE)) {
        mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_SMALL_PAGE_XN;
    } else {
        mmuFlags |= MMU_DESCRIPTOR_L2_TYPE_SMALL_PAGE;
    }

    if (flags & VM_MAP_REGION_FLAG_PERM_USER) {
        mmuFlags |= MMU_DESCRIPTOR_L2_NON_GLOBAL;
    }
    PTE_T *pte2BasePtr = NULL;
    UINT32 saveCounts, archFlags, pte1IntSave, pte2IntSave;

    LOS_Panic("%s %d, failed to allocate pagetable\n", __FUNCTION__, __LINE__);

    *l1Entry = pte2Base | MMU_DESCRIPTOR_L1_TYPE_PAGE_TABLE;
    if (*mmuMapInfo->flags & VM_MAP_REGION_FLAG_NS) {
        *l1Entry |= MMU_DESCRIPTOR_L1_PAGETABLE_NON_SECURE;
    }
    *l1Entry &= MMU_DESCRIPTOR_L1_SMALL_DOMAIN_MASK;
    *l1Entry |= MMU_DESCRIPTOR_L1_SMALL_DOMAIN_CLIENT;

    if (pte2Lock == NULL) {

    *mmuMapInfo->paddr += (saveCounts << MMU_DESCRIPTOR_L2_SMALL_SHIFT);
    *mmuMapInfo->vaddr += (saveCounts << MMU_DESCRIPTOR_L2_SMALL_SHIFT);
    *count -= saveCounts;
    PTE_T *pte2BasePtr = NULL;

    if (pte2BasePtr == NULL) {

    *mmuMapInfo->paddr += (saveCounts << MMU_DESCRIPTOR_L2_SMALL_SHIFT);
    *mmuMapInfo->vaddr += (saveCounts << MMU_DESCRIPTOR_L2_SMALL_SHIFT);
    *count -= saveCounts;
    PTE_T *l1Entry = NULL;
    INT32 tryTime = TRY_MAX_TIMES;

    if (MMU_DESCRIPTOR_IS_L1_SIZE_ALIGNED(*mmuMapInfo.vaddr) &&
        MMU_DESCRIPTOR_IS_L1_SIZE_ALIGNED(*mmuMapInfo.paddr) &&
        count >= MMU_DESCRIPTOR_L2_NUMBERS_PER_L1) {

    saveCounts = OsMapL1PTE(&mmuMapInfo, l1Entry, &count);

    LOS_Panic("%s %d, unimplemented tt_entry %x\n", __FUNCTION__, __LINE__, l1Entry);

    mapped += saveCounts;
    tryTime = (saveCounts == 0) ? (tryTime - 1) : tryTime;
    if (tryTime == 0) { /* same retry budget as LOS_ArchMmuUnmap */
        return LOS_ERRNO_VM_TIMED_OUT;
    }
    if ((archMmu == NULL) || (vaddr == 0) || (count == 0)) {
        VM_ERR("invalid args: archMmu %p, vaddr %p, count %d", archMmu, vaddr, count);
        return LOS_ERRNO_VM_INVALID_ARGS;
    }

        if (status != LOS_OK) {
            vaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;

        VM_ERR("invalid args:aspace %p, vaddr %p, count %d", archMmu, vaddr, count);

        VM_ERR("invalid args:aspace %p, vaddr %p, count %d", archMmu, vaddr, count);

        vaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;
    if ((archMmu == NULL) || (oldVaddr == 0) || (newVaddr == 0) || (count == 0)) {
        VM_ERR("invalid args: archMmu %p, oldVaddr %p, newVaddr %p, count %d",
               archMmu, oldVaddr, newVaddr, count);
        return LOS_ERRNO_VM_INVALID_ARGS;
    }

        if (status != LOS_OK) {
            oldVaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;
            newVaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;

        VM_ERR("invalid args: archMmu %p, vaddr %p, count %d",
               archMmu, oldVaddr, count);

        VM_ERR("invalid args:archMmu %p, old_vaddr %p, new_addr %p, count %d",
               archMmu, oldVaddr, newVaddr, count);

        oldVaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;
        newVaddr += MMU_DESCRIPTOR_L2_SMALL_SIZE;
    ttbr = MMU_TTBRx_FLAGS | (archMmu->physTtb);
    ttbcr &= ~MMU_DESCRIPTOR_TTBCR_PD0; /* enable TTBR0 walks for the new user space */

    ttbcr |= MMU_DESCRIPTOR_TTBCR_PD0;  /* no user space: keep TTBR0 walks disabled */
#ifdef LOSCFG_KERNEL_VM
    while ((page = LOS_ListRemoveHeadType(&archMmu->ptList, LosVmPage, node)) != NULL) {
    PTE_T *tmpTtbase = NULL;

    tmpTtbase = LOS_MemAllocAlign(m_aucSysMem0, MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS,
                                  MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS);
    if (tmpTtbase == NULL) {
        VM_ERR("memory alloc failed");

    err = memcpy_s(kSpace->archMmu.virtTtb, MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS,
                   g_firstPageTable, MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS);
    if (err != EOK) {
        VM_ERR("memcpy failed, errno: %d", err);
    }
    UINT32 offset = virtAddr - KERNEL_VMM_BASE;

    UINT32 bssEndBoundary = ROUNDUP(bssEnd, MB);
    LosArchMmuInitMapping mmuKernelMappings[] = {
        {
            .phys = SYS_MEM_BASE + textStart - virtAddr,
            .virt = textStart,
            .size = ROUNDUP(textEnd - textStart, MMU_DESCRIPTOR_L2_SMALL_SIZE),
            .flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_EXECUTE,
            .name = "kernel_text"
        },
        {
            .phys = SYS_MEM_BASE + rodataStart - virtAddr,
            .virt = rodataStart,
            .size = ROUNDUP(rodataEnd - rodataStart, MMU_DESCRIPTOR_L2_SMALL_SIZE),
            .flags = VM_MAP_REGION_FLAG_PERM_READ,
            .name = "kernel_rodata"
        },
        {
            .phys = SYS_MEM_BASE + ramDataStart - virtAddr,
            .virt = ramDataStart,
            .size = ROUNDUP(bssEndBoundary - ramDataStart, MMU_DESCRIPTOR_L2_SMALL_SIZE),
            .flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE,
            .name = "kernel_data_bss"
        }
    };
    status = LOS_ArchMmuUnmap(&kSpace->archMmu, virtAddr,
                              (bssEndBoundary - virtAddr) >> MMU_DESCRIPTOR_L2_SMALL_SHIFT);
    if (status != ((bssEndBoundary - virtAddr) >> MMU_DESCRIPTOR_L2_SMALL_SHIFT)) {
        VM_ERR("unmap failed, status: %d", status);
        return;
    }
    flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE | VM_MAP_REGION_FLAG_PERM_EXECUTE;
    if (uncached) {
        flags |= VM_MAP_REGION_FLAG_UNCACHED;
    }
    status = LOS_ArchMmuMap(&kSpace->archMmu, virtAddr, SYS_MEM_BASE,
                            (textStart - virtAddr) >> MMU_DESCRIPTOR_L2_SMALL_SHIFT, flags);
    if (status != ((textStart - virtAddr) >> MMU_DESCRIPTOR_L2_SMALL_SHIFT)) {
        VM_ERR("mmap failed, status: %d", status);
        return;
    }
    for (i = 0; i < length; i++) {
        kernelMap = &mmuKernelMappings[i];
        if (uncached) {
            kernelMap->flags |= VM_MAP_REGION_FLAG_UNCACHED;
        }
        status = LOS_ArchMmuMap(&kSpace->archMmu, kernelMap->virt, kernelMap->phys,
                                kernelMap->size >> MMU_DESCRIPTOR_L2_SMALL_SHIFT, kernelMap->flags);
        if (status != (kernelMap->size >> MMU_DESCRIPTOR_L2_SMALL_SHIFT)) {
            VM_ERR("mmap failed, status: %d", status);
            return;
        }
    }
    kmallocLength = virtAddr + SYS_MEM_SIZE_DEFAULT - bssEndBoundary;
    flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE;
    if (uncached) {
        flags |= VM_MAP_REGION_FLAG_UNCACHED;
    }
    status = LOS_ArchMmuMap(&kSpace->archMmu, bssEndBoundary,
                            SYS_MEM_BASE + bssEndBoundary - virtAddr,
                            kmallocLength >> MMU_DESCRIPTOR_L2_SMALL_SHIFT, flags);
    if (status != (kmallocLength >> MMU_DESCRIPTOR_L2_SMALL_SHIFT)) {
        VM_ERR("mmap failed, status: %d", status);
        return;
    }
    oldTtPhyBase = oldTtPhyBase & MMU_DESCRIPTOR_L2_SMALL_FRAME;

    UINT32 n = __builtin_clz(KERNEL_ASPACE_BASE) + 1;
    UINT32 ttbcr = MMU_DESCRIPTOR_TTBCR_PD0 | n;
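A worked example, assuming KERNEL_ASPACE_BASE = 0x40000000 (a common 1GB-user/3GB-kernel split; the actual value is board configuration):

    /*
     * __builtin_clz(0x40000000) = 1, so n = 2
     * TTBCR.N = 2: TTBR0 translates VA < 2^(32-2) = 0x40000000 (user side),
     * TTBR1 translates everything above it (kernel side).
     * PD0 keeps TTBR0 walks disabled until LOS_ArchMmuContextSwitch
     * installs a user address space and clears the bit.
     */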
STATIC INLINE UINT32 OsArmReadTtbr0(VOID)
STATIC INLINE VOID OsArmWriteTtbcr(UINT32 val)
STATIC INLINE VOID OsArmWriteTlbiasidis(UINT32 val)
STATIC INLINE VOID OsArmWriteContextidr(UINT32 val)
Writes CONTEXTIDR, through which the coprocessor records which process is currently running.
STATIC INLINE UINT32 OsArmReadTtbcr(VOID)
STATIC INLINE VOID OsArmWriteTtbr1(UINT32 val)
STATIC INLINE VOID OsArmWriteTtbr0(UINT32 val)
NORETURN VOID LOS_Panic(const CHAR *fmt,...)
Kernel panic function.
LITE_OS_SEC_ALW_INLINE STATIC INLINE VOID LOS_ListInit(LOS_DL_LIST *list)
LITE_OS_SEC_ALW_INLINE STATIC INLINE VOID LOS_ListDelete(LOS_DL_LIST *node)
LITE_OS_SEC_ALW_INLINE STATIC INLINE VOID LOS_ListAdd(LOS_DL_LIST *list, LOS_DL_LIST *node)
Insert a new node to a doubly linked list.
VOID * LOS_MemAlloc(VOID *pool, UINT32 size)
Allocates size bytes from the specified memory pool; note that this is not an allocation from the kernel heap.
VOID * LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary)
Allocates size bytes from the specified memory pool, with the address aligned to a boundary-byte boundary.
UINT32 LOS_MemFree(VOID *pool, VOID *ptr)
Frees memory that was allocated from the specified dynamic memory pool.
UINT8 * m_aucSysMem0
Start address of the exception-interaction dynamic memory pool; when the exception-interaction feature is not supported, m_aucSysMem0 equals m_aucSysMem1.
STATIC INT32 OsMapParamCheck(UINT32 flags, VADDR_T vaddr, PADDR_T paddr)
STATIC INLINE VOID OsUnlockPte1Tmp(SPIN_LOCK_S *lock, UINT32 intSave)
STATIC VOID OsCvtSecAttsToFlags(PTE_T l1Entry, UINT32 *flags)
VOID LOS_ArchMmuContextSwitch(LosArchMmu *archMmu)
LOS_ArchMmuContextSwitch switches the MMU context to the given address space.
STATIC UINT32 OsCvtPte2CacheFlagsToMMUFlags(UINT32 flags)
STATIC UINT32 OsMapL2PageContinous(MmuMapInfo *mmuMapInfo, PTE_T *pte1, UINT32 *count)
status_t LOS_ArchMmuMap(LosArchMmu *archMmu, VADDR_T vaddr, PADDR_T paddr, size_t count, UINT32 flags)
LOS_ArchMmuMap maps a virtual address range of a process space onto a physical address range; "mapping" here means generating the L1 and L2 page table entries.
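A minimal usage sketch (hypothetical device addresses; the return value is the number of pages actually mapped, which the checks in OsSetKSectionAttr above rely on):

    STATIC STATUS_T MapDeviceRegs(LosArchMmu *archMmu)
    {
        VADDR_T va = 0xD0000000;  /* hypothetical virtual base */
        PADDR_T pa = 0x20000000;  /* hypothetical device registers */
        size_t pages = 16;
        UINT32 flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE |
                       VM_MAP_REGION_FLAG_UNCACHED_DEVICE;

        status_t mapped = LOS_ArchMmuMap(archMmu, va, pa, pages, flags);
        if (mapped != (status_t)pages) {
            (VOID)LOS_ArchMmuUnmap(archMmu, va, pages); /* roll back a partial map */
            return LOS_NOK;
        }
        return LOS_OK;
    }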
STATUS_T LOS_ArchMmuDestroy(LosArchMmu *archMmu)
LOS_ArchMmuDestroy tears down the MMU, the counterpart of the init path; it frees the page table pages.
STATIC VOID OsTryUnmapL1PTE(LosArchMmu *archMmu, PTE_T *l1Entry, vaddr_t vaddr, UINT32 scanIndex, UINT32 scanCount)
__attribute__((aligned(MMU_DESCRIPTOR_L1_SMALL_ENTRY_NUMBERS)))
STATIC UINT32 OsCvtSecFlagsToAttrs(UINT32 flags)
STATIC STATUS_T OsGetL2Table(LosArchMmu *archMmu, UINT32 l1Index, paddr_t *ppa)
Gets the L2 page table, allocating a new L2 table (physical memory required) when none exists.
VADDR_T * OsGFirstTableGet(VOID)
STATUS_T LOS_ArchMmuChangeProt(LosArchMmu *archMmu, VADDR_T vaddr, size_t count, UINT32 flags)
LOS_ArchMmuChangeProt changes the mapping attributes of a virtual address range in a process space, i.e. the access permissions of a memory segment: read, write, execute, or none.
STATIC VOID OsSetKSectionAttr(UINTPTR virtAddr, BOOL uncached)
Sets the kernel-space section attributes; as this shows, kernel space is mapped at a fixed offset to physical addresses.
STATUS_T LOS_ArchMmuMove(LosArchMmu *archMmu, VADDR_T oldVaddr, VADDR_T newVaddr, size_t count, UINT32 flags)
LOS_ArchMmuMove moves the mappings of one virtual address range in a process space to another, unused virtual range and re-establishes them there.
STATIC VOID OsPutL2Table(const LosArchMmu *archMmu, UINT32 l1Index, paddr_t l2Paddr)
STATIC INLINE VOID OsUnlockPte2(SPIN_LOCK_S *lock, UINT32 intSave)
BOOL OsArchMmuInit(LosArchMmu *archMmu, VADDR_T *virtTtb)
STATIC UINT32 OsMapSection(MmuMapInfo *mmuMapInfo, UINT32 *count)
OsMapSection creates a mapping with section-format (1MB) L1 entries.
STATIC UINT32 OsMapL1PTE(MmuMapInfo *mmuMapInfo, PTE_T *l1Entry, UINT32 *count)
STATIC UINT32 OsUnmapSection(LosArchMmu *archMmu, PTE_T *l1Entry, vaddr_t *vaddr, UINT32 *count)
STATUS_T LOS_ArchMmuUnmap(LosArchMmu *archMmu, VADDR_T vaddr, size_t count)
LOS_ArchMmuUnmap removes the mappings between a virtual address range and its physical range in a process space.
VOID OsArchMmuInitPerCPU(VOID)
STATIC UINT32 OsCvtPte2FlagsToAttrs(UINT32 flags)
STATIC INLINE VOID OsUnlockPte1(SPIN_LOCK_S *lock, UINT32 intSave)
STATIC UINT32 OsCvtSecAccessFlagsToMMUFlags(UINT32 flags)
STATIC SPIN_LOCK_S * OsGetPte1Lock(LosArchMmu *archMmu, PADDR_T paddr, UINT32 *intSave)
VOID OsInitMappingStartUp(VOID)
OsInitMappingStartUp starts MMU initialization.
STATIC VOID OsKSectionNewAttrEnable(VOID)
STATIC UINT32 OsCvtSecCacheFlagsToMMUFlags(UINT32 flags)
STATIC UINT32 OsCvtPte2AccessFlagsToMMUFlags(UINT32 flags)
STATIC INLINE PTE_T * OsGetPte2BasePtr(PTE_T pte1)
Gets the base address of the L2 page table referenced by an L1 entry.
STATIC INLINE SPIN_LOCK_S * OsGetPte2Lock(LosArchMmu *archMmu, PTE_T pte1, UINT32 *intSave)
STATIC INLINE UINT32 OsUnmapL1Invalid(vaddr_t *vaddr, UINT32 *count)
Handles the unmap step for an invalid L1 entry (advances past its range).
STATIC SPIN_LOCK_S * OsGetPte1LockTmp(LosArchMmu *archMmu, PADDR_T paddr, UINT32 *intSave)
STATIC UINT32 OsUnmapL2PTE(LosArchMmu *archMmu, PTE_T *pte1, vaddr_t vaddr, UINT32 *count)
STATIC VOID OsSwitchTmpTTB(VOID)
Switches to the temporary translation table.
STATUS_T LOS_ArchMmuQuery(const LosArchMmu *archMmu, VADDR_T vaddr, PADDR_T *paddr, UINT32 *flags)
LOS_ArchMmuQuery returns the physical address and mapping attributes behind a virtual address of a process space. It is a hot-path kernel function: it walks the page tables to see whether the virtual address is mapped, and brings back the mapped physical address and permissions.
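A usage sketch (archMmu and vaddr supplied by the caller):

    PADDR_T paddr = 0;
    UINT32 attrs = 0;
    if (LOS_ArchMmuQuery(archMmu, vaddr, &paddr, &attrs) == LOS_OK) {
        if (attrs & VM_MAP_REGION_FLAG_PERM_WRITE) {
            /* vaddr is mapped writable; paddr now holds the physical address */
        }
    } else {
        /* unmapped: the walk above yields LOS_ERRNO_VM_NOT_FOUND */
    }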
STATIC VOID OsCvtPte2AttsToFlags(PTE_T l1Entry, PTE_T l2Entry, UINT32 *flags)
https://blog.csdn.net/qq_38410730/article/details/81036768
STATUS_T OsAllocAsid(UINT32 *asid)
VOID OsFreeAsid(UINT32 asid)
Frees an ASID.
CHAR __ram_data_start
Start of the RAM data section: readable and writable.
CHAR __bss_end
End of the BSS section; attribute((section(".__bss_end"))).
CHAR __rodata_start
Start of the read-only data (rodata) section: read-only.
STATIC INLINE PTE_T OsGetPte1(PTE_T *pte1BasePtr, vaddr_t va)
STATIC INLINE VOID OsSavePte1(PTE_T *pte1Ptr, PTE_T pte1)
PTE (Page Table Entry): stores an L1 page table entry into the L1 page table.
STATIC INLINE BOOL OsIsPte1PageTable(PTE_T pte1)
STATIC INLINE PTE_T OsGetPte2(PTE_T *pte2BasePtr, vaddr_t va)
STATIC INLINE BOOL OsIsPte2SmallPageXN(PTE_T pte2)
STATIC INLINE BOOL OsIsPte2LargePage(PTE_T pte2)
STATIC INLINE UINT32 OsSavePte2Continuous(PTE_T *pte2BasePtr, UINT32 index, PTE_T pte2, UINT32 count)
STATIC INLINE PTE_T * OsGetPte1Ptr(PTE_T *pte1BasePtr, vaddr_t va)
pte1BasePtr is the base address of the L1 translation table.
STATIC INLINE BOOL OsIsPte2SmallPage(PTE_T pte2)
STATIC INLINE BOOL OsIsPte1Section(PTE_T pte1)
STATIC INLINE UINT32 OsGetPte2Index(vaddr_t va)
STATIC INLINE PADDR_T OsGetPte1Paddr(PADDR_T PhysTtb, vaddr_t va)
STATIC INLINE BOOL OsIsPte1Invalid(PTE_T pte1)
STATIC INLINE VOID OsClearPte1(PTE_T *pte1Ptr)
STATIC INLINE UINT32 OsGetPte1Index(vaddr_t va)
Gets the index of the L1 page table entry for a virtual address.
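The index arithmetic, concretely (standard short-descriptor layout: 4096 L1 entries of 1MB each, 256 L2 entries of 4KB each):

    /*
     * L1 index = va >> 20            e.g. 0x40123456 >> 20          = 0x401
     * L2 index = (va >> 12) & 0xFF   e.g. (0x40123456 >> 12) & 0xFF = 0x23
     */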
STATIC INLINE VOID OsClearPte2Continuous(PTE_T *pte2Ptr, UINT32 count)
STATIC INLINE ADDR_T OsTruncPte1(ADDR_T addr)
Produces the address part of an L1 section-format entry (truncates to the 1MB section boundary).
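For reference, the short-descriptor L1 section layout this relies on (ARM architecture, not LiteOS-specific):

    /*
     * bits [31:20]: section base address (1MB aligned)
     * bits [19:2]:  attribute bits (AP, TEX, domain, XN, NS, nG, S, ...)
     * bits [1:0]:   0b10 = section entry, 0b01 = page-table entry, 0b00 = invalid
     * OsTruncPte1 therefore keeps only the 1MB-aligned base of the address.
     */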
VOID LOS_SpinUnlockRestore(SPIN_LOCK_S *lock, UINT32 intSave)
VOID LOS_SpinLockSave(SPIN_LOCK_S *lock, UINT32 *intSave)
VOID LOS_SpinInit(SPIN_LOCK_S *lock)
STATIC INLINE VOID OsArmInvalidateTlbBarrier(VOID)
STATIC INLINE VOID OsCleanTLB(VOID)
STATIC INLINE VOID OsArmInvalidateTlbMvaNoBarrier(VADDR_T va)
STATIC INLINE VOID OsArmInvalidateTlbMvaRangeNoBarrier(VADDR_T start, UINT32 count)
PADDR_T LOS_PaddrQuery(VOID *vaddr)
Queries the physical address that a virtual address is mapped to.
LosVmSpace * LOS_GetKVmSpace(VOID)
There is only one kernel space, g_kVmSpace; all kernel processes share it.
STATUS_T LOS_VmSpaceReserve(LosVmSpace *space, size_t size, VADDR_T vaddr)
LosVmPage * LOS_VmPageGet(PADDR_T paddr)
Gets the page frame for a physical address.
LosVmPage * OsVmPaddrToPage(paddr_t paddr)
VOID LOS_PhysPageFree(LosVmPage *page)
Frees one physical page frame.
PADDR_T OsKVaddrToPaddr(VADDR_T kvaddr)
LosVmPage * LOS_PhysPageAlloc(VOID)
Allocates one physical page.
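A sketch pairing the physical-page APIs listed here (PAGE_SIZE is the 4KB frame size):

    LosVmPage *page = LOS_PhysPageAlloc();      /* one physical frame */
    if (page != NULL) {
        VOID *kva = OsVmPageToVaddr(page);      /* its kernel virtual address */
        (VOID)memset_s(kva, PAGE_SIZE, 0, PAGE_SIZE);
        LOS_PhysPageFree(page);                 /* give the frame back */
    }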
VOID * OsVmPageToVaddr(LosVmPage *page)
Gets the kernel-space virtual address for a page; cf. OsArchMmuInit and #define SYS_MEM_BASE DDR_MEM_ADDR /* physical memory base, start of physical addresses… */
VADDR_T * LOS_PaddrToKVaddr(PADDR_T paddr)
Gets the kernel virtual address for a physical address.
A memory management unit (MMU) is sometimes also called a paged memory management unit (PMMU).
unsigned int flags
Permission flags, read/write/.., the VM_MAP_REGION_FLAG_PERM_* bits.
Physical page frame descriptor. Virtual memory expresses a program's demand for memory resources; physical memory is the supply that satisfies it. The idea of the buddy algorithm: treat contiguous runs of free page frames as free blocks, and group them by size (the number of contiguous frames).
Virtual space: every process has a virtual memory address space of its own.