Updated: 2022/06/01  Source: https://gitee.com/weharmony/kernel_liteos_a_note
armv7_pmu.c
Go to the documentation of this file.
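armv7_pmu.c implements the ARMv7 (Cortex-A) hardware PMU backend of the LiteOS-A perf subsystem: it programs the CP15 c9 performance-monitor registers, services the counter-overflow interrupt, and registers itself with the generic perf layer through OsPerfHwInit(). As a reading aid (not part of the source file), the CP15 encodings used by the mrc/mcr instructions in the listing map to the following architectural registers:

/*
 * ARMv7 PMU register quick reference (reading aid, not part of armv7_pmu.c):
 *   p15, 0, c9, c12, 0   PMCR        control (E enable, P/C counter reset, D cycle divider)
 *   p15, 0, c9, c12, 1   PMCNTENSET  enable individual counters
 *   p15, 0, c9, c12, 2   PMCNTENCLR  disable individual counters
 *   p15, 0, c9, c12, 3   PMOVSR      overflow flag status / clear
 *   p15, 0, c9, c12, 5   PMSELR      select the counter addressed by PMXEV* accesses
 *   p15, 0, c9, c13, 0   PMCCNTR     cycle counter value
 *   p15, 0, c9, c13, 1   PMXEVTYPER  event type of the selected counter
 *   p15, 0, c9, c13, 2   PMXEVCNTR   value of the selected event counter
 *   p15, 0, c9, c14, 1   PMINTENSET  enable overflow interrupts
 *   p15, 0, c9, c14, 2   PMINTENCLR  disable overflow interrupts
 */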
1/*
2 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
3 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without modification,
6 * are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this list of
9 * conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
12 * of conditions and the following disclaimer in the documentation and/or other materials
13 * provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
16 * to endorse or promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31
32#include "armv7_pmu_pri.h"
33#include "perf_pmu_pri.h"
34#include "los_hw_cpu.h"
35#include "asm/platform.h"
36
37OS_PMU_INTS(LOSCFG_KERNEL_CORE_NUM, g_pmuIrqNr);
38STATIC HwPmu g_armv7Pmu;
 39
40STATIC INLINE UINT32 Armv7PmncRead(VOID)
41{
42 UINT32 value = 0;
43 __asm__ volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(value));
44 return value;
45}
46
47STATIC INLINE VOID Armv7PmncWrite(UINT32 value)
48{
49 value &= ARMV7_PMNC_MASK;
50 __asm__ volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(value));
51 ISB;
52}
53
54STATIC INLINE UINT32 Armv7PmuOverflowed(UINT32 pmnc)
55{
56 return pmnc & ARMV7_OVERFLOWED_MASK;
57}
58
59STATIC INLINE UINT32 Armv7PmuCntOverflowed(UINT32 pmnc, UINT32 index)
60{
61 return pmnc & ARMV7_CNT2BIT(ARMV7_IDX2CNT(index));
62}
63
64STATIC INLINE UINT32 Armv7CntValid(UINT32 index)
65{
66 return index <= ARMV7_IDX_COUNTER_LAST;
67}
68
69STATIC INLINE VOID Armv7PmuSelCnt(UINT32 index)
70{
71 UINT32 counter = ARMV7_IDX2CNT(index);
72 __asm__ volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
73 ISB;
74}
75
76STATIC INLINE VOID Armv7PmuSetCntPeriod(UINT32 index, UINT32 period)
77{
78 if (!Armv7CntValid(index)) {
79 PRINT_ERR("CPU writing wrong counter %u\n", index);
80 } else if (index == ARMV7_IDX_CYCLE_COUNTER) {
81 __asm__ volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (period));
82 } else {
83 Armv7PmuSelCnt(index);
84 __asm__ volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (period));
85 }
86}
87
88STATIC INLINE VOID Armv7BindEvt2Cnt(UINT32 index, UINT32 value)
89{
90 PRINT_DEBUG("bind event: %u to counter: %u\n", value, index);
91 Armv7PmuSelCnt(index);
92 value &= ARMV7_EVTYPE_MASK;
93 __asm__ volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (value));
94}
95
96STATIC INLINE VOID Armv7EnableCnt(UINT32 index)
97{
98 UINT32 counter = ARMV7_IDX2CNT(index);
99 PRINT_DEBUG("index : %u, counter: %u\n", index, counter);
100 __asm__ volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (ARMV7_CNT2BIT(counter)));
101}
102
103STATIC INLINE VOID Armv7DisableCnt(UINT32 index)
104{
105 UINT32 counter = ARMV7_IDX2CNT(index);
106 PRINT_DEBUG("index : %u, counter: %u\n", index, counter);
107 __asm__ volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (ARMV7_CNT2BIT(counter)));
108}
109
110STATIC INLINE VOID Armv7EnableCntInterrupt(UINT32 index)
111{
112 UINT32 counter = ARMV7_IDX2CNT(index);
113 __asm__ volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (ARMV7_CNT2BIT(counter)));
114 ISB;
115}
116
117STATIC INLINE VOID Armv7DisableCntInterrupt(UINT32 index)
118{
119 UINT32 counter = ARMV7_IDX2CNT(index);
120 __asm__ volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (ARMV7_CNT2BIT(counter)));
121 /* Clear the overflow flag in case an interrupt is pending. */
122 __asm__ volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (ARMV7_CNT2BIT(counter)));
123 ISB;
124}
125
126STATIC INLINE UINT32 Armv7PmuGetOverflowStatus(VOID)
127{
128 UINT32 value;
129
130 __asm__ volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (value));
131 value &= ARMV7_FLAG_MASK;
132 __asm__ volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (value));
133
134 return value;
135}
136
137STATIC VOID Armv7EnableEvent(Event *event)
138{
139 UINT32 cnt = event->counter;
140
141 if (!Armv7CntValid(cnt)) {
142 PRINT_ERR("CPU enabling wrong PMNC counter IRQ enable %u\n", cnt);
143 return;
144 }
145
146 if (event->period == 0) {
147 PRINT_INFO("event period value not valid, counter: %u\n", cnt);
148 return;
149 }
150 /*
151 * Enable counter and interrupt, and set the counter to count
152 * the event that we're interested in.
153 */
154 UINT32 intSave = LOS_IntLock();
155
156 Armv7DisableCnt(cnt);
157
158 /*
159 * Set event (if destined for PMNx counters)
160 * We only need to set the event for the cycle counter if we
161 * have the ability to perform event filtering.
162 */
163 if (cnt != ARMV7_IDX_CYCLE_COUNTER) {
164 Armv7BindEvt2Cnt(cnt, event->eventId);
165 }
166
167 /* Enable interrupt for this counter */
 168 Armv7EnableCntInterrupt(cnt);
 169 Armv7EnableCnt(cnt);
170 LOS_IntRestore(intSave);
171
172 PRINT_DEBUG("enabled event: %u cnt: %u\n", event->eventId, cnt);
173}
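/*
 * Reading aid (not part of the file): Armv7EnableEvent reprograms a counter only
 * while it is disabled, under LOS_IntLock(): disable the counter, bind the event
 * through PMSELR/PMXEVTYPER (skipped for the dedicated cycle counter, which always
 * counts cycles), enable its overflow interrupt, then re-enable the counter.
 */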
174
175STATIC VOID Armv7DisableEvent(Event *event)
176{
177 UINT32 cnt = event->counter;
178
179 if (!Armv7CntValid(cnt)) {
180 PRINT_ERR("CPU enabling wrong PMNC counter IRQ enable %u\n", cnt);
181 return;
182 }
183
184 UINT32 intSave = LOS_IntLock();
185 Armv7DisableCnt(cnt);
 186 Armv7DisableCntInterrupt(cnt);
 187 LOS_IntRestore(intSave);
188}
189
190
191STATIC VOID Armv7StartAllCnt(VOID)
192{
193 PRINT_DEBUG("starting pmu...\n");
194
195 /* Enable all counters */
196 UINT32 reg = Armv7PmncRead() | ARMV7_PMNC_E;
 197 if (g_armv7Pmu.cntDivided) {
 198 reg |= ARMV7_PMNC_D;
199 } else {
200 reg &= ~ARMV7_PMNC_D;
201 }
202
203 Armv7PmncWrite(reg);
204 HalIrqUnmask(g_pmuIrqNr[ArchCurrCpuid()]);
205}
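/*
 * Reading aid (not part of the file): ARMV7_PMNC_E is PMCR.E, the global counter
 * enable, and ARMV7_PMNC_D is PMCR.D, which makes the cycle counter increment once
 * every 64 CPU cycles instead of every cycle. g_armv7Pmu.cntDivided (presumably set
 * by the generic perf layer from the user's configuration, matching the canDivided
 * capability advertised below) decides whether the divided mode is used.
 */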
206
207STATIC VOID Armv7StopAllCnt(VOID)
208{
209 PRINT_DEBUG("stopping pmu...\n");
210 /* Disable all counters */
211 Armv7PmncWrite(Armv7PmncRead() & ~ARMV7_PMNC_E);
212
213 HalIrqMask(g_pmuIrqNr[ArchCurrCpuid()]);
214}
215
216STATIC VOID Armv7ResetAllCnt(VOID)
217{
218 UINT32 index;
219
220 /* The counter and interrupt enable registers are unknown at reset. */
221 for (index = ARMV7_IDX_CYCLE_COUNTER; index < ARMV7_IDX_MAX_COUNTER; index++) {
222 Armv7DisableCnt(index);
 223 Armv7DisableCntInterrupt(index);
 224 }
225
226 /* Initialize & Reset PMNC: C and P bits and D bits */
227 UINT32 reg = ARMV7_PMNC_P | ARMV7_PMNC_C | (g_armv7Pmu.cntDivided ? ARMV7_PMNC_D : 0);
228 Armv7PmncWrite(reg);
229}
230
231STATIC VOID Armv7SetEventPeriod(Event *event)
232{
233 if (event->period != 0) {
234 PRINT_INFO("counter: %u, period: 0x%x\n", event->counter, event->period);
235 Armv7PmuSetCntPeriod(event->counter, PERIOD_CALC(event->period));
236 }
237}
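/*
 * Reading aid (not part of the file): sampling is overflow driven. PERIOD_CALC()
 * comes from perf_pmu_pri.h; assuming it evaluates to roughly (2^32 - period), a
 * period of 1000 pre-loads the counter with 0xFFFFFC18, so it overflows after about
 * 1000 events. Armv7PmuIrqHandler() then re-arms the counter with the same value and
 * credits `period` events via OsPerfUpdateEventCount(), while Armv7ReadEventCnt()
 * compensates for the pre-load when the raw counter value is read back.
 */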
238
239STATIC UINTPTR Armv7ReadEventCnt(Event *event)
240{
241 UINT32 value = 0;
242 UINT32 index = event->counter;
243
244 if (!Armv7CntValid(index)) {
245 PRINT_ERR("CPU reading wrong counter %u\n", index);
246 } else if (index == ARMV7_IDX_CYCLE_COUNTER) {
247 __asm__ volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
248 } else {
249 Armv7PmuSelCnt(index);
250 __asm__ volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
251 }
252
253 if (value < PERIOD_CALC(event->period)) {
 254 if (Armv7PmuCntOverflowed(Armv7PmncRead(), event->counter)) {
 255 value += event->period;
256 }
257 } else {
258 value -= PERIOD_CALC(event->period);
259 }
260 return value;
261}
262
263STATIC const UINT32 g_armv7Map[] = {
 264 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERF_HW_CYCLES,
 265 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERF_HW_INSTRUCTIONS,
 266 [PERF_COUNT_HW_DCACHE_REFERENCES] = ARMV7_PERF_HW_DCACHES,
 267 [PERF_COUNT_HW_DCACHE_MISSES] = ARMV7_PERF_HW_DCACHE_MISSES,
 268 [PERF_COUNT_HW_ICACHE_REFERENCES] = ARMV7_PERF_HW_ICACHES,
 269 [PERF_COUNT_HW_ICACHE_MISSES] = ARMV7_PERF_HW_ICACHE_MISSES,
 270 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERF_HW_BRANCHES,
 271 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERF_HW_BRANCE_MISSES,
 272};
273
274UINT32 Armv7PmuMapEvent(UINT32 eventType, BOOL reverse)
275{
276 if (!reverse) { /* Common event to armv7 real event */
277 if (eventType < ARRAY_SIZE(g_armv7Map)) {
278 return g_armv7Map[eventType];
279 }
280 return eventType;
281 } else { /* Armv7 real event to common event */
282 UINT32 i;
283 for (i = 0; i < ARRAY_SIZE(g_armv7Map); i++) {
284 if (g_armv7Map[i] == eventType) {
285 return i;
286 }
287 }
288 return PERF_HW_INVALID_EVENT_TYPE;
289 }
290}
291
292STATIC VOID Armv7PmuIrqHandler(VOID)
293{
294 UINT32 index;
295 PerfRegs regs;
296
297 PerfEvent *events = &(g_armv7Pmu.pmu.events);
298 UINT32 eventNum = events->nr;
299
300 /* Get and reset the IRQ flags */
 301 UINT32 pmnc = Armv7PmuGetOverflowStatus();
 302 if (!Armv7PmuOverflowed(pmnc)) {
303 return;
304 }
305
306 (VOID)memset_s(&regs, sizeof(PerfRegs), 0, sizeof(PerfRegs));
307 OsPerfFetchIrqRegs(&regs);
308
 309 Armv7StopAllCnt();
 310
311 for (index = 0; index < eventNum; index++) {
312 Event *event = &(events->per[index]);
313 /*
314 * We have a single interrupt for all counters. Check that
315 * each counter has overflowed before we process it.
316 */
317 if (!Armv7PmuCntOverflowed(pmnc, event->counter) || (event->period == 0)) {
318 continue;
319 }
320
321 Armv7PmuSetCntPeriod(event->counter, PERIOD_CALC(event->period));
322
323 OsPerfUpdateEventCount(event, event->period);
324 OsPerfHandleOverFlow(event, &regs);
325 }
 326 Armv7StartAllCnt();
327}
328
329UINT32 OsGetPmuMaxCounter(VOID)
330{
331 return ARMV7_IDX_MAX_COUNTER;
332}
333
334UINT32 OsGetPmuCycleCounter(VOID)
335{
336 return ARMV7_IDX_CYCLE_COUNTER;
337}
338
339UINT32 OsGetPmuCounter0(VOID)
340{
341 return ARMV7_IDX_COUNTER0;
342}
343
344STATIC HwPmu g_armv7Pmu = {
345 .canDivided = TRUE,
346 .enable = Armv7EnableEvent,
347 .disable = Armv7DisableEvent,
348 .start = Armv7StartAllCnt,
349 .stop = Armv7StopAllCnt,
350 .clear = Armv7ResetAllCnt,
351 .setPeriod = Armv7SetEventPeriod,
352 .readCnt = Armv7ReadEventCnt,
353 .mapEvent = Armv7PmuMapEvent,
354};
355
356UINT32 OsHwPmuInit(VOID)
357{
358 UINT32 ret;
359 UINT32 index;
360
361 for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {
362 ret = LOS_HwiCreate(g_pmuIrqNr[index], 0, 0, Armv7PmuIrqHandler, 0);
363 if (ret != LOS_OK) {
364 PRINT_ERR("pmu %u irq handler register failed\n", g_pmuIrqNr[index]);
365 return ret;
366 }
367#ifdef LOSCFG_KERNEL_SMP
368 HalIrqSetAffinity(g_pmuIrqNr[index], CPUID_TO_AFFI_MASK(index));
369#endif
370 }
371 ret = OsPerfHwInit(&g_armv7Pmu);
372 return ret;
373}
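For orientation, here is a minimal sketch of how the generic perf layer is expected to drive this backend once OsHwPmuInit() has registered g_armv7Pmu. It uses only the hooks visible in the listing above; the wrapper function itself is invented for illustration and is not a LiteOS-A API:

/* Hypothetical illustration: count CPU cycles with a sampling period through the
 * function pointers registered in g_armv7Pmu. Not part of armv7_pmu.c. */
STATIC VOID PmuCycleSampleSketch(HwPmu *hwPmu, UINT32 samplePeriod)
{
    Event e = {
        .counter = OsGetPmuCycleCounter(),                            /* dedicated cycle counter index */
        .eventId = hwPmu->mapEvent(PERF_COUNT_HW_CPU_CYCLES, FALSE),  /* common id -> ARMv7 event id */
        .period  = samplePeriod,
    };

    hwPmu->clear();        /* reset PMNC and disable any stale counters */
    hwPmu->setPeriod(&e);  /* pre-load the counter so it overflows after ~samplePeriod cycles */
    hwPmu->enable(&e);     /* bind the event, enable the overflow IRQ and the counter */
    hwPmu->start();        /* set PMCR.E and unmask the PMU interrupt */

    /* ... workload runs here; Armv7PmuIrqHandler() fires on every counter overflow ... */

    hwPmu->stop();         /* clear PMCR.E and mask the PMU interrupt */
    PRINT_INFO("cycles counted: %u\n", (UINT32)hwPmu->readCnt(&e));
}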