20 changes: 20 additions & 0 deletions bsp/qemu-virt64-riscv/SConstruct
@@ -38,5 +38,25 @@ if GetDepend('__STACKSIZE__'): stack_size = GetDepend('__STACKSIZE__')
stack_lds.write('__STACKSIZE__ = %d;\n' % stack_size)
stack_lds.close()

# Obtain the number of harts from rtconfig.h and write
# it into link_cpus.lds for the linker script
try:
with open('rtconfig.h', 'r') as f:
rtconfig_content = f.readlines()
except FileNotFoundError:
cpus_nr = 1
else:
cpus_nr = 1 # default value
for line in rtconfig_content:
line = line.strip()
if line.startswith('#define') and 'RT_CPUS_NR' in line:
parts = line.split()
if len(parts) >= 3 and parts[2].isdigit():
cpus_nr = int(parts[2])
break

with open('link_cpus.lds', 'w') as cpus_lds:
cpus_lds.write(f'RT_CPUS_NR = {cpus_nr};\n')

# start the build
DoBuilding(TARGET, objs)
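For reference, a hypothetical rtconfig.h excerpt and what the snippet above would derive from it; the values here are illustrative, not taken from this PR:

/* Hypothetical rtconfig.h excerpt when SMP is enabled. The SConstruct logic
 * above finds the RT_CPUS_NR define and regenerates link_cpus.lds with the
 * matching assignment, e.g. "RT_CPUS_NR = 4;", so the linker script always
 * agrees with the kernel configuration. */
#define RT_USING_SMP
#define RT_CPUS_NR 4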
5 changes: 5 additions & 0 deletions bsp/qemu-virt64-riscv/driver/board.c
@@ -89,6 +89,11 @@ void rt_hw_board_init(void)

rt_hw_tick_init();

#ifdef RT_USING_SMP
/* initialize inter-processor interrupts (IPI) */
rt_hw_ipi_init();
#endif /* RT_USING_SMP */

#ifdef RT_USING_COMPONENTS_INIT
rt_components_board_init();
#endif
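For orientation only, a minimal sketch of what IPI initialization typically involves on a RISC-V S-mode kernel; this is an illustration, not the rt_hw_ipi_init() implementation added by this patch:

/* Illustration only -- not the PR's rt_hw_ipi_init(). Inter-processor
 * interrupts are normally delivered as supervisor software interrupts,
 * so a minimal init just enables SIE.SSIE (bit 1 of the sie CSR). */
static void example_ipi_init(void)
{
    __asm__ volatile ("csrs sie, %0" : : "r"(1 << 1));
}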
10 changes: 4 additions & 6 deletions bsp/qemu-virt64-riscv/link.lds
@@ -9,6 +9,7 @@
*/

INCLUDE "link_stacksize.lds"
INCLUDE "link_cpus.lds"

OUTPUT_ARCH( "riscv" )

@@ -121,12 +122,9 @@ SECTIONS
{
. = ALIGN(64);
__stack_start__ = .;

. += __STACKSIZE__;
__stack_cpu0 = .;

. += __STACKSIZE__;
__stack_cpu1 = .;
/* Reserve one stack region per hart, sized by RT_CPUS_NR */
. += (__STACKSIZE__ * RT_CPUS_NR);
__stack_end__ = .;
} > SRAM

.sbss :
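Since the per-CPU symbols __stack_cpu0/__stack_cpu1 are gone, startup code must now derive each hart's stack from __stack_start__ and __STACKSIZE__. A sketch of that calculation in C, under the assumption that boot code does it this way (the PR's actual startup code is not shown in this diff):

#include <rtthread.h>

/* Sketch only: deriving a hart's initial stack top from the linker symbols
 * above. __STACKSIZE__ is an absolute linker symbol, so its address carries
 * the value. Stacks grow downwards, so hart N starts at the end of its slot. */
extern rt_uint8_t __stack_start__[];
extern rt_uint8_t __STACKSIZE__;

static inline rt_uint8_t *hart_stack_top(rt_ubase_t hartid)
{
    rt_ubase_t stack_size = (rt_ubase_t)&__STACKSIZE__;
    return __stack_start__ + (hartid + 1) * stack_size;
}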
1 change: 1 addition & 0 deletions bsp/qemu-virt64-riscv/link_cpus.lds
@@ -0,0 +1 @@
RT_CPUS_NR = 8;
16 changes: 14 additions & 2 deletions bsp/qemu-virt64-riscv/qemu-dbg.sh
@@ -1,4 +1,16 @@
qemu-system-riscv64 -nographic -machine virt -m 256M -kernel rtthread.bin -s -S \
QEMU_CMD="qemu-system-riscv64 -nographic -machine virt -m 256M -kernel rtthread.bin -s -S"

if grep -q "#define RT_USING_SMP" ./rtconfig.h 2>/dev/null; then
hart_num=$(grep "RT_CPUS_NR = [0-9]*;" ./link_cpus.lds | awk -F'[=;]' '{gsub(/ /, "", $2); print $2}')
if [ -z "$hart_num" ]; then
hart_num=1
fi
QEMU_CMD="$QEMU_CMD -smp $hart_num"
fi

QEMU_CMD="$QEMU_CMD \
-drive if=none,file=sd.bin,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 \
-netdev user,id=tap0 -device virtio-net-device,netdev=tap0,bus=virtio-mmio-bus.1 \
-device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0
-device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0"

eval $QEMU_CMD
16 changes: 14 additions & 2 deletions bsp/qemu-virt64-riscv/run.sh
@@ -24,7 +24,19 @@ if [ ! -f $path_image ]; then
exit
fi

qemu-system-riscv64 -nographic -machine virt -m 256M -kernel rtthread.bin \
QEMU_CMD="qemu-system-riscv64 -nographic -machine virt -m 256M -kernel rtthread.bin"

if grep -q "#define RT_USING_SMP" ./rtconfig.h 2>/dev/null; then
hart_num=$(grep "RT_CPUS_NR = [0-9]*;" ./link_cpus.lds | awk -F'[=;]' '{gsub(/ /, "", $2); print $2}')
if [ -z "$hart_num" ]; then
hart_num=1
fi
QEMU_CMD="$QEMU_CMD -smp $hart_num"
fi

QEMU_CMD="$QEMU_CMD \
-drive if=none,file=$path_image,format=raw,id=blk0 -device virtio-blk-device,drive=blk0,bus=virtio-mmio-bus.0 \
-netdev user,id=tap0 -device virtio-net-device,netdev=tap0,bus=virtio-mmio-bus.1 \
-device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0
-device virtio-serial-device -chardev socket,host=127.0.0.1,port=4321,server=on,wait=off,telnet=on,id=console0 -device virtserialport,chardev=console0"

eval $QEMU_CMD
159 changes: 159 additions & 0 deletions libcpu/risc-v/common64/atomic_riscv.c
@@ -0,0 +1,159 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-03-14 WangShun first version
*/

#include <rtthread.h>

rt_atomic_t rt_hw_atomic_exchange(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
rt_atomic_t result = 0;
#if __riscv_xlen == 32
asm volatile("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
asm volatile("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
return result;
}

rt_atomic_t rt_hw_atomic_add(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
rt_atomic_t result = 0;
#if __riscv_xlen == 32
asm volatile("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
asm volatile("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
return result;
}

rt_atomic_t rt_hw_atomic_sub(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
rt_atomic_t result = 0;
val = -val;
#if __riscv_xlen == 32
asm volatile("amoadd.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
asm volatile("amoadd.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
return result;
}

rt_atomic_t rt_hw_atomic_xor(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
rt_atomic_t result = 0;
#if __riscv_xlen == 32
asm volatile("amoxor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
asm volatile("amoxor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
return result;
}

rt_atomic_t rt_hw_atomic_and(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
rt_atomic_t result = 0;
#if __riscv_xlen == 32
asm volatile("amoand.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
asm volatile("amoand.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
return result;
}

rt_atomic_t rt_hw_atomic_or(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
rt_atomic_t result = 0;
#if __riscv_xlen == 32
asm volatile("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
asm volatile("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
return result;
}

rt_atomic_t rt_hw_atomic_load(volatile rt_atomic_t *ptr)
{
rt_atomic_t result = 0;
#if __riscv_xlen == 32
asm volatile("amoxor.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#elif __riscv_xlen == 64
asm volatile("amoxor.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#endif
return result;
}

void rt_hw_atomic_store(volatile rt_atomic_t *ptr, rt_atomic_t val)
{
rt_atomic_t result = 0;
#if __riscv_xlen == 32
asm volatile("amoswap.w %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
asm volatile("amoswap.d %0, %1, (%2)" : "=r"(result) : "r"(val), "r"(ptr) : "memory");
#endif
}

rt_atomic_t rt_hw_atomic_flag_test_and_set(volatile rt_atomic_t *ptr)
{
rt_atomic_t result = 0;
rt_atomic_t temp = 1;
#if __riscv_xlen == 32
asm volatile("amoor.w %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
#elif __riscv_xlen == 64
asm volatile("amoor.d %0, %1, (%2)" : "=r"(result) : "r"(temp), "r"(ptr) : "memory");
#endif
return result;
}

void rt_hw_atomic_flag_clear(volatile rt_atomic_t *ptr)
{
rt_atomic_t result = 0;
#if __riscv_xlen == 32
asm volatile("amoand.w %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#elif __riscv_xlen == 64
asm volatile("amoand.d %0, x0, (%1)" : "=r"(result) : "r"(ptr) : "memory");
#endif
}

rt_atomic_t rt_hw_atomic_compare_exchange_strong(volatile rt_atomic_t *ptr, rt_atomic_t *old, rt_atomic_t desired)
{
rt_atomic_t tmp = *old;
rt_atomic_t result = 0;
#if __riscv_xlen == 32
asm volatile(
" fence iorw, ow\n"
"1: lr.w.aq %[result], (%[ptr])\n"
" bne %[result], %[tmp], 2f\n"
" sc.w.rl %[tmp], %[desired], (%[ptr])\n"
" bnez %[tmp], 1b\n"
" li %[result], 1\n"
" j 3f\n"
" 2:sw %[result], (%[old])\n"
" li %[result], 0\n"
" 3:\n"
: [result] "+r"(result), [tmp] "+r"(tmp), [ptr] "+r"(ptr)
: [desired] "r"(desired), [old] "r"(old)
: "memory");
#elif __riscv_xlen == 64
asm volatile(
" fence iorw, ow\n"
"1: lr.d.aq %[result], (%[ptr])\n"
" bne %[result], %[tmp], 2f\n"
" sc.d.rl %[tmp], %[desired], (%[ptr])\n"
" bnez %[tmp], 1b\n"
" li %[result], 1\n"
" j 3f\n"
" 2:sd %[result], (%[old])\n"
" li %[result], 0\n"
" 3:\n"
: [result] "+r"(result), [tmp] "+r"(tmp), [ptr] "+r"(ptr)
: [desired] "r"(desired), [old] "r"(old)
: "memory");
#endif
return result;
}
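A usage sketch (not part of the patch) showing how the compare-and-exchange primitive above is typically driven in a retry loop; on failure the observed value is written back through the old pointer, so the loop needs no extra load:

#include <rtthread.h>

/* Usage sketch only: a bounded atomic increment built on the primitives above. */
static rt_atomic_t example_counter = 0;

rt_bool_t example_inc_below(rt_atomic_t limit)
{
    rt_atomic_t expected = rt_hw_atomic_load(&example_counter);
    while (expected < limit)
    {
        /* returns non-zero on success; on failure 'expected' now holds the
         * value currently stored in example_counter and the loop retries */
        if (rt_hw_atomic_compare_exchange_strong(&example_counter, &expected, expected + 1))
            return RT_TRUE;
    }
    return RT_FALSE;
}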
31 changes: 29 additions & 2 deletions libcpu/risc-v/common64/context_gcc.S
@@ -69,14 +69,27 @@
.endm

/*
* #ifdef RT_USING_SMP
* void rt_hw_context_switch_to(rt_ubase_t to, struct rt_thread *to_thread);
* #else
* void rt_hw_context_switch_to(rt_ubase_t to);
*
* a0 --> to SP pointer
* #endif
* a0 --> to
* a1 --> to_thread
*/
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
LOAD sp, (a0)

#ifdef RT_USING_SMP
/*
* Pass the previous CPU lock status to
* rt_cpus_lock_status_restore for restoration
*/
mv a0, a1
call rt_cpus_lock_status_restore
#endif

call rt_thread_self
mv s1, a0

@@ -88,10 +101,15 @@ rt_hw_context_switch_to:
sret

/*
* #ifdef RT_USING_SMP
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
* #else
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
* #endif
*
* a0 --> from SP pointer
* a1 --> to SP pointer
* a2 --> to_thread
*
* It should only be used with local interrupts disabled
*/
@@ -103,6 +121,15 @@
// restore to thread SP
LOAD sp, (a1)

#ifdef RT_USING_SMP
/*
* Pass the previous CPU lock status to
* rt_cpus_lock_status_restore for restoration
*/
mv a0, a2
call rt_cpus_lock_status_restore
#endif /*RT_USING_SMP*/

// restore Address Space
call rt_thread_self
mv s1, a0