Switch users of the ukarch_* bit helpers from <uk/arch/atomic.h> over to the
bit operations provided by <uk/bitops.h>. The atomic *_sync variants map to
uk_set_bit()/uk_clear_bit()/uk_test_bit()/uk_test_and_set_bit(); the plain
variants map to their non-atomic __uk_* counterparts. In vfscore_put_fd() the
test-and-clear, whose result was ignored, is reduced to a plain
__uk_clear_bit(). The sched_have_pending_events flag becomes unsigned long,
the word type the uk/bitops helpers operate on.

Signed-off-by: Yuri Volchkov <yuri.volchkov@xxxxxxxxx>
---
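Note: the substitutions below follow one convention: the *_sync helpers from
<uk/arch/atomic.h> become the uk_* calls and the plain helpers become the
non-atomic __uk_* calls, all taking a bit index and a pointer to an unsigned
long word. A minimal sketch of that usage; `pending', `producer' and
`consumer' are illustrative names only and are not part of this patch:

  #include <uk/bitops.h>

  /* uk/bitops operates on unsigned long words, hence the type change of
   * sched_have_pending_events in this patch */
  static unsigned long pending;

  static void producer(void)
  {
          /* uk_* call (replaces the *_sync helpers): used in this patch
           * e.g. on the Xen shared_info bitmaps in plat/xen/events.c */
          uk_set_bit(0, &pending);
  }

  static int consumer(void)
  {
          /* __uk_* call (replaces the plain helpers): used in this patch
           * for bound_ports, fdtable.bitmap and sched_have_pending_events */
          return __uk_test_and_clear_bit(0, &pending);
  }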
lib/vfscore/fd.c | 4 ++--
plat/common/arm/time.c | 6 +++---
plat/kvm/irq.c | 6 +++---
plat/kvm/x86/tscclock.c | 6 +++---
plat/xen/events.c | 18 +++++++++---------
plat/xen/xenbus/xs_comms.c | 4 ++--
6 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/lib/vfscore/fd.c b/lib/vfscore/fd.c
index 07a69b5..6c22042 100644
--- a/lib/vfscore/fd.c
+++ b/lib/vfscore/fd.c
@@ -35,7 +35,7 @@
#include <string.h>
#include <uk/essentials.h>
-#include <uk/arch/atomic.h>
+#include <uk/bitops.h>
#include <uk/assert.h>
#include <vfscore/file.h>
#include <uk/plat/lcpu.h>
@@ -78,7 +78,7 @@ void vfscore_put_fd(int fd)
/* Currently it is not allowed to free std(in|out|err) */
UK_ASSERT(fd > 2);
- ukarch_test_and_clr_bit(fd, &fdtable.bitmap);
+ __uk_clear_bit(fd, &fdtable.bitmap);
}
void vfscore_install_fd(int fd, struct vfscore_file *file)
diff --git a/plat/common/arm/time.c b/plat/common/arm/time.c
index 949fa74..f1df98d 100644
--- a/plat/common/arm/time.c
+++ b/plat/common/arm/time.c
@@ -172,7 +172,7 @@ static int generic_timer_init(void)
return 0;
}
-long sched_have_pending_events;
+unsigned long sched_have_pending_events;
void time_block_until(__snsec until)
{
@@ -182,8 +182,8 @@ void time_block_until(__snsec until)
* As we haven't support interrupt on Arm, so we just
* use busy polling for now.
*/
- if (ukarch_test_and_clr_bit(0, &sched_have_pending_events))
- break;
+ if (__uk_test_and_clear_bit(0, &sched_have_pending_events))
+ break;
}
}
diff --git a/plat/kvm/irq.c b/plat/kvm/irq.c
index e708069..0b734f6 100644
--- a/plat/kvm/irq.c
+++ b/plat/kvm/irq.c
@@ -34,7 +34,7 @@
#include <kvm/intctrl.h>
#include <uk/assert.h>
#include <errno.h>
-#include <uk/arch/atomic.h>
+#include <uk/bitops.h>
static struct uk_alloc *allocator;
@@ -75,7 +75,7 @@ int ukplat_irq_register(unsigned long irq, irq_handler_func_t func, void *arg)
* TODO: This is a temporary solution used to identify non TSC clock
* interrupts in order to stop waiting for interrupts with deadline.
*/
-extern long sched_have_pending_events;
+extern unsigned long sched_have_pending_events;
void _ukplat_irq_handle(unsigned long irq)
{
@@ -95,7 +95,7 @@ void _ukplat_irq_handle(unsigned long irq)
* the halting loop, and let it take care of
* that work.
*/
- ukarch_test_and_set_bit(0, &sched_have_pending_events);
+ __uk_test_and_set_bit(0, &sched_have_pending_events);
if (h->func(h->arg) == 1) {
handled = 1;
diff --git a/plat/kvm/x86/tscclock.c b/plat/kvm/x86/tscclock.c
index 4ef2179..f3fa55a 100644
--- a/plat/kvm/x86/tscclock.c
+++ b/plat/kvm/x86/tscclock.c
@@ -58,7 +58,7 @@
#include <uk/timeconv.h>
#include <uk/print.h>
#include <uk/assert.h>
-#include <uk/arch/atomic.h>
+#include <uk/bitops.h>
#define NSEC_PER_SEC 1000000000ULL
@@ -346,14 +346,14 @@ static void tscclock_cpu_block(__u64 until)
ukplat_lcpu_halt_irq();
}
-long sched_have_pending_events;
+unsigned long sched_have_pending_events;
void time_block_until(__snsec until)
{
while ((__snsec) ukplat_monotonic_clock() < until) {
tscclock_cpu_block(until);
- if (ukarch_test_and_clr_bit(0, &sched_have_pending_events))
+ if (__uk_test_and_clear_bit(0, &sched_have_pending_events))
break;
}
}
diff --git a/plat/xen/events.c b/plat/xen/events.c
index 6721985..6df3e4b 100644
--- a/plat/xen/events.c
+++ b/plat/xen/events.c
@@ -38,7 +38,7 @@
#include <common/events.h>
#include <xen/xen.h>
#include <uk/print.h>
-#include <uk/arch/atomic.h>
+#include <uk/bitops.h>
#define NR_EVS 1024
@@ -69,7 +69,7 @@ void unbind_all_ports(void)
continue;
#endif
- if (ukarch_test_and_clr_bit(i, bound_ports)) {
+ if (__uk_test_and_clear_bit(i, bound_ports)) {
uk_pr_warn("Port %d still bound!\n", i);
unbind_evtchn(i);
}
@@ -112,7 +112,7 @@ evtchn_port_t bind_evtchn(evtchn_port_t port, evtchn_handler_t handler,
ev_actions[port].data = data;
wmb();
ev_actions[port].handler = handler;
- ukarch_set_bit(port, bound_ports);
+ __uk_set_bit(port, bound_ports);
return port;
}
@@ -130,7 +130,7 @@ void unbind_evtchn(evtchn_port_t port)
ev_actions[port].handler = default_handler;
wmb();
ev_actions[port].data = NULL;
- ukarch_clr_bit(port, bound_ports);
+ __uk_clear_bit(port, bound_ports);
close.port = port;
rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
@@ -297,7 +297,7 @@ inline void mask_evtchn(uint32_t port)
{
shared_info_t *s = HYPERVISOR_shared_info;
- ukarch_set_bit_sync(port, &s->evtchn_mask[0]);
+ uk_set_bit(port, &s->evtchn_mask[0]);
}
inline void unmask_evtchn(uint32_t port)
@@ -305,15 +305,15 @@ inline void unmask_evtchn(uint32_t port)
shared_info_t *s = HYPERVISOR_shared_info;
vcpu_info_t *vcpu_info = &s->vcpu_info[smp_processor_id()];
- ukarch_clr_bit_sync(port, &s->evtchn_mask[0]);
+ uk_clear_bit(port, &s->evtchn_mask[0]);
/*
* The following is basically the equivalent of 'hw_resend_irq'.
* Just like a real IO-APIC we 'lose the interrupt edge' if the
* channel is masked.
*/
- if (ukarch_test_bit_sync(port, &s->evtchn_pending[0]) &&
- !ukarch_test_and_set_bit_sync(port / (sizeof(unsigned long) * 8),
+ if (uk_test_bit(port, &s->evtchn_pending[0]) &&
+ !uk_test_and_set_bit(port / (sizeof(unsigned long) * 8),
&vcpu_info->evtchn_pending_sel)) {
vcpu_info->evtchn_upcall_pending = 1;
#ifdef XEN_HAVE_PV_UPCALL_MASK
@@ -327,7 +327,7 @@ inline void clear_evtchn(uint32_t port)
{
shared_info_t *s = HYPERVISOR_shared_info;
- ukarch_clr_bit_sync(port, &s->evtchn_pending[0]);
+ uk_clear_bit(port, &s->evtchn_pending[0]);
}
struct uk_alloc;
diff --git a/plat/xen/xenbus/xs_comms.c b/plat/xen/xenbus/xs_comms.c
index c8201a5..274e44b 100644
--- a/plat/xen/xenbus/xs_comms.c
+++ b/plat/xen/xenbus/xs_comms.c
@@ -186,7 +186,7 @@ static void xs_request_put(struct xs_request *xs_req)
ukarch_spin_lock(&xs_req_pool.lock);
- UK_ASSERT(ukarch_test_bit(reqid, xs_req_pool.entries_bm) == 1);
+ UK_ASSERT(uk_test_bit(reqid, xs_req_pool.entries_bm) == 1);
uk_clear_bit(reqid, xs_req_pool.entries_bm);
xs_req_pool.num_live--;
@@ -427,7 +427,7 @@ static void process_reply(struct xsd_sockmsg *hdr, char *payload)
{
struct xs_request *xs_req;
- if (!ukarch_test_bit(hdr->req_id, xs_req_pool.entries_bm)) {
+ if (!uk_test_bit(hdr->req_id, xs_req_pool.entries_bm)) {
uk_pr_warn("Invalid reply id=%d\n", hdr->req_id);
free(payload);
return;