5
#include "normxcorr_hw_msg.h"
8
//#include "mrses_ppu.h"
11
/*
 * MUTEX_INIT(ctx, name): allocate ctx->name##_mutex with g_mutex_new()
 * (pre-GLib-2.32 threading API) and, on allocation failure, set the local
 * `err` variable to DICT_ERROR_GLIB.  Relies on an `err` variable being in
 * scope at every expansion site.
 * NOTE(review): the bare numeric lines interleaved below ("13", "14") are
 * extraction artifacts (original source line numbers), and at least one
 * original line appears to be missing between them — restore from VCS.
 */
#define MUTEX_INIT(ctx, name) \
13
ctx->name##_mutex = g_mutex_new(); \
14
if (!ctx->name##_mutex) err = DICT_ERROR_GLIB; \
17
/*
 * MUTEX_FREE(ctx, name): release ctx->name##_mutex via g_mutex_free() if it
 * was successfully allocated; safe to invoke on a zeroed context.
 * NOTE(review): the bare "18" line below is an extraction artifact.
 */
#define MUTEX_FREE(ctx, name) \
18
if (ctx->name##_mutex) g_mutex_free(ctx->name##_mutex);
20
/*
 * COND_INIT(ctx, name): initialize the condition variable ctx->name##_cond
 * together with its paired mutex ctx->name##_cond_mutex (created first via
 * MUTEX_INIT with the token `name##_cond`).  On g_cond_new() failure the
 * macro sets `err` and frees the just-created paired mutex.
 * NOTE(review): this fragment opens a `{` block whose closing lines
 * (original lines 27-29) are not present in this copy — the macro is
 * incomplete as shown; recover the tail from version control.
 * The bare numeric lines are extraction artifacts.
 */
#define COND_INIT(ctx, name) \
21
MUTEX_INIT(ctx, name##_cond) \
23
ctx->name##_cond = g_cond_new(); \
24
if (!ctx->name##_cond) { \
25
err = DICT_ERROR_GLIB; \
26
MUTEX_FREE(ctx, name##_cond) \
30
/*
 * COND_FREE(ctx, name): tear-down counterpart of COND_INIT — free the
 * condition variable if allocated, then free its paired mutex.
 * NOTE(review): the bare numeric lines below are extraction artifacts.
 */
#define COND_FREE(ctx, name) \
31
if (ctx->name##_cond) g_cond_free(ctx->name##_cond); \
32
MUTEX_FREE(ctx, name##_cond)
34
/*
 * Table of PPU run handlers passed to hw_thread_create() below.
 * NOTE(review): the initializer body (original lines 35-37) is missing from
 * this copy of the file — only the opening of the definition is visible.
 */
HWRunFunction ppu_run[] = {
38
/* One-shot guard so global scheduler initialization runs only once per
 * process.  NOTE(review): the enclosing function's header and the code
 * between the guard and the flag assignment (original lines 39-50) are
 * missing from this copy; not thread-safe as a guard on its own — confirm
 * callers serialize it.  Bare numeric lines are extraction artifacts. */
static int hw_sched_initialized = 0;
41
if (!hw_sched_initialized) {
43
hw_sched_initialized = 1;
51
/*
 * hw_sched_create: allocate and initialize a scheduler context and spawn up
 * to `ppu_count` worker threads running the ppu_run handler table.
 * Returns NULL if the context allocation fails.  Threads that fail to start
 * are simply not counted: n_threads only advances on success.
 * NOTE(review): this copy is missing original lines 61, 63-65, 68-70, 73-76
 * and the function tail (declarations of `ctx`/`i`/`err`, the error check
 * that triggers the reportError/hw_sched_destroy branch, and the final
 * return) — restore from version control.  Bare numeric lines are
 * extraction artifacts.
 */
HWSched hw_sched_create(int ppu_count) {
59
ctx = (HWSched)malloc(sizeof(HWSchedS));
60
if (!ctx) return NULL;
62
/* Zero the context so the *_FREE macros are safe on partial init. */
memset(ctx, 0, sizeof(HWSchedS));
66
MUTEX_INIT(ctx, data);
67
COND_INIT(ctx, compl);
71
/* Reached when mutex/cond setup set `err` (branch condition not visible
 * in this copy); destroy tolerates a partially-initialized context. */
reportError("Error initializing conditions and mutexes");
72
hw_sched_destroy(ctx);
77
for (i = 0; i < ppu_count; i++) {
78
ctx->thread[ctx->n_threads] = hw_thread_create(ctx, ctx->n_threads, NULL, ppu_run, NULL);
79
/* Count only threads that were actually created. */
if (ctx->thread[ctx->n_threads]) ++ctx->n_threads;
85
/*
 * hw_sched_wait_threads: block, under the compl_cond mutex, until no worker
 * thread is still in HW_THREAD_STATUS_INIT — i.e. until all spawned threads
 * have finished their startup and signalled the `compl` condition.
 * NOTE(review): original lines 86-87 and 93-97 (the `i` declaration, the
 * break/re-scan logic after the wait, and the return value) are missing
 * from this copy.  Bare numeric lines are extraction artifacts.
 */
static int hw_sched_wait_threads(HWSched ctx) {
88
hw_sched_lock(ctx, compl_cond);
89
while (i < ctx->n_threads) {
90
for (; i < ctx->n_threads; i++) {
91
if (ctx->thread[i]->status == HW_THREAD_STATUS_INIT) {
92
/* At least one thread not yet up: sleep until it signals compl,
 * then re-check from the current position. */
hw_sched_wait(ctx, compl);
98
hw_sched_unlock(ctx, compl_cond);
105
/*
 * hw_sched_destroy: tear down a scheduler context.  If worker threads were
 * started: wait until they are out of INIT, broadcast on the `job`
 * condition (presumably after setting a shutdown flag on lines missing
 * here — TODO confirm against original lines 111-113), then destroy each
 * thread.  Finally release the compl condition pair and the data mutex.
 * Safe on a partially-initialized context because the *_FREE macros check
 * for NULL.
 * NOTE(review): original lines 106-107, 109, 111-113, 117, 120-123 and
 * 126+ (declarations, the shutdown signalling, closing braces) are missing
 * from this copy.  Bare numeric lines are extraction artifacts.
 */
void hw_sched_destroy(HWSched ctx) {
108
if (ctx->n_threads > 0) {
110
hw_sched_wait_threads(ctx);
114
hw_sched_lock(ctx, job_cond);
115
/* Wake all workers blocked waiting for a job. */
hw_sched_broadcast(ctx, job);
116
hw_sched_unlock(ctx, job_cond);
118
for (i = 0; i < ctx->n_threads; i++) {
119
hw_thread_destroy(ctx->thread[i]);
124
COND_FREE(ctx, compl);
125
MUTEX_FREE(ctx, data);
130
/*
 * hw_sched_set_sequential_mode: switch the scheduler to sequential chunk
 * dispatch.  `n_blocks` points at the total block count and `cur_block` at
 * the shared next-block counter; both are read (and cur_block incremented)
 * under the data mutex in hw_sched_get_chunk.  Caller retains ownership of
 * both pointers, which must stay valid while this mode is active.
 * NOTE(review): the return statement (original lines 134+) is missing from
 * this copy — presumably `return 0;`, TODO confirm.  Bare numeric lines are
 * extraction artifacts.
 */
int hw_sched_set_sequential_mode(HWSched ctx, int *n_blocks, int *cur_block) {
131
ctx->mode = HW_SCHED_MODE_SEQUENTIAL;
132
ctx->n_blocks = n_blocks;
133
ctx->cur_block = cur_block;
138
/*
 * hw_sched_get_chunk: hand thread `thread_id` its next work chunk,
 * dispatching on ctx->mode.
 *  - PREALLOCATED: chunk assignment is keyed off the thread's own status
 *    (the returned value for the IDLE branch is on lines missing here).
 *  - SEQUENTIAL: atomically (under the data mutex) read and post-increment
 *    the shared *ctx->cur_block counter while it is below *ctx->n_blocks.
 * NOTE(review): original lines 139-141, 144-147, 153-155, 157-165 (the
 * `switch` header, `block` declaration, returns, no-more-work sentinel and
 * default case) are missing from this copy.  Bare numeric lines are
 * extraction artifacts.
 */
int hw_sched_get_chunk(HWSched ctx, int thread_id) {
142
case HW_SCHED_MODE_PREALLOCATED:
143
if (ctx->thread[thread_id]->status == HW_THREAD_STATUS_IDLE) {
148
case HW_SCHED_MODE_SEQUENTIAL:
149
hw_sched_lock(ctx, data);
150
block = *ctx->cur_block;
151
if (block < *ctx->n_blocks) {
152
/* Claim this block: advance the shared counter while holding the
 * data mutex so two threads never take the same block. */
*ctx->cur_block = *ctx->cur_block + 1;
156
hw_sched_unlock(ctx, data);
166
/*
 * hw_sched_schedule_task: dispatch a new task to the worker pool.  Waits
 * until all threads are ready, then (on lines missing from this copy,
 * presumably) records `appctx`/`entry` and thread statuses, and finally
 * broadcasts the `job` condition to wake the workers.
 * NOTE(review): original lines 167, 169-173, 175, 179-182 are missing,
 * including whatever is done under the compl_cond lock taken at line 174
 * and its matching unlock, and the return value.  Bare numeric lines are
 * extraction artifacts.
 */
int hw_sched_schedule_task(HWSched ctx, void *appctx, HWEntry entry) {
168
hw_sched_wait_threads(ctx);
174
hw_sched_lock(ctx, compl_cond);
176
hw_sched_lock(ctx, job_cond);
177
/* Wake every worker blocked waiting for a job. */
hw_sched_broadcast(ctx, job);
178
hw_sched_unlock(ctx, job_cond);
183
/*
 * hw_sched_wait_task: block until every worker thread has completed the
 * current task.  Scans thread statuses, flipping DONE back to IDLE; when a
 * thread is neither (still running), sleeps on the `compl` condition and
 * re-scans.  Mirrors the structure of hw_sched_wait_threads.
 * NOTE(review): original lines 184-185, 190, 192-197, 199+ are missing from
 * this copy — the `i` declaration, the compl_cond lock that must pair with
 * the unlock at the end, the else-branch around the wait, and the return.
 * Bare numeric lines are extraction artifacts.
 */
int hw_sched_wait_task(HWSched ctx) {
186
while (i < ctx->n_threads) {
187
for (; i < ctx->n_threads; i++) {
188
if (ctx->thread[i]->status == HW_THREAD_STATUS_DONE) {
189
/* Acknowledge completion so the thread can accept the next job. */
ctx->thread[i]->status = HW_THREAD_STATUS_IDLE;
191
hw_sched_wait(ctx, compl);
198
hw_sched_unlock(ctx, compl_cond);