		if (minor(req->rq_dev) >= MAX_MTD_DEVICES)
			panic("%s : minor out of bound", __FUNCTION__);

		if (!IS_REQ_CMD(req))
			goto end_req;

		if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
			goto end_req;

		// Handle the request
		switch (rq_data_dir(req))
		{
			int err;

			case READ:
			down(&mtdblk->cache_sem);
			err = do_cached_read (mtdblk, req->sector << 9,
					req->current_nr_sectors << 9,
					req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;

			case WRITE:
			// Read only device
			if ( !(mtdblk->mtd->flags & MTD_WRITEABLE) )
				break;

			// Do the write
			down(&mtdblk->cache_sem);
			err = do_cached_write (mtdblk, req->sector << 9,
					req->current_nr_sectors << 9,
					req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;
		}

	end_req:
		spin_lock_irq(QUEUE_LOCK(QUEUE));
		end_request(res);
	}
}
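For reference, here is a minimal stand-alone sketch of the sector arithmetic used above; the struct and function names (fake_request, request_in_bounds, request_to_byte_range) are hypothetical and only mirror the fields the driver reads from struct request. Sector numbers and counts are in 512-byte units, so shifting by 9 converts them to byte offsets and lengths, and a request is rejected when it would run past the last sector, i.e. past mtd->size >> 9.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct fake_request {                  /* assumed fields, modelled on struct request */
		unsigned long sector;              /* first 512-byte sector of the transfer */
		unsigned long current_nr_sectors;  /* number of sectors in this segment */
	};

	static bool request_in_bounds(const struct fake_request *req, uint64_t device_size)
	{
		/* device_size >> 9 is the number of 512-byte sectors on the device */
		return req->sector + req->current_nr_sectors <= (device_size >> 9);
	}

	static void request_to_byte_range(const struct fake_request *req,
	                                  uint64_t *pos, size_t *len)
	{
		*pos = (uint64_t)req->sector << 9;            /* byte offset on the device */
		*len = (size_t)req->current_nr_sectors << 9;  /* transfer length in bytes */
	}

	int main(void)
	{
		struct fake_request req = { .sector = 8, .current_nr_sectors = 4 };
		uint64_t pos;
		size_t len;

		if (request_in_bounds(&req, 1024 * 1024)) {   /* pretend 1 MiB device */
			request_to_byte_range(&req, &pos, &len);
			printf("transfer %zu bytes at offset %llu\n",
			       len, (unsigned long long)pos);
		}
		return 0;
	}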
static volatile int leaving = 0;
static DECLARE_MUTEX_LOCKED(thread_sem);
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);

int mtdblock_thread(void *dummy)
{
	…
}
#define RQFUNC_ARG request_queue_t *q
static void mtdblock_request(RQFUNC_ARG)
{
	/* Don't do anything, except wake the thread if necessary */
	wake_up(&thr_wq);
}
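The request function itself does no I/O: it only wakes the waiters on thr_wq, and mtdblock_thread then services the queue in a context where sleeping is allowed (the cached read/write paths above take cache_sem with down(), which may block). Below is a minimal user-space analogue of that wake-the-worker pattern, sketched with POSIX threads; the names submit_request and worker are hypothetical, the leaving flag mirrors the driver's own leaving variable, and the kernel code of course uses a wait queue and a kernel thread rather than pthreads.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  work_cv   = PTHREAD_COND_INITIALIZER;
	static int pending = 0;           /* count of queued "requests" */
	static bool leaving = false;      /* set when the worker should exit */

	/* Analogue of mtdblock_request(): cheap, never does the work itself. */
	static void submit_request(void)
	{
		pthread_mutex_lock(&work_lock);
		pending++;
		pthread_cond_signal(&work_cv);   /* analogue of wake_up(&thr_wq) */
		pthread_mutex_unlock(&work_lock);
	}

	/* Analogue of mtdblock_thread(): sleeps until woken, then drains requests. */
	static void *worker(void *unused)
	{
		(void)unused;
		pthread_mutex_lock(&work_lock);
		for (;;) {
			while (pending == 0 && !leaving)
				pthread_cond_wait(&work_cv, &work_lock);
			if (leaving && pending == 0)
				break;
			pending--;
			pthread_mutex_unlock(&work_lock);
			puts("handling one request");   /* the real work happens unlocked */
			pthread_mutex_lock(&work_lock);
		}
		pthread_mutex_unlock(&work_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t tid;

		pthread_create(&tid, NULL, worker, NULL);
		submit_request();
		submit_request();

		pthread_mutex_lock(&work_lock);
		leaving = true;
		pthread_cond_broadcast(&work_cv);
		pthread_mutex_unlock(&work_lock);
		pthread_join(tid, NULL);
		return 0;
	}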