/* $Id$ */
/*
 * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
 * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "test.h"

/**
 * \page page_pjlib_timer_test Test: Timer
 *
 * This file provides implementation of \b timer_test(). It tests the
 * functionality of the timer heap.
 *
 *
 * This file is pjlib-test/timer.c
 *
 * \include pjlib-test/timer.c
 */

/* NOTE(review): this file was recovered from a markup-mangled copy in which
 * every "<word...>" span had been stripped out (HTML-tag style), losing the
 * include header names, several loop bounds, and the middle of
 * timer_stress_test() plus the benchmark helpers. The stripped regions were
 * reconstructed from the surviving code and the documented test scenarios;
 * verify against the upstream pjproject sources before merging.
 */

#if INCLUDE_TIMER_TEST

#include <pjlib.h>

#define LOOP            16
#define MIN_COUNT       250
#define MAX_COUNT       (LOOP * MIN_COUNT)
#define MIN_DELAY       2
#define D               (MAX_COUNT / 32000)
#define DELAY           (D < MIN_DELAY ? MIN_DELAY : D)
#define THIS_FILE       "timer_test"


/* Callback for the basic test's entries; the basic test only measures
 * scheduling/cancelling/polling time, so the callback does nothing.
 */
static void timer_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    PJ_UNUSED_ARG(ht);
    PJ_UNUSED_ARG(e);
}

/* Basic timer heap test.
 *
 * For LOOP iterations with a doubling entry count (MIN_COUNT..MAX_COUNT):
 * schedule `count` entries with random delays (polling as we go), then
 * poll/cancel until the heap drains, measuring the average CPU-timestamp
 * cost of schedule, cancel and poll operations.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int test_timer_heap(void)
{
    int i, j;
    pj_timer_entry *entry;
    pj_pool_t *pool;
    pj_timer_heap_t *timer;
    pj_time_val delay;
    pj_status_t status;
    int err=0;
    pj_size_t size;
    unsigned count;

    PJ_LOG(3,("test", "...Basic test"));

    size = pj_timer_heap_mem_size(MAX_COUNT)+MAX_COUNT*sizeof(pj_timer_entry);
    pool = pj_pool_create( mem, NULL, size, 4000, NULL);
    if (!pool) {
        PJ_LOG(3,("test", "...error: unable to create pool of %u bytes",
                  size));
        return -10;
    }

    entry = (pj_timer_entry*)pj_pool_calloc(pool, MAX_COUNT, sizeof(*entry));
    if (!entry)
        return -20;

    /* NOTE(review): reconstructed region start (entry init + heap create +
     * scheduling loop were eaten by the markup stripping). */
    for (i=0; i<MAX_COUNT; ++i) {
        entry[i].cb = &timer_callback;
    }

    status = pj_timer_heap_create(pool, MAX_COUNT, &timer);
    if (status != PJ_SUCCESS) {
        app_perror("...error: unable to create timer heap", status);
        return -30;
    }

    count = MIN_COUNT;
    for (i=0; i<LOOP; ++i) {
        int early = 0;
        int done=0;
        int cancelled=0;
        int rc;
        pj_timestamp t1, t2, t_sched, t_cancel, t_poll;
        pj_time_val now, expire;

        pj_gettimeofday(&now);
        pj_srand(now.sec);
        t_sched.u32.lo = t_cancel.u32.lo = t_poll.u32.lo = 0;

        // Register timers
        for (j=0; j<(int)count; ++j) {
            delay.sec = pj_rand() % DELAY;
            delay.msec = pj_rand() % 1000;

            // Schedule timer
            pj_get_timestamp(&t1);
            rc = pj_timer_heap_schedule(timer, &entry[j], &delay);
            if (rc != 0)
                return -40;
            pj_get_timestamp(&t2);
            t_sched.u32.lo += (t2.u32.lo - t1.u32.lo);

            // Poll timers; entries that fire during scheduling are "early".
            pj_get_timestamp(&t1);
            rc = pj_timer_heap_poll(timer, NULL);
            pj_get_timestamp(&t2);
            if (rc > 0) {
                t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
                early += rc;
            }
        }
        /* NOTE(review): reconstructed region end. */

        // Set the time where all timers should finish
        pj_gettimeofday(&expire);
        delay.sec = DELAY;
        delay.msec = 0;
        PJ_TIME_VAL_ADD(expire, delay);

        // Wait until all timers finish, cancel some of them.
        do {
            int index = pj_rand() % count;
            pj_get_timestamp(&t1);
            rc = pj_timer_heap_cancel(timer, &entry[index]);
            pj_get_timestamp(&t2);
            if (rc > 0) {
                cancelled += rc;
                t_cancel.u32.lo += (t2.u32.lo - t1.u32.lo);
            }

            pj_gettimeofday(&now);

            pj_get_timestamp(&t1);
#if defined(PJ_SYMBIAN) && PJ_SYMBIAN!=0
            /* On Symbian, we must use OS poll (Active Scheduler poll) since
             * timer is implemented using Active Object.
             */
            rc = 0;
            while (pj_symbianos_poll(-1, 0))
                ++rc;
#else
            rc = pj_timer_heap_poll(timer, NULL);
#endif
            pj_get_timestamp(&t2);
            if (rc > 0) {
                done += rc;
                t_poll.u32.lo += (t2.u32.lo - t1.u32.lo);
            }

        } while (PJ_TIME_VAL_LTE(now, expire)&&pj_timer_heap_count(timer) > 0);

        if (pj_timer_heap_count(timer)) {
            PJ_LOG(3, (THIS_FILE,
                       "ERROR: %d timers left",
                       pj_timer_heap_count(timer)));
            ++err;
        }

        /* Report the average per-entry cost of each operation */
        t_sched.u32.lo /= count;
        t_cancel.u32.lo /= count;
        t_poll.u32.lo /= count;
        PJ_LOG(4, (THIS_FILE,
                   "...ok (count:%d, early:%d, cancelled:%d, "
                   "sched:%d, cancel:%d poll:%d)",
                   count, early, cancelled, t_sched.u32.lo, t_cancel.u32.lo,
                   t_poll.u32.lo));

        count = count * 2;
        if (count > MAX_COUNT)
            break;
    }

    pj_pool_release(pool);
    return err;
}


/***************
 * Stress test *
 ***************
 * Test scenario (if RANDOMIZED_TEST is 0):
 * 1. Create and schedule a number of timer entries.
 * 2. Start threads for polling (simulating normal worker thread).
 *    Each expired entry will try to cancel and re-schedule itself
 *    from within the callback.
 * 3. Start threads for cancelling random entries. Each successfully
 *    cancelled entry will be re-scheduled after some random delay.
 *
 * Test scenario (if RANDOMIZED_TEST is 1):
 * 1. Create and schedule a number of timer entries.
 * 2. Start threads which will, based on a configurable probability
 *    setting, randomly perform timer scheduling, cancelling, or
 *    polling (simulating normal worker thread).
 * This test is considered a failure if:
 * - It triggers assertion/crash.
 * - There's an error message in the log, which indicates a potential
 *   bug in the implementation (note that race message is ok).
 */
#define RANDOMIZED_TEST 1
#define SIMULATE_CRASH  PJ_TIMER_USE_COPY

#if RANDOMIZED_TEST
    #define ST_STRESS_THREAD_COUNT      20
    #define ST_POLL_THREAD_COUNT        0
    #define ST_CANCEL_THREAD_COUNT      0
#else
    #define ST_STRESS_THREAD_COUNT      0
    #define ST_POLL_THREAD_COUNT        10
    #define ST_CANCEL_THREAD_COUNT      10
#endif

#define ST_ENTRY_COUNT                  10000
#define ST_DURATION                     30000
#define ST_ENTRY_MAX_TIMEOUT_MS         (ST_DURATION/10)

/* Number of group lock, may be zero, shared by timer entries, group lock
 * can be useful to evaluate poll vs cancel race condition scenario, i.e:
 * each group lock must have ref count==1 at the end of the test, otherwise
 * assertion will raise.
 */
#define ST_ENTRY_GROUP_LOCK_COUNT       1

#define BT_ENTRY_COUNT                  100000
#define BT_ENTRY_SHOW_START             100
#define BT_ENTRY_SHOW_MULT              10
#define BT_REPEAT_RANDOM_TEST           4
#define BT_REPEAT_INC_TEST              4


/* Shared state for all stress-test worker threads. */
struct thread_param
{
    pj_timer_heap_t *timer;         /* the heap under test              */
    pj_bool_t stopping;             /* set to request workers to stop   */
    pj_timer_entry *entries;        /* ST_ENTRY_COUNT test entries      */
    pj_atomic_t **status;           /* per-entry scheduled flag (0/1)   */
    pj_atomic_t *n_sched, *n_cancel, *n_poll;   /* operation counters   */
    pj_grp_lock_t **grp_locks;      /* shared group locks (may be NULL) */
    int err;                        /* first error detected by workers  */

    pj_atomic_t *idx;               /* thread index allocator           */
    struct {
        pj_bool_t is_poll;
        unsigned cnt;
    } stat[ST_POLL_THREAD_COUNT + ST_CANCEL_THREAD_COUNT + 1];
    /* Plus one here to avoid compile warning of zero-sized array */
};

/* Schedule entry `e` with a random delay (< ST_ENTRY_MAX_TIMEOUT_MS),
 * attaching one of the shared group locks about 90% of the time.
 */
static pj_status_t st_schedule_entry(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    pj_time_val delay = {0};
    pj_grp_lock_t *grp_lock = NULL;
    pj_status_t status;
    struct thread_param *tparam = (struct thread_param *)e->user_data;

    if (ST_ENTRY_GROUP_LOCK_COUNT && pj_rand() % 10) {
        /* About 90% of entries should have group lock */
        grp_lock = tparam->grp_locks[pj_rand() % ST_ENTRY_GROUP_LOCK_COUNT];
    }

    delay.msec = pj_rand() % ST_ENTRY_MAX_TIMEOUT_MS;
    pj_time_val_normalize(&delay);
    status = pj_timer_heap_schedule_w_grp_lock(ht, e, &delay, 1, grp_lock);
    return status;
}

/* Callback used by the crash-simulation entry; just logs. */
static void dummy_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    PJ_UNUSED_ARG(ht);
    PJ_LOG(4,("test", "dummy callback called %p %p", e, e->user_data));
}

/* Callback of the stress-test entries: clear the scheduled flag,
 * attempt a (no-op) cancel, pretend to work, then reschedule itself
 * (only in the poll/cancel-thread variant of the test).
 */
static void st_entry_callback(pj_timer_heap_t *ht, pj_timer_entry *e)
{
    struct thread_param *tparam = (struct thread_param *)e->user_data;

#if RANDOMIZED_TEST
    /* Make sure the flag has been set. */
    while (pj_atomic_get(tparam->status[e - tparam->entries]) != 1)
        pj_thread_sleep(10);
    pj_atomic_set(tparam->status[e - tparam->entries], 0);
#endif

    /* try to cancel this */
    pj_timer_heap_cancel_if_active(ht, e, 10);

    /* busy doing something */
    pj_thread_sleep(pj_rand() % 50);

    /* reschedule entry */
    if (!ST_STRESS_THREAD_COUNT)
        st_schedule_entry(ht, e);
}

/* Randomized stress worker thread function.
 * Repeatedly picks a random entry and a random task (schedule, cancel,
 * poll, or idle) according to the `prob` distribution, cross-checking the
 * per-entry scheduled flag to distinguish benign races from real bugs.
 */
static int stress_worker(void *arg)
{
    /* Enumeration of possible task. */
    enum {
        SCHEDULING = 0,
        CANCELLING = 1,
        POLLING = 2,
        NOTHING = 3
    };
    /* Probability of a certain task being chosen.
     * The first number indicates the probability of the first task,
     * the second number for the second task, and so on.
     */
    int prob[3] = {75, 15, 5};
    struct thread_param *tparam = (struct thread_param*)arg;
    int t_idx, i;

    t_idx = pj_atomic_inc_and_get(tparam->idx);
    PJ_LOG(4,("test", "...thread #%d (random) started", t_idx));
    while (!tparam->stopping) {
        int job, task;
        int idx, count;
        pj_status_t prev_status, status;

        /* Randomly choose which task to do */
        job = pj_rand() % 100;
        if (job < prob[0]) task = SCHEDULING;
        else if (job < (prob[0] + prob[1])) task = CANCELLING;
        else if (job < (prob[0] + prob[1] + prob[2])) task = POLLING;
        else task = NOTHING;

        idx = pj_rand() % ST_ENTRY_COUNT;
        prev_status = pj_atomic_get(tparam->status[idx]);

        if (task == SCHEDULING) {
            if (prev_status != 0) continue;
            status = st_schedule_entry(tparam->timer, &tparam->entries[idx]);
            if (prev_status == 0 && status != PJ_SUCCESS) {
                /* To make sure the flag has been set. */
                pj_thread_sleep(20);
                if (pj_atomic_get(tparam->status[idx]) == 1) {
                    /* Race condition with another scheduling. */
                    PJ_LOG(3,("test", "race schedule-schedule %d: %p",
                              idx, &tparam->entries[idx]));
                } else {
                    /* Record the first error only (was `!= 0`, which could
                     * never record anything since err starts at 0). */
                    if (tparam->err == 0) tparam->err = -210;
                    PJ_LOG(3,("test", "error: failed to schedule entry %d: %p",
                              idx, &tparam->entries[idx]));
                }
            } else if (prev_status == 1 && status == PJ_SUCCESS) {
                /* Race condition with another cancellation or
                 * timer poll.
                 */
                pj_thread_sleep(20);
                PJ_LOG(3,("test", "race schedule-cancel/poll %d: %p",
                          idx, &tparam->entries[idx]));
            }
            if (status == PJ_SUCCESS) {
                pj_atomic_set(tparam->status[idx], 1);
                pj_atomic_inc(tparam->n_sched);
            }
        } else if (task == CANCELLING) {
            count = pj_timer_heap_cancel_if_active(tparam->timer,
                                                   &tparam->entries[idx], 10);
            if (prev_status == 0 && count > 0) {
                /* To make sure the flag has been set. */
                pj_thread_sleep(20);
                if (pj_atomic_get(tparam->status[idx]) == 1) {
                    /* Race condition with scheduling. */
                    PJ_LOG(3,("test", "race cancel-schedule %d: %p",
                              idx, &tparam->entries[idx]));
                } else {
                    /* Record the first error only (see note above). */
                    if (tparam->err == 0) tparam->err = -220;
                    PJ_LOG(3,("test", "error: cancelling invalid entry %d: %p",
                              idx, &tparam->entries[idx]));
                }
            } else if (prev_status == 1 && count == 0) {
                /* To make sure the flag has been cleared. */
                pj_thread_sleep(20);
                if (pj_atomic_get(tparam->status[idx]) == 0) {
                    /* Race condition with polling. */
                    PJ_LOG(3,("test", "race cancel-poll %d: %p",
                              idx, &tparam->entries[idx]));
                } else {
                    /* Record the first error only (see note above). */
                    if (tparam->err == 0) tparam->err = -230;
                    PJ_LOG(3,("test", "error: failed to cancel entry %d: %p",
                              idx, &tparam->entries[idx]));
                }
            }
            if (count > 0) {
                /* Make sure the flag has been set. */
                while (pj_atomic_get(tparam->status[idx]) != 1)
                    pj_thread_sleep(10);
                pj_atomic_set(tparam->status[idx], 0);
                pj_atomic_inc(tparam->n_cancel);
            }
        } else if (task == POLLING) {
            count = pj_timer_heap_poll(tparam->timer, NULL);
            for (i = 0; i < count; i++) {
                pj_atomic_inc_and_get(tparam->n_poll);
            }
        } else {
            pj_thread_sleep(10);
        }
    }
    /* (was "(poll)", which contradicted the "(random)" start message) */
    PJ_LOG(4,("test", "...thread #%d (random) stopped", t_idx));

    return 0;
}

/* Poll worker thread function: repeatedly polls the heap and counts
 * how many entries it expired.
 */
static int poll_worker(void *arg)
{
    struct thread_param *tparam = (struct thread_param*)arg;
    int idx;

    idx = pj_atomic_inc_and_get(tparam->idx);
    tparam->stat[idx].is_poll = PJ_TRUE;

    PJ_LOG(4,("test", "...thread #%d (poll) started", idx));
    while (!tparam->stopping) {
        unsigned count;
        count = pj_timer_heap_poll(tparam->timer, NULL);
        if (count > 0) {
            /* Count expired entries */
            PJ_LOG(5,("test", "...thread #%d called %d entries",
                      idx, count));
            tparam->stat[idx].cnt += count;
        } else {
            pj_thread_sleep(10);
        }
    }
    PJ_LOG(4,("test", "...thread #%d (poll) stopped", idx));

    return 0;
}

/* Cancel worker thread function: repeatedly cancels random entries and
 * reschedules each successfully cancelled entry after a random delay.
 */
static int cancel_worker(void *arg)
{
    struct thread_param *tparam = (struct thread_param*)arg;
    int idx;

    idx = pj_atomic_inc_and_get(tparam->idx);
    tparam->stat[idx].is_poll = PJ_FALSE;

    PJ_LOG(4,("test", "...thread #%d (cancel) started", idx));
    while (!tparam->stopping) {
        int count;
        pj_timer_entry *e = &tparam->entries[pj_rand() % ST_ENTRY_COUNT];

        count = pj_timer_heap_cancel_if_active(tparam->timer, e, 2);
        if (count > 0) {
            /* Count cancelled entries */
            PJ_LOG(5,("test", "...thread #%d cancelled %d entries",
                      idx, count));
            tparam->stat[idx].cnt += count;

            /* Reschedule entry after some delay */
            pj_thread_sleep(pj_rand() % 100);
            st_schedule_entry(tparam->timer, e);
        }
    }
    PJ_LOG(4,("test", "...thread #%d (cancel) stopped", idx));

    return 0;
}

/* Multithreaded stress test driver; see the scenario description above.
 * Returns 0 on success, negative error code otherwise.
 */
static int timer_stress_test(void)
{
    unsigned count = 0, n_sched = 0, n_cancel = 0, n_poll = 0;
    int i;
    pj_timer_entry *entries = NULL;
    pj_atomic_t **entries_status = NULL;
    pj_grp_lock_t **grp_locks = NULL;
    pj_pool_t *pool;
    pj_timer_heap_t *timer = NULL;
    pj_lock_t *timer_lock;
    pj_status_t status;
    int err=0;
    pj_thread_t **stress_threads = NULL;
    pj_thread_t **poll_threads = NULL;
    pj_thread_t **cancel_threads = NULL;
    struct thread_param tparam = {0};
    pj_time_val now;
#if SIMULATE_CRASH
    pj_timer_entry *entry;
    pj_pool_t *tmp_pool;
    pj_time_val delay = {0};
#endif

    PJ_LOG(3,("test", "...Stress test"));

    pj_gettimeofday(&now);
    pj_srand(now.sec);

    pool = pj_pool_create( mem, NULL, 128, 128, NULL);
    if (!pool) {
        PJ_LOG(3,("test", "...error: unable to create pool"));
        err = -10;
        goto on_return;
    }

    /* Create timer heap.
     * Initially we only create a fraction of what's required,
     * to test the timer heap growth algorithm.
     */
    status = pj_timer_heap_create(pool, ST_ENTRY_COUNT/64, &timer);
    if (status != PJ_SUCCESS) {
        app_perror("...error: unable to create timer heap", status);
        err = -20;
        goto on_return;
    }

    /* Set recursive lock for the timer heap. */
    status = pj_lock_create_recursive_mutex( pool, "lock", &timer_lock);
    if (status != PJ_SUCCESS) {
        app_perror("...error: unable to create lock", status);
        err = -30;
        goto on_return;
    }
    pj_timer_heap_set_lock(timer, timer_lock, PJ_TRUE);

    /* Create group locks for the timer entry. */
    if (ST_ENTRY_GROUP_LOCK_COUNT) {
        grp_locks = (pj_grp_lock_t**)
                    pj_pool_calloc(pool, ST_ENTRY_GROUP_LOCK_COUNT,
                                   sizeof(pj_grp_lock_t*));
        tparam.grp_locks = grp_locks;
    }

    /* NOTE(review): reconstructed region start (from here down to and
     * including bench_test() the original text was eaten by the markup
     * stripping; rebuilt from the documented scenario and the workers'
     * usage of struct thread_param). */
    for (i=0; i<ST_ENTRY_GROUP_LOCK_COUNT; ++i) {
        status = pj_grp_lock_create(pool, NULL, &grp_locks[i]);
        if (status != PJ_SUCCESS) {
            app_perror("...error: unable to create group lock", status);
            err = -40;
            goto on_return;
        }
        /* Hold one reference for the duration of the test; at the end
         * each group lock's ref count must be back down to exactly 1.
         */
        pj_grp_lock_add_ref(grp_locks[i]);
    }

    /* Create the timer entries and their "scheduled" status flags */
    entries = (pj_timer_entry*)pj_pool_calloc(pool, ST_ENTRY_COUNT,
                                              sizeof(*entries));
    if (!entries) {
        err = -50;
        goto on_return;
    }
    entries_status = (pj_atomic_t**)pj_pool_calloc(pool, ST_ENTRY_COUNT,
                                                   sizeof(*entries_status));
    if (!entries_status) {
        err = -55;
        goto on_return;
    }

    tparam.timer = timer;
    tparam.entries = entries;
    tparam.status = entries_status;
    tparam.err = 0;

    for (i=0; i<ST_ENTRY_COUNT; ++i) {
        pj_timer_entry_init(&entries[i], 0, &tparam, &st_entry_callback);

        status = pj_atomic_create(pool, 0, &entries_status[i]);
        if (status != PJ_SUCCESS) {
            err = -60;
            goto on_return;
        }

        status = st_schedule_entry(timer, &entries[i]);
        if (status != PJ_SUCCESS) {
            app_perror("...error: unable to schedule entry", status);
            err = -70;
            goto on_return;
        }
        pj_atomic_set(entries_status[i], 1);
    }

    /* Thread index allocator starts at -1 so the first worker gets 0 */
    status = pj_atomic_create(pool, -1, &tparam.idx);
    if (status != PJ_SUCCESS) {
        err = -80;
        goto on_return;
    }
    status = pj_atomic_create(pool, 0, &tparam.n_sched);
    if (status != PJ_SUCCESS) {
        err = -81;
        goto on_return;
    }
    status = pj_atomic_create(pool, 0, &tparam.n_cancel);
    if (status != PJ_SUCCESS) {
        err = -82;
        goto on_return;
    }
    status = pj_atomic_create(pool, 0, &tparam.n_poll);
    if (status != PJ_SUCCESS) {
        err = -83;
        goto on_return;
    }

    /* Start the randomized stress worker threads */
    if (ST_STRESS_THREAD_COUNT) {
        stress_threads = (pj_thread_t**)
                         pj_pool_calloc(pool, ST_STRESS_THREAD_COUNT,
                                        sizeof(pj_thread_t*));
        for (i=0; i<ST_STRESS_THREAD_COUNT; ++i) {
            status = pj_thread_create(pool, "stress", &stress_worker, &tparam,
                                      0, 0, &stress_threads[i]);
            if (status != PJ_SUCCESS) {
                app_perror("...error: unable to create stress thread",
                           status);
                err = -90;
                goto on_return;
            }
        }
    }

    /* Start the poll worker threads */
    if (ST_POLL_THREAD_COUNT) {
        poll_threads = (pj_thread_t**)
                       pj_pool_calloc(pool, ST_POLL_THREAD_COUNT,
                                      sizeof(pj_thread_t*));
        for (i=0; i<ST_POLL_THREAD_COUNT; ++i) {
            status = pj_thread_create(pool, "poll", &poll_worker, &tparam,
                                      0, 0, &poll_threads[i]);
            if (status != PJ_SUCCESS) {
                app_perror("...error: unable to create poll thread", status);
                err = -91;
                goto on_return;
            }
        }
    }

    /* Start the cancel worker threads */
    if (ST_CANCEL_THREAD_COUNT) {
        cancel_threads = (pj_thread_t**)
                         pj_pool_calloc(pool, ST_CANCEL_THREAD_COUNT,
                                        sizeof(pj_thread_t*));
        for (i=0; i<ST_CANCEL_THREAD_COUNT; ++i) {
            status = pj_thread_create(pool, "cancel", &cancel_worker, &tparam,
                                      0, 0, &cancel_threads[i]);
            if (status != PJ_SUCCESS) {
                app_perror("...error: unable to create cancel thread",
                           status);
                err = -92;
                goto on_return;
            }
        }
    }

#if SIMULATE_CRASH
    /* Schedule an extra entry from a temporary pool, then release the pool
     * while the entry is still active. With PJ_TIMER_USE_COPY the heap
     * keeps its own copy of the entry, so polling it later must not crash.
     */
    tmp_pool = pj_pool_create( mem, NULL, 4096, 128, NULL);
    if (!tmp_pool) {
        err = -93;
        goto on_return;
    }
    entry = (pj_timer_entry*)pj_pool_calloc(tmp_pool, 1, sizeof(*entry));
    pj_timer_entry_init(entry, 0, &tparam, &dummy_callback);
    delay.sec = 6;
    status = pj_timer_heap_schedule(timer, entry, &delay);
    if (status != PJ_SUCCESS) {
        err = -94;
        goto on_return;
    }
    pj_thread_sleep(1000);
    PJ_LOG(3,("test", "...Releasing timer entry %p without cancelling it",
              entry));
    pj_pool_safe_release(&tmp_pool);
    entry = NULL;
#endif

    /* Let the worker threads hammer the heap for the test duration */
    pj_thread_sleep(ST_DURATION);

on_return:

    PJ_LOG(3,("test", "...Cleaning up resources"));

    /* Request all worker threads to stop, then join and destroy them */
    tparam.stopping = PJ_TRUE;

    for (i=0; stress_threads && i<ST_STRESS_THREAD_COUNT; ++i) {
        if (!stress_threads[i])
            continue;
        pj_thread_join(stress_threads[i]);
        pj_thread_destroy(stress_threads[i]);
    }
    for (i=0; poll_threads && i<ST_POLL_THREAD_COUNT; ++i) {
        if (!poll_threads[i])
            continue;
        pj_thread_join(poll_threads[i]);
        pj_thread_destroy(poll_threads[i]);
    }
    for (i=0; cancel_threads && i<ST_CANCEL_THREAD_COUNT; ++i) {
        if (!cancel_threads[i])
            continue;
        pj_thread_join(cancel_threads[i]);
        pj_thread_destroy(cancel_threads[i]);
    }

    /* Print per-thread statistics (poll/cancel-thread variant only) */
    for (i=0; i<ST_POLL_THREAD_COUNT + ST_CANCEL_THREAD_COUNT; ++i) {
        PJ_LOG(3,("test", "...thread #%d (%s) executed %d entries",
                  i, (tparam.stat[i].is_poll? "poll" : "cancel"),
                  tparam.stat[i].cnt));
        count += tparam.stat[i].cnt;
    }

    /* Print operation counters (randomized variant only) */
    if (tparam.n_sched)
        n_sched = (unsigned)pj_atomic_get(tparam.n_sched);
    if (tparam.n_cancel)
        n_cancel = (unsigned)pj_atomic_get(tparam.n_cancel);
    if (tparam.n_poll)
        n_poll = (unsigned)pj_atomic_get(tparam.n_poll);
    PJ_LOG(3,("test", "...stress test: sched=%u cancel=%u poll=%u",
              n_sched, n_cancel, n_poll));

    /* Cancel any entries still active so group-lock references drop */
    if (timer && entries) {
        for (i=0; i<ST_ENTRY_COUNT; ++i)
            pj_timer_heap_cancel_if_active(timer, &entries[i], 10);
    }

    /* Each group lock must be back to exactly one reference (ours);
     * anything else indicates a poll-vs-cancel race bug.
     */
    for (i=0; grp_locks && i<ST_ENTRY_GROUP_LOCK_COUNT; ++i) {
        if (!grp_locks[i])
            continue;
        if (pj_grp_lock_get_ref(grp_locks[i]) != 1 && !err)
            err = -100;
        pj_grp_lock_dec_ref(grp_locks[i]);
    }

    /* Propagate the first error detected by the workers */
    if (tparam.err && !err)
        err = tparam.err;

    for (i=0; entries_status && i<ST_ENTRY_COUNT; ++i) {
        if (entries_status[i])
            pj_atomic_destroy(entries_status[i]);
    }
    if (tparam.idx)
        pj_atomic_destroy(tparam.idx);
    if (tparam.n_sched)
        pj_atomic_destroy(tparam.n_sched);
    if (tparam.n_cancel)
        pj_atomic_destroy(tparam.n_cancel);
    if (tparam.n_poll)
        pj_atomic_destroy(tparam.n_poll);
    if (timer)
        pj_timer_heap_destroy(timer);
    if (pool)
        pj_pool_safe_release(&pool);

    return err;
}


/******************
 * Benchmark test *
 ******************
 * Measure how fast a large number of entries can be scheduled and
 * cancelled, both with random delays and with monotonically increasing
 * delays.
 */
enum
{
    RANDOM_SCH    = 0,  /* schedule entries with random delays        */
    RANDOM_CAN    = 1,  /* cancel entries scheduled by RANDOM_SCH     */
    INCREMENT_SCH = 2,  /* schedule entries with increasing delays    */
    INCREMENT_CAN = 3   /* cancel entries scheduled by INCREMENT_SCH  */
};

/* Fill in a random delay of up to ~10 seconds. */
static void get_random_delay(pj_time_val *delay)
{
    delay->sec = pj_rand() % 10;
    delay->msec = pj_rand() % 1000;
}

/* Advance the delay by one msec, keeping it normalized. */
static void get_next_delay(pj_time_val *delay)
{
    ++delay->msec;
    pj_time_val_normalize(delay);
}

/* Report benchmark progress: operations/sec achieved for entry indexes
 * [start_idx, end_idx) measured from `time_start` using timestamp `freq`.
 */
static void print_bench(int test_type, pj_timestamp freq,
                        pj_timestamp time_start, int start_idx, int end_idx)
{
    const char *name;
    pj_timestamp t2;
    pj_uint64_t ops_per_sec;

    pj_get_timestamp(&t2);
    pj_sub_timestamp(&t2, &time_start);
    if (t2.u64 == 0)
        t2.u64 = 1;     /* avoid division by zero on very fast runs */

    ops_per_sec = (pj_uint64_t)(end_idx - start_idx) * freq.u64 / t2.u64;

    name = (test_type == RANDOM_SCH || test_type == INCREMENT_SCH)?
           "schedule" : "cancel";

    PJ_LOG(3,(THIS_FILE, "    %s %d-%d: %u ops/sec",
              name, start_idx, end_idx, (unsigned)ops_per_sec));
}

/* Run one benchmark pass over all BT_ENTRY_COUNT entries.
 * SCH passes schedule every entry; CAN passes cancel every entry that the
 * preceding SCH pass scheduled. Progress is printed at BT_ENTRY_SHOW_START
 * entries and then at every BT_ENTRY_SHOW_MULT multiple of it.
 * Returns 0 on success, negative on error.
 */
static int bench_test(pj_timer_heap_t *timer,
                      pj_timer_entry *entries,
                      pj_timestamp freq,
                      int test_type)
{
    pj_timestamp t1;
    pj_time_val delay = {0};
    int i, j, last = 0;
    int mult = BT_ENTRY_SHOW_START;

    pj_get_timestamp(&t1);
    j = 0;
    for (i = 0; i < BT_ENTRY_COUNT; ++i) {
        if (test_type == RANDOM_SCH || test_type == INCREMENT_SCH) {
            pj_status_t status;

            if (test_type == RANDOM_SCH)
                get_random_delay(&delay);
            else
                get_next_delay(&delay);

            pj_timer_entry_init(&entries[i], 0, NULL, &dummy_callback);
            status = pj_timer_heap_schedule(timer, &entries[i], &delay);
            if (status != PJ_SUCCESS) {
                app_perror("...error: unable to schedule entry", status);
                return -50;
            }
        } else {
            if (pj_timer_heap_cancel(timer, &entries[i]) != 1) {
                PJ_LOG(3,(THIS_FILE, "...error: unable to cancel entry %d",
                          i));
                return -60;
            }
        }

        ++j;
        if (j == mult) {
            print_bench(test_type, freq, t1, last, j);
            last = j;
            mult *= BT_ENTRY_SHOW_MULT;
        }
    }
    /* Report the trailing batch, if it was not printed inside the loop */
    if (j > last) {
        print_bench(test_type, freq, t1, last, j);
    }
    return 0;
}
/* NOTE(review): reconstructed region end. */

/* Benchmark test driver: repeats random and incremental schedule/cancel
 * passes. Returns 0 on success, negative error code otherwise.
 */
static int timer_bench_test(void)
{
    pj_pool_t *pool = NULL;
    pj_timer_heap_t *timer = NULL;
    pj_status_t status;
    int err=0;
    pj_timer_entry *entries = NULL;
    pj_timestamp freq;
    int i;

    PJ_LOG(3,("test", "...Benchmark test"));

    status = pj_get_timestamp_freq(&freq);
    if (status != PJ_SUCCESS) {
        PJ_LOG(3,("test", "...error: unable to get timestamp freq"));
        err = -10;
        goto on_return;
    }

    pool = pj_pool_create( mem, NULL, 128, 128, NULL);
    if (!pool) {
        PJ_LOG(3,("test", "...error: unable to create pool"));
        err = -20;
        goto on_return;
    }

    /* Create timer heap. */
    status = pj_timer_heap_create(pool, BT_ENTRY_COUNT/64, &timer);
    if (status != PJ_SUCCESS) {
        app_perror("...error: unable to create timer heap", status);
        err = -30;
        goto on_return;
    }

    /* Create the timer entries (scheduled inside bench_test()) */
    entries = (pj_timer_entry*)pj_pool_calloc(pool, BT_ENTRY_COUNT,
                                              sizeof(*entries));
    if (!entries) {
        err = -40;
        goto on_return;
    }

    PJ_LOG(3,("test", "....random scheduling/cancelling test.."));
    for (i = 0; i < BT_REPEAT_RANDOM_TEST; ++i) {
        PJ_LOG(3,("test", "    test %d of %d..", i+1, BT_REPEAT_RANDOM_TEST));
        err = bench_test(timer, entries, freq, RANDOM_SCH);
        if (err < 0)
            goto on_return;

        err = bench_test(timer, entries, freq, RANDOM_CAN);
        if (err < 0)
            goto on_return;
    }

    PJ_LOG(3,("test", "....increment scheduling/cancelling test.."));
    for (i = 0; i < BT_REPEAT_INC_TEST; ++i) {
        PJ_LOG(3,("test", "    test %d of %d..", i+1, BT_REPEAT_INC_TEST));
        err = bench_test(timer, entries, freq, INCREMENT_SCH);
        if (err < 0)
            goto on_return;

        err = bench_test(timer, entries, freq, INCREMENT_CAN);
        if (err < 0)
            goto on_return;
    }

on_return:
    PJ_LOG(3,("test", "...Cleaning up resources"));
    if (timer)
        pj_timer_heap_destroy(timer);
    if (pool)
        pj_pool_safe_release(&pool);
    return err;
}

/* Entry point: run the basic, stress, and benchmark timer tests in turn,
 * stopping at the first failure. Returns 0 on success.
 */
int timer_test()
{
    int rc;

    rc = test_timer_heap();
    if (rc != 0)
        return rc;

    rc = timer_stress_test();
    if (rc != 0)
        return rc;

    rc = timer_bench_test();
    if (rc != 0)
        return rc;

    return 0;
}

#else
/* To prevent warning about "translation unit is empty"
 * when this test is disabled.
 */
int dummy_timer_test;
#endif  /* INCLUDE_TIMER_TEST */