// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#ifndef BENCHERH
#define BENCHERH

#include <utility>
#include "distribution.h"
#include "stat_collector.h"
#include "backend.h"
#include <boost/scoped_ptr.hpp>
#include "common/Mutex.h"
#include "common/Cond.h"
#include "common/Thread.h"

struct OnWriteApplied;
struct OnWriteCommit;
struct OnReadComplete;
struct Cleanup;

/*
 * Bencher feeds operations drawn from a Distribution of
 * (object, offset, length, op type) tuples to a Backend and reports to a
 * StatCollector.  It runs in its own thread (entry() -> run_bench()) and
 * caps the number of operations in flight.
 */
class Bencher : public Thread {
public:
  enum OpType {
    WRITE,
    READ
  };

private:
  boost::scoped_ptr<
    Distribution<boost::tuple<std::string, uint64_t, uint64_t, OpType> > > op_dist;
  ceph::shared_ptr<StatCollector> stat_collector;
  boost::scoped_ptr<Backend> backend;
  const uint64_t max_in_flight;
  const uint64_t max_duration;
  const uint64_t max_ops;

  Mutex lock;
  Cond open_ops_cond;
  uint64_t open_ops;
  void start_op();
  void drain_ops();
  void complete_op();

public:
  Bencher(
    Distribution<boost::tuple<std::string, uint64_t, uint64_t, OpType> > *op_gen,
    ceph::shared_ptr<StatCollector> stat_collector,
    Backend *backend,
    uint64_t max_in_flight,
    uint64_t max_duration,
    uint64_t max_ops) :
    op_dist(op_gen),
    stat_collector(stat_collector),
    backend(backend),
    max_in_flight(max_in_flight),
    max_duration(max_duration),
    max_ops(max_ops),
    lock("Bencher::lock"),
    open_ops(0)
  {}
  Bencher(
    Distribution<boost::tuple<std::string, uint64_t, uint64_t, OpType> > *op_gen,
    StatCollector *stat_collector,
    Backend *backend,
    uint64_t max_in_flight,
    uint64_t max_duration,
    uint64_t max_ops) :
    op_dist(op_gen),
    stat_collector(stat_collector),
    backend(backend),
    max_in_flight(max_in_flight),
    max_duration(max_duration),
    max_ops(max_ops),
    lock("Bencher::lock"),
    open_ops(0)
  {}
  Bencher(
    Distribution<std::string> *object_gen,
    Distribution<uint64_t> *offset_gen,
    Distribution<uint64_t> *length_gen,
    Distribution<OpType> *op_type_gen,
    StatCollector *stat_collector,
    Backend *backend,
    uint64_t max_in_flight,
    uint64_t max_duration,
    uint64_t max_ops) :
    op_dist(
      new FourTupleDist<std::string, uint64_t, uint64_t, OpType>(
        object_gen, offset_gen, length_gen, op_type_gen)),
    stat_collector(stat_collector),
    backend(backend),
    max_in_flight(max_in_flight),
    max_duration(max_duration),
    max_ops(max_ops),
    lock("Bencher::lock"),
    open_ops(0)
  {}

  void init(
    const set<std::string> &objects,
    uint64_t size,
    std::ostream *out
    );

  void run_bench();
  void *entry() override {
    run_bench();
    return 0;
  }
  friend struct OnWriteApplied;
  friend struct OnWriteCommit;
  friend struct OnReadComplete;
  friend struct Cleanup;
};

/*
 * SequentialLoad walks the object set in order, advancing the offset by a
 * fixed length on each call and pairing every (object, offset, length)
 * with an op type drawn from op_dist.
 */
class SequentialLoad :
  public Distribution<
    boost::tuple<std::string, uint64_t, uint64_t, Bencher::OpType> > {
  set<std::string> objects;
  uint64_t size;
  uint64_t length;
  set<std::string>::iterator object_pos;
  uint64_t cur_pos;
  boost::scoped_ptr<Distribution<Bencher::OpType> > op_dist;
  SequentialLoad(const SequentialLoad &other);
public:
  SequentialLoad(
    const set<std::string> &_objects, uint64_t size,
    uint64_t length,
    Distribution<Bencher::OpType> *op_dist)
    : objects(_objects), size(size), length(length),
      object_pos(objects.begin()), cur_pos(0), op_dist(op_dist) {}

  boost::tuple<std::string, uint64_t, uint64_t, Bencher::OpType>
  operator()() override {
    boost::tuple<std::string, uint64_t, uint64_t, Bencher::OpType> ret =
      boost::make_tuple(*object_pos, cur_pos, length, (*op_dist)());
    cur_pos += length;
    if (cur_pos >= size) {
      cur_pos = 0;
      ++object_pos;
    }
    if (object_pos == objects.end())
      object_pos = objects.begin();
    return ret;
  }
};
#endif
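
/*
 * Usage sketch (illustrative only, not part of the original header): one way
 * a caller might drive Bencher with a SequentialLoad op stream.  The
 * `collector`, `backend`, and `op_type_dist` names below are hypothetical
 * placeholders; a real driver supplies concrete StatCollector, Backend, and
 * Distribution<Bencher::OpType> implementations.
 *
 *   set<std::string> objects;
 *   objects.insert("bench_obj_0");
 *   objects.insert("bench_obj_1");
 *
 *   Bencher bencher(
 *     new SequentialLoad(objects,
 *                        1 << 26,       // per-object size
 *                        1 << 12,       // length of each io
 *                        op_type_dist), // Distribution<Bencher::OpType>*
 *     collector,                        // ceph::shared_ptr<StatCollector>
 *     backend,                          // Backend*, owned by the Bencher
 *     16,                               // max_in_flight
 *     60,                               // max_duration
 *     0);                               // max_ops
 *   bencher.init(objects, 1 << 26, &std::cerr);
 *   bencher.create("bencher");          // Thread::create(); exact signature
 *                                       // depends on the Ceph revision
 *   bencher.join();
 */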