comparison src/neon/rb.c @ 1719:29c35cb8873e

- Add neon HTTP transport plugin sources (for real)
author Ralf Ertzinger <ralf@skytale.net>
date Mon, 17 Sep 2007 21:46:53 +0200
parents
children dc83901850df
/*
 * Ringbuffer implementation
 *
 * GPL
 */
#include <stdlib.h>   /* malloc(), free(), abort() */
#include <string.h>   /* memcpy() */
#include <pthread.h>  /* pthread_mutex_* */
#include "rb.h"
#include "debug.h"

#ifdef RB_DEBUG
/*
 * An internal assertion function to make sure that the
 * ringbuffer structure is consistent.
 *
 * WARNING: This function will call abort() if the ringbuffer
 * is found to be inconsistent.
 */
static void _assert_rb(struct ringbuf* rb) {

    unsigned int realused;

    _ENTER;

    _DEBUG("rb->buf=%p, rb->end=%p, rb->wp=%p, rb->rp=%p, rb->free=%u, rb->used=%u, rb->size=%u",
           rb->buf, rb->end, rb->wp, rb->rp, rb->free, rb->used, rb->size);

    if (0 == rb->size) {
        _ERROR("Buffer size is 0");
        abort();
    }

    if (NULL == rb->buf) {
        _ERROR("Buffer start is NULL");
        abort();
    }

    if (rb->used+rb->free != rb->size) {
        _ERROR("rb->free and rb->used do not add up to rb->size");
        abort();
    }

    if (rb->buf+(rb->size-1) != rb->end) {
        _ERROR("rb->buf and rb->end not rb->size bytes apart");
        abort();
    }

    if ((rb->wp < rb->buf) || (rb->wp > rb->end)) {
        _ERROR("Write pointer outside buffer space");
        abort();
    }

    if ((rb->rp < rb->buf) || (rb->rp > rb->end)) {
        _ERROR("Read pointer outside buffer space");
        abort();
    }

    if (rb->rp <= rb->wp) {
        realused = rb->wp - rb->rp;
    } else {
        realused = (rb->end - rb->rp) + 1 + (rb->wp-rb->buf);
    }

    if (rb->used != realused) {
        _ERROR("Usage count is inconsistent (is %u, should be %u)", rb->used, realused);
        abort();
    }

    _LEAVE;
}
#endif

/*
 * Reset a ringbuffer structure (i.e. discard
 * all data inside of it)
 */
void reset_rb(struct ringbuf* rb) {

    _ENTER;

    pthread_mutex_lock(&rb->lock);

    rb->wp = rb->buf;
    rb->rp = rb->buf;
    rb->free = rb->size;
    rb->used = 0;
    rb->end = rb->buf+(rb->size-1);

    pthread_mutex_unlock(&rb->lock);

    _LEAVE;
}

/*
 * Initialize a ringbuffer structure (including
 * memory allocation).
 *
 * Return -1 on error
 */
int init_rb(struct ringbuf* rb, unsigned int size) {

    _ENTER;

    if (0 == size) {
        _LEAVE -1;
    }

    if (0 != pthread_mutex_init(&rb->lock, NULL)) {
        _LEAVE -1;
    }

    if (NULL == (rb->buf = malloc(size))) {
        /* Do not leak the mutex if the allocation fails */
        pthread_mutex_destroy(&rb->lock);
        _LEAVE -1;
    }
    rb->size = size;
    reset_rb(rb);

    ASSERT_RB(rb);

    _LEAVE 0;
}

/*
 * Write size bytes at buf into the ringbuffer.
 * Return -1 on error (not enough space in buffer)
 */
int write_rb(struct ringbuf* rb, void* buf, unsigned int size) {

    int ret = -1;
    unsigned int endfree;

    _ENTER;

    pthread_mutex_lock(&rb->lock);

    ASSERT_RB(rb);

    if (rb->free < size) {
        ret = -1;
        goto out;
    }

    endfree = (rb->end - rb->wp)+1;
    if (endfree < size) {
        /*
         * There is enough space in the buffer, but not in
         * one piece. We need to split the copy into two parts.
         */
        memcpy(rb->wp, buf, endfree);
        memcpy(rb->buf, buf+endfree, size-endfree);
        rb->wp = rb->buf + (size-endfree);
    } else if (endfree > size) {
        /*
         * There is more space than needed at the end
         */
        memcpy(rb->wp, buf, size);
        rb->wp += size;
    } else {
        /*
         * There is exactly the space needed at the end.
         * The write pointer wraps around to the start of the buffer.
         */
        memcpy(rb->wp, buf, size);
        rb->wp = rb->buf;
    }

    rb->free -= size;
    rb->used += size;

    ret = 0;

out:
    ASSERT_RB(rb);
    pthread_mutex_unlock(&rb->lock);

    _LEAVE ret;
}

/*
 * Read size bytes from buffer into buf.
 * Return -1 on error (not enough data in buffer)
 */
int read_rb(struct ringbuf* rb, void* buf, unsigned int size) {

    int ret;

    _ENTER;

    pthread_mutex_lock(&rb->lock);
    ret = read_rb_locked(rb, buf, size);
    pthread_mutex_unlock(&rb->lock);

    _LEAVE ret;
}

/*
 * Read size bytes from buffer into buf, assuming the buffer lock
 * is already held.
 * Return -1 on error (not enough data in buffer)
 */
int read_rb_locked(struct ringbuf* rb, void* buf, unsigned int size) {

    unsigned int endused;

    _ENTER;

    ASSERT_RB(rb);

    if (rb->used < size) {
        /* Not enough bytes in buffer */
        _LEAVE -1;
    }

    if (rb->rp < rb->wp) {
        /*
         * Read pointer is behind the write pointer, all the
         * data is available in one chunk.
         */
        memcpy(buf, rb->rp, size);
        rb->rp += size;
    } else {
        /*
         * Read pointer is at or after the write pointer, so the
         * data may wrap around the end of the buffer.
         */
        endused = (rb->end - rb->rp)+1;

        if (size < endused) {
            /*
             * Data is available in one chunk
             */
            memcpy(buf, rb->rp, size);
            rb->rp += size;
        } else {
            /*
             * There is enough data in the buffer, but it wraps
             * around the end, so it has to be copied in two parts.
             */
            memcpy(buf, rb->rp, endused);
            memcpy(buf+endused, rb->buf, size-endused);
            rb->rp = rb->buf + (size-endused);
        }
    }

    rb->free += size;
    rb->used -= size;

    ASSERT_RB(rb);

    _LEAVE 0;
}

/*
 * Return the amount of free space currently in the rb
 */
unsigned int free_rb(struct ringbuf* rb) {

    unsigned int f;

    _ENTER;

    pthread_mutex_lock(&rb->lock);
    f = rb->free;
    pthread_mutex_unlock(&rb->lock);

    _LEAVE f;
}


/*
 * Return the amount of used space currently in the rb
 */
unsigned int used_rb(struct ringbuf* rb) {

    unsigned int u;

    _ENTER;

    pthread_mutex_lock(&rb->lock);
    u = rb->used;
    pthread_mutex_unlock(&rb->lock);

    _LEAVE u;
}

/*
 * Destroy a ringbuffer
 */
void destroy_rb(struct ringbuf* rb) {

    _ENTER;
    pthread_mutex_lock(&rb->lock);
    free(rb->buf);
    rb->buf = NULL;
    pthread_mutex_unlock(&rb->lock);
    pthread_mutex_destroy(&rb->lock);

    _LEAVE;
}
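
For reference, a minimal usage sketch of the ringbuffer API introduced above; it is not part of the changeset. It assumes struct ringbuf and the function prototypes are visible through rb.h, and the 8-byte buffer size, the payload strings and the standalone main() are made up for illustration:

#include <stdio.h>
#include "rb.h"

int main(void) {
    struct ringbuf rb;
    char out[5];

    /* Allocate an 8-byte ringbuffer (size chosen arbitrarily). */
    if (-1 == init_rb(&rb, 8))
        return 1;

    /* 6 bytes fit; a 9-byte write would return -1. */
    if (-1 == write_rb(&rb, "abcdef", 6))
        return 1;
    printf("used=%u free=%u\n", used_rb(&rb), free_rb(&rb));  /* used=6 free=2 */

    /* Read 4 bytes back; the read pointer advances past "abcd". */
    if (0 == read_rb(&rb, out, 4)) {
        out[4] = '\0';
        printf("read: %s\n", out);
    }

    /* This 5-byte write no longer fits at the end in one piece,
     * so write_rb() splits the copy and wraps the write pointer. */
    if (-1 == write_rb(&rb, "ghijk", 5))
        return 1;

    destroy_rb(&rb);
    return 0;
}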