comparison read_cache.c @ 103:8905d8de7e91 src

Changes to read-cache behaviour, inspired by Thibaut Mattern: don't read the whole VOBU at once (this would block the application for too long on slow drives); instead, read it in several blocks of increasing size.
author mroi
date Sun, 29 Sep 2002 21:51:06 +0000
parents 0e2abe7083de
children 0a0a749038ff
comparison
equal deleted inserted replaced
102:3e6970dbe8d6 103:8905d8de7e91
74 74
75 typedef struct read_cache_chunk_s { 75 typedef struct read_cache_chunk_s {
76 uint8_t *cache_buffer; 76 uint8_t *cache_buffer;
77 uint8_t *cache_buffer_base; /* used in malloc and free for alignment */ 77 uint8_t *cache_buffer_base; /* used in malloc and free for alignment */
78 int32_t cache_start_sector; /* -1 means cache invalid */ 78 int32_t cache_start_sector; /* -1 means cache invalid */
79 size_t cache_block_count; 79 int32_t cache_read_count; /* this many sectors are already read */
80 size_t cache_block_count; /* this many sectors will go in this chunk */
80 size_t cache_malloc_size; 81 size_t cache_malloc_size;
81 int cache_valid; 82 int cache_valid;
82 int usage_count; /* counts how many buffers where issued from this chunk */ 83 int usage_count; /* counts how many buffers where issued from this chunk */
83 } read_cache_chunk_t; 84 } read_cache_chunk_t;
84 85
85 struct read_cache_s { 86 struct read_cache_s {
86 read_cache_chunk_t chunk[READ_CACHE_CHUNKS]; 87 read_cache_chunk_t chunk[READ_CACHE_CHUNKS];
87 int current; 88 int current;
88 int freeing; /* is set to one when we are about to dispose the cache */ 89 int freeing; /* is set to one when we are about to dispose the cache */
90 int read_ahead_size;
89 pthread_mutex_t lock; 91 pthread_mutex_t lock;
90 92
91 /* Bit of strange cross-linking going on here :) -- Gotta love C :) */ 93 /* Bit of strange cross-linking going on here :) -- Gotta love C :) */
92 dvdnav_t *dvd_self; 94 dvdnav_t *dvd_self;
93 }; 95 };
392 for (i = 0; i < READ_CACHE_CHUNKS; i++) 394 for (i = 0; i < READ_CACHE_CHUNKS; i++)
393 self->chunk[i].cache_valid = 0; 395 self->chunk[i].cache_valid = 0;
394 pthread_mutex_unlock(&self->lock); 396 pthread_mutex_unlock(&self->lock);
395 } 397 }
396 398
397 #ifdef DVDNAV_PROFILE
398 //#ifdef ARCH_X86
399 __inline__ unsigned long long int dvdnav_rdtsc()
400 {
401 unsigned long long int x;
402 __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
403 return x;
404 }
405 //#endif
406 #endif
407
408 /* This function is called just after reading the NAV packet. */ 399 /* This function is called just after reading the NAV packet. */
409 void dvdnav_pre_cache_blocks(read_cache_t *self, int sector, size_t block_count) { 400 void dvdnav_pre_cache_blocks(read_cache_t *self, int sector, size_t block_count) {
410 int i, use, result; 401 int i, use;
411 #ifdef DVDNAV_PROFILE
412 struct timeval tv1, tv2, tv3;
413 unsigned long long p1, p2, p3;
414 #endif
415 402
416 if(!self) 403 if(!self)
417 return; 404 return;
418 405
419 if(!self->dvd_self->use_read_ahead) 406 if(!self->dvd_self->use_read_ahead)
420 return; 407 return;
421 408
422 pthread_mutex_lock(&self->lock); 409 pthread_mutex_lock(&self->lock);
423 410
424 /* find a free cache chunk that best fits the required size */ 411 /* find a free cache chunk that best fits the required size */
425 use = -1; 412 use = -1;
426 for (i = 0; i < READ_CACHE_CHUNKS; i++) 413 for (i = 0; i < READ_CACHE_CHUNKS; i++)
427 if (self->chunk[i].usage_count == 0 && self->chunk[i].cache_buffer && 414 if (self->chunk[i].usage_count == 0 && self->chunk[i].cache_buffer &&
428 self->chunk[i].cache_malloc_size >= block_count && 415 self->chunk[i].cache_malloc_size >= block_count &&
466 } 453 }
467 454
468 if (use >= 0) { 455 if (use >= 0) {
469 self->chunk[use].cache_start_sector = sector; 456 self->chunk[use].cache_start_sector = sector;
470 self->chunk[use].cache_block_count = block_count; 457 self->chunk[use].cache_block_count = block_count;
458 self->chunk[use].cache_read_count = 0;
459 self->chunk[use].cache_valid = 1;
460 self->read_ahead_size = 2;
471 self->current = use; 461 self->current = use;
472 #ifdef DVDNAV_PROFILE
473 gettimeofday(&tv1, NULL);
474 p1 = dvdnav_rdtsc();
475 #endif
476 result = DVDReadBlocks (self->dvd_self->file, sector, block_count, self->chunk[use].cache_buffer);
477 #ifdef DVDNAV_PROFILE
478 p2 = dvdnav_rdtsc();
479 gettimeofday(&tv2, NULL);
480 timersub(&tv2, &tv1, &tv3);
481 dprintf("pre_cache DVD read %ld us, profile = %lld, block_count = %d\n",
482 tv3.tv_usec, p2-p1, block_count);
483 #endif
484 self->chunk[use].cache_valid = 1;
485 } else 462 } else
486 dprintf("pre_caching was impossible, no cache chunk available\n"); 463 dprintf("pre_caching was impossible, no cache chunk available\n");
487 464
488 pthread_mutex_unlock(&self->lock); 465 pthread_mutex_unlock(&self->lock);
489 } 466 }
509 sector + block_count <= self->chunk[i].cache_start_sector + self->chunk[i].cache_block_count) 486 sector + block_count <= self->chunk[i].cache_start_sector + self->chunk[i].cache_block_count)
510 use = i; 487 use = i;
511 } 488 }
512 489
513 if (use >= 0) { 490 if (use >= 0) {
514 self->chunk[use].usage_count++; 491 read_cache_chunk_t *chunk = &self->chunk[use];
515 *buf = &self->chunk[use].cache_buffer[(sector - self->chunk[use].cache_start_sector) * 492 int32_t min_sectors = sector + block_count - chunk->cache_start_sector - chunk->cache_read_count;
516 DVD_VIDEO_LB_LEN * block_count]; 493
494 if (chunk->cache_read_count < chunk->cache_block_count) {
495 /* read ahead some buffers but ensure, requested sector is available */
496 if (chunk->cache_read_count + self->read_ahead_size > chunk->cache_block_count)
497 chunk->cache_read_count += DVDReadBlocks(self->dvd_self->file,
498 chunk->cache_start_sector + chunk->cache_read_count,
499 chunk->cache_block_count - chunk->cache_read_count,
500 chunk->cache_buffer + chunk->cache_read_count * DVD_VIDEO_LB_LEN);
501 else
502 chunk->cache_read_count += DVDReadBlocks(self->dvd_self->file,
503 chunk->cache_start_sector + chunk->cache_read_count,
504 (min_sectors > self->read_ahead_size) ? min_sectors : self->read_ahead_size,
505 chunk->cache_buffer + chunk->cache_read_count * DVD_VIDEO_LB_LEN);
506 }
507
508 chunk->usage_count++;
509 *buf = chunk->cache_buffer + (sector - chunk->cache_start_sector) * DVD_VIDEO_LB_LEN;
510 /* the amount of blocks to read ahead is determined based on the lead of the
511 * blocks in cache over those requested */
512 self->read_ahead_size = chunk->cache_read_count + chunk->cache_start_sector - sector - block_count + 1;
517 pthread_mutex_unlock(&self->lock); 513 pthread_mutex_unlock(&self->lock);
518 return DVD_VIDEO_LB_LEN * block_count; 514 return DVD_VIDEO_LB_LEN * block_count;
519 } else { 515 } else {
520 if (self->dvd_self->use_read_ahead) 516 if (self->dvd_self->use_read_ahead)
521 dprintf("cache miss on sector %d\n", sector); 517 dprintf("cache miss on sector %d\n", sector);
522 pthread_mutex_unlock(&self->lock); 518 pthread_mutex_unlock(&self->lock);
523 return DVDReadBlocks(self->dvd_self->file, sector, block_count, *buf); 519 return DVDReadBlocks(self->dvd_self->file, sector, block_count, *buf) * DVD_VIDEO_LB_LEN;
524 } 520 }
525 } 521 }
526 522
527 dvdnav_status_t dvdnav_free_cache_block(dvdnav_t *self, unsigned char *buf) { 523 dvdnav_status_t dvdnav_free_cache_block(dvdnav_t *self, unsigned char *buf) {
528 read_cache_t *cache; 524 read_cache_t *cache;