# HG changeset patch
# User reimar
# Date 1264242227 0
# Node ID 07ce73fed19f4bbf859d02707b1b04b57a4d8f01
# Parent  e5752e0e373d8564d8328bbfcf39e8fcded657e5
Make url_read_complete retry on EAGAIN and return how much data it read
if it reached EOF, making it useful in more cases.

diff -r e5752e0e373d -r 07ce73fed19f avio.c
--- a/avio.c	Sat Jan 23 09:42:52 2010 +0000
+++ b/avio.c	Sat Jan 23 10:23:47 2010 +0000
@@ -156,8 +156,10 @@
     len = 0;
     while (len < size) {
         ret = url_read(h, buf+len, size-len);
-        if (ret < 1)
-            return ret;
+        if (ret == AVERROR(EAGAIN)) {
+            ret = 0;
+        } else if (ret < 1)
+            return ret < 0 ? ret : len;
         len += ret;
     }
     return len;
diff -r e5752e0e373d -r 07ce73fed19f avio.h
--- a/avio.h	Sat Jan 23 09:42:52 2010 +0000
+++ b/avio.h	Sat Jan 23 10:23:47 2010 +0000
@@ -69,6 +69,14 @@
                       const char *filename, int flags);
 int url_open(URLContext **h, const char *filename, int flags);
 int url_read(URLContext *h, unsigned char *buf, int size);
+/**
+ * Read as many bytes as possible (up to size), calling the
+ * read function multiple times if necessary.
+ * Will also retry if the read function returns AVERROR(EAGAIN).
+ * This makes special short-read handling in applications
+ * unnecessary, if the return value is < size then it is
+ * certain there was either an error or the end of file was reached.
+ */
 int url_read_complete(URLContext *h, unsigned char *buf, int size);
 int url_write(URLContext *h, unsigned char *buf, int size);
 int64_t url_seek(URLContext *h, int64_t pos, int whence);
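
[Editor's usage note, not part of the changeset] A minimal caller sketch in C,
assuming a URLContext already opened via url_open() and a hypothetical
fixed-size header, showing what the new semantics buy: EAGAIN retries now
happen inside url_read_complete(), so a return value smaller than the
requested size reliably means either an error (negative) or EOF (the count
of bytes actually read).

/* Hypothetical example: read_file_header() and HEADER_SIZE are illustrative
 * names, not part of the patch or of avio.h. */
#include "avio.h"

#define HEADER_SIZE 16

static int read_file_header(URLContext *h, unsigned char header[HEADER_SIZE])
{
    /* No per-caller EAGAIN retry loop is needed anymore:
     * url_read_complete() retries internally. */
    int ret = url_read_complete(h, header, HEADER_SIZE);
    if (ret < 0)
        return ret;          /* a genuine read error was propagated */
    if (ret < HEADER_SIZE)
        return -1;           /* EOF before a full header: truncated input */
    return 0;                /* all HEADER_SIZE bytes were read */
}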