Hi,
I wrote a mail a couple of days ago asking for guidelines about adding
Content-Encoding: gzip support for squid.
I have been working on it, but now I'm stuck.
I modified clientSendMoreData this way (see the attached patch for the
full details):
....
if (http->out.offset == 0) {
    /* check if we log mime headers */
    rep = clientBuildReply(http, buf, size);
    if (rep) {
        /* body-too-large check */
>>      if (must_compress) {
>>          compress the body bytes in 'buf', producing 'zbytes'
>>          compressed bytes in zipped_buf;
>>          size = zbytes + rep->hdr_sz;
>>          memcpy(buf + rep->hdr_sz, zipped_buf, zbytes);
>>      }
    ...
} else if (!http->request->range) {
>>  if (must_compress) {
>>      compress 'size' bytes located at 'buf' to zipped_buf;
>>      body_size += zbytes - pbytes;
>>      size = zbytes;
>>      memcpy(buf, zipped_buf, zbytes);
>>  }
    http->out.offset += body_size;
    comm_write(fd, buf, size, clientWriteBodyComplete, http, NULL);
    /* NULL because clientWriteBodyComplete frees it */
    return;
}
...
And the call sequence from cache_log is this (omitting some entries):
clientProcessRequest: GET $URL
clientProcessRequest: TCP_MISS for $URL
clientProcessMiss: 'GET URL'
storeClientCopy: $HASH_KEY, seen 0, want 0, size 4096
storeClientCopy2: $HASH_KEY
storeClientCopy3: Waiting for more
storeClientCopy2: $HASH_KEY
storeClientCopy3: Copying from memory (1368 bytes, hi=1368, lo=0)
>>> reply received from the server is 1368 bytes = 133 header bytes + 1235 body bytes
clientSendMoreData: $URL, 1368 bytes
clientSendMoreData: FD 9 '$URL', out.offset=0
clientBuildReplyHeader: can't keep-alive, unknown body size
clientSendMoreData: (no data sent yet) (http->out.offset == 0)
gzip_data: Got 1235 bytes, out avail 4086 bytes
>>> 1235 plain body bytes go in, with ~4 KB of output buffer available
>>> deflate() (called with Z_NO_FLUSH) keeps all of that input in its internal
>>> buffers and emits nothing yet, so the only body output queued is the
>>> 10-byte gzip header that clientSendMoreData prepends
clientSendMoreData: Appending 10 bytes after 133 bytes of headers
clientSendMoreData: packed reply: 207 bytes
>>> reply sent to the client: 207 header bytes + 10 content bytes;
>>> more content should follow later
clientSendMoreData: queueing mb(217 bytes) and clientWriteComplete
clientWriteComplete: FD 9, sz 217, err 0, off 143, len -1
storeClientCopy: $HASH_KEY, seen 143, want 143, size 4096, cb !NULL,
cbdata !NULL
storeClientCopy2: $HASH_KEY
storeClientCopy3: Copying from memory (1225 bytes, hi=1368, lo=0)
>>> ok, here comes my problem:
>>> 1235 plain bytes were 'eaten' by the last call to clientSendMoreData ->
>>> gzip_data, but storeClientCopy3 thinks only 10 bytes have been
>>> 'consumed', so it copies again data I have already compressed.
>>> Should I alter http->entry->mem_obj->inmem_hi??
I guess storeClientCopy3 thinks that only 10 bytes have been 'consumed'
because http->out.offset has been incremented by 10 (the compressed bytes
written out), rather than by 1235 (the plain body bytes processed so far):
out.offset ends up at 143 = 133 header bytes + 10 body bytes instead of 1368.
How should I fix this? As it stands, clientSendMoreData is called again with
data it has already processed.
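To make the question concrete, here is roughly the accounting I have in mind
for the non-range branch (just a sketch, not part of the attached patch and
not tested; tmpbuf sizing and the exact freeing of buf are glossed over):

    /* Sketch only: advance http->out.offset by the *plain* bytes that
     * deflate consumed, so the next storeClientCopy() starts after data
     * that has really been processed, and hand the compressed bytes to
     * comm_write() separately. */
    if (http->compress.gzdata) {
        char tmpbuf[CLIENT_SOCK_SZ];
        int pbytes = size;                 /* plain bytes offered to deflate */
        int zbytes = sizeof(tmpbuf);       /* output space available */
        gzip_data(&http->compress, tmpbuf, &zbytes, buf, &pbytes);
        zbytes = sizeof(tmpbuf) - zbytes;  /* compressed bytes produced */
        http->out.offset += size - pbytes; /* plain bytes actually consumed */
        memcpy(buf, tmpbuf, zbytes);
        comm_write(fd, buf, zbytes, clientWriteBodyComplete, http, NULL);
    } else {
        http->out.offset += body_size;
        comm_write(fd, buf, size, clientWriteBodyComplete, http, NULL);
    }
    return;

That still leaves the question of what to do when deflate buffers everything
and zbytes comes back as 0, whether gzip_done should really be called on every
chunk rather than only on the last one, and whether inmem_hi needs to be
touched at all, which is what I am really asking.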
Thank you very much in advance,
-- Gonzalo Arana <garana@uolsinectis.com.ar> UOL-Sinectis S.A.
--- squid-2.5.STABLE3/src/client_side.c Sat May 24 08:08:41 2003
+++ squid-2.5.STABLE3-visolve_tcp_rtsignal-gzip/src/client_side.c Wed Aug 20 15:31:24 2003
@@ -1395,57 +1396,87 @@
getMyHostname(), ntohs(Config.Sockaddr.http->s.sin_port));
#endif
if (httpReplyBodySize(request->method, rep) < 0) {
debug(33, 3) ("clientBuildReplyHeader: can't keep-alive, unknown body size\n");
request->flags.proxy_keepalive = 0;
}
/* Signal keep-alive if needed */
httpHeaderPutStr(hdr,
http->flags.accel ? HDR_CONNECTION : HDR_PROXY_CONNECTION,
request->flags.proxy_keepalive ? "keep-alive" : "close");
#if ADD_X_REQUEST_URI
/*
* Knowing the URI of the request is useful when debugging persistent
* connections in a client; we cannot guarantee the order of http headers,
* but X-Request-URI is likely to be the very last header to ease use from a
* debugger [hdr->entries.count-1].
*/
httpHeaderPutStr(hdr, HDR_X_REQUEST_URI,
http->entry->mem_obj->url ? http->entry->mem_obj->url : http->uri);
#endif
+
+#if USE_CEGZIP
+ /* If no ranges involved, and
+ * client accepts gzipped data, and
+ * content isn't already 'encoded' (compressed, or something else)
+ */
+ if (!request->range &&
+ (httpHeaderGetAcceptEncoding(&http->request->header) & ENCODING_GZIP) &&
+ !httpHeaderGetContentEncoding(&rep->header)) {
+ int cl = 9; /* //TODO: write CompressionLevel(); */
+
+ /* if client accepts gzipped data
+ * and acls are matched, do compress.
+ */
+ httpHeaderPutStr(hdr, HDR_CONTENT_ENCODING, "gzip");
+ assert(http->conn);
+ debug(33, 3)("Setting compression %d level on fd %d\n", cl, http->conn->fd);
+ http->compress.level = cl;
+ http->compress.offset = rep->hdr_sz; /* //TODO: set to header size */
+ http->compress.gzdata = xmalloc(sizeof(z_stream)); /* //TODO: use mem pools instead */
+ /* //TODO: where should I free gzdata */
+ deflateInit2(http->compress.gzdata, http->compress.level,
+ Z_DEFLATED, -MAX_WBITS, 8, Z_DEFAULT_STRATEGY);
+ http->compress.crc = crc32(0L, NULL, 0);
+ http->compress.zlen = 0;
+ debug(33, 3)("Setting compression offset=%d\n", rep->hdr_sz);
+ }
+#endif
httpHdrMangleList(hdr, request);
}
static HttpReply *
clientBuildReply(clientHttpRequest * http, const char *buf, size_t size)
{
HttpReply *rep = httpReplyCreate();
size_t k = headersEnd(buf, size);
if (k && httpReplyParse(rep, buf, k)) {
/* enforce 1.0 reply version */
httpBuildVersion(&rep->sline.version, 1, 0);
/* do header conversions */
clientBuildReplyHeader(http, rep);
/* if we do ranges, change status to "Partial Content" */
if (http->request->range)
httpStatusLineSet(&rep->sline, rep->sline.version,
HTTP_PARTIAL_CONTENT, NULL);
+// else
+// fd_table[http->conn.fd].compress.offset = rep->header->len; /* //TODO: set to header size */
} else {
/* parsing failure, get rid of the invalid reply */
httpReplyDestroy(rep);
rep = NULL;
/* if we were going to do ranges, backoff */
if (http->request->range) {
/* this will fail and destroy request->range */
clientBuildRangeHeader(http, rep);
}
}
return rep;
}
/*
* clientCacheHit should only be called until the HTTP reply headers
* have been parsed. Normally this should be a single call, but
* it might take more than one. As soon as we have the headers,
* we hand off to clientSendMoreData, clientProcessExpired, or
* clientProcessMiss.
*/
@@ -1866,69 +1897,140 @@
* can choose to block these responses where appropriate, but won't get
* mysterious breakages.
*/
static int
clientAlwaysAllowResponse(http_status sline)
{
switch (sline) {
case HTTP_CONTINUE:
case HTTP_SWITCHING_PROTOCOLS:
case HTTP_PROCESSING:
case HTTP_NO_CONTENT:
case HTTP_NOT_MODIFIED:
return 1;
/* unreached */
break;
default:
return 0;
}
}
+#ifdef USE_CEGZIP
+/*
+ * Compression routine:
+ * osize: available output bytes
+ * isize: available input bytes
+ */
+static void gzip_data(struct compress_state* compress,
+ char* obuf, int *osize, char* ibuf, int *isize) {
+#if 0
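+ /* pass-through path (disabled): copy the input straight to the output without compressing */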
+ memcpy(obuf, ibuf, *isize);
+ compress->zlen += *isize;
+ compress->plen += *isize;
+ *osize = *isize;
+ *isize = 0;
+ return;
+#else
+ int processed_bytes = 0; /* plain bytes consumed by deflate call */
+ int generated_bytes = 0; /* zipped bytes written by this deflate call */
+ debug(85,3)("gzip_data: Got %d bytes, out avail %d bytes\n", *isize, *osize);
+ /* Have to compress on this socket */
+ assert(compress && obuf && ibuf);
+ compress->gzdata->next_in = ibuf;
+ compress->gzdata->avail_in = *isize;
+ compress->gzdata->next_out = obuf;
+ compress->gzdata->avail_out = *osize;
+ compress->deflate_status = deflate(compress->gzdata, Z_NO_FLUSH);
+ assert(compress->gzdata->avail_out);
+ processed_bytes = *isize - compress->gzdata->avail_in;
+ generated_bytes = *osize - compress->gzdata->avail_out;
+ compress->zlen += generated_bytes;
+ compress->plen += processed_bytes;
+ compress->crc = crc32(compress->crc, ibuf, processed_bytes);
+ *isize = compress->gzdata->avail_in;
+ *osize = compress->gzdata->avail_out;
+ /* all data has to be processed */
+ debug(85,3)("gzip_data: pbytes=%d zbytes=%d avail_out=%d "
+ "avail_in=%d deflate_statu=%d crc=%u plen=%u zlen=%u\n",
+ processed_bytes, generated_bytes, compress->gzdata->avail_out,
+ compress->gzdata->avail_in, compress->deflate_status, compress->crc,
+ compress->plen, compress->zlen);
+#endif
+}
+
+static void gzip_done(struct compress_state* compress, char* obuf, int *osize,
+ char* ibuf, int* isize) {
+ gzip_data(compress, obuf, osize, ibuf, isize);
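+ /* no more input for this stream: call deflate(Z_FINISH) until Z_STREAM_END, then append the gzip trailer */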
+ if (compress->deflate_status != Z_STREAM_END)
+ do {
+ int zipped_bytes = 0;
+ compress->gzdata->next_in = NULL;
+ compress->gzdata->avail_in = 0;
+ compress->gzdata->next_out = obuf;
+ compress->gzdata->avail_out = *osize;
+ compress->deflate_status = deflate(compress->gzdata, Z_FINISH);
+ assert(compress->gzdata->avail_out);
+ zipped_bytes = *osize - compress->gzdata->avail_out;
+ obuf += zipped_bytes;
+ *osize -= zipped_bytes;
+ compress->zlen += zipped_bytes;
+ debug(85, 3)("gzip_done: zbytes=%d deflate_status=%d \n",
+ zipped_bytes, compress->deflate_status);
+ } while (compress->deflate_status == Z_OK &&
+ compress->gzdata->avail_out >= 8);
+ /* gzip trailer (RFC 1952): CRC32 of the plain data, then the plain
+  * length; note memcpy stores host byte order, so this is only right
+  * on little-endian machines */
+ memcpy(obuf, &compress->crc, 4);
+ obuf += 4;
+ *osize -= 4;
+ memcpy(obuf, &compress->plen, 4);
+ *osize -= 4;
+}
+#endif
+
/*
* accepts chunk of a http message in buf, parses prefix, filters headers and
* such, writes processed message to the client's socket
*/
static void
clientSendMoreData(void *data, char *buf, ssize_t size)
{
clientHttpRequest *http = data;
StoreEntry *entry = http->entry;
ConnStateData *conn = http->conn;
int fd = conn->fd;
HttpReply *rep = NULL;
const char *body_buf = buf;
ssize_t body_size = size;
MemBuf mb;
ssize_t check_size = 0;
debug(33, 5) ("clientSendMoreData: %s, %d bytes\n", http->uri, (int) size);
assert(size <= CLIENT_SOCK_SZ);
assert(http->request != NULL);
dlinkDelete(&http->active, &ClientActiveRequests);
dlinkAdd(http, &http->active, &ClientActiveRequests);
debug(33, 5) ("clientSendMoreData: FD %d '%s', out.offset=%ld \n",
fd, storeUrl(entry), (long int) http->out.offset);
if (conn->chr != http) {
/* there is another object in progress, defer this one */
debug(33, 2) ("clientSendMoreData: Deferring %s\n", storeUrl(entry));
memFree(buf, MEM_CLIENT_SOCK_BUF);
return;
} else if (http->request->flags.reset_tcp) {
+ memFree(buf, MEM_CLIENT_SOCK_BUF);
comm_reset_close(fd);
return;
} else if (entry && EBIT_TEST(entry->flags, ENTRY_ABORTED)) {
/* call clientWriteComplete so the client socket gets closed */
clientWriteComplete(fd, NULL, 0, COMM_OK, http);
memFree(buf, MEM_CLIENT_SOCK_BUF);
return;
} else if (size < 0) {
/* call clientWriteComplete so the client socket gets closed */
clientWriteComplete(fd, NULL, 0, COMM_OK, http);
memFree(buf, MEM_CLIENT_SOCK_BUF);
return;
} else if (size == 0) {
/* call clientWriteComplete so the client socket gets closed */
clientWriteComplete(fd, NULL, 0, COMM_OK, http);
memFree(buf, MEM_CLIENT_SOCK_BUF);
return;
}
if (http->out.offset == 0) {
if (Config.onoff.log_mime_hdrs) {
@@ -1937,148 +2039,208 @@
safe_free(http->al.headers.reply);
http->al.headers.reply = xcalloc(k + 1, 1);
xstrncpy(http->al.headers.reply, buf, k);
}
}
rep = clientBuildReply(http, buf, size);
if (rep) {
aclCheck_t *ch;
int rv;
clientMaxBodySize(http->request, http, rep);
if (clientReplyBodyTooLarge(http, rep->content_length)) {
ErrorState *err = errorCon(ERR_TOO_BIG, HTTP_FORBIDDEN);
err->request = requestLink(http->request);
storeUnregister(http->sc, http->entry, http);
http->sc = NULL;
storeUnlockObject(http->entry);
http->entry = clientCreateStoreEntry(http, http->request->method,
null_request_flags);
errorAppendEntry(http->entry, err);
httpReplyDestroy(rep);
+ memFree(buf, MEM_CLIENT_SOCK_BUF);
return;
}
+ /* no data has been sent */
+#ifdef USE_CEGZIP
+ /* check if gzip on content has to be done */
+ /* TODO: check that we are not gzipping \r\n\r\n */
+ if (http->compress.gzdata) {
+ char tmpbuf[CLIENT_SOCK_SZ] = {
+ 0x1f, 0x8b, // gzip magic
+ 0x08, // compression method: deflate
+ 0, // flags: is ascii, etc.
+ 0, 0, 0, 0, // mtime (0 = not available)
+ 0x02, // extra flags: max compression
+ 0x03 // os of compression: UNIX
+ };
+ int avail_zbytes = HTTP_REPLY_BUF_SZ - 10;
+ int zbytes = avail_zbytes; /* zipped bytes */
+ int pbytes = size - rep->hdr_sz; /* plain bytes */
+ debug(85, 3)("clientSendMoreData: (no data sent yet)\n");
+ gzip_data(&http->compress, tmpbuf+10, &zbytes,
+ buf + rep->hdr_sz, &pbytes);
+ zbytes = avail_zbytes - zbytes; /* compressed bytes actually produced */
+ zbytes += 10; /* plus the 10 byte gzip header */
+ size = zbytes + rep->hdr_sz;
+ memcpy(buf + rep->hdr_sz, tmpbuf, zbytes);
+ }
+#endif
body_size = size - rep->hdr_sz;
assert(body_size >= 0);
body_buf = buf + rep->hdr_sz;
http->range_iter.prefix_size = rep->hdr_sz;
debug(33, 3) ("clientSendMoreData: Appending %d bytes after %d bytes of headers\n",
(int) body_size, rep->hdr_sz);
ch = clientAclChecklistCreate(Config.accessList.reply, http);
ch->reply = rep;
rv = aclCheckFast(Config.accessList.reply, ch);
aclChecklistFree(ch);
ch = NULL;
debug(33, 2) ("The reply for %s %s is %s, because it matched '%s'\n",
RequestMethodStr[http->request->method], http->uri,
rv ? "ALLOWED" : "DENIED",
AclMatchedName ? AclMatchedName : "NO ACL's");
if (!rv && rep->sline.status != HTTP_FORBIDDEN
&& !clientAlwaysAllowResponse(rep->sline.status)) {
/* the if above is slightly broken, but there is no way
* to tell if this is a squid generated error page, or one from
* upstream at this point. */
ErrorState *err;
err = errorCon(ERR_ACCESS_DENIED, HTTP_FORBIDDEN);
err->request = requestLink(http->request);
storeUnregister(http->sc, http->entry, http);
http->sc = NULL;
storeUnlockObject(http->entry);
http->entry = clientCreateStoreEntry(http, http->request->method,
null_request_flags);
errorAppendEntry(http->entry, err);
httpReplyDestroy(rep);
+ /*
+ * log with TCP_DENIED, the same as for http_access checks
+ */
+ http->log_type = LOG_TCP_DENIED;
+ memFree(buf, MEM_CLIENT_SOCK_BUF);
return;
}
} else if (size < CLIENT_SOCK_SZ && entry->store_status == STORE_PENDING) {
+ debug(33, 3)("clientSendMoreData: waiting for more to arrive\n");
/* wait for more to arrive */
storeClientCopy(http->sc, entry,
http->out.offset + size,
http->out.offset,
CLIENT_SOCK_SZ,
buf,
clientSendMoreData,
http);
return;
}
/* reset range iterator */
http->range_iter.pos = HttpHdrRangeInitPos;
} else if (!http->request->range) {
/* Avoid copying to MemBuf for non-range requests */
/* Note, if we're here, then 'rep' is known to be NULL */
+ /* queueing data to be sent */
+#ifdef USE_CEGZIP
+ /* compressing data to be sent */
+ if (http->compress.gzdata) {
+ char tmpbuf[CLIENT_SOCK_SZ];
+ int avail_zbytes = HTTP_REPLY_BUF_SZ - size;
+ int zbytes = avail_zbytes;
+ int avail_pbytes = size;
+ int pbytes = avail_pbytes;
+ int orig_size = size;
+ debug(85, 3)("clientSendMoreData: (http->out.offset=%d, "
+ "http->entry->reply->content_length=%d) \n",
+ http->out.offset, http->entry->mem_obj->reply->content_length);
+ gzip_done(&http->compress, tmpbuf, &avail_zbytes, buf, &avail_pbytes);
+ zbytes -= avail_zbytes;
+ pbytes -= avail_pbytes;
+ body_size += zbytes - pbytes;
+ size = zbytes;
+ memcpy(buf, tmpbuf, zbytes);
+ http->entry->mem_obj->inmem_hi += http->compress.zlen - http->compress.plen;
+ }
+#endif
http->out.offset += body_size;
comm_write(fd, buf, size, clientWriteBodyComplete, http, NULL);
/* NULL because clientWriteBodyComplete frees it */
return;
}
if (http->request->method == METHOD_HEAD) {
if (rep) {
/* do not forward body for HEAD replies */
body_size = 0;
http->flags.done_copying = 1;
} else {
/*
* If we are here, then store_status == STORE_OK and it
* seems we have a HEAD repsponse which is missing the
* empty end-of-headers line (home.mira.net, phttpd/0.99.72
* does this). Because clientBuildReply() fails we just
* call this reply a body, set the done_copying flag and
* continue...
*/
http->flags.done_copying = 1;
/*
* And as this is a malformed HTTP reply we cannot keep
* the connection persistent
*/
http->request->flags.proxy_keepalive = 0;
}
}
/* write headers and/or body if any */
assert(rep || (body_buf && body_size));
/* init mb; put status line and headers if any */
if (rep) {
mb = httpReplyPack(rep);
http->out.offset += rep->hdr_sz;
+ debug(85, 3)("clientSendMoreData: packed reply: %d bytes\n", mb.size);
check_size += rep->hdr_sz;
#if HEADERS_LOG
headersLog(0, 0, http->request->method, rep);
#endif
httpReplyDestroy(rep);
rep = NULL;
} else {
memBufDefInit(&mb);
}
/* append body if any */
if (http->request->range) {
/* Only GET requests should have ranges */
assert(http->request->method == METHOD_GET);
/* clientPackMoreRanges() updates http->out.offset */
/* force the end of the transfer if we are done */
if (!clientPackMoreRanges(http, body_buf, body_size, &mb))
http->flags.done_copying = 1;
} else if (body_buf && body_size) {
+ /* TODO: must compress here */
+ debug(33, 5)("clientSendMoreData: appending %d bytes to http->out.offset=%d\n",
+ body_size, http->out.offset);
http->out.offset += body_size;
check_size += body_size;
memBufAppend(&mb, body_buf, body_size);
}
if (!http->request->range && http->request->method == METHOD_GET)
assert(check_size == size);
/* write */
+ debug(33, 3)("clientSendMoreData: queueing mb(%d bytes) and "
+ "clientWriteComplete\n", mb.size);
comm_write_mbuf(fd, mb, clientWriteComplete, http);
/* if we don't do it, who will? */
memFree(buf, MEM_CLIENT_SOCK_BUF);
}
/*
* clientWriteBodyComplete is called for MEM_CLIENT_SOCK_BUF's
* written directly to the client socket, versus copying to a MemBuf
* and going through comm_write_mbuf. Most non-range responses after
* the headers probably go through here.
*/
static void
clientWriteBodyComplete(int fd, char *buf, size_t size, int errflag, void *data)
{
/*
* NOTE: clientWriteComplete doesn't currently use its "buf"
* (second) argument, so we pass in NULL.
*/
clientWriteComplete(fd, NULL, size, errflag, data);
memFree(buf, MEM_CLIENT_SOCK_BUF);
--- squid-2.5.STABLE3/src/structs.h Sat May 10 19:17:44 2003
+++ squid-2.5.STABLE3-visolve_tcp_rtsignal-gzip/src/structs.h Wed Aug 20 14:40:29 2003
@@ -575,87 +575,92 @@
#if HTTP_VIOLATIONS
int reload_into_ims;
#endif
int offline;
int redir_rewrites_host;
int prefer_direct;
int nonhierarchical_direct;
int strip_query_terms;
int redirector_bypass;
int ignore_unknown_nameservers;
int client_pconns;
int server_pconns;
#if USE_CACHE_DIGESTS
int digest_generation;
#endif
int log_ip_on_direct;
int ie_refresh;
int vary_ignore_expire;
int pipeline_prefetch;
int request_entities;
+#if USE_CEGZIP
+ int responce_ce_negotiation;
+#endif
} onoff;
acl *aclList;
struct {
acl_access *http;
acl_access *icp;
acl_access *miss;
acl_access *NeverDirect;
acl_access *AlwaysDirect;
acl_access *ASlists;
acl_access *noCache;
#if SQUID_SNMP
acl_access *snmp;
#endif
acl_access *brokenPosts;
#if USE_IDENT
acl_access *identLookup;
#endif
acl_access *redirector;
acl_access *reply;
+#if USE_CEGZIP
+ acl_access *response_ce_gzip;
+#endif
acl_address *outgoing_address;
acl_tos *outgoing_tos;
} accessList;
acl_deny_info_list *denyInfoList;
struct _authConfig {
authScheme *schemes;
int n_allocated;
int n_configured;
} authConfig;
struct {
size_t list_width;
int list_wrap;
char *anon_user;
int passive;
int sanitycheck;
} Ftp;
refresh_t *Refresh;
struct _cacheSwap {
SwapDir *swapDirs;
int n_allocated;
int n_configured;
} cacheSwap;
struct {
char *directory;
} icons;
char *errorDirectory;
struct {
time_t timeout;
int maxtries;
} retry;
struct {
size_t limit;
} MemPools;
#if DELAY_POOLS
delayConfig Delay;
#endif
struct {
int icp_average;
int dns_average;
int http_average;
int icp_min_poll;
int dns_min_poll;
int http_min_poll;
} comm_incoming;
int max_open_disk_fds;
int uri_whitespace;
size_t rangeOffsetLimit;
#if MULTICAST_MISS_STREAM
@@ -1030,77 +1035,92 @@
icp_opcode opcode;
} icp;
struct {
struct in_addr caddr;
size_t size;
log_type code;
int msec;
const char *rfc931;
const char *authuser;
} cache;
struct {
char *request;
char *reply;
} headers;
struct {
const char *method_str;
} private;
HierarchyLogEntry hier;
};
+#if USE_CEGZIP
+struct compress_state {
+ unsigned int level:4; /* compression level: 1-9 */
+ unsigned int offset; /* how many bytes to skip before starting to compress */
+ z_stream* gzdata; /* zlib internal state */
+ unsigned crc; /* crc32 */
+ unsigned zlen; /* compressed bytes */
+ unsigned plen; /* plain data bytes: uncompressed length */
+ int deflate_status; /* last deflate return value */
+}; /* so far, only compressing on network writes */
+#endif
+
struct _clientHttpRequest {
ConnStateData *conn;
request_t *request; /* Parsed URL ... */
store_client *sc; /* The store_client we're using */
store_client *old_sc; /* ... for entry to be validated */
char *uri;
char *log_uri;
struct {
off_t offset;
size_t size;
} out;
HttpHdrRangeIter range_iter; /* data for iterating thru range specs */
size_t req_sz; /* raw request size on input, not current request size */
StoreEntry *entry;
StoreEntry *old_entry;
log_type log_type;
#if USE_CACHE_DIGESTS
const char *lookup_type; /* temporary hack: storeGet() result: HIT/MISS/NONE */
#endif
struct timeval start;
http_version_t http_ver;
int redirect_state;
aclCheck_t *acl_checklist; /* need ptr back so we can unreg if needed */
clientHttpRequest *next;
AccessLogEntry al;
struct {
unsigned int accel:1;
unsigned int internal:1;
unsigned int done_copying:1;
unsigned int purging:1;
} flags;
struct {
http_status status;
char *location;
} redirect;
dlink_node active;
size_t maxBodySize;
+#if USE_CEGZIP
+ struct compress_state compress;
+#endif
};
struct _ConnStateData {
int fd;
struct {
char *buf;
off_t offset;
size_t size;
} in;
struct {
size_t size_left; /* How much body left to process */
request_t *request; /* Parameters passed to clientReadBody */
char *buf;
size_t bufsize;
CBCB *callback;
void *cbdata;
} body;
auth_type_t auth_type; /* Is this connection based authentication ? if so
* what type it is. */
/* note this is ONLY connection based because NTLM is against HTTP spec */