/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

// HttpLog.h should generally be included first
#include "HttpLog.h"

#include "nsHttpPipeline.h"
#include "nsHttpHandler.h"
#include "nsIOService.h"
#include "nsISocketTransport.h"
#include "nsIPipe.h"
#include "nsCOMPtr.h"
#include <algorithm>
#include "nsHttpRequestHead.h"

#ifdef DEBUG
#include "prthread.h"
// defined by the socket transport service while active
extern PRThread *gSocketThread;
#endif

namespace mozilla {
namespace net {

//-----------------------------------------------------------------------------
// nsHttpPushBackWriter
//-----------------------------------------------------------------------------

class nsHttpPushBackWriter : public nsAHttpSegmentWriter
{
public:
    nsHttpPushBackWriter(const char *buf, uint32_t bufLen)
        : mBuf(buf)
        , mBufLen(bufLen)
        { }
    virtual ~nsHttpPushBackWriter() {}

    nsresult OnWriteSegment(char *buf, uint32_t count, uint32_t *countWritten)
    {
        if (mBufLen == 0)
            return NS_BASE_STREAM_CLOSED;

        if (count > mBufLen)
            count = mBufLen;

        memcpy(buf, mBuf, count);

        mBuf += count;
        mBufLen -= count;
        *countWritten = count;
        return NS_OK;
    }

private:
    const char *mBuf;
    uint32_t mBufLen;
};

//-----------------------------------------------------------------------------
// nsHttpPipeline <public>
//-----------------------------------------------------------------------------

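// A pipeline object stands in for a single nsAHttpConnection on behalf of
// several transactions: new transactions queue in mRequestQ, FillSendBuf()
// serializes them into the send pipe and moves fully-written ones to
// mResponseQ, and WriteSegments() delivers incoming data to the transaction
// at the head of mResponseQ.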
nsHttpPipeline::nsHttpPipeline()
    : mConnection(nullptr)
    , mStatus(NS_OK)
    , mRequestIsPartial(false)
    , mResponseIsPartial(false)
    , mClosed(false)
    , mUtilizedPipeline(false)
    , mPushBackBuf(nullptr)
    , mPushBackLen(0)
    , mPushBackMax(0)
    , mHttp1xTransactionCount(0)
    , mReceivingFromProgress(0)
    , mSendingToProgress(0)
    , mSuppressSendEvents(true)
{
}

nsHttpPipeline::~nsHttpPipeline()
{
    // make sure we aren't still holding onto any transactions!
    Close(NS_ERROR_ABORT);

    NS_IF_RELEASE(mConnection);

    if (mPushBackBuf)
        free(mPushBackBuf);
}

// Generate a shuffled request ordering sequence
void
nsHttpPipeline::ShuffleTransOrder(uint32_t count)
{
    if (count < 2)
        return;

    uint32_t pos = mRequestQ[0]->PipelinePosition();
    uint32_t i = 0;

    for (i=0; i < count; ++i) {
        uint32_t ridx = rand() % count;

        nsAHttpTransaction *tmp = mRequestQ[i];
        mRequestQ[i] = mRequestQ[ridx];
        mRequestQ[ridx] = tmp;
    }

    for (i=0; i < count; ++i) {
        mRequestQ[i]->SetPipelinePosition(pos);
        pos++;
    }

    LOG(("nsHttpPipeline::ShuffleTransOrder: Shuffled %d transactions.\n", count));
}

nsresult
nsHttpPipeline::AddTransaction(nsAHttpTransaction *trans)
{
    LOG(("nsHttpPipeline::AddTransaction [this=%p trans=%x]\n", this, trans));

    if (mRequestQ.Length() || mResponseQ.Length())
        mUtilizedPipeline = true;

    NS_ADDREF(trans);
    mRequestQ.AppendElement(trans);
    uint32_t qlen = PipelineDepth();

    if (qlen != 1) {
        trans->SetPipelinePosition(qlen);
    }
    else {
        // do it for this case in case an idempotent cancellation
        // is being repeated and an old value needs to be cleared
        trans->SetPipelinePosition(0);
    }

    // trans->SetConnection() needs to be updated to point back at
    // the pipeline object.
    trans->SetConnection(this);

    ShuffleTransOrder(mRequestQ.Length());

    if (mConnection && !mClosed && mRequestQ.Length() == 1)
        mConnection->ResumeSend();

    return NS_OK;
}

uint32_t
nsHttpPipeline::PipelineDepth()
{
    return mRequestQ.Length() + mResponseQ.Length();
}

nsresult
nsHttpPipeline::SetPipelinePosition(int32_t position)
{
    nsAHttpTransaction *trans = Response(0);
    if (trans)
        return trans->SetPipelinePosition(position);
    return NS_OK;
}

int32_t
nsHttpPipeline::PipelinePosition()
{
    nsAHttpTransaction *trans = Response(0);
    if (trans)
        return trans->PipelinePosition();

    // The response queue is empty, so return oldest request
    if (mRequestQ.Length())
        return Request(mRequestQ.Length() - 1)->PipelinePosition();

    // No transactions in the pipeline
    return 0;
}

nsHttpPipeline *
nsHttpPipeline::QueryPipeline()
{
    return this;
}

//-----------------------------------------------------------------------------
// nsHttpPipeline::nsISupports
//-----------------------------------------------------------------------------

NS_IMPL_ADDREF(nsHttpPipeline)
NS_IMPL_RELEASE(nsHttpPipeline)

// multiple inheritance fun :-)
NS_INTERFACE_MAP_BEGIN(nsHttpPipeline)
    NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsAHttpConnection)
NS_INTERFACE_MAP_END


//-----------------------------------------------------------------------------
// nsHttpPipeline::nsAHttpConnection
//-----------------------------------------------------------------------------

nsresult
nsHttpPipeline::OnHeadersAvailable(nsAHttpTransaction *trans,
                                   nsHttpRequestHead *requestHead,
                                   nsHttpResponseHead *responseHead,
                                   bool *reset)
{
    LOG(("nsHttpPipeline::OnHeadersAvailable [this=%p]\n", this));

    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
    MOZ_ASSERT(mConnection, "no connection");

    nsRefPtr<nsHttpConnectionInfo> ci;
    GetConnectionInfo(getter_AddRefs(ci));
    MOZ_ASSERT(ci);

    bool pipeliningBefore = gHttpHandler->ConnMgr()->SupportsPipelining(ci);

    // trans has now received its response headers; forward to the real connection
    nsresult rv = mConnection->OnHeadersAvailable(trans,
                                                  requestHead,
                                                  responseHead,
                                                  reset);

    if (!pipeliningBefore && gHttpHandler->ConnMgr()->SupportsPipelining(ci))
        // The received headers have expanded the eligible
        // pipeline depth for this connection
        gHttpHandler->ConnMgr()->ProcessPendingQForEntry(ci);

    return rv;
}

void
nsHttpPipeline::CloseTransaction(nsAHttpTransaction *trans, nsresult reason)
{
    LOG(("nsHttpPipeline::CloseTransaction [this=%p trans=%x reason=%x]\n",
        this, trans, reason));

    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
    MOZ_ASSERT(NS_FAILED(reason), "expecting failure code");

    // the specified transaction is to be closed with the given "reason"

    int32_t index;
    bool killPipeline = false;

    index = mRequestQ.IndexOf(trans);
    if (index >= 0) {
        if (index == 0 && mRequestIsPartial) {
            // the transaction is in the request queue. check to see if any of
            // its data has been written out yet.
            killPipeline = true;
        }
        mRequestQ.RemoveElementAt(index);
    }
    else {
        index = mResponseQ.IndexOf(trans);
        if (index >= 0)
            mResponseQ.RemoveElementAt(index);
        // while we could avoid killing the pipeline if this transaction is the
        // last transaction in the pipeline, there doesn't seem to be that much
        // value in doing so. most likely if this transaction is going away,
        // the others will be shortly as well.
        killPipeline = true;
    }

    // Marking this connection as non-reusable prevents other items from being
    // added to it and causes it to be torn down soon.
    DontReuse();

    trans->Close(reason);
    NS_RELEASE(trans);

    if (killPipeline) {
        // reschedule anything from this pipeline onto a different connection
        CancelPipeline(reason);
    }

    // If all the transactions have been removed then we can close the connection
    // right away.
    if (!mRequestQ.Length() && !mResponseQ.Length() && mConnection)
        mConnection->CloseTransaction(this, reason);
}

nsresult
nsHttpPipeline::TakeTransport(nsISocketTransport **aTransport,
                              nsIAsyncInputStream **aInputStream,
                              nsIAsyncOutputStream **aOutputStream)
{
    return mConnection->TakeTransport(aTransport, aInputStream, aOutputStream);
}

bool
nsHttpPipeline::IsPersistent()
{
    return true; // pipelining requires this
}

bool
nsHttpPipeline::IsReused()
{
    if (!mUtilizedPipeline && mConnection)
        return mConnection->IsReused();
    return true;
}

void
nsHttpPipeline::DontReuse()
{
    if (mConnection)
        mConnection->DontReuse();
}

nsresult
nsHttpPipeline::PushBack(const char *data, uint32_t length)
{
    LOG(("nsHttpPipeline::PushBack [this=%p len=%u]\n", this, length));

    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
    MOZ_ASSERT(mPushBackLen == 0, "push back buffer already has data!");

    // If we have no chance for a pipeline (e.g. due to an Upgrade)
    // then push this data down to original connection
    if (!mConnection->IsPersistent())
        return mConnection->PushBack(data, length);

    // PushBack is called recursively from WriteSegments

    // XXX we have a design decision to make here. either we buffer the data
    // and process it when we return to WriteSegments, or we attempt to move
    // onto the next transaction from here. doing so adds complexity with the
    // benefit of eliminating the extra buffer copy. the buffer is at most
    // 4096 bytes, so it is really unclear if there is any value in the added
    // complexity. besides simplicity, buffering this data has the advantage
    // that we'll call close on the transaction sooner, which will wake up
    // the HTTP channel sooner to continue with its work.

    if (!mPushBackBuf) {
        mPushBackMax = length;
        mPushBackBuf = (char *) malloc(mPushBackMax);
        if (!mPushBackBuf)
            return NS_ERROR_OUT_OF_MEMORY;
    }
    else if (length > mPushBackMax) {
        // grow push back buffer as necessary.
        MOZ_ASSERT(length <= nsIOService::gDefaultSegmentSize, "too big");
        mPushBackMax = length;
        // use a temporary so the old buffer is not leaked if realloc fails
        char *newBuf = (char *) realloc(mPushBackBuf, mPushBackMax);
        if (!newBuf)
            return NS_ERROR_OUT_OF_MEMORY;
        mPushBackBuf = newBuf;
    }

    memcpy(mPushBackBuf, data, length);
    mPushBackLen = length;

    return NS_OK;
}

nsHttpConnection *
nsHttpPipeline::TakeHttpConnection()
{
    if (mConnection)
        return mConnection->TakeHttpConnection();
    return nullptr;
}

nsAHttpTransaction::Classifier
nsHttpPipeline::Classification()
{
    if (mConnection)
        return mConnection->Classification();

    LOG(("nsHttpPipeline::Classification this=%p "
         "has null mConnection using CLASS_SOLO default", this));
    return nsAHttpTransaction::CLASS_SOLO;
}

void
nsHttpPipeline::SetProxyConnectFailed()
{
    nsAHttpTransaction *trans = Request(0);

    if (trans)
        trans->SetProxyConnectFailed();
}

nsHttpRequestHead *
nsHttpPipeline::RequestHead()
{
    nsAHttpTransaction *trans = Request(0);

    if (trans)
        return trans->RequestHead();
    return nullptr;
}

uint32_t
nsHttpPipeline::Http1xTransactionCount()
{
    return mHttp1xTransactionCount;
}

nsresult
nsHttpPipeline::TakeSubTransactions(
    nsTArray<nsRefPtr<nsAHttpTransaction> > &outTransactions)
{
    LOG(("nsHttpPipeline::TakeSubTransactions [this=%p]\n", this));

    if (mResponseQ.Length() || mRequestIsPartial)
        return NS_ERROR_ALREADY_OPENED;

    int32_t i, count = mRequestQ.Length();
    for (i = 0; i < count; ++i) {
        nsAHttpTransaction *trans = Request(i);
        // set the transaction connection object back to the underlying
        // nsHttpConnectionHandle
        trans->SetConnection(mConnection);
        outTransactions.AppendElement(trans);
        NS_RELEASE(trans);
    }
    mRequestQ.Clear();

    LOG(("  took %d\n", count));
    return NS_OK;
}

//-----------------------------------------------------------------------------
// nsHttpPipeline::nsAHttpTransaction
//-----------------------------------------------------------------------------

void
nsHttpPipeline::SetConnection(nsAHttpConnection *conn)
{
    LOG(("nsHttpPipeline::SetConnection [this=%p conn=%x]\n", this, conn));

    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
    MOZ_ASSERT(!mConnection, "already have a connection");

    NS_IF_ADDREF(mConnection = conn);
}

nsAHttpConnection *
nsHttpPipeline::Connection()
{
    LOG(("nsHttpPipeline::Connection [this=%p conn=%x]\n", this, mConnection));

    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
    return mConnection;
}

void
nsHttpPipeline::GetSecurityCallbacks(nsIInterfaceRequestor **result)
{
    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);

    // depending on timing this could be either the request or the response
    // that is needed - but they both go to the same host. A request for these
    // callbacks directly in nsHttpTransaction would not make a distinction
    // over whether the request had been transmitted yet.
    nsAHttpTransaction *trans = Request(0);
    if (!trans)
        trans = Response(0);
    if (trans)
        trans->GetSecurityCallbacks(result);
    else {
        *result = nullptr;
    }
}

void
nsHttpPipeline::OnTransportStatus(nsITransport* transport,
                                  nsresult status, uint64_t progress)
{
    LOG(("nsHttpPipeline::OnTransportStatus [this=%p status=%x progress=%llu]\n",
        this, status, progress));

    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);

    nsAHttpTransaction *trans;
    int32_t i, count;

    switch (status) {

    case NS_NET_STATUS_RESOLVING_HOST:
    case NS_NET_STATUS_RESOLVED_HOST:
    case NS_NET_STATUS_CONNECTING_TO:
    case NS_NET_STATUS_CONNECTED_TO:
        // These should only appear at most once per pipeline.
        // Deliver to the first transaction.

        trans = Request(0);
        if (!trans)
            trans = Response(0);
        if (trans)
            trans->OnTransportStatus(transport, status, progress);

        break;

    case NS_NET_STATUS_SENDING_TO:
        // This is generated by the socket transport when (part of) a
        // transaction is written out
        //
        // In pipelining this is generated out of FillSendBuf(), but it cannot do
        // so until the connection is confirmed by CONNECTED_TO.
        // See patch for bug 196827.
        //

        if (mSuppressSendEvents) {
            mSuppressSendEvents = false;

            // catch up by sending the event to all the transactions that have
            // moved from request to response and any that have been partially
            // sent. Also send WAITING_FOR to those that were completely sent
            count = mResponseQ.Length();
            for (i = 0; i < count; ++i) {
                Response(i)->OnTransportStatus(transport,
                                               NS_NET_STATUS_SENDING_TO,
                                               progress);
                Response(i)->OnTransportStatus(transport,
                                               NS_NET_STATUS_WAITING_FOR,
                                               progress);
            }
            if (mRequestIsPartial && Request(0))
                Request(0)->OnTransportStatus(transport,
                                              NS_NET_STATUS_SENDING_TO,
                                              progress);
            mSendingToProgress = progress;
        }
        // otherwise ignore it
        break;

    case NS_NET_STATUS_WAITING_FOR:
        // Created by nsHttpConnection when the request pipeline has been totally
        // sent. Ignore it here because it is simulated in FillSendBuf() when
        // a request is moved from request to response.

        // ignore it
        break;

    case NS_NET_STATUS_RECEIVING_FROM:
        // Forward this only to the transaction currently receiving data. It is
        // normally generated by the socket transport, but can also
        // be repeated by the pushbackwriter if necessary.
        mReceivingFromProgress = progress;
        if (Response(0))
            Response(0)->OnTransportStatus(transport, status, progress);
        break;

    default:
        // forward other notifications to all request transactions
        count = mRequestQ.Length();
        for (i = 0; i < count; ++i)
            Request(i)->OnTransportStatus(transport, status, progress);
        break;
    }
}

bool
nsHttpPipeline::IsDone()
{
    bool done = true;

    uint32_t i, count = mRequestQ.Length();
    for (i = 0; done && (i < count); i++)
        done = Request(i)->IsDone();

    count = mResponseQ.Length();
    for (i = 0; done && (i < count); i++)
        done = Response(i)->IsDone();

    return done;
}

nsresult
nsHttpPipeline::Status()
{
    return mStatus;
}

uint32_t
nsHttpPipeline::Caps()
{
    nsAHttpTransaction *trans = Request(0);
    if (!trans)
        trans = Response(0);

    return trans ? trans->Caps() : 0;
}

void
nsHttpPipeline::SetDNSWasRefreshed()
{
    nsAHttpTransaction *trans = Request(0);
    if (!trans)
        trans = Response(0);

    if (trans)
        trans->SetDNSWasRefreshed();
}

uint64_t
nsHttpPipeline::Available()
{
    uint64_t result = 0;

    int32_t i, count = mRequestQ.Length();
    for (i=0; i<count; ++i)
        result += Request(i)->Available();
    return result;
}

NS_METHOD
nsHttpPipeline::ReadFromPipe(nsIInputStream *stream,
                             void *closure,
                             const char *buf,
                             uint32_t offset,
                             uint32_t count,
                             uint32_t *countRead)
{
    nsHttpPipeline *self = (nsHttpPipeline *) closure;
    return self->mReader->OnReadSegment(buf, count, countRead);
}

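// ReadSegments supplies the connection with request bytes: it drains the
// send pipe that FillSendBuf() assembled from the queued transactions,
// handing at most |count| bytes to the caller's segment reader.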
nsresult
nsHttpPipeline::ReadSegments(nsAHttpSegmentReader *reader,
                             uint32_t count,
                             uint32_t *countRead)
{
    LOG(("nsHttpPipeline::ReadSegments [this=%p count=%u]\n", this, count));

    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);

    if (mClosed) {
        *countRead = 0;
        return mStatus;
    }

    nsresult rv;
    uint64_t avail = 0;
    if (mSendBufIn) {
        rv = mSendBufIn->Available(&avail);
        if (NS_FAILED(rv)) return rv;
    }

    if (avail == 0) {
        rv = FillSendBuf();
        if (NS_FAILED(rv)) return rv;

        rv = mSendBufIn->Available(&avail);
        if (NS_FAILED(rv)) return rv;

        // return EOF if send buffer is empty
        if (avail == 0) {
            *countRead = 0;
            return NS_OK;
        }
    }

    // read no more than what was requested
    if (avail > count)
        avail = count;

    mReader = reader;

    // avail is under 4GB, so casting to uint32_t is safe
    rv = mSendBufIn->ReadSegments(ReadFromPipe, this, (uint32_t)avail, countRead);

    mReader = nullptr;
    return rv;
}

nsresult
nsHttpPipeline::WriteSegments(nsAHttpSegmentWriter *writer,
                              uint32_t count,
                              uint32_t *countWritten)
{
    LOG(("nsHttpPipeline::WriteSegments [this=%p count=%u]\n", this, count));

    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);

    if (mClosed)
        return NS_SUCCEEDED(mStatus) ? NS_BASE_STREAM_CLOSED : mStatus;

    nsAHttpTransaction *trans;
    nsresult rv;

    trans = Response(0);
    // This code deals with the establishment of a CONNECT tunnel through
    // an HTTP proxy. It allows the connection to do the CONNECT/200
    // HTTP transaction to establish a tunnel as a precursor to the
    // actual pipeline of regular HTTP transactions.
    if (!trans && mRequestQ.Length() &&
        mConnection->IsProxyConnectInProgress()) {
        LOG(("nsHttpPipeline::WriteSegments [this=%p] Forced Delegation\n",
             this));
        trans = Request(0);
    }

    if (!trans) {
        if (mRequestQ.Length() > 0)
            rv = NS_BASE_STREAM_WOULD_BLOCK;
        else
            rv = NS_BASE_STREAM_CLOSED;
    }
    else {
        //
        // ask the transaction to consume data from the connection.
        // PushBack may be called recursively.
        //
        rv = trans->WriteSegments(writer, count, countWritten);

        if (rv == NS_BASE_STREAM_CLOSED || trans->IsDone()) {
            trans->Close(NS_OK);

            // Release the transaction if it is not IsProxyConnectInProgress()
            if (trans == Response(0)) {
                NS_RELEASE(trans);
                mResponseQ.RemoveElementAt(0);
                mResponseIsPartial = false;
                ++mHttp1xTransactionCount;
            }

            // ask the connection manager to add additional transactions
            // to our pipeline.
            nsRefPtr<nsHttpConnectionInfo> ci;
            GetConnectionInfo(getter_AddRefs(ci));
            if (ci)
                gHttpHandler->ConnMgr()->ProcessPendingQForEntry(ci);
        }
        else
            mResponseIsPartial = true;
    }

    if (mPushBackLen) {
        nsHttpPushBackWriter writer(mPushBackBuf, mPushBackLen);
        uint32_t len = mPushBackLen, n;
        mPushBackLen = 0;

        // This progress notification has previously been sent from
        // the socket transport code, but it was delivered to the
        // previous transaction on the pipeline.
        nsITransport *transport = Transport();
        if (transport)
            OnTransportStatus(transport, NS_NET_STATUS_RECEIVING_FROM,
                              mReceivingFromProgress);

        // the push back buffer is never larger than NS_HTTP_SEGMENT_SIZE,
        // so we are guaranteed that the next response will eat the entire
        // push back buffer (even though it might again call PushBack).
        rv = WriteSegments(&writer, len, &n);
    }

    return rv;
}

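// CancelPipeline closes every queued transaction except the in-flight
// response at the head of mResponseQ, using NS_ERROR_NET_RESET so they can
// be rescheduled elsewhere (or the original reason during a CONNECT tunnel).
// Returns the number of transactions cancelled this way; Close() uses that
// count to decide whether to report negative pipeline feedback.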
uint32_t
nsHttpPipeline::CancelPipeline(nsresult originalReason)
{
    uint32_t i, reqLen, respLen, total;
    nsAHttpTransaction *trans;

    reqLen = mRequestQ.Length();
    respLen = mResponseQ.Length();
    total = reqLen + respLen;

    // don't count the first response, if present
    if (respLen)
        total--;

    if (!total)
        return 0;

    // any pending requests can ignore this error and be restarted
    // unless it is during a CONNECT tunnel request
    for (i = 0; i < reqLen; ++i) {
        trans = Request(i);
        if (mConnection && mConnection->IsProxyConnectInProgress())
            trans->Close(originalReason);
        else
            trans->Close(NS_ERROR_NET_RESET);
        NS_RELEASE(trans);
    }
    mRequestQ.Clear();

    // any pending responses can be restarted except for the first one,
    // that we might want to finish on this pipeline or cancel individually.
    // Higher levels of callers ensure that we don't process non-idempotent
    // transactions with the NS_HTTP_ALLOW_PIPELINING bit set
    for (i = 1; i < respLen; ++i) {
        trans = Response(i);
        trans->Close(NS_ERROR_NET_RESET);
        NS_RELEASE(trans);
    }

    if (respLen > 1)
        mResponseQ.TruncateLength(1);

    /* Don't flag timed out connections as unreusable.. Tor is just slow :( */
    if (originalReason != NS_ERROR_NET_TIMEOUT) {
        DontReuse();
        Classify(nsAHttpTransaction::CLASS_SOLO);
    }

    return total;
}

void
nsHttpPipeline::Close(nsresult reason)
{
    LOG(("nsHttpPipeline::Close [this=%p reason=%x]\n", this, reason));

    if (mClosed) {
        LOG(("  already closed\n"));
        return;
    }

    // the connection is going away!
    mStatus = reason;
    mClosed = true;

    nsRefPtr<nsHttpConnectionInfo> ci;
    GetConnectionInfo(getter_AddRefs(ci));
    uint32_t numRescheduled = CancelPipeline(reason);

    // numRescheduled can be 0 if there is just a single response in the
    // pipeline object. That isn't really a meaningful pipeline that
    // has been forced to be rescheduled so it does not need to generate
    // negative feedback.
    if (ci && numRescheduled)
        gHttpHandler->ConnMgr()->PipelineFeedbackInfo(
            ci, nsHttpConnectionMgr::RedCanceledPipeline, nullptr, 0);

    nsAHttpTransaction *trans = Response(0);
    if (!trans)
        return;

    // The current transaction can be restarted via reset
    // if the response has not started to arrive and the reason
    // for failure is innocuous (e.g. not an SSL error)
    if (!mResponseIsPartial &&
        (reason == NS_ERROR_NET_RESET ||
         reason == NS_OK ||
         reason == NS_ERROR_NET_TIMEOUT ||
         reason == NS_BASE_STREAM_CLOSED)) {
        trans->Close(NS_ERROR_NET_RESET);
    }
    else {
        trans->Close(reason);
    }

    NS_RELEASE(trans);
    mResponseQ.Clear();
}

nsresult
nsHttpPipeline::OnReadSegment(const char *segment,
                              uint32_t count,
                              uint32_t *countRead)
{
    return mSendBufOut->Write(segment, count, countRead);
}

nsresult
nsHttpPipeline::FillSendBuf()
{
    // reads from request queue, moving transactions to response queue
    // when they have been completely read.

    nsresult rv;

    if (!mSendBufIn) {
        // allocate a single-segment pipe
        rv = NS_NewPipe(getter_AddRefs(mSendBufIn),
                        getter_AddRefs(mSendBufOut),
                        nsIOService::gDefaultSegmentSize,  /* segment size */
                        nsIOService::gDefaultSegmentSize,  /* max size */
                        true, true);
        if (NS_FAILED(rv)) return rv;
    }

    uint32_t n = 0; // initialized in case the head transaction has nothing to send
    uint64_t avail;
    uint64_t totalSent = 0;
    uint64_t reqsSent = 0;
    uint64_t alreadyPending = 0;

    mSendBufIn->Available(&alreadyPending);

    nsAHttpTransaction *trans;
    nsITransport *transport = Transport();
#ifdef WTF_TEST
    uint64_t totalAvailable = Available();
    nsRefPtr<nsHttpConnectionInfo> ci;
    GetConnectionInfo(getter_AddRefs(ci));
#endif

    while ((trans = Request(0)) != nullptr) {
        avail = trans->Available();
        if (avail) {
            // if there is already a response in the responseq then this
            // new data comprises a pipeline. Update the transaction in the
            // response queue to reflect that if necessary. We are now sending
            // out a request while we haven't received all responses.
            nsAHttpTransaction *response = Response(0);
            if (response && !response->PipelinePosition())
                response->SetPipelinePosition(1);
            rv = trans->ReadSegments(this, (uint32_t)std::min(avail, (uint64_t)UINT32_MAX), &n);
            if (NS_FAILED(rv)) return rv;

            if (n == 0) {
                LOG(("send pipe is full"));
                break;
            }

            mSendingToProgress += n;
            totalSent += n;
            if (!mSuppressSendEvents && transport) {
                // Simulate a SENDING_TO event
                trans->OnTransportStatus(transport,
                                         NS_NET_STATUS_SENDING_TO,
                                         mSendingToProgress);
            }
        }

        avail = trans->Available();
        if (avail == 0) {
#ifdef WTF_TEST
            nsHttpRequestHead *head = trans->RequestHead();
            fprintf(stderr, "WTF-order: Pipelined req %d/%u (%uB). Url: %s%s\n",
                    trans->PipelinePosition(), PipelineDepth(), n,
                    ci->Host(), head ? head->RequestURI().BeginReading() : "<unknown?>");
#endif
            reqsSent++;

            // move transaction from request queue to response queue
            mRequestQ.RemoveElementAt(0);
            mResponseQ.AppendElement(trans);
            mRequestIsPartial = false;

            if (!mSuppressSendEvents && transport) {
                // Simulate a WAITING_FOR event
                trans->OnTransportStatus(transport,
                                         NS_NET_STATUS_WAITING_FOR,
                                         mSendingToProgress);
            }

            // It would be good to re-enable data read handlers via ResumeRecv()
            // except the read handler code can be synchronously dispatched on
            // the stack.
        }
        else
            mRequestIsPartial = true;
    }

#ifdef WTF_TEST
    if (totalSent)
        fprintf(stderr, "WTF-combine: Sent %llu/%llu bytes of %llu combined pipelined requests for host %s\n",
                (unsigned long long)(alreadyPending + totalSent),
                (unsigned long long)totalAvailable,
                (unsigned long long)reqsSent, ci->Host());
#endif

    return NS_OK;
}

} // namespace mozilla::net
} // namespace mozilla