--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/netwerk/protocol/http/nsHttpPipeline.cpp	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,968 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// HttpLog.h should generally be included first
+#include "HttpLog.h"
+
+#include "nsHttpPipeline.h"
+#include "nsHttpHandler.h"
+#include "nsIOService.h"
+#include "nsISocketTransport.h"
+#include "nsIPipe.h"
+#include "nsCOMPtr.h"
+#include <algorithm>
+#include "nsHttpRequestHead.h"
+
+#ifdef DEBUG
+#include "prthread.h"
+// defined by the socket transport service while active
+extern PRThread *gSocketThread;
+#endif
+
+namespace mozilla {
+namespace net {
+
+//-----------------------------------------------------------------------------
+// nsHttpPushBackWriter
+//-----------------------------------------------------------------------------
+
+class nsHttpPushBackWriter : public nsAHttpSegmentWriter
+{
+public:
+    nsHttpPushBackWriter(const char *buf, uint32_t bufLen)
+        : mBuf(buf)
+        , mBufLen(bufLen)
+    { }
+    virtual ~nsHttpPushBackWriter() {}
+
+    nsresult OnWriteSegment(char *buf, uint32_t count, uint32_t *countWritten)
+    {
+        if (mBufLen == 0)
+            return NS_BASE_STREAM_CLOSED;
+
+        if (count > mBufLen)
+            count = mBufLen;
+
+        memcpy(buf, mBuf, count);
+
+        mBuf += count;
+        mBufLen -= count;
+        *countWritten = count;
+        return NS_OK;
+    }
+
+private:
+    const char *mBuf;
+    uint32_t mBufLen;
+};
+
+//-----------------------------------------------------------------------------
+// nsHttpPipeline <public>
+//-----------------------------------------------------------------------------
+
+nsHttpPipeline::nsHttpPipeline()
+    : mConnection(nullptr)
+    , mStatus(NS_OK)
+    , mRequestIsPartial(false)
+    , mResponseIsPartial(false)
+    , mClosed(false)
+    , mUtilizedPipeline(false)
+    , mPushBackBuf(nullptr)
+    , mPushBackLen(0)
+    , mPushBackMax(0)
+    , mHttp1xTransactionCount(0)
+    , mReceivingFromProgress(0)
+    , mSendingToProgress(0)
+    , mSuppressSendEvents(true)
+{
+}
+
+nsHttpPipeline::~nsHttpPipeline()
+{
+    // make sure we aren't still holding onto any transactions!
+    Close(NS_ERROR_ABORT);
+
+    NS_IF_RELEASE(mConnection);
+
+    if (mPushBackBuf)
+        free(mPushBackBuf);
+}
+
+// Generate a shuffled request ordering sequence
+void
+nsHttpPipeline::ShuffleTransOrder(uint32_t count)
+{
+    if (count < 2)
+        return;
+
+    uint32_t pos = mRequestQ[0]->PipelinePosition();
+    uint32_t i = 0;
+
+    for (i = 0; i < count; ++i) {
+        uint32_t ridx = rand() % count;
+
+        nsAHttpTransaction *tmp = mRequestQ[i];
+        mRequestQ[i] = mRequestQ[ridx];
+        mRequestQ[ridx] = tmp;
+    }
+
+    for (i = 0; i < count; ++i) {
+        mRequestQ[i]->SetPipelinePosition(pos);
+        pos++;
+    }
+
+    LOG(("nsHttpPipeline::ShuffleTransOrder: Shuffled %d transactions.\n", count));
+}
+
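+// NOTE: the swap loop above is the "naive" shuffle (each slot swapped with a
+// uniformly random slot), which is known to produce a slightly biased
+// permutation, and rand() is not a cryptographic RNG. A sketch of an unbiased
+// Fisher-Yates pass over the same queue, assuming the bias matters for the
+// traffic-analysis goal of this patch, would be:
+//
+//     for (uint32_t j = count - 1; j > 0; --j) {
+//         uint32_t ridx = rand() % (j + 1);    // uniform over [0, j]
+//         nsAHttpTransaction *tmp = mRequestQ[j];
+//         mRequestQ[j] = mRequestQ[ridx];
+//         mRequestQ[ridx] = tmp;
+//     }
+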
+nsresult
+nsHttpPipeline::AddTransaction(nsAHttpTransaction *trans)
+{
+    LOG(("nsHttpPipeline::AddTransaction [this=%p trans=%p]\n", this, trans));
+
+    if (mRequestQ.Length() || mResponseQ.Length())
+        mUtilizedPipeline = true;
+
+    NS_ADDREF(trans);
+    mRequestQ.AppendElement(trans);
+    uint32_t qlen = PipelineDepth();
+
+    if (qlen != 1) {
+        trans->SetPipelinePosition(qlen);
+    }
+    else {
+        // do it for this case in case an idempotent cancellation
+        // is being repeated and an old value needs to be cleared
+        trans->SetPipelinePosition(0);
+    }
+
+    // trans->SetConnection() needs to be updated to point back at
+    // the pipeline object.
+    trans->SetConnection(this);
+
+    ShuffleTransOrder(mRequestQ.Length());
+
+    if (mConnection && !mClosed && mRequestQ.Length() == 1)
+        mConnection->ResumeSend();
+
+    return NS_OK;
+}
+
+uint32_t
+nsHttpPipeline::PipelineDepth()
+{
+    return mRequestQ.Length() + mResponseQ.Length();
+}
+
+nsresult
+nsHttpPipeline::SetPipelinePosition(int32_t position)
+{
+    nsAHttpTransaction *trans = Response(0);
+    if (trans)
+        return trans->SetPipelinePosition(position);
+    return NS_OK;
+}
+
+int32_t
+nsHttpPipeline::PipelinePosition()
+{
+    nsAHttpTransaction *trans = Response(0);
+    if (trans)
+        return trans->PipelinePosition();
+
+    // The response queue is empty, so return the oldest request
+    if (mRequestQ.Length())
+        return Request(mRequestQ.Length() - 1)->PipelinePosition();
+
+    // No transactions in the pipeline
+    return 0;
+}
+
+nsHttpPipeline *
+nsHttpPipeline::QueryPipeline()
+{
+    return this;
+}
+
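+// Position bookkeeping, by example: adding A, B, C to an empty pipeline
+// assigns provisional positions 0, 2, 3 (0 for the depth-1 case, otherwise
+// the queue depth at append time). Each AddTransaction() then calls
+// ShuffleTransOrder(), which randomizes the request queue and renumbers it
+// consecutively starting from the front transaction's previous position, so
+// the queue always ends up numbered 0, 1, 2, ... in (shuffled) queue order.
+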
+//-----------------------------------------------------------------------------
+// nsHttpPipeline::nsISupports
+//-----------------------------------------------------------------------------
+
+NS_IMPL_ADDREF(nsHttpPipeline)
+NS_IMPL_RELEASE(nsHttpPipeline)
+
+// multiple inheritance fun :-)
+NS_INTERFACE_MAP_BEGIN(nsHttpPipeline)
+    NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsAHttpConnection)
+NS_INTERFACE_MAP_END
+
+
+//-----------------------------------------------------------------------------
+// nsHttpPipeline::nsAHttpConnection
+//-----------------------------------------------------------------------------
+
+nsresult
+nsHttpPipeline::OnHeadersAvailable(nsAHttpTransaction *trans,
+                                   nsHttpRequestHead *requestHead,
+                                   nsHttpResponseHead *responseHead,
+                                   bool *reset)
+{
+    LOG(("nsHttpPipeline::OnHeadersAvailable [this=%p]\n", this));
+
+    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
+    MOZ_ASSERT(mConnection, "no connection");
+
+    nsRefPtr<nsHttpConnectionInfo> ci;
+    GetConnectionInfo(getter_AddRefs(ci));
+    MOZ_ASSERT(ci);
+
+    bool pipeliningBefore = gHttpHandler->ConnMgr()->SupportsPipelining(ci);
+
+    // trans has now received its response headers; forward to the real connection
+    nsresult rv = mConnection->OnHeadersAvailable(trans,
+                                                  requestHead,
+                                                  responseHead,
+                                                  reset);
+
+    if (!pipeliningBefore && gHttpHandler->ConnMgr()->SupportsPipelining(ci))
+        // The received headers have expanded the eligible
+        // pipeline depth for this connection
+        gHttpHandler->ConnMgr()->ProcessPendingQForEntry(ci);
+
+    return rv;
+}
+
+void
+nsHttpPipeline::CloseTransaction(nsAHttpTransaction *trans, nsresult reason)
+{
+    LOG(("nsHttpPipeline::CloseTransaction [this=%p trans=%p reason=%x]\n",
+         this, trans, reason));
+
+    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
+    MOZ_ASSERT(NS_FAILED(reason), "expecting failure code");
+
+    // the specified transaction is to be closed with the given "reason"
+
+    int32_t index;
+    bool killPipeline = false;
+
+    index = mRequestQ.IndexOf(trans);
+    if (index >= 0) {
+        if (index == 0 && mRequestIsPartial) {
+            // the transaction is in the request queue. check to see if any of
+            // its data has been written out yet.
+            killPipeline = true;
+        }
+        mRequestQ.RemoveElementAt(index);
+    }
+    else {
+        index = mResponseQ.IndexOf(trans);
+        if (index >= 0)
+            mResponseQ.RemoveElementAt(index);
+        // while we could avoid killing the pipeline if this transaction is the
+        // last transaction in the pipeline, there doesn't seem to be that much
+        // value in doing so. most likely if this transaction is going away,
+        // the others will be shortly as well.
+        killPipeline = true;
+    }
+
+    // Marking this connection as non-reusable prevents other items from being
+    // added to it and causes it to be torn down soon.
+    DontReuse();
+
+    trans->Close(reason);
+    NS_RELEASE(trans);
+
+    if (killPipeline) {
+        // reschedule anything from this pipeline onto a different connection
+        CancelPipeline(reason);
+    }
+
+    // If all the transactions have been removed then we can close the connection
+    // right away.
+    if (!mRequestQ.Length() && !mResponseQ.Length() && mConnection)
+        mConnection->CloseTransaction(this, reason);
+}
+
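+// CloseTransaction policy, summarized: a transaction still sitting in
+// mRequestQ with none of its bytes on the wire is removed quietly; one at the
+// head of mRequestQ with partial data written, or one in mResponseQ, forces
+// CancelPipeline(), which reschedules the remaining transactions onto other
+// connections. In every case the connection is marked DontReuse().
+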
+nsresult
+nsHttpPipeline::TakeTransport(nsISocketTransport **aTransport,
+                              nsIAsyncInputStream **aInputStream,
+                              nsIAsyncOutputStream **aOutputStream)
+{
+    return mConnection->TakeTransport(aTransport, aInputStream, aOutputStream);
+}
+
+bool
+nsHttpPipeline::IsPersistent()
+{
+    return true; // pipelining requires this
+}
+
+bool
+nsHttpPipeline::IsReused()
+{
+    if (!mUtilizedPipeline && mConnection)
+        return mConnection->IsReused();
+    return true;
+}
+
+void
+nsHttpPipeline::DontReuse()
+{
+    if (mConnection)
+        mConnection->DontReuse();
+}
+
+nsresult
+nsHttpPipeline::PushBack(const char *data, uint32_t length)
+{
+    LOG(("nsHttpPipeline::PushBack [this=%p len=%u]\n", this, length));
+
+    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
+    MOZ_ASSERT(mPushBackLen == 0, "push back buffer already has data!");
+
+    // If we have no chance for a pipeline (e.g. due to an Upgrade)
+    // then push this data down to the original connection
+    if (!mConnection->IsPersistent())
+        return mConnection->PushBack(data, length);
+
+    // PushBack is called recursively from WriteSegments
+
+    // XXX we have a design decision to make here. either we buffer the data
+    // and process it when we return to WriteSegments, or we attempt to move
+    // onto the next transaction from here. doing so adds complexity with the
+    // benefit of eliminating the extra buffer copy. the buffer is at most
+    // 4096 bytes, so it is really unclear if there is any value in the added
+    // complexity. besides simplicity, buffering this data has the advantage
+    // that we'll call close on the transaction sooner, which will wake up
+    // the HTTP channel sooner to continue with its work.
+
+    if (!mPushBackBuf) {
+        mPushBackMax = length;
+        mPushBackBuf = (char *) malloc(mPushBackMax);
+        if (!mPushBackBuf)
+            return NS_ERROR_OUT_OF_MEMORY;
+    }
+    else if (length > mPushBackMax) {
+        // grow the push back buffer as necessary; use a temporary so the
+        // old buffer is not leaked if realloc fails.
+        MOZ_ASSERT(length <= nsIOService::gDefaultSegmentSize, "too big");
+        mPushBackMax = length;
+        char *newBuf = (char *) realloc(mPushBackBuf, mPushBackMax);
+        if (!newBuf) {
+            free(mPushBackBuf);
+            mPushBackBuf = nullptr;
+            return NS_ERROR_OUT_OF_MEMORY;
+        }
+        mPushBackBuf = newBuf;
+    }
+
+    memcpy(mPushBackBuf, data, length);
+    mPushBackLen = length;
+
+    return NS_OK;
+}
+
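+// The buffered bytes are replayed at the bottom of WriteSegments(): a
+// stack-allocated nsHttpPushBackWriter wraps mPushBackBuf and WriteSegments()
+// recurses once so the next response transaction consumes the leftover data
+// exactly as if it had just arrived from the socket.
+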
+nsHttpConnection *
+nsHttpPipeline::TakeHttpConnection()
+{
+    if (mConnection)
+        return mConnection->TakeHttpConnection();
+    return nullptr;
+}
+
+nsAHttpTransaction::Classifier
+nsHttpPipeline::Classification()
+{
+    if (mConnection)
+        return mConnection->Classification();
+
+    LOG(("nsHttpPipeline::Classification this=%p "
+         "has null mConnection using CLASS_SOLO default", this));
+    return nsAHttpTransaction::CLASS_SOLO;
+}
+
+void
+nsHttpPipeline::SetProxyConnectFailed()
+{
+    nsAHttpTransaction *trans = Request(0);
+
+    if (trans)
+        trans->SetProxyConnectFailed();
+}
+
+nsHttpRequestHead *
+nsHttpPipeline::RequestHead()
+{
+    nsAHttpTransaction *trans = Request(0);
+
+    if (trans)
+        return trans->RequestHead();
+    return nullptr;
+}
+
+uint32_t
+nsHttpPipeline::Http1xTransactionCount()
+{
+    return mHttp1xTransactionCount;
+}
+
+nsresult
+nsHttpPipeline::TakeSubTransactions(
+    nsTArray<nsRefPtr<nsAHttpTransaction> > &outTransactions)
+{
+    LOG(("nsHttpPipeline::TakeSubTransactions [this=%p]\n", this));
+
+    if (mResponseQ.Length() || mRequestIsPartial)
+        return NS_ERROR_ALREADY_OPENED;
+
+    int32_t i, count = mRequestQ.Length();
+    for (i = 0; i < count; ++i) {
+        nsAHttpTransaction *trans = Request(i);
+        // set the transaction connection object back to the underlying
+        // nsHttpConnectionHandle
+        trans->SetConnection(mConnection);
+        outTransactions.AppendElement(trans);
+        NS_RELEASE(trans);
+    }
+    mRequestQ.Clear();
+
+    LOG(("  took %d\n", count));
+    return NS_OK;
+}
+
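+// Reference counting in TakeSubTransactions(): AppendElement() on the
+// nsRefPtr array takes its own reference to each transaction, so the
+// NS_RELEASE drops only the reference that mRequestQ held, handing clean
+// ownership of the sub-transactions to the caller.
+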
+//-----------------------------------------------------------------------------
+// nsHttpPipeline::nsAHttpTransaction
+//-----------------------------------------------------------------------------
+
+void
+nsHttpPipeline::SetConnection(nsAHttpConnection *conn)
+{
+    LOG(("nsHttpPipeline::SetConnection [this=%p conn=%p]\n", this, conn));
+
+    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
+    MOZ_ASSERT(!mConnection, "already have a connection");
+
+    NS_IF_ADDREF(mConnection = conn);
+}
+
+nsAHttpConnection *
+nsHttpPipeline::Connection()
+{
+    LOG(("nsHttpPipeline::Connection [this=%p conn=%p]\n", this, mConnection));
+
+    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
+    return mConnection;
+}
+
+void
+nsHttpPipeline::GetSecurityCallbacks(nsIInterfaceRequestor **result)
+{
+    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
+
+    // depending on timing this could be either the request or the response
+    // that is needed - but they both go to the same host. A request for these
+    // callbacks directly in nsHttpTransaction would not make a distinction
+    // over whether the request had been transmitted yet.
+    nsAHttpTransaction *trans = Request(0);
+    if (!trans)
+        trans = Response(0);
+    if (trans)
+        trans->GetSecurityCallbacks(result);
+    else {
+        *result = nullptr;
+    }
+}
+
+void
+nsHttpPipeline::OnTransportStatus(nsITransport* transport,
+                                  nsresult status, uint64_t progress)
+{
+    LOG(("nsHttpPipeline::OnTransportStatus [this=%p status=%x progress=%llu]\n",
+         this, status, progress));
+
+    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
+
+    nsAHttpTransaction *trans;
+    int32_t i, count;
+
+    switch (status) {
+
+    case NS_NET_STATUS_RESOLVING_HOST:
+    case NS_NET_STATUS_RESOLVED_HOST:
+    case NS_NET_STATUS_CONNECTING_TO:
+    case NS_NET_STATUS_CONNECTED_TO:
+        // These should only appear at most once per pipeline.
+        // Deliver to the first transaction.
+
+        trans = Request(0);
+        if (!trans)
+            trans = Response(0);
+        if (trans)
+            trans->OnTransportStatus(transport, status, progress);
+
+        break;
+
+    case NS_NET_STATUS_SENDING_TO:
+        // This is generated by the socket transport when (part of)
+        // a transaction is written out.
+        //
+        // In pipelining this is generated out of FillSendBuf(), but it cannot
+        // do so until the connection is confirmed by CONNECTED_TO.
+        // See patch for bug 196827.
+        //
+
+        if (mSuppressSendEvents) {
+            mSuppressSendEvents = false;
+
+            // catch up by sending the event to all the transactions that have
+            // moved from request to response and any that have been partially
+            // sent. Also send WAITING_FOR to those that were completely sent
+            count = mResponseQ.Length();
+            for (i = 0; i < count; ++i) {
+                Response(i)->OnTransportStatus(transport,
+                                               NS_NET_STATUS_SENDING_TO,
+                                               progress);
+                Response(i)->OnTransportStatus(transport,
+                                               NS_NET_STATUS_WAITING_FOR,
+                                               progress);
+            }
+            if (mRequestIsPartial && Request(0))
+                Request(0)->OnTransportStatus(transport,
+                                              NS_NET_STATUS_SENDING_TO,
+                                              progress);
+            mSendingToProgress = progress;
+        }
+        // otherwise ignore it
+        break;
+
+    case NS_NET_STATUS_WAITING_FOR:
+        // Created by nsHttpConnection when the request pipeline has been
+        // totally sent. Ignore it here because it is simulated in
+        // FillSendBuf() when a request is moved from request to response.
+
+        // ignore it
+        break;
+
+    case NS_NET_STATUS_RECEIVING_FROM:
+        // Forward this only to the transaction currently receiving data. It is
+        // normally generated by the socket transport, but can also
+        // be repeated by the pushbackwriter if necessary.
+        mReceivingFromProgress = progress;
+        if (Response(0))
+            Response(0)->OnTransportStatus(transport, status, progress);
+        break;
+
+    default:
+        // forward other notifications to all request transactions
+        count = mRequestQ.Length();
+        for (i = 0; i < count; ++i)
+            Request(i)->OnTransportStatus(transport, status, progress);
+        break;
+    }
+}
+
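+// Event simulation, by example: suppose requests A and B are both buffered
+// and written before the socket reports NS_NET_STATUS_CONNECTED_TO. The
+// first real SENDING_TO that arrives afterwards clears mSuppressSendEvents
+// and replays SENDING_TO (and WAITING_FOR, where complete) to A and B in
+// order; from then on FillSendBuf() simulates these events itself as each
+// request drains, using the byte counter kept in mSendingToProgress.
+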
+bool
+nsHttpPipeline::IsDone()
+{
+    bool done = true;
+
+    uint32_t i, count = mRequestQ.Length();
+    for (i = 0; done && (i < count); i++)
+        done = Request(i)->IsDone();
+
+    count = mResponseQ.Length();
+    for (i = 0; done && (i < count); i++)
+        done = Response(i)->IsDone();
+
+    return done;
+}
+
+nsresult
+nsHttpPipeline::Status()
+{
+    return mStatus;
+}
+
+uint32_t
+nsHttpPipeline::Caps()
+{
+    nsAHttpTransaction *trans = Request(0);
+    if (!trans)
+        trans = Response(0);
+
+    return trans ? trans->Caps() : 0;
+}
+
+void
+nsHttpPipeline::SetDNSWasRefreshed()
+{
+    nsAHttpTransaction *trans = Request(0);
+    if (!trans)
+        trans = Response(0);
+
+    if (trans)
+        trans->SetDNSWasRefreshed();
+}
+
+uint64_t
+nsHttpPipeline::Available()
+{
+    uint64_t result = 0;
+
+    int32_t i, count = mRequestQ.Length();
+    for (i = 0; i < count; ++i)
+        result += Request(i)->Available();
+    return result;
+}
+
+NS_METHOD
+nsHttpPipeline::ReadFromPipe(nsIInputStream *stream,
+                             void *closure,
+                             const char *buf,
+                             uint32_t offset,
+                             uint32_t count,
+                             uint32_t *countRead)
+{
+    nsHttpPipeline *self = (nsHttpPipeline *) closure;
+    return self->mReader->OnReadSegment(buf, count, countRead);
+}
+
+nsresult
+nsHttpPipeline::ReadSegments(nsAHttpSegmentReader *reader,
+                             uint32_t count,
+                             uint32_t *countRead)
+{
+    LOG(("nsHttpPipeline::ReadSegments [this=%p count=%u]\n", this, count));
+
+    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
+
+    if (mClosed) {
+        *countRead = 0;
+        return mStatus;
+    }
+
+    nsresult rv;
+    uint64_t avail = 0;
+    if (mSendBufIn) {
+        rv = mSendBufIn->Available(&avail);
+        if (NS_FAILED(rv)) return rv;
+    }
+
+    if (avail == 0) {
+        rv = FillSendBuf();
+        if (NS_FAILED(rv)) return rv;
+
+        rv = mSendBufIn->Available(&avail);
+        if (NS_FAILED(rv)) return rv;
+
+        // return EOF if send buffer is empty
+        if (avail == 0) {
+            *countRead = 0;
+            return NS_OK;
+        }
+    }
+
+    // read no more than what was requested
+    if (avail > count)
+        avail = count;
+
+    mReader = reader;
+
+    // avail is now clamped to count, so casting to uint32_t is safe
+    rv = mSendBufIn->ReadSegments(ReadFromPipe, this, (uint32_t)avail, countRead);
+
+    mReader = nullptr;
+    return rv;
+}
+
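+// The send path in one picture: AddTransaction() queues a request;
+// FillSendBuf() drains each transaction's ReadSegments() into the
+// mSendBufOut/mSendBufIn pipe; ReadSegments() above then hands the pipe's
+// contents to the connection's segment reader (the socket) via ReadFromPipe.
+//
+//   mRequestQ -> FillSendBuf() -> [pipe] -> ReadFromPipe() -> socket writer
+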
+nsresult
+nsHttpPipeline::WriteSegments(nsAHttpSegmentWriter *writer,
+                              uint32_t count,
+                              uint32_t *countWritten)
+{
+    LOG(("nsHttpPipeline::WriteSegments [this=%p count=%u]\n", this, count));
+
+    MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
+
+    if (mClosed)
+        return NS_SUCCEEDED(mStatus) ? NS_BASE_STREAM_CLOSED : mStatus;
+
+    nsAHttpTransaction *trans;
+    nsresult rv;
+
+    trans = Response(0);
+    // This code deals with the establishment of a CONNECT tunnel through
+    // an HTTP proxy. It allows the connection to do the CONNECT/200
+    // HTTP transaction to establish a tunnel as a precursor to the
+    // actual pipeline of regular HTTP transactions.
+    if (!trans && mRequestQ.Length() &&
+        mConnection->IsProxyConnectInProgress()) {
+        LOG(("nsHttpPipeline::WriteSegments [this=%p] Forced Delegation\n",
+             this));
+        trans = Request(0);
+    }
+
+    if (!trans) {
+        if (mRequestQ.Length() > 0)
+            rv = NS_BASE_STREAM_WOULD_BLOCK;
+        else
+            rv = NS_BASE_STREAM_CLOSED;
+    }
+    else {
+        //
+        // ask the transaction to consume data from the connection.
+        // PushBack may be called recursively.
+        //
+        rv = trans->WriteSegments(writer, count, countWritten);
+
+        if (rv == NS_BASE_STREAM_CLOSED || trans->IsDone()) {
+            trans->Close(NS_OK);
+
+            // Release the transaction unless it was the forced-delegation
+            // request used while IsProxyConnectInProgress()
+            if (trans == Response(0)) {
+                NS_RELEASE(trans);
+                mResponseQ.RemoveElementAt(0);
+                mResponseIsPartial = false;
+                ++mHttp1xTransactionCount;
+            }
+
+            // ask the connection manager to add additional transactions
+            // to our pipeline.
+            nsRefPtr<nsHttpConnectionInfo> ci;
+            GetConnectionInfo(getter_AddRefs(ci));
+            if (ci)
+                gHttpHandler->ConnMgr()->ProcessPendingQForEntry(ci);
+        }
+        else
+            mResponseIsPartial = true;
+    }
+
+    if (mPushBackLen) {
+        nsHttpPushBackWriter writer(mPushBackBuf, mPushBackLen);
+        uint32_t len = mPushBackLen, n;
+        mPushBackLen = 0;
+
+        // This progress notification has previously been sent from
+        // the socket transport code, but it was delivered to the
+        // previous transaction on the pipeline.
+        nsITransport *transport = Transport();
+        if (transport)
+            OnTransportStatus(transport, NS_NET_STATUS_RECEIVING_FROM,
+                              mReceivingFromProgress);
+
+        // the push back buffer is never larger than NS_HTTP_SEGMENT_SIZE,
+        // so we are guaranteed that the next response will eat the entire
+        // push back buffer (even though it might again call PushBack).
+        rv = WriteSegments(&writer, len, &n);
+    }
+
+    return rv;
+}
+
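+// Push-back replay, traced: if the socket delivers 1000 bytes but response 0
+// ends after 600, the transaction calls PushBack() with the trailing 400;
+// once response 0 is closed and dequeued above, WriteSegments() re-enters
+// itself with an nsHttpPushBackWriter over those 400 bytes so response 1
+// starts parsing at the correct offset, with no additional socket read.
+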
+uint32_t
+nsHttpPipeline::CancelPipeline(nsresult originalReason)
+{
+    uint32_t i, reqLen, respLen, total;
+    nsAHttpTransaction *trans;
+
+    reqLen = mRequestQ.Length();
+    respLen = mResponseQ.Length();
+    total = reqLen + respLen;
+
+    // don't count the first response, if present
+    if (respLen)
+        total--;
+
+    if (!total)
+        return 0;
+
+    // any pending requests can ignore this error and be restarted
+    // unless it is during a CONNECT tunnel request
+    for (i = 0; i < reqLen; ++i) {
+        trans = Request(i);
+        if (mConnection && mConnection->IsProxyConnectInProgress())
+            trans->Close(originalReason);
+        else
+            trans->Close(NS_ERROR_NET_RESET);
+        NS_RELEASE(trans);
+    }
+    mRequestQ.Clear();
+
+    // any pending responses can be restarted except for the first one,
+    // that we might want to finish on this pipeline or cancel individually.
+    // Higher levels of callers ensure that we don't process a non-idempotent
+    // transaction with the NS_HTTP_ALLOW_PIPELINING bit set
+    for (i = 1; i < respLen; ++i) {
+        trans = Response(i);
+        trans->Close(NS_ERROR_NET_RESET);
+        NS_RELEASE(trans);
+    }
+
+    if (respLen > 1)
+        mResponseQ.TruncateLength(1);
+
+    /* Don't flag timed-out connections as unreusable... Tor is just slow :( */
+    if (originalReason != NS_ERROR_NET_TIMEOUT) {
+        DontReuse();
+        Classify(nsAHttpTransaction::CLASS_SOLO);
+    }
+
+    return total;
+}
+
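+// Closing with NS_ERROR_NET_RESET (rather than the original failure) is what
+// lets the transaction layer transparently restart these requests on a fresh
+// connection; during a CONNECT tunnel the original reason is propagated
+// instead, so those requests are not restarted.
+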
+void
+nsHttpPipeline::Close(nsresult reason)
+{
+    LOG(("nsHttpPipeline::Close [this=%p reason=%x]\n", this, reason));
+
+    if (mClosed) {
+        LOG(("  already closed\n"));
+        return;
+    }
+
+    // the connection is going away!
+    mStatus = reason;
+    mClosed = true;
+
+    nsRefPtr<nsHttpConnectionInfo> ci;
+    GetConnectionInfo(getter_AddRefs(ci));
+    uint32_t numRescheduled = CancelPipeline(reason);
+
+    // numRescheduled can be 0 if there is just a single response in the
+    // pipeline object. That isn't really a meaningful pipeline that
+    // has been forced to be rescheduled so it does not need to generate
+    // negative feedback.
+    if (ci && numRescheduled)
+        gHttpHandler->ConnMgr()->PipelineFeedbackInfo(
+            ci, nsHttpConnectionMgr::RedCanceledPipeline, nullptr, 0);
+
+    nsAHttpTransaction *trans = Response(0);
+    if (!trans)
+        return;
+
+    // The current transaction can be restarted via reset
+    // if the response has not started to arrive and the reason
+    // for failure is innocuous (e.g. not an SSL error)
+    if (!mResponseIsPartial &&
+        (reason == NS_ERROR_NET_RESET ||
+         reason == NS_OK ||
+         reason == NS_ERROR_NET_TIMEOUT ||
+         reason == NS_BASE_STREAM_CLOSED)) {
+        trans->Close(NS_ERROR_NET_RESET);
+    }
+    else {
+        trans->Close(reason);
+    }
+
+    NS_RELEASE(trans);
+    mResponseQ.Clear();
+}
+
+nsresult
+nsHttpPipeline::OnReadSegment(const char *segment,
+                              uint32_t count,
+                              uint32_t *countRead)
+{
+    return mSendBufOut->Write(segment, count, countRead);
+}
+
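+// FillSendBuf() below buffers into a pipe whose segment size equals its max
+// size (one gDefaultSegmentSize segment), so at most one segment's worth of
+// request bytes is ever staged: when the pipe is full, the transaction's
+// ReadSegments() inside FillSendBuf() moves 0 bytes and the loop stops until
+// the socket drains the pipe.
+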
+nsresult
+nsHttpPipeline::FillSendBuf()
+{
+    // reads from the request queue, moving transactions to the response queue
+    // when they have been completely read.
+
+    nsresult rv;
+
+    if (!mSendBufIn) {
+        // allocate a single-segment pipe
+        rv = NS_NewPipe(getter_AddRefs(mSendBufIn),
+                        getter_AddRefs(mSendBufOut),
+                        nsIOService::gDefaultSegmentSize,  /* segment size */
+                        nsIOService::gDefaultSegmentSize,  /* max size */
+                        true, true);
+        if (NS_FAILED(rv)) return rv;
+    }
+
+    uint32_t n = 0;
+    uint64_t avail;
+    uint64_t totalSent = 0;
+    uint64_t reqsSent = 0;
+    uint64_t alreadyPending = 0;
+
+    mSendBufIn->Available(&alreadyPending);
+
+    nsAHttpTransaction *trans;
+    nsITransport *transport = Transport();
+#ifdef WTF_TEST
+    uint64_t totalAvailable = Available();
+    nsRefPtr<nsHttpConnectionInfo> ci;
+    GetConnectionInfo(getter_AddRefs(ci));
+#endif
+
+    while ((trans = Request(0)) != nullptr) {
+        avail = trans->Available();
+        if (avail) {
+            // if there is already a response in the responseq then this
+            // new data comprises a pipeline. Update the transaction in the
+            // response queue to reflect that if necessary. We are now sending
+            // out a request while we haven't received all responses.
+            nsAHttpTransaction *response = Response(0);
+            if (response && !response->PipelinePosition())
+                response->SetPipelinePosition(1);
+            rv = trans->ReadSegments(this, (uint32_t)std::min(avail, (uint64_t)UINT32_MAX), &n);
+            if (NS_FAILED(rv)) return rv;
+
+            if (n == 0) {
+                LOG(("send pipe is full"));
+                break;
+            }
+
+            mSendingToProgress += n;
+            totalSent += n;
+            if (!mSuppressSendEvents && transport) {
+                // Simulate a SENDING_TO event
+                trans->OnTransportStatus(transport,
+                                         NS_NET_STATUS_SENDING_TO,
+                                         mSendingToProgress);
+            }
+        }
+
+        avail = trans->Available();
+        if (avail == 0) {
+#ifdef WTF_TEST
+            nsHttpRequestHead *head = trans->RequestHead();
+            fprintf(stderr, "WTF-order: Pipelined req %d/%u (%uB). Url: %s%s\n",
+                    trans->PipelinePosition(), PipelineDepth(), n,
+                    ci->Host(), head ? head->RequestURI().BeginReading() : "<unknown?>");
+#endif
+            reqsSent++;
+
+            // move transaction from request queue to response queue
+            mRequestQ.RemoveElementAt(0);
+            mResponseQ.AppendElement(trans);
+            mRequestIsPartial = false;
+
+            if (!mSuppressSendEvents && transport) {
+                // Simulate a WAITING_FOR event
+                trans->OnTransportStatus(transport,
+                                         NS_NET_STATUS_WAITING_FOR,
+                                         mSendingToProgress);
+            }
+
+            // It would be good to re-enable data read handlers via ResumeRecv()
+            // except the read handler code can be synchronously dispatched on
+            // the stack.
+        }
+        else
+            mRequestIsPartial = true;
+    }
+
+#ifdef WTF_TEST
+    if (totalSent)
+        fprintf(stderr, "WTF-combine: Sent %llu/%llu bytes of %llu combined pipelined requests for host %s\n",
+                (unsigned long long)(alreadyPending + totalSent),
+                (unsigned long long)totalAvailable,
+                (unsigned long long)reqsSent, ci->Host());
+#endif
+
+    return NS_OK;
+}
+
+} // namespace net
+} // namespace mozilla