rippled
Loading...
Searching...
No Matches
GRPCServer.cpp
1//------------------------------------------------------------------------------
2/*
3 This file is part of rippled: https://github.com/ripple/rippled
4 Copyright (c) 2020 Ripple Labs Inc.
5
6 Permission to use, copy, modify, and/or distribute this software for any
7 purpose with or without fee is hereby granted, provided that the above
8 copyright notice and this permission notice appear in all copies.
9
10 THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17*/
18//==============================================================================
19
20#include <xrpld/app/main/GRPCServer.h>
21#include <xrpld/core/ConfigSections.h>
22#include <xrpl/beast/core/CurrentThreadName.h>
23#include <xrpl/beast/net/IPAddressConversion.h>
24#include <xrpl/resource/Fees.h>
25
26namespace ripple {
27
28namespace {
29
// Free helper (anonymous namespace): converts the peer string reported by
// gRPC (grpc::ServerContext::peer(), e.g. "ipv4:10.0.0.1:1234") into an
// asio TCP endpoint. Returns an empty result on any parse failure.
// NOTE(review): this listing is garbled — original line 32 (the return
// type, presumably std::optional<boost::asio::ip::tcp::endpoint>) and
// lines 45-46 (the statement producing `endpoint` from `peerClean`,
// presumably via beast::IP::Endpoint::from_string_checked per the symbol
// index at the bottom of this dump) are missing; recover from VCS.
30// helper function. converts string to endpoint. handles ipv4 and ipv6, with or
31// without port, with or without prepended scheme
33getEndpoint(std::string const& peer)
34{
35 try
36 {
37 std::size_t first = peer.find_first_of(":");
38 std::size_t last = peer.find_last_of(":");
39 std::string peerClean(peer);
// More than one ':' implies a scheme prefix ("ipv4:"/"ipv6:"); strip
// everything up to and including the first ':'.
40 if (first != last)
41 {
42 peerClean = peer.substr(first + 1);
43 }
44
47 if (endpoint)
48 return beast::IP::to_asio_endpoint(endpoint.value());
49 }
// Any parse/conversion exception falls through to the empty return.
50 catch (std::exception const&)
51 {
52 }
53 return {};
54}
55
56} // namespace
57
// CallData constructor: captures the async service, completion queue,
// application, and per-RPC configuration (condition, load charge, secure
// gateway IPs), then registers this object as a listener so that "this" is
// returned from CompletionQueue::Next when a request arrives.
// NOTE(review): listing gaps — original line 59 (the constructor name),
// lines 63-65 (the bindListener/handler/forward parameters, initialized
// below), and line 83 (the bindListener_ invocation mentioned by the
// trailing comment) are missing from this extract; recover from VCS.
58template <class Request, class Response>
60 org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService& service,
61 grpc::ServerCompletionQueue& cq,
62 Application& app,
66 RPC::Condition requiredCondition,
67 Resource::Charge loadType,
68 std::vector<boost::asio::ip::address> const& secureGatewayIPs)
69 : service_(service)
70 , cq_(cq)
71 , finished_(false)
72 , app_(app)
73 , responder_(&ctx_)
74 , bindListener_(std::move(bindListener))
75 , handler_(std::move(handler))
76 , forward_(std::move(forward))
77 , requiredCondition_(std::move(requiredCondition))
78 , loadType_(std::move(loadType))
79 , secureGatewayIPs_(secureGatewayIPs)
80{
81 // Bind a listener. When a request is received, "this" will be returned
82 // from CompletionQueue::Next
84}
85
// clone(): creates a fresh CallData with the same configuration so a new
// listener can replace this one once it begins processing a request
// (called from handleRpcs). Per the symbol index, the return type is
// std::shared_ptr<Processor>.
// NOTE(review): listing gaps — original lines 87-88 (return type and
// signature), line 91 (presumably `service_,`), and line 99 (presumably
// `secureGatewayIPs_);`) are missing from this extract.
86template <class Request, class Response>
89{
90 return std::make_shared<CallData<Request, Response>>(
92 cq_,
93 app_,
94 bindListener_,
95 handler_,
96 forward_,
97 requiredCondition_,
98 loadType_,
100}
101
// process() [no-arg overload]: marks this call as finished, then posts a
// coroutine to the JobQueue that runs the two-argument process(coro)
// overload. If the JobQueue is already stopped (postCoro returns null),
// replies INTERNAL immediately.
// NOTE(review): listing gaps — original line 104 (the qualified function
// name), line 109 (the declaration of `thisShared`, a shared_ptr keeping
// this object alive inside the lambda), and line 122 (the job type
// argument, presumably jtRPC per the symbol index) are missing.
102template <class Request, class Response>
103void
105{
106 // sanity check
107 BOOST_ASSERT(!finished_);
108
110 this->shared_from_this();
111
112 // Need to set finished to true before processing the response,
113 // because as soon as the response is posted to the completion
114 // queue (via responder_.Finish(...) or responder_.FinishWithError(...)),
115 // the CallData object is returned as a tag in handleRpcs().
116 // handleRpcs() checks the finished variable, and if true, destroys
117 // the object. Setting finished to true before calling process
118 // ensures that finished is always true when this CallData object
119 // is returned as a tag in handleRpcs(), after sending the response
120 finished_ = true;
121 auto coro = app_.getJobQueue().postCoro(
123 "gRPC-Client",
124 [thisShared](std::shared_ptr<JobQueue::Coro> coro) {
125 thisShared->process(coro);
126 });
127
128 // If coro is null, then the JobQueue has already been shutdown
129 if (!coro)
130 {
131 grpc::Status status{
132 grpc::StatusCode::INTERNAL, "Job Queue is already stopped"};
133 responder_.FinishWithError(status, this);
134 }
135}
136
// process(coro): the actual request handler, run on a JobQueue coroutine.
// Flow: (1) resource-accounting check — non-unlimited clients over their
// usage threshold get RESOURCE_EXHAUSTED; (2) charge the configured load
// type and determine the caller's Role; (3) debug-log role/address/user;
// (4) build the RPC context and verify requiredCondition_ (else
// FAILED_PRECONDITION); (5) invoke handler_ and send the response. Any
// exception is reported to the client as INTERNAL.
// NOTE(review): listing gaps — original lines 139-140 (the qualified
// signature taking std::shared_ptr<JobQueue::Coro>), line 176 (the
// declaration of `context`, presumably RPC::GRPCContext<Request>), and
// lines 181/185 (two context-initializer entries, presumably the ledger
// master and an InfoSub::pointer per the symbol index) are missing.
137template <class Request, class Response>
138void
141{
142 try
143 {
144 auto usage = getUsage();
145 bool isUnlimited = clientIsUnlimited();
// Unlimited (secure-gateway) clients are exempt from disconnect.
146 if (!isUnlimited && usage.disconnect(app_.journal("gRPCServer")))
147 {
148 grpc::Status status{
149 grpc::StatusCode::RESOURCE_EXHAUSTED,
150 "usage balance exceeds threshold"};
151 responder_.FinishWithError(status, this);
152 }
153 else
154 {
155 auto loadType = getLoadType();
156 usage.charge(loadType);
157 auto role = getRole(isUnlimited);
158
159 {
160 std::stringstream toLog;
161 toLog << "role = " << (int)role;
162
163 toLog << " address = ";
164 if (auto clientIp = getClientIpAddress())
165 toLog << clientIp.value();
166
167 toLog << " user = ";
168 if (auto user = getUser())
169 toLog << user.value();
170 toLog << " isUnlimited = " << isUnlimited;
171
172 JLOG(app_.journal("GRPCServer::Calldata").debug())
173 << toLog.str();
174 }
175
177 {app_.journal("gRPCServer"),
178 app_,
179 loadType,
180 app_.getOPs(),
182 usage,
183 role,
184 coro,
186 apiVersion},
187 request_};
188
189 // Make sure we can currently handle the rpc
190 error_code_i conditionMetRes =
191 RPC::conditionMet(requiredCondition_, context);
192
193 if (conditionMetRes != rpcSUCCESS)
194 {
195 RPC::ErrorInfo errorInfo = RPC::get_error_info(conditionMetRes);
196 grpc::Status status{
197 grpc::StatusCode::FAILED_PRECONDITION,
198 errorInfo.message.c_str()};
199 responder_.FinishWithError(status, this);
200 }
201 else
202 {
// handler_ returns {response, status}; mark the response as
// unlimited when applicable, then complete the RPC.
203 std::pair<Response, grpc::Status> result = handler_(context);
204 setIsUnlimited(result.first, isUnlimited);
205 responder_.Finish(result.first, result.second, this);
206 }
207 }
208 }
209 catch (std::exception const& ex)
210 {
211 grpc::Status status{grpc::StatusCode::INTERNAL, ex.what()};
212 responder_.FinishWithError(status, this);
213 }
214}
215
// isFinished(): true once a response has been queued; handleRpcs uses this
// to decide whether to destroy the object or start processing.
// NOTE(review): listing gap — original line 218 (the qualified signature)
// is missing from this extract.
216template <class Request, class Response>
217bool
219{
220 return finished_;
221}
222
// getLoadType(): the Resource::Charge configured for this RPC at setup.
// NOTE(review): listing gap — original lines 224-225 (return type and
// qualified signature) are missing from this extract.
223template <class Request, class Response>
226{
227 return loadType_;
228}
229
// getRole(): unlimited (secure-gateway) clients are IDENTIFIED; all others
// are plain USER.
// NOTE(review): listing gap — original line 232 (the qualified signature)
// is missing from this extract.
230template <class Request, class Response>
231Role
233{
234 if (isUnlimited)
235 return Role::IDENTIFIED;
236 else
237 return Role::USER;
238}
239
// getUser(): reads the "user" field from the request, if the Request proto
// declares one, via protobuf reflection. Empty string counts as no user.
// Per the symbol index, the return type is std::optional<std::string>.
// NOTE(review): listing gap — original lines 241-242 (return type and
// qualified signature) are missing from this extract.
240template <class Request, class Response>
243{
244 if (auto descriptor = Request::GetDescriptor()->FindFieldByName("user"))
245 {
246 std::string user =
247 Request::GetReflection()->GetString(request_, descriptor);
248 if (!user.empty())
249 {
250 return user;
251 }
252 }
253 return {};
}
255
// getClientIpAddress(): the address part of the client endpoint, or empty
// when the endpoint cannot be determined. Per the symbol index, the return
// type is std::optional<boost::asio::ip::address>.
// NOTE(review): listing gap — original lines 257-258 (return type and
// qualified signature) are missing from this extract.
256template <class Request, class Response>
259{
260 auto endpoint = getClientEndpoint();
261 if (endpoint)
262 return endpoint->address();
263 return {};
264}
265
// getClientEndpoint(): parses ctx_.peer() via the file-local getEndpoint
// helper. Per the symbol index, the return type is
// std::optional<boost::asio::ip::tcp::endpoint>.
// NOTE(review): listing gap — original lines 267-268 (return type and
// qualified signature) are missing from this extract.
266template <class Request, class Response>
269{
270 return ripple::getEndpoint(ctx_.peer());
271}
272
// clientIsUnlimited(): a client is unlimited only if it supplied a "user"
// field AND its IP address appears in the configured secure_gateway list.
// NOTE(review): listing gap — original line 275 (the qualified signature)
// is missing from this extract.
273template <class Request, class Response>
274bool
276{
// No user field => never unlimited, regardless of IP.
277 if (!getUser())
278 return false;
279 auto clientIp = getClientIpAddress();
280 if (clientIp)
281 {
282 for (auto& ip : secureGatewayIPs_)
283 {
284 if (ip == clientIp)
285 return true;
286 }
287 }
288 return false;
289}
290
// setIsUnlimited(): if the client is unlimited and the Response proto has
// an "is_unlimited" field, set it to true via reflection so the client
// knows it is exempt from rate limiting.
// NOTE(review): listing gap — original line 293 (the qualified signature)
// is missing from this extract.
291template <class Request, class Response>
292void
294 Response& response,
295 bool isUnlimited)
296{
297 if (isUnlimited)
298 {
299 if (auto descriptor =
300 Response::GetDescriptor()->FindFieldByName("is_unlimited"))
301 {
302 Response::GetReflection()->SetBool(&response, descriptor, true);
303 }
304 }
305}
306
// getUsage(): obtains the Resource::Consumer for the client endpoint;
// throws if the endpoint cannot be determined (this exception is caught by
// process(coro) and reported as an INTERNAL error).
// NOTE(review): listing gaps — original lines 308-309 (return type
// Resource::Consumer and qualified signature) and line 313 (presumably the
// call to app_.getResourceManager().newInboundEndpoint(...), per the
// symbol index) are missing from this extract.
307template <class Request, class Response>
310{
311 auto endpoint = getClientEndpoint();
312 if (endpoint)
314 beast::IP::from_asio(endpoint.value()));
315 Throw<std::runtime_error>("Failed to get client endpoint");
316}
317
// GRPCServerImpl constructor: reads the [port_grpc] config section. If ip
// and port are present, builds serverAddress_ ("ip:port"); throws on a
// malformed address. Optionally parses the comma-separated secure_gateway
// list into secureGatewayIPs_, rejecting unspecified addresses (0.0.0.0 /
// ::). If the section or ip/port are absent, serverAddress_ stays empty
// and start() later declines to run.
// NOTE(review): listing gaps — original line 318 (the constructor
// signature, GRPCServerImpl::GRPCServerImpl(Application& app) per the
// symbol index), line 338 (presumably `std::stringstream ss;`), and line
// 369 (presumably `secureGatewayIPs_.emplace_back(addr);`) are missing
// from this extract.
319 : app_(app), journal_(app_.journal("gRPC Server"))
320{
321 // if present, get endpoint from config
322 if (app_.config().exists(SECTION_PORT_GRPC))
323 {
324 Section const& section = app_.config().section(SECTION_PORT_GRPC);
325
326 auto const optIp = section.get("ip");
327 if (!optIp)
328 return;
329
330 auto const optPort = section.get("port");
331 if (!optPort)
332 return;
333 try
334 {
335 boost::asio::ip::tcp::endpoint endpoint(
336 boost::asio::ip::make_address(*optIp), std::stoi(*optPort));
337
339 ss << endpoint;
340 serverAddress_ = ss.str();
341 }
342 catch (std::exception const&)
343 {
344 JLOG(journal_.error()) << "Error setting grpc server address";
345 Throw<std::runtime_error>("Error setting grpc server address");
346 }
347
348 auto const optSecureGateway = section.get("secure_gateway");
349 if (optSecureGateway)
350 {
351 try
352 {
353 std::stringstream ss{*optSecureGateway};
354 std::string ip;
// secure_gateway is a comma-separated list of IPs.
355 while (std::getline(ss, ip, ','))
356 {
357 boost::algorithm::trim(ip);
358 auto const addr = boost::asio::ip::make_address(ip);
359
360 if (addr.is_unspecified())
361 {
362 JLOG(journal_.error())
363 << "Can't pass unspecified IP in "
364 << "secure_gateway section of port_grpc";
365 Throw<std::runtime_error>(
366 "Unspecified IP in secure_gateway section");
367 }
368
370 }
371 }
372 catch (std::exception const&)
373 {
374 JLOG(journal_.error())
375 << "Error parsing secure gateway IPs for grpc server";
376 Throw<std::runtime_error>(
377 "Error parsing secure_gateway section");
378 }
379 }
380 }
381}
382
// shutdown(): stops the gRPC server first (cancelling listeners and
// draining in-flight requests), then the completion queue, in that order —
// the ordering is required so cq_->Next() can return false once drained.
// NOTE(review): listing gap — original line 384 (the qualified signature
// GRPCServerImpl::shutdown()) is missing from this extract.
383void
385{
386 JLOG(journal_.debug()) << "Shutting down";
387
388 // The below call cancels all "listeners" (CallData objects that are waiting
389 // for a request, as opposed to processing a request), and blocks until all
390 // requests being processed are completed. CallData objects in the midst of
391 // processing requests need to actually send data back to the client, via
392 // responder_.Finish(...) or responder_.FinishWithError(...), for this call
393 // to unblock. Each cancelled listener is returned via cq_.Next(...) with ok
394 // set to false
395 server_->Shutdown();
396 JLOG(journal_.debug()) << "Server has been shutdown";
397
398 // Always shutdown the completion queue after the server. This call allows
399 // cq_.Next() to return false, once all events posted to the completion
400 // queue have been processed. See handleRpcs() for more details.
401 cq_->Shutdown();
402 JLOG(journal_.debug()) << "Completion Queue has been shutdown";
403}
404
// handleRpcs(): the event loop run by the server thread. Pulls CallData
// tags off the completion queue; cancelled listeners (ok == false) are
// destroyed, new requests are processed after cloning a replacement
// listener, and finished requests are destroyed. The loop exits when the
// queue has been shut down and fully drained.
// NOTE(review): listing gaps — original line 406 (the qualified signature
// GRPCServerImpl::handleRpcs()) and line 411 (the declaration of
// `requests`, presumably a std::vector<std::shared_ptr<Processor>>
// initialized from setupListeners() per the symbol index) are missing.
405void
407{
408 // This collection should really be an unordered_set. However, to delete
409 // from the unordered_set, we need a shared_ptr, but cq_.Next() (see below
410 // while loop) sets the tag to a raw pointer.
412
// Swap-and-pop removal of the element owning `ptr` from `requests`.
413 auto erase = [&requests](Processor* ptr) {
414 auto it = std::find_if(
415 requests.begin(),
416 requests.end(),
417 [ptr](std::shared_ptr<Processor>& sPtr) {
418 return sPtr.get() == ptr;
419 });
420 BOOST_ASSERT(it != requests.end());
421 it->swap(requests.back());
422 requests.pop_back();
423 };
424
425 void* tag; // uniquely identifies a request.
426 bool ok;
427 // Block waiting to read the next event from the completion queue. The
428 // event is uniquely identified by its tag, which in this case is the
429 // memory address of a CallData instance.
430 // The return value of Next should always be checked. This return value
431 // tells us whether there is any kind of event or cq_ is shutting down.
432 // When cq_.Next(...) returns false, all work has been completed and the
433 // loop can exit. When the server is shutdown, each CallData object that is
434 // listening for a request is forceably cancelled, and is returned by
435 // cq_->Next() with ok set to false. Then, each CallData object processing
436 // a request must complete (by sending data to the client), each of which
437 // will be returned from cq_->Next() with ok set to true. After all
438 // cancelled listeners and all CallData objects processing requests are
439 // returned via cq_->Next(), cq_->Next() will return false, causing the
440 // loop to exit.
441 while (cq_->Next(&tag, &ok))
442 {
443 auto ptr = static_cast<Processor*>(tag);
444 JLOG(journal_.trace()) << "Processing CallData object."
445 << " ptr = " << ptr << " ok = " << ok;
446
447 if (!ok)
448 {
449 JLOG(journal_.debug())
450 << "Request listener cancelled. " << "Destroying object";
451 erase(ptr);
452 }
453 else
454 {
455 if (!ptr->isFinished())
456 {
457 JLOG(journal_.debug()) << "Received new request. Processing";
458 // ptr is now processing a request, so create a new CallData
459 // object to handle additional requests
460 auto cloned = ptr->clone();
461 requests.push_back(cloned);
462 // process the request
463 ptr->process();
464 }
465 else
466 {
467 JLOG(journal_.debug()) << "Sent response. Destroying object";
468 erase(ptr);
469 }
470 }
471 }
472 JLOG(journal_.debug()) << "Completion Queue drained";
473}
474
// setupListeners(): constructs one CallData listener per supported RPC
// (GetLedger, GetLedgerData, GetLedgerDiff, GetLedgerEntry) and returns
// them; per the symbol index the return type is
// std::vector<std::shared_ptr<Processor>>.
// NOTE(review): listing gaps — original lines 476-477 (return type and
// qualified signature), line 479 (the declaration of `requests`), and, in
// each listener block, the handler/condition/load-type/secure-gateway
// argument lines (e.g. 496, 498-500 for GetLedger — presumably wrapping
// doLedgerGrpc etc. per the symbol index) are missing from this extract.
475// create a CallData instance for each RPC
478{
480
481 auto addToRequests = [&requests](auto callData) {
482 requests.push_back(std::move(callData));
483 };
484
485 {
486 using cd = CallData<
487 org::xrpl::rpc::v1::GetLedgerRequest,
488 org::xrpl::rpc::v1::GetLedgerResponse>;
489
490 addToRequests(std::make_shared<cd>(
491 service_,
492 *cq_,
493 app_,
494 &org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService::
495 RequestGetLedger,
497 &org::xrpl::rpc::v1::XRPLedgerAPIService::Stub::GetLedger,
501 }
502 {
503 using cd = CallData<
504 org::xrpl::rpc::v1::GetLedgerDataRequest,
505 org::xrpl::rpc::v1::GetLedgerDataResponse>;
506
507 addToRequests(std::make_shared<cd>(
508 service_,
509 *cq_,
510 app_,
511 &org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService::
512 RequestGetLedgerData,
514 &org::xrpl::rpc::v1::XRPLedgerAPIService::Stub::GetLedgerData,
518 }
519 {
520 using cd = CallData<
521 org::xrpl::rpc::v1::GetLedgerDiffRequest,
522 org::xrpl::rpc::v1::GetLedgerDiffResponse>;
523
524 addToRequests(std::make_shared<cd>(
525 service_,
526 *cq_,
527 app_,
528 &org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService::
529 RequestGetLedgerDiff,
531 &org::xrpl::rpc::v1::XRPLedgerAPIService::Stub::GetLedgerDiff,
535 }
536 {
537 using cd = CallData<
538 org::xrpl::rpc::v1::GetLedgerEntryRequest,
539 org::xrpl::rpc::v1::GetLedgerEntryResponse>;
540
541 addToRequests(std::make_shared<cd>(
542 service_,
543 *cq_,
544 app_,
545 &org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService::
546 RequestGetLedgerEntry,
548 &org::xrpl::rpc::v1::XRPLedgerAPIService::Stub::GetLedgerEntry,
552 }
553 return requests;
554}
555
// start(): returns false if no [port_grpc] address was configured;
// otherwise builds and starts the gRPC server on serverAddress_ with
// insecure credentials, recording the actually-bound port. Returns whether
// a nonzero port was bound.
// NOTE(review): listing gap — original line 557 (the qualified signature
// GRPCServerImpl::start()) is missing from this extract.
556bool
558{
559 // if config does not specify a grpc server address, don't start
560 if (serverAddress_.empty())
561 return false;
562
563 JLOG(journal_.info()) << "Starting gRPC server at " << serverAddress_;
564
565 grpc::ServerBuilder builder;
566
567 // Listen on the given address without any authentication mechanism.
568 // Actually binded port will be returned into "port" variable.
569 int port = 0;
570 builder.AddListeningPort(
571 serverAddress_, grpc::InsecureServerCredentials(), &port);
572 // Register "service_" as the instance through which we'll communicate with
573 // clients. In this case it corresponds to an *asynchronous* service.
574 builder.RegisterService(&service_);
575 // Get hold of the completion queue used for the asynchronous communication
576 // with the gRPC runtime.
577 cq_ = builder.AddCompletionQueue();
578 // Finally assemble the server.
579 server_ = builder.BuildAndStart();
580 serverPort_ = static_cast<std::uint16_t>(port);
581
// A zero port means the bind failed (or the server never started).
582 return static_cast<bool>(serverPort_);
583}
584
// getEndpoint(): reconstructs the bound endpoint from the configured
// address string and the actually-bound port.
// NOTE(review): listing gaps — original line 586 (the qualified signature,
// const-qualified per the symbol index) and line 589 (the expression
// initializing `addr`, presumably the host portion of serverAddress_
// before the ':') are missing from this extract.
585boost::asio::ip::tcp::endpoint
587{
588 std::string const addr =
590 return boost::asio::ip::tcp::endpoint(
591 boost::asio::ip::make_address(addr), serverPort_);
592}
593
// GRPCServer::start(): starts the implementation; on success, spawns the
// dedicated event-loop thread running handleRpcs(). Returns whether the
// server is running.
// NOTE(review): listing gap — original line 595 (the qualified signature
// GRPCServer::start()) is missing from this extract.
594bool
596{
597 // Start the server and setup listeners
598 if (running_ = impl_.start(); running_)
599 {
600 thread_ = std::thread([this]() {
601 // Start the event loop and begin handling requests
602 beast::setCurrentThreadName("rippled: grpc");
603 this->impl_.handleRpcs();
604 });
605 }
606 return running_;
607}
608
// GRPCServer::stop(): shuts down the implementation (which drains the
// completion queue, letting handleRpcs return) and joins the event-loop
// thread. No-op if not running.
// NOTE(review): listing gap — original line 610 (the qualified signature
// GRPCServer::stop()) is missing from this extract.
609void
611{
612 if (running_)
613 {
614 impl_.shutdown();
615 thread_.join();
616 running_ = false;
617 }
618}
619
// Destructor: asserts the server was stopped before destruction (stop()
// must have been called, or start() never succeeded).
// NOTE(review): listing gap — original line 620 (the destructor signature
// GRPCServer::~GRPCServer()) is missing from this extract.
621{
622 XRPL_ASSERT(!running_, "ripple::GRPCServer::~GRPCServer : is not running");
623}
624
// GRPCServer::getEndpoint(): forwards to the implementation's endpoint
// accessor.
// NOTE(review): listing gap — original line 626 (the qualified signature,
// const-qualified per the symbol index) is missing from this extract.
625boost::asio::ip::tcp::endpoint
627{
628 return impl_.getEndpoint();
629}
630
631} // namespace ripple
T back(T... args)
T begin(T... args)
constexpr const char * c_str() const
Definition: json_value.h:75
static std::optional< Endpoint > from_string_checked(std::string const &s)
Create an Endpoint from a string.
Definition: IPEndpoint.cpp:45
Stream error() const
Definition: Journal.h:346
Stream debug() const
Definition: Journal.h:328
Stream info() const
Definition: Journal.h:334
Stream trace() const
Severity stream access functions.
Definition: Journal.h:322
virtual Config & config()=0
virtual beast::Journal journal(std::string const &name)=0
virtual JobQueue & getJobQueue()=0
virtual Resource::Manager & getResourceManager()=0
virtual NetworkOPs & getOPs()=0
virtual LedgerMaster & getLedgerMaster()=0
bool exists(std::string const &name) const
Returns true if a section with the given name exists.
Section & section(std::string const &name)
Returns the section with the given name.
Resource::Consumer getUsage()
Definition: GRPCServer.cpp:309
std::optional< std::string > getUser()
Definition: GRPCServer.cpp:242
grpc::ServerCompletionQueue & cq_
Definition: GRPCServer.h:160
grpc::ServerContext ctx_
Definition: GRPCServer.h:165
Resource::Charge getLoadType()
Definition: GRPCServer.cpp:225
void setIsUnlimited(Response &response, bool isUnlimited)
Definition: GRPCServer.cpp:293
std::optional< boost::asio::ip::address > getClientIpAddress()
Definition: GRPCServer.cpp:258
BindListener< Request, Response > bindListener_
Definition: GRPCServer.h:183
grpc::ServerAsyncResponseWriter< Response > responder_
Definition: GRPCServer.h:180
Role getRole(bool isUnlimited)
Definition: GRPCServer.cpp:232
std::shared_ptr< Processor > clone() override
Definition: GRPCServer.cpp:88
CallData(org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService &service, grpc::ServerCompletionQueue &cq, Application &app, BindListener< Request, Response > bindListener, Handler< Request, Response > handler, Forward< Request, Response > forward, RPC::Condition requiredCondition, Resource::Charge loadType, std::vector< boost::asio::ip::address > const &secureGatewayIPs)
Definition: GRPCServer.cpp:59
std::optional< boost::asio::ip::tcp::endpoint > getClientEndpoint()
Definition: GRPCServer.cpp:268
org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService & service_
Definition: GRPCServer.h:157
virtual void process() override
Definition: GRPCServer.cpp:104
virtual bool isFinished() override
Definition: GRPCServer.cpp:218
std::string serverAddress_
Definition: GRPCServer.h:84
Application & app_
Definition: GRPCServer.h:82
std::vector< std::shared_ptr< Processor > > setupListeners()
Definition: GRPCServer.cpp:477
std::unique_ptr< grpc::Server > server_
Definition: GRPCServer.h:80
std::unique_ptr< grpc::ServerCompletionQueue > cq_
Definition: GRPCServer.h:73
org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService service_
Definition: GRPCServer.h:78
GRPCServerImpl(Application &app)
Definition: GRPCServer.cpp:318
static unsigned constexpr apiVersion
Definition: GRPCServer.h:110
boost::asio::ip::tcp::endpoint getEndpoint() const
Definition: GRPCServer.cpp:586
std::vector< boost::asio::ip::address > secureGatewayIPs_
Definition: GRPCServer.h:87
std::uint16_t serverPort_
Definition: GRPCServer.h:85
beast::Journal journal_
Definition: GRPCServer.h:89
std::thread thread_
Definition: GRPCServer.h:324
boost::asio::ip::tcp::endpoint getEndpoint() const
Definition: GRPCServer.cpp:626
GRPCServerImpl impl_
Definition: GRPCServer.h:323
std::shared_ptr< InfoSub > pointer
Definition: InfoSub.h:53
std::shared_ptr< Coro > postCoro(JobType t, std::string const &name, F &&f)
Creates a coroutine and adds a job to the queue which will run it.
Definition: JobQueue.h:410
A consumption charge.
Definition: Charge.h:31
An endpoint that consumes resources.
Definition: Consumer.h:35
virtual Consumer newInboundEndpoint(beast::IP::Endpoint const &address)=0
Create a new endpoint keyed by inbound IP address or the forwarded IP if proxied.
Holds a collection of configuration values.
Definition: BasicConfig.h:45
std::optional< T > get(std::string const &name) const
Definition: BasicConfig.h:140
T emplace_back(T... args)
T empty(T... args)
T end(T... args)
T find_first_of(T... args)
T find_if(T... args)
T find_last_of(T... args)
T getline(T... args)
T join(T... args)
boost::asio::ip::tcp::endpoint to_asio_endpoint(Endpoint const &endpoint)
Convert to asio::ip::tcp::endpoint.
Endpoint from_asio(boost::asio::ip::address const &address)
Convert to Endpoint.
void setCurrentThreadName(std::string_view newThreadName)
Changes the name of the caller thread.
@ NO_CONDITION
Definition: Handler.h:39
error_code_i conditionMet(Condition condition_required, T &context)
Definition: Handler.h:80
ErrorInfo const & get_error_info(error_code_i code)
Returns an ErrorInfo that reflects the error code.
Definition: ErrorCodes.cpp:177
Charge const feeMediumBurdenRPC
Use hash_* containers for keys that do not need a cryptographically secure hashing algorithm.
Definition: algorithm.h:26
error_code_i
Definition: ErrorCodes.h:40
@ rpcSUCCESS
Definition: ErrorCodes.h:44
bool isUnlimited(Role const &role)
ADMIN and IDENTIFIED roles shall have unlimited resources.
Definition: Role.cpp:125
std::pair< org::xrpl::rpc::v1::GetLedgerResponse, grpc::Status > doLedgerGrpc(RPC::GRPCContext< org::xrpl::rpc::v1::GetLedgerRequest > &context)
std::pair< org::xrpl::rpc::v1::GetLedgerEntryResponse, grpc::Status > doLedgerEntryGrpc(RPC::GRPCContext< org::xrpl::rpc::v1::GetLedgerEntryRequest > &context)
void erase(STObject &st, TypedField< U > const &f)
Remove a field in an STObject.
Definition: STExchange.h:172
Role
Indicates the level of administrative permission to grant.
Definition: Role.h:44
@ jtRPC
Definition: Job.h:51
std::pair< org::xrpl::rpc::v1::GetLedgerDiffResponse, grpc::Status > doLedgerDiffGrpc(RPC::GRPCContext< org::xrpl::rpc::v1::GetLedgerDiffRequest > &context)
Definition: LedgerDiff.cpp:6
std::pair< org::xrpl::rpc::v1::GetLedgerDataResponse, grpc::Status > doLedgerDataGrpc(RPC::GRPCContext< org::xrpl::rpc::v1::GetLedgerDataRequest > &context)
Definition: LedgerData.cpp:135
STL namespace.
T pop_back(T... args)
T push_back(T... args)
T stoi(T... args)
T str(T... args)
Maps an rpc error code to its token, default message, and HTTP status.
Definition: ErrorCodes.h:179
Json::StaticString message
Definition: ErrorCodes.h:211
T substr(T... args)
T value(T... args)
T what(T... args)