#include <stdio.h>
#include <sstream>
#include "ServiceStatusInfo.h"
#include "GetServiceStatusMessage.h"
#include "ServiceStatusManager.h"
#include "curl/curl.h"
#include "jsoncpp/json.h"
#include "libmicrohttpd/microhttpd.h"
#include "log4cplus/version.h"
#include "cryptopp/cryptlib.h"

#define _STR(x) _VAL(x)
#define _VAL(x) #x

ServiceStatusManager::ServiceStatusManager(MessageRoute *messageRoute)
    :BaseProcess(messageRoute, "ServiceStatusManager")
{
    // initialize the status information
    initServiceStatusInfo();

    // collect third-party library information
    initLibraryVersionInfo();

    // subscribe to messages
    subscribeMessage(Service_Status_Message);
}

// initialize the service status information
void ServiceStatusManager::initServiceStatusInfo()
{
    // status information of the service
    mServiceStatusInfo = std::make_shared<ServiceStatusInfo>();
    mServiceStatusInfo->setStartupTime(QDateTime::currentDateTime());
#ifdef GIT_BRANCH
    mServiceStatusInfo->setGitBranch(_STR(GIT_BRANCH));
#endif
#ifdef GIT_COMMIT_ID
    mServiceStatusInfo->setGitCommitId(_STR(GIT_COMMIT_ID));
#endif
}

// initialize the version information of the third-party libraries
void ServiceStatusManager::initLibraryVersionInfo()
{
    std::string curlVersion = LIBCURL_VERSION;
    std::string jsoncppVersion = JSONCPP_VERSION_STRING;
    std::string libmicrohttpdVersion = MHD_get_version();
    std::string log4cplusVersion = LOG4CPLUS_VERSION_STR;
    std::string qtVersion = qVersion();

    mServiceStatusInfo->setLibraryVersion("curl", curlVersion);
    mServiceStatusInfo->setLibraryVersion("jsoncpp", jsoncppVersion);
    mServiceStatusInfo->setLibraryVersion("libmicrohttpd", libmicrohttpdVersion);
    mServiceStatusInfo->setLibraryVersion("log4cplus", log4cplusVersion);
    mServiceStatusInfo->setLibraryVersion("Qt", qtVersion);
}

bool ServiceStatusManager::init()
{
    LOG_I(mClassName, "init module " << getModuleName());

    // log git information
    LOG_I(mClassName, "git info:" << mServiceStatusInfo->gitInfoToString());

    // log library version information
    LOG_I(mClassName, "library version info:" << mServiceStatusInfo->libraryInfoToString());

    return true;
}

void ServiceStatusManager::beginWork()
{
    LOG_I(mClassName, "begin work, module " << getModuleName());
}

// unload the module
void ServiceStatusManager::uninit()
{
    LOG_I(mClassName, "begin uninit");
    BaseProcess::uninit();
    LOG_I(mClassName, "end uninit");
}

// message handling function
std::shared_ptr<BaseResponse> ServiceStatusManager::onProcessMessage(std::shared_ptr<BaseMessage> &message)
{
    std::shared_ptr<BaseResponse> response;
    switch(message->getMessageType())
    {
    case Service_Status_Message:
        // fetch the service status information
        response = onProcessGetServiceStatusMessage(message);
    }

    return response;
}

// handler for peeking at messages
bool ServiceStatusManager::onForeseeMessage(std::shared_ptr<BaseMessage> &message)
{
    return false;
}

// handle the response to a message
void ServiceStatusManager::onProcessResponse(std::shared_ptr<BaseResponse> &response)
{
}

// handle the get-service-status message
std::shared_ptr<BaseResponse> ServiceStatusManager::onProcessGetServiceStatusMessage(std::shared_ptr<BaseMessage> &message)
{
    std::shared_ptr<GetServiceStatusResponse> response = std::make_shared<GetServiceStatusResponse>(mServiceStatusInfo, message, Common::noError());
    return response;
}
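The _STR/_VAL pair above is the usual two-step macro-stringification trick for turning build-time definitions such as GIT_BRANCH into string literals. A minimal standalone sketch of the same trick, assuming the build passes something like -DGIT_BRANCH=main on the compiler command line (the fallback define exists only so the sketch compiles on its own):

#include <iostream>

#define _STR(x) _VAL(x)   // first expansion substitutes the macro argument
#define _VAL(x) #x        // second expansion stringizes the substituted token

#ifndef GIT_BRANCH
#define GIT_BRANCH unknown // hypothetical fallback for standalone compilation
#endif

int main() {
    // Without the extra _STR level, #GIT_BRANCH would yield the literal text
    // "GIT_BRANCH" instead of the value supplied by the build system.
    std::cout << "branch: " << _STR(GIT_BRANCH) << "\n";
    return 0;
}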
// // This source file is part of appleseed. // Visit https://appleseedhq.net/ for additional information and resources. // // This software is released under the MIT license. // // Copyright (c) 2016-2018 Esteban Tovagliari, The appleseedhq Organization // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // // Interface header. #include "attributeutils.h" // Maya headers. #include "appleseedmaya/_beginmayaheaders.h" #include <maya/MFnMatrixData.h> #include "appleseedmaya/_endmayaheaders.h" namespace { template <typename T> MStatus get3(const MPlug& plug, T& x, T& y, T& z) { if (!plug.isCompound()) return MS::kFailure; if (plug.numChildren() != 3) return MS::kFailure; MStatus status; if (status) status = plug.child(0).getValue(x); if (status) status = plug.child(1).getValue(y); if (status) status = plug.child(2).getValue(z); return status; } } namespace AttributeUtils { MStatus get(const MPlug& plug, MAngle& value) { return plug.getValue(value); } MStatus get(const MPlug& plug, MColor& value) { value.a = 1.0f; return get3(plug, value.r, value.g, value.b); } MStatus get(const MPlug& plug, MPoint& value) { return get3(plug, value.x, value.y, value.z); } MStatus get(const MPlug& plug, MVector& value) { return get3(plug, value.x, value.y, value.z); } MStatus get(const MPlug& plug, MMatrix& value) { value.setToIdentity(); MObject matrixObject; MStatus status = plug.getValue(matrixObject); if (!status) return status; MFnMatrixData matrixDataFn(matrixObject); value = matrixDataFn.matrix(&status); return status; } MStatus getPlugConnectedTo(const MPlug& dstPlug, MPlug& srcPlug) { if (!dstPlug.isConnected()) return MS::kFailure; MStatus status; MPlugArray inputConnections; dstPlug.connectedTo(inputConnections, true, false, &status); if (status) { if (inputConnections.length() == 0) return MS::kFailure; srcPlug = inputConnections[0]; } return status; } bool hasConnections(const MPlug& plug, bool input) { MStatus status; if (!plug.isConnected(&status)) return false; MPlugArray connections; plug.connectedTo( connections, input ? true : false, input ? 
false : true, &status); if (status) return connections.length() != 0; return false; } bool anyChildPlugConnected(const MPlug& plug, bool input) { MStatus status; if (!plug.isCompound(&status)) return false; if (!status) return false; int numChildren = plug.numChildren(&status); if (!status) return false; for (int i = 0, e = plug.numChildren(); i < e; ++i) { MPlug c = plug.child(i, &status); if (!status) continue; if (hasConnections(c, input)) return true; } return false; } MStatus makeInput(MFnAttribute& attr) { attr.setStorable(true); attr.setReadable(false); attr.setWritable(true); attr.setKeyable(true); return MS::kSuccess; } MStatus makeOutput(MFnAttribute& attr) { attr.setStorable(false); attr.setReadable(true); attr.setWritable(false); attr.setKeyable(false); //attr.setHidden(true); return MS::kSuccess; } }
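The get3 helper above reads the three children of a compound Maya plug, and the AttributeUtils::get overloads build typed accessors on top of it. A hypothetical usage sketch, assuming the Maya devkit is available and that depNode wraps a shading node with a compound "color" attribute (both names are illustrative, not part of the original file):

#include <maya/MFnDependencyNode.h>
#include <maya/MColor.h>
#include <maya/MPlug.h>
#include "attributeutils.h"

MStatus readColor(const MFnDependencyNode& depNode, MColor& outColor)
{
    MStatus status;
    // Look up the compound plug by name; get3() then reads its three children.
    MPlug colorPlug = depNode.findPlug("color", /*wantNetworkedPlug=*/false, &status);
    if (!status)
        return status;
    // Returns MS::kFailure if the plug is not a 3-child compound.
    return AttributeUtils::get(colorPlug, outColor);
}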
#include "library/common/extensions/filters/http/platform_bridge/filter.h" #include "envoy/server/filter_config.h" #include "source/common/buffer/buffer_impl.h" #include "source/common/common/assert.h" #include "source/common/common/dump_state_utils.h" #include "source/common/common/scope_tracker.h" #include "source/common/common/utility.h" #include "library/common/api/external.h" #include "library/common/buffer/bridge_fragment.h" #include "library/common/data/utility.h" #include "library/common/extensions/filters/http/platform_bridge/c_type_definitions.h" #include "library/common/http/header_utility.h" #include "library/common/http/headers.h" namespace Envoy { namespace Extensions { namespace HttpFilters { namespace PlatformBridge { namespace { // TODO: https://github.com/envoyproxy/envoy-mobile/issues/1287 void replaceHeaders(Http::HeaderMap& headers, envoy_headers c_headers) { headers.clear(); for (envoy_map_size_t i = 0; i < c_headers.length; i++) { headers.addCopy(Http::LowerCaseString(Data::Utility::copyToString(c_headers.entries[i].key)), Data::Utility::copyToString(c_headers.entries[i].value)); } // The C envoy_headers struct can be released now because the headers have been copied. release_envoy_headers(c_headers); } } // namespace static void envoy_filter_release_callbacks(const void* context) { PlatformBridgeFilterWeakPtr* weak_filter = static_cast<PlatformBridgeFilterWeakPtr*>(const_cast<void*>(context)); delete weak_filter; } static void envoy_filter_callback_resume_decoding(const void* context) { PlatformBridgeFilterWeakPtr* weak_filter = static_cast<PlatformBridgeFilterWeakPtr*>(const_cast<void*>(context)); if (auto filter = weak_filter->lock()) { filter->resumeDecoding(); } } static void envoy_filter_callback_resume_encoding(const void* context) { PlatformBridgeFilterWeakPtr* weak_filter = static_cast<PlatformBridgeFilterWeakPtr*>(const_cast<void*>(context)); if (auto filter = weak_filter->lock()) { filter->resumeEncoding(); } } static void envoy_filter_reset_idle(const void* context) { PlatformBridgeFilterWeakPtr* weak_filter = static_cast<PlatformBridgeFilterWeakPtr*>(const_cast<void*>(context)); if (auto filter = weak_filter->lock()) { filter->resetIdleTimer(); } } PlatformBridgeFilterConfig::PlatformBridgeFilterConfig( const envoymobile::extensions::filters::http::platform_bridge::PlatformBridge& proto_config) : filter_name_(proto_config.platform_filter_name()), platform_filter_(static_cast<envoy_http_filter*>( Api::External::retrieveApi(proto_config.platform_filter_name()))) {} PlatformBridgeFilter::PlatformBridgeFilter(PlatformBridgeFilterConfigSharedPtr config, Event::Dispatcher& dispatcher) : dispatcher_(dispatcher), filter_name_(config->filter_name()), platform_filter_(*config->platform_filter()) { // The initialization above sets platform_filter_ to a copy of the struct stored on the config. // In the typical case, this will represent a filter implementation that needs to be intantiated. // static_context will contain the necessary platform-specific mechanism to produce a filter // instance. instance_context will initially be null, but after initialization, set to the // context needed for actual filter invocations. ENVOY_LOG(trace, "PlatformBridgeFilter({})::PlatformBridgeFilter", filter_name_); if (platform_filter_.init_filter) { // Set the instance_context to the result of the initialization call. Cleanup will ultimately // occur within the onDestroy() invocation below. 
ENVOY_LOG(trace, "PlatformBridgeFilter({})->init_filter", filter_name_); platform_filter_.instance_context = platform_filter_.init_filter(&platform_filter_); ASSERT(platform_filter_.instance_context, fmt::format("PlatformBridgeFilter({}): init_filter unsuccessful", filter_name_)); } else { // If init_filter is missing, zero out the rest of the struct for safety. ENVOY_LOG(debug, "PlatformBridgeFilter({}): missing initializer", filter_name_); platform_filter_ = {}; } // Set directional filters now that the platform_filter_ has been updated (initialized or zero'ed // out). request_filter_base_ = std::make_unique<RequestFilterBase>(*this); response_filter_base_ = std::make_unique<ResponseFilterBase>(*this); } void PlatformBridgeFilter::setDecoderFilterCallbacks( Http::StreamDecoderFilterCallbacks& callbacks) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::setDecoderCallbacks", filter_name_); decoder_callbacks_ = &callbacks; // TODO(goaway): currently both platform APIs unconditionally set this field, meaning that the // heap allocation below occurs when it could be avoided. if (platform_filter_.set_request_callbacks) { platform_request_callbacks_.resume_iteration = envoy_filter_callback_resume_decoding; platform_request_callbacks_.reset_idle = envoy_filter_reset_idle; platform_request_callbacks_.release_callbacks = envoy_filter_release_callbacks; // We use a weak_ptr wrapper for the filter to ensure presence before dispatching callbacks. // The weak_ptr is heap-allocated, because it must be managed (and eventually released) by // platform code. platform_request_callbacks_.callback_context = new PlatformBridgeFilterWeakPtr{shared_from_this()}; ENVOY_LOG(trace, "PlatformBridgeFilter({})->set_request_callbacks", filter_name_); platform_filter_.set_request_callbacks(platform_request_callbacks_, platform_filter_.instance_context); } } void PlatformBridgeFilter::setEncoderFilterCallbacks( Http::StreamEncoderFilterCallbacks& callbacks) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::setEncoderCallbacks", filter_name_); encoder_callbacks_ = &callbacks; // TODO(goaway): currently both platform APIs unconditionally set this field, meaning that the // heap allocation below occurs when it could be avoided. if (platform_filter_.set_response_callbacks) { platform_response_callbacks_.resume_iteration = envoy_filter_callback_resume_encoding; platform_response_callbacks_.reset_idle = envoy_filter_reset_idle; platform_response_callbacks_.release_callbacks = envoy_filter_release_callbacks; // We use a weak_ptr wrapper for the filter to ensure presence before dispatching callbacks. // The weak_ptr is heap-allocated, because it must be managed (and eventually released) by // platform code. platform_response_callbacks_.callback_context = new PlatformBridgeFilterWeakPtr{shared_from_this()}; ENVOY_LOG(trace, "PlatformBridgeFilter({})->set_response_callbacks", filter_name_); platform_filter_.set_response_callbacks(platform_response_callbacks_, platform_filter_.instance_context); } } void PlatformBridgeFilter::onDestroy() { ENVOY_LOG(trace, "PlatformBridgeFilter({})::onDestroy", filter_name_); alive_ = false; // If the filter chain is destroyed before a response is received, treat as cancellation. if (!response_filter_base_->state_.stream_complete_ && platform_filter_.on_cancel) { ENVOY_LOG(trace, "PlatformBridgeFilter({})->on_cancel", filter_name_); platform_filter_.on_cancel(platform_filter_.instance_context); } // Allow nullptr as no-op only if nothing was initialized. 
if (platform_filter_.release_filter == nullptr) { ASSERT(!platform_filter_.instance_context, fmt::format("PlatformBridgeFilter({}): release_filter required", filter_name_)); return; } ENVOY_LOG(trace, "PlatformBridgeFilter({})->release_filter", filter_name_); platform_filter_.release_filter(platform_filter_.instance_context); platform_filter_.instance_context = nullptr; } void PlatformBridgeFilter::dumpState(std::ostream& os, int indent_level) const { std::stringstream ss; const char* spaces = spacesForLevel(indent_level); ss << spaces << "PlatformBridgeFilter" << DUMP_MEMBER(filter_name_) << DUMP_MEMBER(error_response_) << std::endl; const char* inner_spaces = spacesForLevel(indent_level + 1); if (request_filter_base_) { ss << inner_spaces << "Request Filter"; request_filter_base_->dumpState(ss, 0); } if (response_filter_base_) { ss << inner_spaces << "Response Filter"; response_filter_base_->dumpState(ss, 0); } // TODO(junr03): only output to ostream arg // https://github.com/envoyproxy/envoy-mobile/issues/1497. ENVOY_LOG(error, "\n{}", ss.str()); os << ss.str(); } Http::FilterHeadersStatus PlatformBridgeFilter::FilterBase::onHeaders(Http::HeaderMap& headers, bool end_stream) { ScopeTrackerScopeState scope(&parent_, parent_.scopeTracker()); state_.stream_complete_ = end_stream; // Allow nullptr to act as no-op. if (on_headers_ == nullptr) { state_.headers_forwarded_ = true; return Http::FilterHeadersStatus::Continue; } envoy_headers in_headers = Http::Utility::toBridgeHeaders(headers); ENVOY_LOG(trace, "PlatformBridgeFilter({})->on_*_headers", parent_.filter_name_); envoy_filter_headers_status result = on_headers_(in_headers, end_stream, parent_.platform_filter_.instance_context); state_.on_headers_called_ = true; switch (result.status) { case kEnvoyFilterHeadersStatusContinue: replaceHeaders(headers, result.headers); state_.headers_forwarded_ = true; return Http::FilterHeadersStatus::Continue; case kEnvoyFilterHeadersStatusStopIteration: pending_headers_ = &headers; state_.iteration_state_ = IterationState::Stopped; ASSERT(result.headers.length == 0 && result.headers.entries == NULL); return Http::FilterHeadersStatus::StopIteration; default: PANIC("invalid filter state: unsupported status for platform filters"); } NOT_REACHED_GCOVR_EXCL_LINE; } Http::FilterDataStatus PlatformBridgeFilter::FilterBase::onData(Buffer::Instance& data, bool end_stream) { ScopeTrackerScopeState scope(&parent_, parent_.scopeTracker()); state_.stream_complete_ = end_stream; // Allow nullptr to act as no-op. if (on_data_ == nullptr) { state_.data_forwarded_ = true; return Http::FilterDataStatus::Continue; } auto internal_buffer = buffer(); envoy_data in_data; // Decide whether to preemptively buffer data to present aggregate to platform. 
bool prebuffer_data = state_.iteration_state_ == IterationState::Stopped && internal_buffer && &data != internal_buffer && internal_buffer->length() > 0; if (prebuffer_data) { internal_buffer->move(data); in_data = Data::Utility::copyToBridgeData(*internal_buffer); } else { in_data = Data::Utility::copyToBridgeData(data); } ENVOY_LOG(trace, "PlatformBridgeFilter({})->on_*_data", parent_.filter_name_); envoy_filter_data_status result = on_data_(in_data, end_stream, parent_.platform_filter_.instance_context); state_.on_data_called_ = true; switch (result.status) { case kEnvoyFilterDataStatusContinue: RELEASE_ASSERT(state_.iteration_state_ != IterationState::Stopped, "invalid filter state: filter iteration must be resumed with ResumeIteration"); data.drain(data.length()); data.addBufferFragment(*Buffer::BridgeFragment::createBridgeFragment(result.data)); state_.data_forwarded_ = true; return Http::FilterDataStatus::Continue; case kEnvoyFilterDataStatusStopIterationAndBuffer: if (prebuffer_data) { // Data will already have been added to the internal buffer (above). return Http::FilterDataStatus::StopIterationNoBuffer; } // Data will be buffered on return. state_.iteration_state_ = IterationState::Stopped; return Http::FilterDataStatus::StopIterationAndBuffer; case kEnvoyFilterDataStatusStopIterationNoBuffer: // In this context all previously buffered data can/should be dropped. If no data has been // buffered, this is a no-op. If data was previously buffered, the most likely case is // that a filter has decided to handle generating a response itself and no longer needs it. // We opt for making this assumption since it's otherwise ambiguous how we should handle // buffering when switching between the two stopped states, and since data can be arbitrarily // interleaved, it's unclear that there's any legitimate case to support any more complex // behavior. if (internal_buffer) { internal_buffer->drain(internal_buffer->length()); } state_.iteration_state_ = IterationState::Stopped; return Http::FilterDataStatus::StopIterationNoBuffer; // Resume previously-stopped iteration, possibly forwarding headers if iteration was stopped // during an on*Headers invocation. case kEnvoyFilterDataStatusResumeIteration: RELEASE_ASSERT(state_.iteration_state_ == IterationState::Stopped, "invalid filter state: ResumeIteration may only be used when filter iteration " "is stopped"); // Update pending headers before resuming iteration, if needed. if (result.pending_headers) { replaceHeaders(*pending_headers_, *result.pending_headers); pending_headers_ = nullptr; free(result.pending_headers); } // We've already moved data into the internal buffer and presented it to the platform. Replace // the internal buffer with any modifications returned by the platform filter prior to // resumption. 
if (internal_buffer) { internal_buffer->drain(internal_buffer->length()); internal_buffer->addBufferFragment( *Buffer::BridgeFragment::createBridgeFragment(result.data)); } else { data.drain(data.length()); data.addBufferFragment(*Buffer::BridgeFragment::createBridgeFragment(result.data)); } state_.iteration_state_ = IterationState::Ongoing; state_.data_forwarded_ = true; return Http::FilterDataStatus::Continue; default: PANIC("invalid filter state: unsupported status for platform filters"); } NOT_REACHED_GCOVR_EXCL_LINE; } Http::FilterTrailersStatus PlatformBridgeFilter::FilterBase::onTrailers(Http::HeaderMap& trailers) { ScopeTrackerScopeState scope(&parent_, parent_.scopeTracker()); state_.stream_complete_ = true; // Allow nullptr to act as no-op. if (on_trailers_ == nullptr) { state_.trailers_forwarded_ = true; return Http::FilterTrailersStatus::Continue; } auto internal_buffer = buffer(); envoy_headers in_trailers = Http::Utility::toBridgeHeaders(trailers); ENVOY_LOG(trace, "PlatformBridgeFilter({})->on_*_trailers", parent_.filter_name_); envoy_filter_trailers_status result = on_trailers_(in_trailers, parent_.platform_filter_.instance_context); state_.on_trailers_called_ = true; switch (result.status) { case kEnvoyFilterTrailersStatusContinue: RELEASE_ASSERT(state_.iteration_state_ != IterationState::Stopped, "invalid filter state: ResumeIteration may only be used when filter iteration " "is stopped"); replaceHeaders(trailers, result.trailers); state_.trailers_forwarded_ = true; return Http::FilterTrailersStatus::Continue; case kEnvoyFilterTrailersStatusStopIteration: pending_trailers_ = &trailers; state_.iteration_state_ = IterationState::Stopped; ASSERT(result.trailers.length == 0 && result.trailers.entries == NULL); return Http::FilterTrailersStatus::StopIteration; // Resume previously-stopped iteration, possibly forwarding headers and data if iteration was // stopped during an on*Headers or on*Data invocation. case kEnvoyFilterTrailersStatusResumeIteration: RELEASE_ASSERT(state_.iteration_state_ == IterationState::Stopped, "invalid filter state: ResumeIteration may only be used when filter iteration " "is stopped"); // Update pending headers before resuming iteration, if needed. if (result.pending_headers) { replaceHeaders(*pending_headers_, *result.pending_headers); pending_headers_ = nullptr; free(result.pending_headers); } // We've already moved data into the internal buffer and presented it to the platform. Replace // the internal buffer with any modifications returned by the platform filter prior to // resumption. if (result.pending_data) { internal_buffer->drain(internal_buffer->length()); internal_buffer->addBufferFragment( *Buffer::BridgeFragment::createBridgeFragment(*result.pending_data)); free(result.pending_data); } replaceHeaders(trailers, result.trailers); state_.iteration_state_ = IterationState::Ongoing; state_.trailers_forwarded_ = true; return Http::FilterTrailersStatus::Continue; default: PANIC("invalid filter state: unsupported status for platform filters"); } NOT_REACHED_GCOVR_EXCL_LINE; } Http::FilterHeadersStatus PlatformBridgeFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool end_stream) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::decodeHeaders(end_stream:{})", filter_name_, end_stream); // Delegate to base implementation for request and response path. 
return request_filter_base_->onHeaders(headers, end_stream); } Http::FilterHeadersStatus PlatformBridgeFilter::encodeHeaders(Http::ResponseHeaderMap& headers, bool end_stream) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::encodeHeaders(end_stream:{})", filter_name_, end_stream); // Presence of internal error header indicates an error that should be surfaced as an // error callback (rather than an HTTP response). const auto error_code_header = headers.get(Http::InternalHeaders::get().ErrorCode); if (error_code_header.empty()) { // No error, so delegate to base implementation for request and response path. return response_filter_base_->onHeaders(headers, end_stream); } // Update stream state, since we won't be delegating to FilterBase. response_filter_base_->state_.stream_complete_ = end_stream; error_response_ = true; envoy_error_code_t error_code; bool parsed_code = absl::SimpleAtoi(error_code_header[0]->value().getStringView(), &error_code); RELEASE_ASSERT(parsed_code, "parse error reading error code"); envoy_data error_message = envoy_nodata; const auto error_message_header = headers.get(Http::InternalHeaders::get().ErrorMessage); if (!error_message_header.empty()) { error_message = Data::Utility::copyToBridgeData(error_message_header[0]->value().getStringView()); } int32_t attempt_count = 1; if (headers.EnvoyAttemptCount()) { bool parsed_attempts = absl::SimpleAtoi(headers.EnvoyAttemptCount()->value().getStringView(), &attempt_count); RELEASE_ASSERT(parsed_attempts, "parse error reading attempt count"); } if (platform_filter_.on_error) { platform_filter_.on_error({error_code, error_message, attempt_count}, platform_filter_.instance_context); } else { release_envoy_data(error_message); } response_filter_base_->state_.headers_forwarded_ = true; return Http::FilterHeadersStatus::Continue; } Http::FilterDataStatus PlatformBridgeFilter::decodeData(Buffer::Instance& data, bool end_stream) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::decodeData(length:{}, end_stream:{})", filter_name_, data.length(), end_stream); // Delegate to base implementation for request and response path. return request_filter_base_->onData(data, end_stream); } Http::FilterDataStatus PlatformBridgeFilter::encodeData(Buffer::Instance& data, bool end_stream) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::encodeData(length:{}, end_stream:{})", filter_name_, data.length(), end_stream); // Pass through if already mapped to error response. if (error_response_) { response_filter_base_->state_.data_forwarded_ = true; return Http::FilterDataStatus::Continue; } // Delegate to base implementation for request and response path. return response_filter_base_->onData(data, end_stream); } Http::FilterTrailersStatus PlatformBridgeFilter::decodeTrailers(Http::RequestTrailerMap& trailers) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::decodeTrailers", filter_name_); // Delegate to base implementation for request and response path. return request_filter_base_->onTrailers(trailers); } Http::FilterTrailersStatus PlatformBridgeFilter::encodeTrailers(Http::ResponseTrailerMap& trailers) { ENVOY_LOG(trace, "PlatformBridgeFilter({})::encodeTrailers", filter_name_); // Pass through if already mapped to error response. if (error_response_) { response_filter_base_->state_.trailers_forwarded_ = true; return Http::FilterTrailersStatus::Continue; } // Delegate to base implementation for request and response path. 
return response_filter_base_->onTrailers(trailers); } void PlatformBridgeFilter::resumeDecoding() { ENVOY_LOG(trace, "PlatformBridgeFilter({})::resumeDecoding", filter_name_); auto weak_self = weak_from_this(); // TODO(goaway): There's a potential shutdown race here, due to the fact that the shared // reference that now holds the filter does not retain the dispatcher. In the future we should // make this safer by, e.g.: // 1) adding support to Envoy for (optionally) retaining the dispatcher, or // 2) retaining the engine to transitively retain the dispatcher via Envoy's ownership graph, or // 3) dispatching via a safe intermediary // Relevant: https://github.com/lyft/envoy-mobile/issues/332 dispatcher_.post([weak_self]() -> void { if (auto self = weak_self.lock()) { // Delegate to base implementation for request and response path. self->request_filter_base_->onResume(); } }); } void PlatformBridgeFilter::resumeEncoding() { ENVOY_LOG(trace, "PlatformBridgeFilter({})::resumeEncoding", filter_name_); auto weak_self = weak_from_this(); dispatcher_.post([weak_self]() -> void { if (auto self = weak_self.lock()) { // Delegate to base implementation for request and response path. self->response_filter_base_->onResume(); } }); } void PlatformBridgeFilter::resetIdleTimer() { ENVOY_LOG(trace, "PlatformBridgeFilter({})::resetIdleTimer", filter_name_); auto weak_self = weak_from_this(); dispatcher_.post([weak_self]() -> void { if (auto self = weak_self.lock()) { // Stream idle timeout is nondirectional. self->decoder_callbacks_->resetIdleTimer(); } }); } void PlatformBridgeFilter::FilterBase::onResume() { ScopeTrackerScopeState scope(&parent_, parent_.scopeTracker()); ENVOY_LOG(debug, "PlatformBridgeFilter({})::onResume", parent_.filter_name_); if (!parent_.isAlive()) { return; } if (state_.iteration_state_ == IterationState::Ongoing) { return; } auto internal_buffer = buffer(); envoy_headers bridged_headers; envoy_data bridged_data; envoy_headers bridged_trailers; envoy_headers* pending_headers = nullptr; envoy_data* pending_data = nullptr; envoy_headers* pending_trailers = nullptr; if (pending_headers_) { bridged_headers = Http::Utility::toBridgeHeaders(*pending_headers_); pending_headers = &bridged_headers; } if (internal_buffer) { bridged_data = Data::Utility::copyToBridgeData(*internal_buffer); pending_data = &bridged_data; } if (pending_trailers_) { bridged_trailers = Http::Utility::toBridgeHeaders(*pending_trailers_); pending_trailers = &bridged_trailers; } ENVOY_LOG(trace, "PlatformBridgeFilter({})->on_resume_*", parent_.filter_name_); envoy_filter_resume_status result = on_resume_(pending_headers, pending_data, pending_trailers, state_.stream_complete_, parent_.platform_filter_.instance_context); state_.on_resume_called_ = true; if (result.status == kEnvoyFilterResumeStatusStopIteration) { RELEASE_ASSERT(!result.pending_headers, "invalid filter state: headers must not be present on " "stopping filter iteration on async resume"); RELEASE_ASSERT(!result.pending_data, "invalid filter state: data must not be present on " "stopping filter iteration on async resume"); RELEASE_ASSERT(!result.pending_trailers, "invalid filter state: trailers must not be present on" " stopping filter iteration on async resume"); return; } if (pending_headers_) { RELEASE_ASSERT(result.pending_headers, "invalid filter state: headers are pending and must be " "returned to resume filter iteration"); replaceHeaders(*pending_headers_, *result.pending_headers); pending_headers_ = nullptr; ENVOY_LOG(debug, 
"PlatformBridgeFilter({})->on_resume_ process headers free#1", parent_.filter_name_); if (pending_headers != result.pending_headers) { free(result.pending_headers); } } if (internal_buffer) { RELEASE_ASSERT( result.pending_data, "invalid filter state: data is pending and must be returned to resume filter iteration"); internal_buffer->drain(internal_buffer->length()); internal_buffer->addBufferFragment( *Buffer::BridgeFragment::createBridgeFragment(*result.pending_data)); ENVOY_LOG(debug, "PlatformBridgeFilter({})->on_resume_ process data free#1", parent_.filter_name_); if (pending_data != result.pending_data) { free(result.pending_data); } } else if (result.pending_data) { addData(*result.pending_data); ENVOY_LOG(debug, "PlatformBridgeFilter({})->on_resume_ process data free#2", parent_.filter_name_); if (pending_data != result.pending_data) { free(result.pending_data); } } if (pending_trailers_) { RELEASE_ASSERT(result.pending_trailers, "invalid filter state: trailers are pending and must " "be returned to resume filter iteration"); replaceHeaders(*pending_trailers_, *result.pending_trailers); pending_trailers_ = nullptr; ENVOY_LOG(debug, "PlatformBridgeFilter({})->on_resume_ process trailers free#1", parent_.filter_name_); if (pending_trailers != result.pending_trailers) { free(result.pending_trailers); } } else if (result.pending_trailers) { addTrailers(*result.pending_trailers); ENVOY_LOG(debug, "PlatformBridgeFilter({})->on_resume_ process trailers free#2", parent_.filter_name_); if (pending_trailers != result.pending_trailers) { free(result.pending_trailers); } } state_.iteration_state_ = IterationState::Ongoing; resumeIteration(); } void PlatformBridgeFilter::FilterBase::dumpState(std::ostream& os, int indent_level) { Buffer::Instance* buffer = this->buffer(); const char* spaces = spacesForLevel(indent_level); os << spaces << DUMP_MEMBER_AS(state_.iteration_state_, (state_.iteration_state_ == IterationState::Ongoing ? 
"ongoing" : "stopped")) << DUMP_MEMBER(state_.on_headers_called_) << DUMP_MEMBER(state_.headers_forwarded_) << DUMP_MEMBER(state_.on_data_called_) << DUMP_MEMBER(state_.data_forwarded_) << DUMP_MEMBER(state_.on_trailers_called_) << DUMP_MEMBER(state_.trailers_forwarded_) << DUMP_MEMBER(state_.on_resume_called_) << DUMP_NULLABLE_MEMBER(pending_headers_, "pending") << DUMP_NULLABLE_MEMBER(buffer, fmt::format("{} bytes", buffer->length())) << DUMP_NULLABLE_MEMBER(pending_trailers_, "pending") << DUMP_MEMBER(state_.stream_complete_) << std::endl; }; void PlatformBridgeFilter::RequestFilterBase::addData(envoy_data data) { Buffer::OwnedImpl inject_data; inject_data.addBufferFragment(*Buffer::BridgeFragment::createBridgeFragment(data)); parent_.decoder_callbacks_->addDecodedData(inject_data, /* watermark */ false); } void PlatformBridgeFilter::ResponseFilterBase::addData(envoy_data data) { Buffer::OwnedImpl inject_data; inject_data.addBufferFragment(*Buffer::BridgeFragment::createBridgeFragment(data)); parent_.encoder_callbacks_->addEncodedData(inject_data, /* watermark */ false); } void PlatformBridgeFilter::RequestFilterBase::addTrailers(envoy_headers trailers) { Http::HeaderMap& inject_trailers = parent_.decoder_callbacks_->addDecodedTrailers(); replaceHeaders(inject_trailers, trailers); } void PlatformBridgeFilter::ResponseFilterBase::addTrailers(envoy_headers trailers) { Http::HeaderMap& inject_trailers = parent_.encoder_callbacks_->addEncodedTrailers(); replaceHeaders(inject_trailers, trailers); } void PlatformBridgeFilter::RequestFilterBase::resumeIteration() { parent_.decoder_callbacks_->continueDecoding(); } void PlatformBridgeFilter::ResponseFilterBase::resumeIteration() { parent_.encoder_callbacks_->continueEncoding(); } // Technically-speaking to align with Envoy's internal API this method should take // a closure to execute with the available buffer, but since we control all usage, // this shortcut works for now. Buffer::Instance* PlatformBridgeFilter::RequestFilterBase::buffer() { Buffer::Instance* internal_buffer = nullptr; // This only exists to provide a mutable buffer, and that buffer is only used when iteration is // stopped. We check iteration state here before returning the buffer, to ensure this filter is // the one that stopped iteration. if (state_.iteration_state_ == IterationState::Stopped && parent_.decoder_callbacks_->decodingBuffer()) { parent_.decoder_callbacks_->modifyDecodingBuffer( [&internal_buffer](Buffer::Instance& mutable_buffer) { internal_buffer = &mutable_buffer; }); } return internal_buffer; } // Technically-speaking to align with Envoy's internal API this method should take // a closure to execute with the available buffer, but since we control all usage, // this shortcut works for now. Buffer::Instance* PlatformBridgeFilter::ResponseFilterBase::buffer() { Buffer::Instance* internal_buffer = nullptr; // This only exists to provide a mutable buffer, and that buffer is only used when iteration is // stopped. We check iteration state here before returning the buffer, to ensure this filter is // the one that stopped iteration. if (state_.iteration_state_ == IterationState::Stopped && parent_.encoder_callbacks_->encodingBuffer()) { parent_.encoder_callbacks_->modifyEncodingBuffer( [&internal_buffer](Buffer::Instance& mutable_buffer) { internal_buffer = &mutable_buffer; }); } return internal_buffer; } } // namespace PlatformBridge } // namespace HttpFilters } // namespace Extensions } // namespace Envoy
/* This file is part of Nori, a simple educational ray tracer Copyright (c) 2015 by Wenzel Jakob Nori is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License Version 3 as published by the Free Software Foundation. Nori is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <nori/accel.h> #include <Eigen/Geometry> #include <chrono> using namespace std::chrono; NORI_NAMESPACE_BEGIN void Accel::addMesh(Mesh *mesh) { if (m_num_meshes >= MAX_NUM_MESHES) throw NoriException("Accel: only %d meshes are supported!", MAX_NUM_MESHES); m_meshes[m_num_meshes] = mesh; m_bbox.expandBy(mesh->getBoundingBox()); m_num_meshes++; } void Accel::build() { if (m_num_meshes == 0) throw NoriException("No mesh found, could not build acceleration structure"); auto start = high_resolution_clock::now(); // delete old hierarchy if present delete m_root; uint32_t num_triangles = 0; for (uint32_t mesh_idx = 0; mesh_idx < m_num_meshes; mesh_idx++) { num_triangles += m_meshes[mesh_idx]->getTriangleCount(); } std::vector<uint32_t> triangles(num_triangles); std::vector<uint32_t> mesh_indices(num_triangles); uint32_t offset = 0; for (uint32_t current_mesh_idx = 0; current_mesh_idx < m_num_meshes; current_mesh_idx++) { uint32_t num_triangles_mesh = m_meshes[current_mesh_idx]->getTriangleCount(); for (uint32_t i = 0; i < num_triangles_mesh; i++) { triangles[offset + i] = i; mesh_indices[offset + i] = current_mesh_idx; } offset += num_triangles_mesh; } m_root = buildRecursive(m_bbox, triangles, mesh_indices, 0); printf("Octree build time: %ldms \n", duration_cast<milliseconds>(high_resolution_clock::now() - start).count()); printf("Num nodes: %d \n", m_num_nodes); printf("Num leaf nodes: %d \n", m_num_leaf_nodes); printf("Num non-empty leaf nodes: %d \n", m_num_nonempty_leaf_nodes); printf("Total number of saved triangles: %d \n", m_num_triangles_saved); printf("Avg triangles per node: %f \n", (float)m_num_triangles_saved / (float)m_num_nodes); printf("Recursion depth: %d \n", m_recursion_depth); } bool Accel::rayIntersect(const Ray3f &ray_, Intersection &its, bool shadowRay) const { bool foundIntersection; // Was an intersection found so far? uint32_t f = (uint32_t) -1; // Triangle index of the closest intersection Ray3f ray(ray_); /// Make a copy of the ray (we will need to update its '.maxt' value) foundIntersection = traverseRecursive(*m_root, ray, its, shadowRay, f); if (shadowRay) return foundIntersection; if (foundIntersection) { /* At this point, we now know that there is an intersection, and we know the triangle index of the closest such intersection. The following computes a number of additional properties which characterize the intersection (normals, texture coordinates, etc..) 
*/ /* Find the barycentric coordinates */ Vector3f bary; bary << 1-its.uv.sum(), its.uv; /* References to all relevant mesh buffers */ const Mesh *mesh = its.mesh; const MatrixXf &V = mesh->getVertexPositions(); const MatrixXf &N = mesh->getVertexNormals(); const MatrixXf &UV = mesh->getVertexTexCoords(); const MatrixXu &F = mesh->getIndices(); /* Vertex indices of the triangle */ uint32_t idx0 = F(0, f), idx1 = F(1, f), idx2 = F(2, f); Point3f p0 = V.col(idx0), p1 = V.col(idx1), p2 = V.col(idx2); its.bary = bary; its.tri_index = Point3f(idx0, idx1, idx2); /* Compute the intersection position accurately using barycentric coordinates */ its.p = bary.x() * p0 + bary.y() * p1 + bary.z() * p2; /* Compute proper texture coordinates if provided by the mesh */ if (UV.size() > 0) its.uv = bary.x() * UV.col(idx0) + bary.y() * UV.col(idx1) + bary.z() * UV.col(idx2); /* Compute the geometry frame */ its.geoFrame = Frame((p1-p0).cross(p2-p0).normalized()); if (N.size() > 0) { /* Compute the shading frame. Note that for simplicity, the current implementation doesn't attempt to provide tangents that are continuous across the surface. That means that this code will need to be modified to be able to use anisotropic BRDFs, which need tangent continuity */ its.shFrame = Frame( (bary.x() * N.col(idx0) + bary.y() * N.col(idx1) + bary.z() * N.col(idx2)).normalized()); } else { its.shFrame = its.geoFrame; } } return foundIntersection; } Accel::Node* Accel::buildRecursive(const BoundingBox3f& bbox, std::vector<uint32_t>& triangle_indices, std::vector<uint32_t>& mesh_indices, uint32_t recursion_depth) { // a node is created in any case m_num_nodes++; uint32_t num_triangles = triangle_indices.size(); // return empty node if no triangles are left if (num_triangles == 0) { Node* node = new Node(); node->bbox = BoundingBox3f(bbox); // add to statistics m_num_leaf_nodes++; return node; } // create leaf node if 10 or fewer triangles are left or if the max recursion depth is reached. 
if (num_triangles <= MAX_TRIANGLES_PER_NODE || recursion_depth >= MAX_RECURSION_DEPTH) { Node* node = new Node(); node->num_triangles = num_triangles; node->triangle_indices = new uint32_t[num_triangles]; node->mesh_indices = new uint32_t [num_triangles]; for (uint32_t i = 0; i < num_triangles; i++) { node->triangle_indices[i] = triangle_indices[i]; node->mesh_indices[i] = mesh_indices[i]; } node->bbox = BoundingBox3f(bbox); // add to statistics m_num_leaf_nodes++; m_num_nonempty_leaf_nodes++; m_num_triangles_saved += num_triangles; return node; } // create new parent node Node* node = new Node(); node->bbox = BoundingBox3f(bbox); BoundingBox3f child_bboxes[8] = {}; subdivideBBox(bbox, child_bboxes); std::vector<std::vector<uint32_t>> child_triangle_indices(8); std::vector<std::vector<uint32_t>> child_mesh_indices(8); uint32_t child_num_triangles[8] = {}; // place every triangle in the children it overlaps with // for every child bbox for (uint32_t i = 0; i < 8; i++) { // for every triangle inside of the parent create triangle bounding box for (uint32_t j = 0; j < num_triangles; j++) { // for every triangle vertex expand triangle bbox uint32_t triangle_idx = triangle_indices[j]; uint32_t mesh_idx = mesh_indices[j]; BoundingBox3f triangle_bbox = m_meshes[mesh_idx]->getBoundingBox(triangle_idx); // check if triangle is in bbox, if so put triangle index into triangle list of child if (child_bboxes[i].overlaps(triangle_bbox)) { child_triangle_indices[i].emplace_back(triangle_idx); child_mesh_indices[i].emplace_back(mesh_idx); child_num_triangles[i]++; } } } // release memory to avoid stack overflow triangle_indices = std::vector<uint32_t>(); mesh_indices = std::vector<uint32_t>(); // for every child bbox Node* last_child = nullptr; for (uint32_t i = 0; i < 8; i++) { // first child if (i == 0) { node->child = buildRecursive(child_bboxes[i], child_triangle_indices[i], child_mesh_indices[i], recursion_depth + 1); last_child = node->child; // neighbour children } else { last_child->next = buildRecursive(child_bboxes[i], child_triangle_indices[i], child_mesh_indices[i], recursion_depth + 1); last_child = last_child->next; } m_recursion_depth = std::max(m_recursion_depth, recursion_depth + 1); } return node; } bool Accel::traverseRecursive(const Node& node, Ray3f &ray, Intersection &its, bool shadowRay, uint32_t& hit_idx) const { bool foundIntersection = false; // only check triangles of node and its children if ray intersects with node bbox if (!node.bbox.rayIntersect(ray)) { return false; } // search through all triangles in node for (uint32_t i = 0; i < node.num_triangles; ++i) { float u, v, t; uint32_t triangle_idx = node.triangle_indices[i]; uint32_t mesh_idx = node.mesh_indices[i]; if (m_meshes[mesh_idx]->rayIntersect(triangle_idx, ray, u, v, t) && t < ray.maxt) { /* An intersection was found! 
Can terminate immediately if this is a shadow ray query */ if (shadowRay) return true; ray.maxt = t; its.t = t; its.uv = Point2f(u, v); its.mesh = m_meshes[mesh_idx]; hit_idx = triangle_idx; foundIntersection = true; } } if (node.child) { std::pair<Node*, float> children[8]; Node* current_child = node.child; int i = 0; do { children[i] = std::pair<Node*, float>(current_child, current_child->bbox.distanceTo(ray.o)); current_child = current_child->next; i++; } while (current_child); std::sort(children, children + 8, [ray](const std::pair<Node*, float>& l, const std::pair<Node*, float>& r) { return l.second < r.second; }); for (auto child: children) { foundIntersection = traverseRecursive(*child.first, ray, its, shadowRay, hit_idx) || foundIntersection; if (shadowRay && foundIntersection) return true; } } return foundIntersection; } void Accel::subdivideBBox(const nori::BoundingBox3f &parent, nori::BoundingBox3f *bboxes) { Point3f extents = parent.getExtents(); Point3f x0_y0_z0 = parent.min; Point3f x1_y0_z0 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y(), parent.min.z()); Point3f x0_y1_z0 = Point3f(parent.min.x(), parent.min.y() + extents.y() / 2.f, parent.min.z()); Point3f x1_y1_z0 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y() + extents.y() / 2.f, parent.min.z()); Point3f x0_y0_z1 = Point3f(parent.min.x(), parent.min.y(), parent.min.z() + extents.z() / 2.f); Point3f x1_y0_z1 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y(), parent.min.z() + extents.z() / 2.f); Point3f x0_y1_z1 = Point3f(parent.min.x(), parent.min.y() + extents.y() / 2.f, parent.min.z() + extents.z() / 2.f); Point3f x1_y1_z1 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y() + extents.y() / 2.f, parent.min.z() + extents.z() / 2.f); Point3f x2_y1_z1 = Point3f(parent.max.x(), parent.min.y() + extents.y() / 2.f, parent.min.z() + extents.z() / 2.f); Point3f x1_y2_z1 = Point3f(parent.min.x() + extents.x() / 2.f, parent.max.y(), parent.min.z() + extents.z() / 2.f); Point3f x2_y2_z1 = Point3f(parent.max.x(), parent.max.y(), parent.min.z() + extents.z() / 2.f); Point3f x1_y1_z2 = Point3f(parent.min.x() + extents.x() / 2.f, parent.min.y() + extents.y() / 2.f, parent.max.z()); Point3f x2_y1_z2 = Point3f(parent.max.x(), parent.min.y() + extents.y() / 2.f, parent.max.z()); Point3f x1_y2_z2 = Point3f(parent.min.x() + extents.x() / 2.f, parent.max.y(), parent.max.z()); Point3f x2_y2_z2 = Point3f(parent.max.x(), parent.max.y(), parent.max.z()); bboxes[0] = BoundingBox3f(x0_y0_z0, x1_y1_z1); bboxes[1] = BoundingBox3f(x1_y0_z0, x2_y1_z1); bboxes[2] = BoundingBox3f(x0_y1_z0, x1_y2_z1); bboxes[3] = BoundingBox3f(x1_y1_z0, x2_y2_z1); bboxes[4] = BoundingBox3f(x0_y0_z1, x1_y1_z2); bboxes[5] = BoundingBox3f(x1_y0_z1, x2_y1_z2); bboxes[6] = BoundingBox3f(x0_y1_z1, x1_y2_z2); bboxes[7] = BoundingBox3f(x1_y1_z1, x2_y2_z2); } NORI_NAMESPACE_END
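subdivideBBox above splits the parent bounding box at its midpoint into eight octants by enumerating the corner points explicitly. The same subdivision can be written compactly with bit indexing; a minimal sketch using a bare-bones AABB type (not Nori's BoundingBox3f) that produces the same eight child boxes in the same order (bit 0 selects the x half, bit 1 the y half, bit 2 the z half):

#include <array>
#include <cstdio>

struct Vec3 { float x, y, z; };
struct AABB { Vec3 min, max; };

std::array<AABB, 8> subdivide(const AABB& parent) {
    const Vec3 mid{(parent.min.x + parent.max.x) * 0.5f,
                   (parent.min.y + parent.max.y) * 0.5f,
                   (parent.min.z + parent.max.z) * 0.5f};
    std::array<AABB, 8> children{};
    for (int i = 0; i < 8; ++i) {
        // Each bit selects the lower or upper half along one axis.
        children[i].min = {(i & 1) ? mid.x : parent.min.x,
                           (i & 2) ? mid.y : parent.min.y,
                           (i & 4) ? mid.z : parent.min.z};
        children[i].max = {(i & 1) ? parent.max.x : mid.x,
                           (i & 2) ? parent.max.y : mid.y,
                           (i & 4) ? parent.max.z : mid.z};
    }
    return children;
}

int main() {
    AABB box{{0.f, 0.f, 0.f}, {2.f, 2.f, 2.f}};
    for (const AABB& c : subdivide(box))
        std::printf("[%g %g %g] - [%g %g %g]\n",
                    c.min.x, c.min.y, c.min.z, c.max.x, c.max.y, c.max.z);
    return 0;
}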
#include <cstdlib> #include <string> #include <utility> #include <gtest/gtest.h> #include <entt/core/hashed_string.hpp> #include <entt/core/type_traits.hpp> #include <entt/meta/factory.hpp> #include <entt/meta/meta.hpp> #include <entt/meta/node.hpp> #include <entt/meta/resolve.hpp> struct base_t { virtual ~base_t() = default; static void destroy(base_t &) { ++counter; } inline static int counter = 0; int value{3}; }; struct derived_t: base_t { derived_t() {} }; struct clazz_t { clazz_t() : i{0}, j{1}, base{} {} operator int() const { return h; } int i{0}; const int j{1}; base_t base{}; inline static int h{2}; inline static const int k{3}; }; struct setter_getter_t { setter_getter_t() : value{0} {} int setter(double val) { return value = static_cast<int>(val); } int getter() { return value; } int setter_with_ref(const int &val) { return value = val; } const int &getter_with_ref() { return value; } static int static_setter(setter_getter_t &type, int value) { return type.value = value; } static int static_getter(const setter_getter_t &type) { return type.value; } int value; }; struct multi_setter_t { multi_setter_t() : value{0} {} void from_double(double val) { value = val; } void from_string(const char *val) { value = std::atoi(val); } int value; }; struct array_t { static inline int global[3]; int local[5]; }; enum class property_t { random, value }; struct MetaData: ::testing::Test { void SetUp() override { using namespace entt::literals; entt::meta<double>() .type("double"_hs); entt::meta<base_t>() .type("base"_hs) .dtor<base_t::destroy>() .data<&base_t::value>("value"_hs); entt::meta<derived_t>() .type("derived"_hs) .base<base_t>() .dtor<derived_t::destroy>() .data<&base_t::value>("value_from_base"_hs); entt::meta<clazz_t>() .type("clazz"_hs) .data<&clazz_t::i, entt::as_ref_t>("i"_hs) .prop(3, 0) .data<&clazz_t::i, entt::as_cref_t>("ci"_hs) .data<&clazz_t::j>("j"_hs) .prop(true, 1) .data<&clazz_t::h>("h"_hs) .prop(property_t::random, 2) .data<&clazz_t::k>("k"_hs) .prop(property_t::value, 3) .data<&clazz_t::base>("base"_hs) .data<&clazz_t::i, entt::as_void_t>("void"_hs) .conv<int>(); entt::meta<setter_getter_t>() .type("setter_getter"_hs) .data<&setter_getter_t::static_setter, &setter_getter_t::static_getter>("x"_hs) .data<&setter_getter_t::setter, &setter_getter_t::getter>("y"_hs) .data<&setter_getter_t::static_setter, &setter_getter_t::getter>("z"_hs) .data<&setter_getter_t::setter_with_ref, &setter_getter_t::getter_with_ref>("w"_hs) .data<nullptr, &setter_getter_t::getter>("z_ro"_hs) .data<nullptr, &setter_getter_t::value>("value"_hs); entt::meta<multi_setter_t>() .type("multi_setter"_hs) .data<entt::value_list<&multi_setter_t::from_double, &multi_setter_t::from_string>, &multi_setter_t::value>("value"_hs); entt::meta<array_t>() .type("array"_hs) .data<&array_t::global>("global"_hs) .data<&array_t::local>("local"_hs); base_t::counter = 0; } void TearDown() override { entt::meta_reset(); } }; using MetaDataDeathTest = MetaData; TEST_F(MetaData, Functionalities) { using namespace entt::literals; auto data = entt::resolve<clazz_t>().data("i"_hs); clazz_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "i"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 42); for(auto curr: data.prop()) { ASSERT_EQ(curr.key(), 3); 
ASSERT_EQ(curr.value(), 0); } ASSERT_FALSE(data.prop(2)); ASSERT_FALSE(data.prop('c')); auto prop = data.prop(3); ASSERT_TRUE(prop); ASSERT_EQ(prop.key(), 3); ASSERT_EQ(prop.value(), 0); } TEST_F(MetaData, Const) { using namespace entt::literals; auto data = entt::resolve<clazz_t>().data("j"_hs); clazz_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "j"_hs); ASSERT_TRUE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 1); ASSERT_FALSE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 1); for(auto curr: data.prop()) { ASSERT_EQ(curr.key(), true); ASSERT_EQ(curr.value(), 1); } ASSERT_FALSE(data.prop(false)); ASSERT_FALSE(data.prop('c')); auto prop = data.prop(true); ASSERT_TRUE(prop); ASSERT_EQ(prop.key(), true); ASSERT_EQ(prop.value(), 1); } TEST_F(MetaData, Static) { using namespace entt::literals; auto data = entt::resolve<clazz_t>().data("h"_hs); ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "h"_hs); ASSERT_FALSE(data.is_const()); ASSERT_TRUE(data.is_static()); ASSERT_EQ(data.get({}).cast<int>(), 2); ASSERT_TRUE(data.set({}, 42)); ASSERT_EQ(data.get({}).cast<int>(), 42); for(auto curr: data.prop()) { ASSERT_EQ(curr.key(), property_t::random); ASSERT_EQ(curr.value(), 2); } ASSERT_FALSE(data.prop(property_t::value)); ASSERT_FALSE(data.prop('c')); auto prop = data.prop(property_t::random); ASSERT_TRUE(prop); ASSERT_EQ(prop.key(), property_t::random); ASSERT_EQ(prop.value(), 2); } TEST_F(MetaData, ConstStatic) { using namespace entt::literals; auto data = entt::resolve<clazz_t>().data("k"_hs); ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "k"_hs); ASSERT_TRUE(data.is_const()); ASSERT_TRUE(data.is_static()); ASSERT_EQ(data.get({}).cast<int>(), 3); ASSERT_FALSE(data.set({}, 42)); ASSERT_EQ(data.get({}).cast<int>(), 3); for(auto curr: data.prop()) { ASSERT_EQ(curr.key(), property_t::value); ASSERT_EQ(curr.value(), 3); } ASSERT_FALSE(data.prop(property_t::random)); ASSERT_FALSE(data.prop('c')); auto prop = data.prop(property_t::value); ASSERT_TRUE(prop); ASSERT_EQ(prop.key(), property_t::value); ASSERT_EQ(prop.value(), 3); } TEST_F(MetaData, GetMetaAnyArg) { using namespace entt::literals; entt::meta_any any{clazz_t{}}; any.cast<clazz_t &>().i = 99; const auto value = entt::resolve<clazz_t>().data("i"_hs).get(any); ASSERT_TRUE(value); ASSERT_TRUE(static_cast<bool>(value.cast<int>())); ASSERT_EQ(value.cast<int>(), 99); } TEST_F(MetaData, GetInvalidArg) { using namespace entt::literals; auto instance = 0; ASSERT_FALSE(entt::resolve<clazz_t>().data("i"_hs).get(instance)); } TEST_F(MetaData, SetMetaAnyArg) { using namespace entt::literals; entt::meta_any any{clazz_t{}}; entt::meta_any value{42}; ASSERT_EQ(any.cast<clazz_t>().i, 0); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, value)); ASSERT_EQ(any.cast<clazz_t>().i, 42); } TEST_F(MetaData, SetInvalidArg) { using namespace entt::literals; ASSERT_FALSE(entt::resolve<clazz_t>().data("i"_hs).set({}, 'c')); } TEST_F(MetaData, SetCast) { using namespace entt::literals; clazz_t instance{}; ASSERT_EQ(base_t::counter, 0); ASSERT_TRUE(entt::resolve<clazz_t>().data("base"_hs).set(instance, derived_t{})); ASSERT_EQ(base_t::counter, 
1); } TEST_F(MetaData, SetConvert) { using namespace entt::literals; clazz_t instance{}; instance.h = 42; ASSERT_EQ(instance.i, 0); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(instance, instance)); ASSERT_EQ(instance.i, 42); } TEST_F(MetaData, SetByRef) { using namespace entt::literals; entt::meta_any any{clazz_t{}}; int value{42}; ASSERT_EQ(any.cast<clazz_t>().i, 0); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, entt::make_meta<int &>(value))); ASSERT_EQ(any.cast<clazz_t>().i, 42); value = 3; auto wrapper = entt::make_meta<int &>(value); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, wrapper.as_ref())); ASSERT_EQ(any.cast<clazz_t>().i, 3); } TEST_F(MetaData, SetByConstRef) { using namespace entt::literals; entt::meta_any any{clazz_t{}}; int value{42}; ASSERT_EQ(any.cast<clazz_t>().i, 0); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, entt::make_meta<const int &>(value))); ASSERT_EQ(any.cast<clazz_t>().i, 42); value = 3; auto wrapper = entt::make_meta<const int &>(value); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(any, wrapper.as_ref())); ASSERT_EQ(any.cast<clazz_t>().i, 3); } TEST_F(MetaData, SetterGetterAsFreeFunctions) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("x"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "x"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 42); } TEST_F(MetaData, SetterGetterAsMemberFunctions) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("y"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<double>()); ASSERT_EQ(data.id(), "y"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42.)); ASSERT_EQ(data.get(instance).cast<int>(), 42); ASSERT_TRUE(data.set(instance, 3)); ASSERT_EQ(data.get(instance).cast<int>(), 3); } TEST_F(MetaData, SetterGetterWithRefAsMemberFunctions) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("w"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "w"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 42); } TEST_F(MetaData, SetterGetterMixed) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("z"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.id(), "z"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 42); } TEST_F(MetaData, SetterGetterReadOnly) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("z_ro"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); 
ASSERT_EQ(data.arity(), 0u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::meta_type{}); ASSERT_EQ(data.id(), "z_ro"_hs); ASSERT_TRUE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_FALSE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 0); } TEST_F(MetaData, SetterGetterReadOnlyDataMember) { using namespace entt::literals; auto data = entt::resolve<setter_getter_t>().data("value"_hs); setter_getter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 0u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::meta_type{}); ASSERT_EQ(data.id(), "value"_hs); ASSERT_TRUE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_FALSE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 0); } TEST_F(MetaData, MultiSetter) { using namespace entt::literals; auto data = entt::resolve<multi_setter_t>().data("value"_hs); multi_setter_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 2u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<double>()); ASSERT_EQ(data.arg(1u), entt::resolve<const char *>()); ASSERT_EQ(data.arg(2u), entt::meta_type{}); ASSERT_EQ(data.id(), "value"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(data.get(instance).cast<int>(), 42); ASSERT_TRUE(data.set(instance, 3.)); ASSERT_EQ(data.get(instance).cast<int>(), 3); ASSERT_FALSE(data.set(instance, std::string{"99"})); ASSERT_TRUE(data.set(instance, std::string{"99"}.c_str())); ASSERT_EQ(data.get(instance).cast<int>(), 99); } TEST_F(MetaData, ConstInstance) { using namespace entt::literals; clazz_t instance{}; ASSERT_NE(entt::resolve<clazz_t>().data("i"_hs).get(instance).try_cast<int>(), nullptr); ASSERT_NE(entt::resolve<clazz_t>().data("i"_hs).get(instance).try_cast<const int>(), nullptr); ASSERT_EQ(entt::resolve<clazz_t>().data("i"_hs).get(std::as_const(instance)).try_cast<int>(), nullptr); // as_ref_t adapts to the constness of the passed object and returns const references in case ASSERT_NE(entt::resolve<clazz_t>().data("i"_hs).get(std::as_const(instance)).try_cast<const int>(), nullptr); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).get(instance)); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).set(instance, 3)); ASSERT_TRUE(entt::resolve<clazz_t>().data("i"_hs).get(std::as_const(instance))); ASSERT_FALSE(entt::resolve<clazz_t>().data("i"_hs).set(std::as_const(instance), 3)); ASSERT_TRUE(entt::resolve<clazz_t>().data("ci"_hs).get(instance)); ASSERT_TRUE(entt::resolve<clazz_t>().data("ci"_hs).set(instance, 3)); ASSERT_TRUE(entt::resolve<clazz_t>().data("ci"_hs).get(std::as_const(instance))); ASSERT_FALSE(entt::resolve<clazz_t>().data("ci"_hs).set(std::as_const(instance), 3)); ASSERT_TRUE(entt::resolve<clazz_t>().data("j"_hs).get(instance)); ASSERT_FALSE(entt::resolve<clazz_t>().data("j"_hs).set(instance, 3)); ASSERT_TRUE(entt::resolve<clazz_t>().data("j"_hs).get(std::as_const(instance))); ASSERT_FALSE(entt::resolve<clazz_t>().data("j"_hs).set(std::as_const(instance), 3)); } TEST_F(MetaData, ArrayStatic) { using namespace entt::literals; auto data = entt::resolve<array_t>().data("global"_hs); ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int[3]>()); ASSERT_EQ(data.arg(0u), entt::resolve<int[3]>()); ASSERT_EQ(data.id(), "global"_hs); 
ASSERT_FALSE(data.is_const()); ASSERT_TRUE(data.is_static()); ASSERT_TRUE(data.type().is_array()); ASSERT_FALSE(data.get({})); } TEST_F(MetaData, Array) { using namespace entt::literals; auto data = entt::resolve<array_t>().data("local"_hs); array_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int[5]>()); ASSERT_EQ(data.arg(0u), entt::resolve<int[5]>()); ASSERT_EQ(data.id(), "local"_hs); ASSERT_FALSE(data.is_const()); ASSERT_FALSE(data.is_static()); ASSERT_TRUE(data.type().is_array()); ASSERT_FALSE(data.get(instance)); } TEST_F(MetaData, AsVoid) { using namespace entt::literals; auto data = entt::resolve<clazz_t>().data("void"_hs); clazz_t instance{}; ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_TRUE(data.set(instance, 42)); ASSERT_EQ(instance.i, 42); ASSERT_EQ(data.get(instance), entt::meta_any{std::in_place_type<void>}); } TEST_F(MetaData, AsRef) { using namespace entt::literals; clazz_t instance{}; auto data = entt::resolve<clazz_t>().data("i"_hs); ASSERT_TRUE(data); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(instance.i, 0); data.get(instance).cast<int &>() = 3; ASSERT_EQ(instance.i, 3); } TEST_F(MetaData, AsConstRef) { using namespace entt::literals; clazz_t instance{}; auto data = entt::resolve<clazz_t>().data("ci"_hs); ASSERT_EQ(instance.i, 0); ASSERT_EQ(data.arity(), 1u); ASSERT_EQ(data.type(), entt::resolve<int>()); ASSERT_EQ(data.arg(0u), entt::resolve<int>()); ASSERT_EQ(data.get(instance).cast<const int &>(), 0); ASSERT_EQ(data.get(instance).cast<int>(), 0); ASSERT_EQ(instance.i, 0); } TEST_F(MetaDataDeathTest, AsConstRef) { using namespace entt::literals; clazz_t instance{}; auto data = entt::resolve<clazz_t>().data("ci"_hs); ASSERT_DEATH(data.get(instance).cast<int &>() = 3, ""); } TEST_F(MetaData, SetGetBaseData) { using namespace entt::literals; auto type = entt::resolve<derived_t>(); derived_t instance{}; ASSERT_TRUE(type.data("value"_hs)); ASSERT_EQ(instance.value, 3); ASSERT_TRUE(type.data("value"_hs).set(instance, 42)); ASSERT_EQ(type.data("value"_hs).get(instance).cast<int>(), 42); ASSERT_EQ(instance.value, 42); } TEST_F(MetaData, SetGetFromBase) { using namespace entt::literals; auto type = entt::resolve<derived_t>(); derived_t instance{}; ASSERT_TRUE(type.data("value_from_base"_hs)); ASSERT_EQ(instance.value, 3); ASSERT_TRUE(type.data("value_from_base"_hs).set(instance, 42)); ASSERT_EQ(type.data("value_from_base"_hs).get(instance).cast<int>(), 42); ASSERT_EQ(instance.value, 42); } TEST_F(MetaData, ReRegistration) { using namespace entt::literals; SetUp(); auto *node = entt::internal::meta_node<base_t>::resolve(); auto type = entt::resolve<base_t>(); ASSERT_NE(node->data, nullptr); ASSERT_EQ(node->data->next, nullptr); ASSERT_TRUE(type.data("value"_hs)); entt::meta<base_t>().data<&base_t::value>("field"_hs); ASSERT_NE(node->data, nullptr); ASSERT_EQ(node->data->next, nullptr); ASSERT_FALSE(type.data("value"_hs)); ASSERT_TRUE(type.data("field"_hs)); } TEST_F(MetaData, NameCollision) { using namespace entt::literals; ASSERT_NO_FATAL_FAILURE(entt::meta<clazz_t>().data<&clazz_t::j>("j"_hs)); ASSERT_TRUE(entt::resolve<clazz_t>().data("j"_hs)); ASSERT_NO_FATAL_FAILURE(entt::meta<clazz_t>().data<&clazz_t::j>("cj"_hs)); ASSERT_FALSE(entt::resolve<clazz_t>().data("j"_hs)); ASSERT_TRUE(entt::resolve<clazz_t>().data("cj"_hs)); } 
TEST_F(MetaDataDeathTest, NameCollision) { using namespace entt::literals; ASSERT_DEATH(entt::meta<clazz_t>().data<&clazz_t::j>("i"_hs), ""); }
20,236
8,449
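A minimal registration sketch for the kind of reflection the tests above exercise; it is assumed, not copied from the fixture's SetUp (which is not shown here). clazz_example_t, set_x, get_x and register_meta_example are hypothetical names; only the entt::meta factory calls mirror what the ReRegistration and NameCollision tests already use.

#include <entt/entt.hpp>

// Hypothetical type standing in for the fixture's clazz_t / setter_getter_t.
struct clazz_example_t { int i{}; };

// Free functions used as a setter/getter pair for a meta data member.
void set_x(clazz_example_t &instance, int value) { instance.i = value; }
int get_x(const clazz_example_t &instance) { return instance.i; }

void register_meta_example() {
    using namespace entt::literals;
    entt::meta<clazz_example_t>()
        // Plain member: readable and writable, resolvable as data("i"_hs).
        .data<&clazz_example_t::i>("i"_hs)
        // Setter/getter pair exposed as a single (non-const) data member "x".
        .data<&set_x, &get_x>("x"_hs);
}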
/* * Copyright 2018 Google LLC. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "third_party/nucleus/io/reference.h" #include <vector> #include <gmock/gmock-generated-matchers.h> #include <gmock/gmock-matchers.h> #include <gmock/gmock-more-matchers.h> #include "tensorflow/core/platform/test.h" #include "third_party/nucleus/io/reference_test.h" #include "third_party/nucleus/util/utils.h" #include "third_party/nucleus/vendor/status_matchers.h" #include "tensorflow/core/platform/logging.h" namespace nucleus { using ::testing::IsEmpty; using ::testing::Not; using ::testing::UnorderedElementsAre; TEST_P(GenomeReferenceTest, TestBasic) { EXPECT_THAT(Ref().ContigNames(), UnorderedElementsAre("chrM", "chr1", "chr2")); EXPECT_THAT(Ref().Contigs().size(), 3); const auto& chrm = *Ref().Contig("chrM").ValueOrDie(); EXPECT_EQ(100, chrm.n_bases()); EXPECT_EQ("chrM", chrm.name()); EXPECT_EQ(0, chrm.pos_in_fasta()); const auto& chr1 = *Ref().Contig("chr1").ValueOrDie(); EXPECT_EQ(76, chr1.n_bases()); EXPECT_EQ("chr1", chr1.name()); EXPECT_EQ(1, chr1.pos_in_fasta()); const auto& chr2 = *Ref().Contig("chr2").ValueOrDie(); EXPECT_EQ(121, chr2.n_bases()); EXPECT_EQ("chr2", chr2.name()); EXPECT_EQ(2, chr2.pos_in_fasta()); } TEST_P(GenomeReferenceTest, TestIsValidInterval) { // Checks that we can check that an unknown chromosome isn't valid. 
EXPECT_FALSE(Ref().IsValidInterval(MakeRange("unknown_chr", 0, 1))); for (const auto& chr : Ref().ContigNames()) { const auto n_bases = Ref().Contig(chr).ValueOrDie()->n_bases(); EXPECT_TRUE(Ref().IsValidInterval(MakeRange(chr, 0, n_bases))); for (int i = 0; i < n_bases; ++i) { EXPECT_TRUE(Ref().IsValidInterval(MakeRange(chr, 0, i+1))); EXPECT_TRUE(Ref().IsValidInterval(MakeRange(chr, i, i+1))); } EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, -10, 0))); EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, -1, 0))); EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, 10, 9))); EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, 0, n_bases + 1))); EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, 0, n_bases + 100))); EXPECT_FALSE(Ref().IsValidInterval(MakeRange(chr, n_bases, n_bases))); EXPECT_FALSE( Ref().IsValidInterval(MakeRange(chr, n_bases + 100, n_bases + 100))); } } TEST_P(GenomeReferenceTest, NotOKIfContigCalledWithBadName) { EXPECT_THAT(Ref().Contig("missing"), IsNotOKWithMessage("Unknown contig missing")); } TEST_P(GenomeReferenceTest, NotOKIfIntervalIsInvalid) { // Asking for bad chromosome values produces death. StatusOr<string> result = Ref().GetBases(MakeRange("missing", 0, 1)); EXPECT_THAT(result, IsNotOKWithCodeAndMessage( tensorflow::error::INVALID_ARGUMENT, "Invalid interval")); // Starting before 0 is detected. EXPECT_THAT(Ref().GetBases(MakeRange("chrM", -1, 1)), IsNotOKWithMessage("Invalid interval")); // chr1 exists, but this range's start is beyond the chr. EXPECT_THAT(Ref().GetBases(MakeRange("chr1", 1000, 1010)), IsNotOKWithMessage("Invalid interval")); // chr1 exists, but this range's end is beyond the chr. EXPECT_THAT(Ref().GetBases(MakeRange("chr1", 0, 1010)), IsNotOKWithMessage("Invalid interval")); } TEST_P(GenomeReferenceTest, TestHasContig) { EXPECT_TRUE(Ref().HasContig("chrM")); EXPECT_TRUE(Ref().HasContig("chr1")); EXPECT_TRUE(Ref().HasContig("chr2")); EXPECT_FALSE(Ref().HasContig("chr3")); EXPECT_FALSE(Ref().HasContig("chr")); EXPECT_FALSE(Ref().HasContig("")); } // Checks that GetBases work in all its forms for the given arguments. 
void CheckGetBases(const GenomeReference& ref, const string& chrom, const int64 start, const int64 end, const string& expected_bases) { StatusOr<string> query = ref.GetBases(MakeRange(chrom, start, end)); ASSERT_THAT(query, IsOK()); EXPECT_THAT(query.ValueOrDie(), expected_bases); } TEST_P(GenomeReferenceTest, TestReferenceBases) { CheckGetBases(Ref(), "chrM", 0, 100, "GATCACAGGTCTATCACCCTATTAACCACTCACGGGAGCTCTCCATGCATTTGGTATTTTC" "GTCTGGGGGGTGTGCACGCGATAGCATTGCGAGACGCTG"); CheckGetBases(Ref(), "chr1", 0, 76, "ACCACCATCCTCCGTGAAATCAATATCCCGCACAAGAGTGCTACTCTCCTAAATCCCTTCT" "CGTCCCCATGGATGA"); CheckGetBases(Ref(), "chr2", 0, 121, "CGCTNCGGGCCCATAACACTTGGGGGTAGCTAAAGTGAACTGTATCCGAC" "ATCTGGTTCCTACTTCAGGGCCATAAAGCCTAAATAGCCCACACGTTCCC" "CTTAAATAAGACATCACGATG"); } TEST_P(GenomeReferenceTest, TestGetBasesParts) { CheckGetBases(Ref(), "chrM", 0, 10, "GATCACAGGT"); CheckGetBases(Ref(), "chrM", 0, 9, "GATCACAGG"); CheckGetBases(Ref(), "chrM", 1, 9, "ATCACAGG"); CheckGetBases(Ref(), "chrM", 3, 7, "CACA"); CheckGetBases(Ref(), "chrM", 90, 100, "CGAGACGCTG"); CheckGetBases(Ref(), "chrM", 90, 99, "CGAGACGCT"); CheckGetBases(Ref(), "chrM", 91, 100, "GAGACGCTG"); CheckGetBases(Ref(), "chrM", 92, 100, "AGACGCTG"); CheckGetBases(Ref(), "chrM", 92, 99, "AGACGCT"); CheckGetBases(Ref(), "chrM", 92, 98, "AGACGC"); CheckGetBases(Ref(), "chrM", 0, 1, "G"); CheckGetBases(Ref(), "chrM", 1, 2, "A"); CheckGetBases(Ref(), "chrM", 2, 3, "T"); CheckGetBases(Ref(), "chrM", 3, 4, "C"); CheckGetBases(Ref(), "chrM", 4, 5, "A"); CheckGetBases(Ref(), "chrM", 5, 6, "C"); // crosses the boundary of the index when max_bin_size is 5 CheckGetBases(Ref(), "chrM", 4, 6, "AC"); // 0-bp interval requests should return the empty string. CheckGetBases(Ref(), "chrM", 0, 0, ""); CheckGetBases(Ref(), "chrM", 10, 10, ""); } } // namespace nucleus
7,208
2,849
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 #include "sw/device/silicon_creator/lib/drivers/otbn.h" #include <array> #include "gtest/gtest.h" #include "sw/device/silicon_creator/lib/base/mock_abs_mmio.h" #include "sw/device/silicon_creator/testing/mask_rom_test.h" #include "hw/top_earlgrey/sw/autogen/top_earlgrey.h" #include "otbn_regs.h" // Generated. namespace otbn_unittest { namespace { using ::testing::ElementsAre; class OtbnTest : public mask_rom_test::MaskRomTest { protected: uint32_t base_ = TOP_EARLGREY_OTBN_BASE_ADDR; mask_rom_test::MockAbsMmio mmio_; }; class StartTest : public OtbnTest {}; TEST_F(StartTest, Success) { // Test assumption. static_assert(OTBN_IMEM_SIZE_BYTES >= 8, "OTBN IMEM size too small."); // Send EXECUTE command. EXPECT_ABS_WRITE32(base_ + OTBN_CMD_REG_OFFSET, kOtbnCmdExecute); otbn_execute(); } class IsBusyTest : public OtbnTest {}; TEST_F(IsBusyTest, Success) { EXPECT_ABS_READ32(base_ + OTBN_STATUS_REG_OFFSET, kOtbnStatusBusyExecute); EXPECT_EQ(otbn_is_busy(), true); } class GetErrBitsTest : public OtbnTest {}; TEST_F(GetErrBitsTest, Success) { EXPECT_ABS_READ32(base_ + OTBN_ERR_BITS_REG_OFFSET, kOtbnErrBitsIllegalInsn | kOtbnErrBitsRegIntgViolation); otbn_err_bits_t err_bits; otbn_get_err_bits(&err_bits); EXPECT_EQ(err_bits, kOtbnErrBitsIllegalInsn | kOtbnErrBitsRegIntgViolation); } class ImemWriteTest : public OtbnTest {}; TEST_F(ImemWriteTest, BadAddressBeyondMemorySize) { std::array<uint32_t, 2> test_data = {0}; EXPECT_EQ(otbn_imem_write(OTBN_IMEM_SIZE_BYTES, test_data.data(), 1), kErrorOtbnBadOffsetLen); } TEST_F(ImemWriteTest, BadAddressIntegerOverflow) { std::array<uint32_t, 4> test_data = {0}; EXPECT_EQ(otbn_imem_write(0xFFFFFFFC, test_data.data(), 1), kErrorOtbnBadOffsetLen); } TEST_F(ImemWriteTest, SuccessWithoutOffset) { // Test assumption. static_assert(OTBN_IMEM_SIZE_BYTES >= 8, "OTBN IMEM size too small."); std::array<uint32_t, 2> test_data = {0x12345678, 0xabcdef01}; EXPECT_ABS_WRITE32(base_ + OTBN_IMEM_REG_OFFSET, test_data[0]); EXPECT_ABS_WRITE32(base_ + OTBN_IMEM_REG_OFFSET + 4, test_data[1]); EXPECT_EQ(otbn_imem_write(0, test_data.data(), 2), kErrorOk); } TEST_F(ImemWriteTest, SuccessWithOffset) { // Test assumption. static_assert(OTBN_IMEM_SIZE_BYTES >= 12, "OTBN IMEM size too small."); std::array<uint32_t, 2> test_data = {0x12345678, 0xabcdef01}; EXPECT_ABS_WRITE32(base_ + OTBN_IMEM_REG_OFFSET + 4, test_data[0]); EXPECT_ABS_WRITE32(base_ + OTBN_IMEM_REG_OFFSET + 8, test_data[1]); EXPECT_EQ(otbn_imem_write(4, test_data.data(), 2), kErrorOk); } class DmemWriteTest : public OtbnTest {}; TEST_F(DmemWriteTest, SuccessWithoutOffset) { // Test assumption. static_assert(OTBN_DMEM_SIZE_BYTES >= 8, "OTBN DMEM size too small."); std::array<uint32_t, 2> test_data = {0x12345678, 0xabcdef01}; EXPECT_ABS_WRITE32(base_ + OTBN_DMEM_REG_OFFSET, test_data[0]); EXPECT_ABS_WRITE32(base_ + OTBN_DMEM_REG_OFFSET + 4, test_data[1]); EXPECT_EQ(otbn_dmem_write(0, test_data.data(), 2), kErrorOk); } TEST_F(DmemWriteTest, SuccessWithOffset) { // Test assumption. 
  static_assert(OTBN_DMEM_SIZE_BYTES >= 12, "OTBN DMEM size too small.");

  std::array<uint32_t, 2> test_data = {0x12345678, 0xabcdef01};

  EXPECT_ABS_WRITE32(base_ + OTBN_DMEM_REG_OFFSET + 4, test_data[0]);
  EXPECT_ABS_WRITE32(base_ + OTBN_DMEM_REG_OFFSET + 8, test_data[1]);

  EXPECT_EQ(otbn_dmem_write(4, test_data.data(), 2), kErrorOk);
}

class DmemReadTest : public OtbnTest {};

TEST_F(DmemReadTest, SuccessWithoutOffset) {
  // Test assumption.
  static_assert(OTBN_DMEM_SIZE_BYTES >= 8, "OTBN DMEM size too small.");

  EXPECT_ABS_READ32(base_ + OTBN_DMEM_REG_OFFSET, 0x12345678);
  EXPECT_ABS_READ32(base_ + OTBN_DMEM_REG_OFFSET + 4, 0xabcdef01);

  std::array<uint32_t, 2> test_data = {0};

  EXPECT_EQ(otbn_dmem_read(0, test_data.data(), 2), kErrorOk);
  EXPECT_THAT(test_data, ElementsAre(0x12345678, 0xabcdef01));
}

TEST_F(DmemReadTest, SuccessWithOffset) {
  // Test assumption.
  static_assert(OTBN_DMEM_SIZE_BYTES >= 12, "OTBN DMEM size too small.");

  EXPECT_ABS_READ32(base_ + OTBN_DMEM_REG_OFFSET + 4, 0x12345678);
  EXPECT_ABS_READ32(base_ + OTBN_DMEM_REG_OFFSET + 8, 0xabcdef01);

  std::array<uint32_t, 2> test_data = {0};

  EXPECT_EQ(otbn_dmem_read(4, test_data.data(), 2), kErrorOk);
  EXPECT_THAT(test_data, ElementsAre(0x12345678, 0xabcdef01));
}

class ControlSoftwareErrorsFatalTest : public OtbnTest {};

TEST_F(ControlSoftwareErrorsFatalTest, Success) {
  EXPECT_ABS_WRITE32(base_ + OTBN_CTRL_REG_OFFSET, 0x1);
  EXPECT_ABS_READ32(base_ + OTBN_CTRL_REG_OFFSET, 0x1);

  EXPECT_EQ(otbn_set_ctrl_software_errs_fatal(true), kErrorOk);
}

TEST_F(ControlSoftwareErrorsFatalTest, Failure) {
  EXPECT_ABS_WRITE32(base_ + OTBN_CTRL_REG_OFFSET, 0x0);
  EXPECT_ABS_READ32(base_ + OTBN_CTRL_REG_OFFSET, 0x1);

  EXPECT_EQ(otbn_set_ctrl_software_errs_fatal(false), kErrorOtbnUnavailable);
}

}  // namespace
}  // namespace otbn_unittest
5,229
2,446
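The Imem/DmemWrite tests above expect one 32-bit MMIO write per word at consecutive offsets, and reject offsets that run past the end of the memory or overflow. A rough, self-contained sketch of that shape follows; every name in it is a placeholder (the real driver uses abs_mmio_write32 and generated register constants), so treat it as an illustration rather than the mask ROM implementation.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Placeholder constants; the real values come from otbn_regs.h and the IP spec.
constexpr uint32_t kSketchDmemRegOffset = 0x8000;
constexpr uint32_t kSketchDmemSizeBytes = 4096;

enum sketch_error_t { kSketchErrorOk, kSketchErrorBadOffsetLen };

// Stands in for abs_mmio_write32(); here it just logs the access.
static void sketch_mmio_write32(uint32_t addr, uint32_t value) {
  std::printf("mmio[0x%08x] <= 0x%08x\n", addr, value);
}

// Word-wise DMEM write with the bounds behaviour the tests check for.
sketch_error_t sketch_dmem_write(uint32_t offset_bytes, const uint32_t *src,
                                 size_t num_words) {
  const uint64_t end = static_cast<uint64_t>(offset_bytes) +
                       static_cast<uint64_t>(num_words) * sizeof(uint32_t);
  if (end > kSketchDmemSizeBytes) {
    return kSketchErrorBadOffsetLen;  // out of range, including overflow cases
  }
  for (size_t i = 0; i < num_words; ++i) {
    sketch_mmio_write32(
        kSketchDmemRegOffset + offset_bytes +
            static_cast<uint32_t>(i * sizeof(uint32_t)),
        src[i]);
  }
  return kSketchErrorOk;
}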
/*============================================================================== Copyright (c) 2005-2010 Joel de Guzman Copyright (c) 2010 Thomas Heller Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ==============================================================================*/ namespace boost { namespace phoenix { template <typename Dummy = void> struct vector0 { typedef mpl::int_<0> size_type; static const int size_value = 0; }; template <int> struct vector_chooser; template <> struct vector_chooser<0> { template <typename Dummy = void> struct apply { typedef vector0<> type; }; }; }} namespace boost { namespace phoenix { template <typename A0> struct vector1 { typedef A0 member_type0; A0 a0; typedef mpl::int_<1> size_type; static const int size_value = 1; typedef vector0<> args_type; args_type args() const { args_type r = {}; return r; } }; template <> struct vector_chooser<1> { template <typename A0> struct apply { typedef vector1<A0> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) , ( boost::phoenix::vector1 ) (A0) , (A0, a0) ) namespace boost { namespace phoenix { template <typename A0 , typename A1> struct vector2 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef mpl::int_<2> size_type; static const int size_value = 2; typedef vector1<A1> args_type; args_type args() const { args_type r = {a1}; return r; } }; template <> struct vector_chooser<2> { template <typename A0 , typename A1> struct apply { typedef vector2<A0 , A1> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) , ( boost::phoenix::vector2 ) (A0) (A1) , (A0, a0) (A1, a1) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2> struct vector3 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef mpl::int_<3> size_type; static const int size_value = 3; typedef vector2<A1 , A2> args_type; args_type args() const { args_type r = {a1 , a2}; return r; } }; template <> struct vector_chooser<3> { template <typename A0 , typename A1 , typename A2> struct apply { typedef vector3<A0 , A1 , A2> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) , ( boost::phoenix::vector3 ) (A0) (A1) (A2) , (A0, a0) (A1, a1) (A2, a2) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3> struct vector4 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef mpl::int_<4> size_type; static const int size_value = 4; typedef vector3<A1 , A2 , A3> args_type; args_type args() const { args_type r = {a1 , a2 , a3}; return r; } }; template <> struct vector_chooser<4> { template <typename A0 , typename A1 , typename A2 , typename A3> struct apply { typedef vector4<A0 , A1 , A2 , A3> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) , ( boost::phoenix::vector4 ) (A0) (A1) (A2) (A3) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4> struct vector5 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef mpl::int_<5> size_type; static const int size_value = 5; typedef vector4<A1 , A2 , A3 , A4> args_type; args_type args() const { 
args_type r = {a1 , a2 , a3 , a4}; return r; } }; template <> struct vector_chooser<5> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4> struct apply { typedef vector5<A0 , A1 , A2 , A3 , A4> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) , ( boost::phoenix::vector5 ) (A0) (A1) (A2) (A3) (A4) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5> struct vector6 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef mpl::int_<6> size_type; static const int size_value = 6; typedef vector5<A1 , A2 , A3 , A4 , A5> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5}; return r; } }; template <> struct vector_chooser<6> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5> struct apply { typedef vector6<A0 , A1 , A2 , A3 , A4 , A5> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) , ( boost::phoenix::vector6 ) (A0) (A1) (A2) (A3) (A4) (A5) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6> struct vector7 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef mpl::int_<7> size_type; static const int size_value = 7; typedef vector6<A1 , A2 , A3 , A4 , A5 , A6> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6}; return r; } }; template <> struct vector_chooser<7> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6> struct apply { typedef vector7<A0 , A1 , A2 , A3 , A4 , A5 , A6> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) , ( boost::phoenix::vector7 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7> struct vector8 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef mpl::int_<8> size_type; static const int size_value = 8; typedef vector7<A1 , A2 , A3 , A4 , A5 , A6 , A7> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7}; return r; } }; template <> struct vector_chooser<8> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7> struct apply { typedef vector8<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) , ( boost::phoenix::vector8 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) ) namespace boost { namespace phoenix { template <typename A0 , 
typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8> struct vector9 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef mpl::int_<9> size_type; static const int size_value = 9; typedef vector8<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8}; return r; } }; template <> struct vector_chooser<9> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8> struct apply { typedef vector9<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) , ( boost::phoenix::vector9 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9> struct vector10 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef mpl::int_<10> size_type; static const int size_value = 10; typedef vector9<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9}; return r; } }; template <> struct vector_chooser<10> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9> struct apply { typedef vector10<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) , ( boost::phoenix::vector10 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10> struct vector11 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef mpl::int_<11> size_type; static const int size_value = 11; typedef vector10<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10}; return r; } }; template <> struct vector_chooser<11> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10> struct apply { typedef vector11<A0 , A1 , A2 , A3 , A4 
, A5 , A6 , A7 , A8 , A9 , A10> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) , ( boost::phoenix::vector11 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11> struct vector12 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef mpl::int_<12> size_type; static const int size_value = 12; typedef vector11<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11}; return r; } }; template <> struct vector_chooser<12> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11> struct apply { typedef vector12<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) , ( boost::phoenix::vector12 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12> struct vector13 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef mpl::int_<13> size_type; static const int size_value = 13; typedef vector12<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12}; return r; } }; template <> struct vector_chooser<13> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12> struct apply { typedef vector13<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) , ( boost::phoenix::vector13 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) ) namespace boost { namespace phoenix { template 
<typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13> struct vector14 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef mpl::int_<14> size_type; static const int size_value = 14; typedef vector13<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13}; return r; } }; template <> struct vector_chooser<14> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13> struct apply { typedef vector14<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) , ( boost::phoenix::vector14 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14> struct vector15 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef mpl::int_<15> size_type; static const int size_value = 15; typedef vector14<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14}; return r; } }; template <> struct vector_chooser<15> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14> struct apply { typedef vector15<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) , ( boost::phoenix::vector15 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) ) 
namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15> struct vector16 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef mpl::int_<16> size_type; static const int size_value = 16; typedef vector15<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15}; return r; } }; template <> struct vector_chooser<16> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15> struct apply { typedef vector16<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) , ( boost::phoenix::vector16 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16> struct vector17 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef mpl::int_<17> size_type; static const int size_value = 17; typedef vector16<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16}; return r; } }; template <> struct vector_chooser<17> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16> struct apply { typedef vector17<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 
, A13 , A14 , A15 , A16> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) , ( boost::phoenix::vector17 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17> struct vector18 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef A17 member_type17; A17 a17; typedef mpl::int_<18> size_type; static const int size_value = 18; typedef vector17<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17}; return r; } }; template <> struct vector_chooser<18> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17> struct apply { typedef vector18<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) , ( boost::phoenix::vector18 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18> struct vector19 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; 
typedef A17 member_type17; A17 a17; typedef A18 member_type18; A18 a18; typedef mpl::int_<19> size_type; static const int size_value = 19; typedef vector18<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17 , a18}; return r; } }; template <> struct vector_chooser<19> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18> struct apply { typedef vector19<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) , ( boost::phoenix::vector19 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) (A18, a18) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19> struct vector20 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef A17 member_type17; A17 a17; typedef A18 member_type18; A18 a18; typedef A19 member_type19; A19 a19; typedef mpl::int_<20> size_type; static const int size_value = 20; typedef vector19<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17 , a18 , a19}; return r; } }; template <> struct vector_chooser<20> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19> struct apply { typedef vector20<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19) , ( boost::phoenix::vector20 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19) , 
(A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) (A18, a18) (A19, a19) ) namespace boost { namespace phoenix { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19 , typename A20> struct vector21 { typedef A0 member_type0; A0 a0; typedef A1 member_type1; A1 a1; typedef A2 member_type2; A2 a2; typedef A3 member_type3; A3 a3; typedef A4 member_type4; A4 a4; typedef A5 member_type5; A5 a5; typedef A6 member_type6; A6 a6; typedef A7 member_type7; A7 a7; typedef A8 member_type8; A8 a8; typedef A9 member_type9; A9 a9; typedef A10 member_type10; A10 a10; typedef A11 member_type11; A11 a11; typedef A12 member_type12; A12 a12; typedef A13 member_type13; A13 a13; typedef A14 member_type14; A14 a14; typedef A15 member_type15; A15 a15; typedef A16 member_type16; A16 a16; typedef A17 member_type17; A17 a17; typedef A18 member_type18; A18 a18; typedef A19 member_type19; A19 a19; typedef A20 member_type20; A20 a20; typedef mpl::int_<21> size_type; static const int size_value = 21; typedef vector20<A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19 , A20> args_type; args_type args() const { args_type r = {a1 , a2 , a3 , a4 , a5 , a6 , a7 , a8 , a9 , a10 , a11 , a12 , a13 , a14 , a15 , a16 , a17 , a18 , a19 , a20}; return r; } }; template <> struct vector_chooser<21> { template <typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19 , typename A20> struct apply { typedef vector21<A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19 , A20> type; }; }; }} BOOST_FUSION_ADAPT_TPL_STRUCT_NO_PARTIAL( (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19) (A20) , ( boost::phoenix::vector21 ) (A0) (A1) (A2) (A3) (A4) (A5) (A6) (A7) (A8) (A9) (A10) (A11) (A12) (A13) (A14) (A15) (A16) (A17) (A18) (A19) (A20) , (A0, a0) (A1, a1) (A2, a2) (A3, a3) (A4, a4) (A5, a5) (A6, a6) (A7, a7) (A8, a8) (A9, a9) (A10, a10) (A11, a11) (A12, a12) (A13, a13) (A14, a14) (A15, a15) (A16, a16) (A17, a17) (A18, a18) (A19, a19) (A20, a20) )
36,090
16,043
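A short usage sketch of the generated vectors above: each vectorN is a plain aggregate of members a0..aN-1 adapted as a Fusion sequence, and args() returns a vector one element shorter that drops a0, which is how Phoenix splits off the first element of an argument pack. The include path is the support header this preprocessed file normally belongs to, so treat it as an assumption.

#include <boost/phoenix/support/vector.hpp>
#include <iostream>

int main()
{
    // Aggregate-initialize a three-element Phoenix vector...
    boost::phoenix::vector3<int, double, char> v = {1, 2.5, 'x'};
    static_assert(boost::phoenix::vector3<int, double, char>::size_value == 3,
                  "size_value reflects the number of members");

    // ...and peel off the first element: args() yields a vector2<double, char>.
    boost::phoenix::vector2<double, char> rest = v.args();
    std::cout << rest.a0 << ' ' << rest.a1 << '\n'; // prints "2.5 x"
    return 0;
}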
/// @ref gtx
/// @file glm/gtx/scalar_multiplication.hpp
/// @author Joshua Moerman
///
/// Include <glm/gtx/scalar_multiplication.hpp> to use the features of this extension.
///
/// Enables scalar multiplication for all types
///
/// Since GLSL is very strict about types, the following (often used) combinations do not work:
///    double * vec4
///    int * vec4
///    vec4 / int
/// So we'll fix that! Of course "float * vec4" should remain the same (hence the enable_if magic)

#pragma once

#include "../detail/setup.hpp"

#ifndef GLM_ENABLE_EXPERIMENTAL
# error "GLM: GLM_GTX_scalar_multiplication is an experimental extension and may change in the future. Use #define GLM_ENABLE_EXPERIMENTAL before including it, if you really want to use it."
#endif

#if !GLM_HAS_TEMPLATE_ALIASES && !(GLM_COMPILER & GLM_COMPILER_GCC)
# error "GLM_GTX_scalar_multiplication requires C++11 alias template support, or a GCC compiler."
#endif

#include "../vec2.hpp"
#include "../vec3.hpp"
#include "../vec4.hpp"
#include "../mat2x2.hpp"
#include <type_traits>

namespace glm
{
	template<typename T, typename Vec>
	using return_type_scalar_multiplication = typename std::enable_if<
		!std::is_same<T, float>::value       // T may not be a float
		&& std::is_arithmetic<T>::value, Vec // But it may be an int or double (no vec3 or mat3, ...)
	>::type;

#define GLM_IMPLEMENT_SCAL_MULT(Vec) \
	template<typename T> \
	return_type_scalar_multiplication<T, Vec> \
	operator*(T const& s, Vec rh){ \
		return rh *= static_cast<float>(s); \
	} \
	 \
	template<typename T> \
	return_type_scalar_multiplication<T, Vec> \
	operator*(Vec lh, T const& s){ \
		return lh *= static_cast<float>(s); \
	} \
	 \
	template<typename T> \
	return_type_scalar_multiplication<T, Vec> \
	operator/(Vec lh, T const& s){ \
		return lh *= 1.0f / s; \
	}

GLM_IMPLEMENT_SCAL_MULT(vec2)
GLM_IMPLEMENT_SCAL_MULT(vec3)
GLM_IMPLEMENT_SCAL_MULT(vec4)

GLM_IMPLEMENT_SCAL_MULT(mat2)
GLM_IMPLEMENT_SCAL_MULT(mat2x3)
GLM_IMPLEMENT_SCAL_MULT(mat2x4)
GLM_IMPLEMENT_SCAL_MULT(mat3x2)
GLM_IMPLEMENT_SCAL_MULT(mat3)
GLM_IMPLEMENT_SCAL_MULT(mat3x4)
GLM_IMPLEMENT_SCAL_MULT(mat4x2)
GLM_IMPLEMENT_SCAL_MULT(mat4x3)
GLM_IMPLEMENT_SCAL_MULT(mat4)

#undef GLM_IMPLEMENT_SCAL_MULT
} // namespace glm
2,330
980
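A short usage sketch of the extension above: once GLM_GTX_scalar_multiplication is included (with GLM_ENABLE_EXPERIMENTAL defined), integer and double scalars multiply and divide float vectors without explicit casts, the scalar being converted to float internally as the macro shows.

#define GLM_ENABLE_EXPERIMENTAL
#include <glm/vec4.hpp>
#include <glm/gtx/scalar_multiplication.hpp>

int main()
{
    glm::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);

    glm::vec4 a = 2 * v;   // int * vec4: now well-formed
    glm::vec4 b = 0.5 * v; // double * vec4: scalar cast to float internally
    glm::vec4 c = v / 4;   // vec4 / int
    (void)a; (void)b; (void)c;
    return 0;
}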
/******************************************************************************* * Copyright 2016-2019 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #ifndef CPU_JIT_GEMM_CONVOLUTION_UTILS_HPP #define CPU_JIT_GEMM_CONVOLUTION_UTILS_HPP #include "c_types_map.hpp" #include "memory_tracking.hpp" #include "mkldnn_thread.hpp" #include "cpu_convolution_pd.hpp" #include "cpu_engine.hpp" #include "jit_primitive_conf.hpp" namespace mkldnn { namespace impl { namespace cpu { namespace jit_gemm_convolution_utils { template <typename data_type_t> void im2col_3d(const jit_gemm_conv_conf_t &jcp, const data_type_t *im, data_type_t *col, int od); template <typename data_type_t> void im2col(const jit_gemm_conv_conf_t &jcp, const data_type_t *__restrict im, data_type_t *__restrict col, int hs, int hb, int ws, int wb); template <typename T> void im2col_u8(const jit_gemm_conv_conf_t &jcp, const T *__restrict im, T* __restrict imtr, uint8_t *__restrict col, int hs, int hb, int ws, int wb); template <typename T> void im2col_u8_3d(const jit_gemm_conv_conf_t &jcp, const T *__restrict im, uint8_t *__restrict col, int od); void col2im_s32(const jit_gemm_conv_conf_t &jcp, const int32_t *__restrict col, int32_t *__restrict im); void col2im_3d(const jit_gemm_conv_conf_t &jcp, const float *col, float *im, int od); void col2im(const jit_gemm_conv_conf_t &jcp, const float *col, float *im); status_t init_conf(jit_gemm_conv_conf_t &jcp, memory_tracking::registrar_t &scratchpad, const convolution_desc_t &cd, const memory_desc_wrapper &src_d, const memory_desc_wrapper &weights_d, const memory_desc_wrapper &dst_d, int max_threads); void bwd_weights_balance(int ithr, int nthr, int ngroups, int mb, int &ithr_g, int &nthr_g, int &ithr_mb, int &nthr_mb); void bwd_weights_reduction_par(int ithr, int nthr, const jit_gemm_conv_conf_t &jcp, const float *weights_reduce_ws, float *weights); } } } } #endif
2,602
951
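The im2col/col2im declarations above implement the usual GEMM-based convolution trick: every receptive-field patch of the input becomes one column of a temporary matrix, which is then multiplied by the reshaped weights (col2im scatters the result back for the backward pass). A naive, self-contained sketch of that layout for a single-channel 2D case with unit stride and no padding, not the library's blocked and threaded implementation:

#include <cstddef>
#include <vector>

// Naive im2col: image (ih x iw), kernel (kh x kw), stride 1, no padding.
// 'col' has one row per kernel element (kh*kw) and one column per output
// pixel (oh*ow), so convolution reduces to a (1 x kh*kw) by (kh*kw x oh*ow) GEMM.
std::vector<float> im2col_naive(const std::vector<float> &im,
                                size_t ih, size_t iw, size_t kh, size_t kw) {
    const size_t oh = ih - kh + 1;
    const size_t ow = iw - kw + 1;
    std::vector<float> col(kh * kw * oh * ow);
    for (size_t ki = 0; ki < kh; ++ki)
        for (size_t kj = 0; kj < kw; ++kj)
            for (size_t oi = 0; oi < oh; ++oi)
                for (size_t oj = 0; oj < ow; ++oj)
                    col[((ki * kw + kj) * oh + oi) * ow + oj] =
                        im[(oi + ki) * iw + (oj + kj)];
    return col;
}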
/* * Copyright 2004 The WebRTC Project Authors. All rights reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #include "webrtc/base/helpers.h" #include <limits> #if defined(FEATURE_ENABLE_SSL) #include "webrtc/base/sslconfig.h" #if defined(SSL_USE_OPENSSL) #include <openssl/rand.h> #elif defined(SSL_USE_NSS_RNG) #include "pk11func.h" #else #if defined(WEBRTC_WIN) #define WIN32_LEAN_AND_MEAN #include <windows.h> #include <ntsecapi.h> #endif // WEBRTC_WIN #endif // else #endif // FEATURE_ENABLED_SSL #include "webrtc/base/base64.h" #include "webrtc/base/basictypes.h" #include "webrtc/base/logging.h" #include "webrtc/base/scoped_ptr.h" #include "webrtc/base/timeutils.h" // Protect against max macro inclusion. #undef max namespace rtc { // Base class for RNG implementations. class RandomGenerator { public: virtual ~RandomGenerator() {} virtual bool Init(const void* seed, size_t len) = 0; virtual bool Generate(void* buf, size_t len) = 0; }; #if defined(SSL_USE_OPENSSL) // The OpenSSL RNG. Need to make sure it doesn't run out of entropy. class SecureRandomGenerator : public RandomGenerator { public: SecureRandomGenerator() : inited_(false) { } ~SecureRandomGenerator() { } virtual bool Init(const void* seed, size_t len) { // By default, seed from the system state. if (!inited_) { if (RAND_poll() <= 0) { return false; } inited_ = true; } // Allow app data to be mixed in, if provided. if (seed) { RAND_seed(seed, len); } return true; } virtual bool Generate(void* buf, size_t len) { if (!inited_ && !Init(NULL, 0)) { return false; } return (RAND_bytes(reinterpret_cast<unsigned char*>(buf), len) > 0); } private: bool inited_; }; #elif defined(SSL_USE_NSS_RNG) // The NSS RNG. 
class SecureRandomGenerator : public RandomGenerator { public: SecureRandomGenerator() {} ~SecureRandomGenerator() {} virtual bool Init(const void* seed, size_t len) { return true; } virtual bool Generate(void* buf, size_t len) { return (PK11_GenerateRandom(reinterpret_cast<unsigned char*>(buf), static_cast<int>(len)) == SECSuccess); } }; #else #if defined(WEBRTC_WIN) class SecureRandomGenerator : public RandomGenerator { public: SecureRandomGenerator() : advapi32_(NULL), rtl_gen_random_(NULL) {} ~SecureRandomGenerator() { FreeLibrary(advapi32_); } virtual bool Init(const void* seed, size_t seed_len) { // We don't do any additional seeding on Win32, we just use the CryptoAPI // RNG (which is exposed as a hidden function off of ADVAPI32 so that we // don't need to drag in all of CryptoAPI) if (rtl_gen_random_) { return true; } advapi32_ = LoadLibrary(L"advapi32.dll"); if (!advapi32_) { return false; } rtl_gen_random_ = reinterpret_cast<RtlGenRandomProc>( GetProcAddress(advapi32_, "SystemFunction036")); if (!rtl_gen_random_) { FreeLibrary(advapi32_); return false; } return true; } virtual bool Generate(void* buf, size_t len) { if (!rtl_gen_random_ && !Init(NULL, 0)) { return false; } return (rtl_gen_random_(buf, static_cast<int>(len)) != FALSE); } private: typedef BOOL (WINAPI *RtlGenRandomProc)(PVOID, ULONG); HINSTANCE advapi32_; RtlGenRandomProc rtl_gen_random_; }; #elif !defined(FEATURE_ENABLE_SSL) // No SSL implementation -- use rand() class SecureRandomGenerator : public RandomGenerator { public: virtual bool Init(const void* seed, size_t len) { if (len >= 4) { srand(*reinterpret_cast<const int*>(seed)); } else { srand(*reinterpret_cast<const char*>(seed)); } return true; } virtual bool Generate(void* buf, size_t len) { char* bytes = reinterpret_cast<char*>(buf); for (size_t i = 0; i < len; ++i) { bytes[i] = static_cast<char>(rand()); } return true; } }; #else #error No SSL implementation has been selected! #endif // WEBRTC_WIN #endif // A test random generator, for predictable output. class TestRandomGenerator : public RandomGenerator { public: TestRandomGenerator() : seed_(7) { } ~TestRandomGenerator() { } virtual bool Init(const void* seed, size_t len) { return true; } virtual bool Generate(void* buf, size_t len) { for (size_t i = 0; i < len; ++i) { static_cast<uint8*>(buf)[i] = static_cast<uint8>(GetRandom()); } return true; } private: int GetRandom() { return ((seed_ = seed_ * 214013L + 2531011L) >> 16) & 0x7fff; } int seed_; }; // TODO: Use Base64::Base64Table instead. static const char BASE64[64] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/' }; namespace { // This round about way of creating a global RNG is to safe-guard against // indeterminant static initialization order. 
scoped_ptr<RandomGenerator>& GetGlobalRng() { LIBJINGLE_DEFINE_STATIC_LOCAL(scoped_ptr<RandomGenerator>, global_rng, (new SecureRandomGenerator())); return global_rng; } RandomGenerator& Rng() { return *GetGlobalRng(); } } // namespace void SetRandomTestMode(bool test) { if (!test) { GetGlobalRng().reset(new SecureRandomGenerator()); } else { GetGlobalRng().reset(new TestRandomGenerator()); } } bool InitRandom(int seed) { return InitRandom(reinterpret_cast<const char*>(&seed), sizeof(seed)); } bool InitRandom(const char* seed, size_t len) { if (!Rng().Init(seed, len)) { LOG(LS_ERROR) << "Failed to init random generator!"; return false; } return true; } std::string CreateRandomString(size_t len) { std::string str; CreateRandomString(len, &str); return str; } bool CreateRandomString(size_t len, const char* table, int table_size, std::string* str) { str->clear(); scoped_ptr<uint8[]> bytes(new uint8[len]); if (!Rng().Generate(bytes.get(), len)) { LOG(LS_ERROR) << "Failed to generate random string!"; return false; } str->reserve(len); for (size_t i = 0; i < len; ++i) { str->push_back(table[bytes[i] % table_size]); } return true; } bool CreateRandomString(size_t len, std::string* str) { return CreateRandomString(len, BASE64, 64, str); } bool CreateRandomString(size_t len, const std::string& table, std::string* str) { return CreateRandomString(len, table.c_str(), static_cast<int>(table.size()), str); } uint32 CreateRandomId() { uint32 id; if (!Rng().Generate(&id, sizeof(id))) { LOG(LS_ERROR) << "Failed to generate random id!"; } return id; } uint64 CreateRandomId64() { return static_cast<uint64>(CreateRandomId()) << 32 | CreateRandomId(); } uint32 CreateRandomNonZeroId() { uint32 id; do { id = CreateRandomId(); } while (id == 0); return id; } double CreateRandomDouble() { return CreateRandomId() / (std::numeric_limits<uint32>::max() + std::numeric_limits<double>::epsilon()); } } // namespace rtc
7,610
2,770
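A minimal usage sketch for the rtc random helpers implemented above. The header path "webrtc/base/helpers.h" is an assumption (older trees expose these through "talk/base/helpers.h"); InitRandom, CreateRandomString, CreateRandomNonZeroId and SetRandomTestMode are the functions defined in the file itself, and the custom-table overload of CreateRandomString is the one shown above.

#include <cstdint>
#include <iostream>
#include <string>

#include "webrtc/base/helpers.h"  // assumed header for the rtc:: random helpers

int main() {
  // Seed the secure generator; the helpers log and return false on failure.
  if (!rtc::InitRandom(1234)) {
    return 1;
  }

  // 16 characters drawn from the default BASE64 table.
  std::string token = rtc::CreateRandomString(16);

  // Same helper with a caller-supplied alphabet.
  std::string digits;
  rtc::CreateRandomString(8, std::string("0123456789"), &digits);

  // Non-zero 32-bit id, e.g. a session identifier.
  const uint32_t id = rtc::CreateRandomNonZeroId();
  std::cout << token << " " << digits << " " << id << std::endl;

  // Unit tests can switch to the predictable TestRandomGenerator.
  rtc::SetRandomTestMode(true);
  return 0;
}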
#define EZC3D_API_EXPORTS /// /// \file ForcePlatForm.cpp /// \brief Implementation of ForcePlatForm class /// \author Pariterre /// \version 1.0 /// \date March 25th, 2020 /// #include "modules/ForcePlatforms.h" #include "ezc3d_all.h" ezc3d::Modules::ForcePlatform::ForcePlatform() { } ezc3d::Modules::ForcePlatform::ForcePlatform( size_t idx, const ezc3d::c3d& c3d) { // Extract the required values from the C3D extractUnits(c3d); extractType(idx, c3d); extractCorners(idx, c3d); extractOrigin(idx, c3d); extractCalMatrix(idx, c3d); computePfReferenceFrame(); extractData(idx, c3d); } const std::string& ezc3d::Modules::ForcePlatform::forceUnit() const { return _unitsForce; } const std::string& ezc3d::Modules::ForcePlatform::momentUnit() const { return _unitsMoment; } const std::string& ezc3d::Modules::ForcePlatform::positionUnit() const { return _unitsPosition; } void ezc3d::Modules::ForcePlatform::extractUnits( const ezc3d::c3d &c3d) { const ezc3d::ParametersNS::GroupNS::Group &groupPoint( c3d.parameters().group("POINT")); const ezc3d::ParametersNS::GroupNS::Group &groupPF( c3d.parameters().group("FORCE_PLATFORM")); // Position units if (groupPoint.isParameter("UNITS") && groupPoint.parameter("UNITS").dimension()[0] > 0){ _unitsPosition = groupPoint.parameter("UNITS").valuesAsString()[0]; } else { // Assume meter if not provided _unitsPosition = "m"; } // Force units if (groupPF.isParameter("UNITS") && groupPF.parameter("UNITS").dimension()[0] > 0){ _unitsForce = groupPF.parameter("UNITS").valuesAsString()[0]; } else { // Assume Newton if not provided _unitsForce = "N"; } // Moments units _unitsMoment = _unitsForce + _unitsPosition; } size_t ezc3d::Modules::ForcePlatform::nbFrames() const { return _F.size(); } size_t ezc3d::Modules::ForcePlatform::type() const { return _type; } const ezc3d::Matrix66& ezc3d::Modules::ForcePlatform::calMatrix() const { return _calMatrix; } const std::vector<ezc3d::Vector3d>& ezc3d::Modules::ForcePlatform::corners() const { return _corners; } const ezc3d::Vector3d& ezc3d::Modules::ForcePlatform::meanCorners() const { return _meanCorners; } const ezc3d::Vector3d& ezc3d::Modules::ForcePlatform::origin() const { return _origin; } const std::vector<ezc3d::Vector3d>& ezc3d::Modules::ForcePlatform::forces() const { return _F; } const std::vector<ezc3d::Vector3d>& ezc3d::Modules::ForcePlatform::moments() const { return _M; } const std::vector<ezc3d::Vector3d>& ezc3d::Modules::ForcePlatform::CoP() const { return _CoP; } const std::vector<ezc3d::Vector3d>& ezc3d::Modules::ForcePlatform::Tz() const { return _Tz; } void ezc3d::Modules::ForcePlatform::extractType( size_t idx, const ezc3d::c3d &c3d) { const ezc3d::ParametersNS::GroupNS::Group &groupPF( c3d.parameters().group("FORCE_PLATFORM")); if (groupPF.parameter("TYPE").valuesAsInt().size() < idx + 1){ throw std::runtime_error("FORCE_PLATFORM:IDX is not fill properly " "to extract Force platform informations"); } _type = static_cast<size_t>(groupPF.parameter("TYPE").valuesAsInt()[idx]); // Make sure that particular type is supported if (_type == 1){ } else if (_type == 2 || _type == 4){ } else if (_type == 3 || _type == 7){ if (_type == 7){ throw std::runtime_error("Type 3 (and 7) is not supported yet, " "please open an Issue on git`hub for " "support"); } } else if (_type == 5){ throw std::runtime_error("Type 5 is not supported yet, please " "open an Issue on github for support"); } else if (_type == 6){ throw std::runtime_error("Type 6 is not supported yet, please " "open an Issue on github for support"); } else if 
(_type == 11 || _type == 12){ throw std::runtime_error("Kistler Split Belt Treadmill is not " "supported for ForcePlatform analysis"); } else if (_type == 21){ throw std::runtime_error("AMTI-stairs is not supported " "for ForcePlatform analysis"); } else { throw std::runtime_error("Force platform type is non existant " "or not supported yet"); } } ezc3d::Modules::ForcePlatforms::ForcePlatforms( const ezc3d::c3d &c3d) { size_t nbForcePF(c3d.parameters().group("FORCE_PLATFORM") .parameter("USED").valuesAsInt()[0]); for (size_t i=0; i<nbForcePF; ++i){ _platforms.push_back(ezc3d::Modules::ForcePlatform(i, c3d)); } } void ezc3d::Modules::ForcePlatform::extractCorners( size_t idx, const ezc3d::c3d &c3d) { const ezc3d::ParametersNS::GroupNS::Group &groupPF( c3d.parameters().group("FORCE_PLATFORM")); const std::vector<double>& all_corners( groupPF.parameter("CORNERS").valuesAsDouble()); if (all_corners.size() < 12*(idx+1)){ throw std::runtime_error("FORCE_PLATFORM:CORNER is not fill properly " "to extract Force platform informations"); } for (size_t i=0; i<4; ++i){ ezc3d::Vector3d corner; for (size_t j=0; j<3; ++j){ corner(j) = all_corners[idx*12 + i*3 + j]; } _corners.push_back(corner); _meanCorners += corner; } _meanCorners /= 4; } void ezc3d::Modules::ForcePlatform::extractOrigin( size_t idx, const ezc3d::c3d &c3d) { const ezc3d::ParametersNS::GroupNS::Group &groupPF( c3d.parameters().group("FORCE_PLATFORM")); const std::vector<double>& all_origins( groupPF.parameter("ORIGIN").valuesAsDouble()); if (all_origins.size() < 3*(idx+1)){ throw std::runtime_error("FORCE_PLATFORM:ORIGIN is not fill properly " "to extract Force platform informations"); } for (size_t i=0; i<3; ++i){ if (_type == 1 && i < 2){ _origin(i) = 0; } else { _origin(i) = all_origins[idx*3 + i]; } } if ((_type >= 1 && _type <= 4) && _origin(2) > 0.0){ _origin = -1*_origin; } } void ezc3d::Modules::ForcePlatform::extractCalMatrix( size_t idx, const ezc3d::c3d &c3d) { const ezc3d::ParametersNS::GroupNS::Group &groupPF( c3d.parameters().group("FORCE_PLATFORM")); size_t nChannels(-1); if (_type >= 1 && _type <= 4){ nChannels = 6; } if (!groupPF.isParameter("CAL_MATRIX")){ if (_type == 2){ // CAL_MATRIX is ignore for type 2 // If none is found, returns all zeros return; } else { throw std::runtime_error( "FORCE_PLATFORM:CAL_MATRIX was not found, but is " "required for the type of force platform"); } } // Check dimensions const auto& calMatrixParam(groupPF.parameter("CAL_MATRIX")); if (calMatrixParam.dimension().size() < 3 || calMatrixParam.dimension()[2] <= idx){ if (_type == 1 || _type == 2 || _type == 3){ // CAL_MATRIX is ignore for type 2 // If none is found, returns all zeros return; } else { throw std::runtime_error( "FORCE_PLATFORM:CAL_MATRIX is not fill properly " "to extract Force platform informations"); } } const auto& val(calMatrixParam.valuesAsDouble()); size_t skip(calMatrixParam.dimension()[0] * calMatrixParam.dimension()[1]); for (size_t i=0; i<nChannels; ++i){ for (size_t j=0; j<nChannels; ++j){ _calMatrix(i, j) = val[skip*idx + j*nChannels + i]; } } } void ezc3d::Modules::ForcePlatform::computePfReferenceFrame() { ezc3d::Vector3d axisX(_corners[0] - _corners[1]); ezc3d::Vector3d axisY(_corners[0] - _corners[3]); ezc3d::Vector3d axisZ(axisX.cross(axisY)); axisY = axisZ.cross(axisX); axisX.normalize(); axisY.normalize(); axisZ.normalize(); for (size_t i=0; i<3; ++i){ _refFrame(i, 0) = axisX(i); _refFrame(i, 1) = axisY(i); _refFrame(i, 2) = axisZ(i); } } void ezc3d::Modules::ForcePlatform::extractData( size_t idx, const 
ezc3d::c3d &c3d) { const ezc3d::ParametersNS::GroupNS::Group &groupPF( c3d.parameters().group("FORCE_PLATFORM")); // Get elements from the force platform's type size_t nChannels(-1); if (_type == 1) { nChannels = 6; } else if(_type == 2 || _type == 4){ nChannels = 6; } else if (_type == 3) { nChannels = 8; } // Check the dimensions of FORCE_PLATFORM:CHANNEL are consistent const std::vector<size_t>& dimensions(groupPF.parameter("CHANNEL").dimension()); if (dimensions[0] < nChannels){ throw std::runtime_error("FORCE_PLATFORM:CHANNEL is not fill properly " "to extract Force platform informations"); } if (dimensions[1] < idx + 1){ throw std::runtime_error("FORCE_PLATFORM:CHANNEL is not fill properly " "to extract Force platform informations"); } // Get the channels where the force platform are stored in the data std::vector<size_t> channel_idx(nChannels); const std::vector<int>& all_channel_idx( groupPF.parameter("CHANNEL").valuesAsInt()); for (size_t i=0; i<nChannels; ++i){ channel_idx[i] = all_channel_idx[idx*dimensions[0] + i] - 1; // 1-based } // Get the force and moment from these channel in global reference frame size_t nFramesTotal( c3d.header().nbFrames() * c3d.header().nbAnalogByFrame()); _F.resize(nFramesTotal); _M.resize(nFramesTotal); _CoP.resize(nFramesTotal); _Tz.resize(nFramesTotal); size_t cmp(0); double * ch = new double[8]; for (const auto& frame : c3d.data().frames()){ for (size_t i=0; i<frame.analogs().nbSubframes(); ++i){ const auto& subframe(frame.analogs().subframe(i)); if (_type == 1){ ezc3d::Vector3d force_raw; ezc3d::Vector3d cop_raw; ezc3d::Vector3d tz_raw; // CalMatrix (the example I have does not have any) for (size_t j=0; j<3; ++j){ force_raw(j) = subframe.channel(channel_idx[j]).data(); if (j < 2){ cop_raw(j) = subframe.channel(channel_idx[j+3]).data(); } else { tz_raw(j) = subframe.channel(channel_idx[j+3]).data(); } } _F[cmp] = _refFrame * force_raw; _CoP[cmp] = _refFrame * cop_raw; _Tz[cmp] = _refFrame * tz_raw; _M[cmp] = _F[cmp].cross(_CoP[cmp]) - _Tz[cmp]; _CoP[cmp] += _meanCorners; ++cmp; } else if (_type == 2 || _type == 3 || _type == 4){ ezc3d::Vector3d force_raw; ezc3d::Vector3d moment_raw; if (_type == 3){ for (size_t j=0; j<8; ++j){ ch[j] = subframe.channel(channel_idx[j]).data(); } // CalMatrix (the example I have does not have any) force_raw(0) = ch[0] + ch[1]; force_raw(1) = ch[2] + ch[3]; force_raw(2) = ch[4] + ch[5] + ch[6] + ch[7]; moment_raw(0) = _origin(1) * (ch[4] + ch[5] - ch[6] - ch[7]); moment_raw(1) = _origin(0) * (ch[5] + ch[6] - ch[4] - ch[7]); moment_raw(2) = _origin(1) * (ch[1] - ch[0]) + _origin(0) * (ch[2] - ch[3]); moment_raw += force_raw.cross(ezc3d::Vector3d(0, 0, _origin(2))); } else { ezc3d::Vector6d data_raw; for (size_t j=0; j<3; ++j){ data_raw(j) = subframe.channel(channel_idx[j]).data(); data_raw(j+3) = subframe.channel(channel_idx[j+3]).data(); } if (_type == 4){ data_raw = _calMatrix * data_raw; } for (size_t j=0; j<3; ++j){ force_raw(j) = data_raw(j); moment_raw(j) = data_raw(j+3); } moment_raw += force_raw.cross(_origin); } _F[cmp] = _refFrame * force_raw; _M[cmp] = _refFrame * moment_raw; ezc3d::Vector3d CoP_raw( -moment_raw(1)/force_raw(2), moment_raw(0)/force_raw(2), 0); _CoP[cmp] = _refFrame * CoP_raw + _meanCorners; _Tz[cmp] = _refFrame * static_cast<Vector3d>( moment_raw - force_raw.cross(-1*CoP_raw)); ++cmp; } } } delete[] ch; } const std::vector<ezc3d::Modules::ForcePlatform>& ezc3d::Modules::ForcePlatforms::forcePlatforms() const { return _platforms; } const ezc3d::Modules::ForcePlatform& 
ezc3d::Modules::ForcePlatforms::forcePlatform( size_t idx) const { return _platforms.at(idx); }
13,923
4,767
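A short usage sketch for the ForcePlatforms module above. The input file name "walk.c3d" is hypothetical; ezc3d::c3d can be constructed from a file path, and forcePlatforms(), forces(), moments() and CoP() are the accessors defined above. For platform types 2 to 4 the centre of pressure is derived from the local moments as CoPx = -My/Fz and CoPy = Mx/Fz before being rotated into the global frame and shifted to the platform centre.

#include <iostream>

#include "ezc3d_all.h"
#include "modules/ForcePlatforms.h"

int main() {
    ezc3d::c3d c3d("walk.c3d");  // hypothetical C3D acquisition
    ezc3d::Modules::ForcePlatforms pf(c3d);

    for (const ezc3d::Modules::ForcePlatform& platform : pf.forcePlatforms()) {
        std::cout << "type " << platform.type()
                  << ", frames " << platform.nbFrames()
                  << ", forces in " << platform.forceUnit() << std::endl;

        if (platform.nbFrames() == 0) {
            continue;
        }

        // Forces and centre of pressure are expressed in the global frame,
        // one entry per analog subframe.
        ezc3d::Vector3d force = platform.forces()[0];
        ezc3d::Vector3d cop = platform.CoP()[0];
        std::cout << "  Fz = " << force(2)
                  << ", CoP = (" << cop(0) << ", " << cop(1) << ")" << std::endl;
    }
    return 0;
}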
// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chromeos/memory/pagemap.h" #include <fcntl.h> #include <sys/stat.h> #include <sys/types.h> #include <string> #include "base/memory/aligned_memory.h" #include "base/posix/eintr_wrapper.h" #include "base/process/process_metrics.h" #include "base/strings/stringprintf.h" #include "base/threading/scoped_blocking_call.h" namespace chromeos { namespace memory { namespace { constexpr char kPagemapFileFormat[] = "/proc/%d/pagemap"; } Pagemap::~Pagemap() = default; Pagemap::Pagemap(pid_t pid) { if (pid) { std::string pagemap_file = base::StringPrintf(kPagemapFileFormat, pid); fd_.reset(HANDLE_EINTR(open(pagemap_file.c_str(), O_RDONLY))); } } bool Pagemap::IsValid() const { return fd_.is_valid(); } bool Pagemap::GetEntries(uint64_t address, uint64_t length, std::vector<PagemapEntry>* entries) const { base::ScopedBlockingCall scoped_blocking_call(FROM_HERE, base::BlockingType::WILL_BLOCK); DCHECK(IsValid()); DCHECK(entries); const size_t kPageSize = base::GetPageSize(); DCHECK(base::IsPageAligned(address)); DCHECK(base::IsPageAligned(length)); // The size of each pagemap entry to calculate our offset in the file. uint64_t num_pages = length / kPageSize; if (entries->size() != num_pages) { // Shrink or grow entries to the correct length if it was not already. entries->resize(num_pages); entries->shrink_to_fit(); // If we made it smaller shrink capacity. } uint64_t pagemap_offset = (address / kPageSize) * sizeof(PagemapEntry); uint64_t pagemap_len = num_pages * sizeof(PagemapEntry); memset(entries->data(), 0, pagemap_len); // The caller was expected to provide a buffer large enough for the number of // pages in the region. uint64_t total_read = 0; while (total_read < pagemap_len) { ssize_t bytes_read = HANDLE_EINTR( pread(fd_.get(), reinterpret_cast<char*>(entries->data()) + total_read, pagemap_len - total_read, pagemap_offset + total_read)); if (bytes_read <= 0) { return false; } total_read += bytes_read; } return true; } bool Pagemap::GetNumberOfPagesInCore(uint64_t address, uint64_t length, uint64_t* pages_in_core) const { DCHECK(pages_in_core); *pages_in_core = 0; std::vector<Pagemap::PagemapEntry> entries(length / base::GetPageSize()); if (!GetEntries(address, length, &entries)) { return false; } for (const Pagemap::PagemapEntry& entry : entries) { if (entry.page_present) (*pages_in_core)++; } return true; } } // namespace memory } // namespace chromeos
2,897
1,024
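A small usage sketch for the Pagemap helper above, assuming a Chromium OS build environment where "chromeos/memory/pagemap.h", base logging and base::GetPageSize() are available. The address and length passed to GetNumberOfPagesInCore() must be page-aligned, since GetEntries() DCHECKs alignment; LogResidentPages is a hypothetical caller-side helper, not part of the class.

#include <unistd.h>

#include <cstdint>

#include "base/logging.h"
#include "base/process/process_metrics.h"
#include "chromeos/memory/pagemap.h"

// Hypothetical helper: report how many pages of a page-aligned region are resident.
void LogResidentPages(void* region_start, size_t region_length) {
  // Pagemap(getpid()) opens /proc/<pid>/pagemap for the current process.
  chromeos::memory::Pagemap pagemap(getpid());
  if (!pagemap.IsValid())
    return;  // e.g. the pagemap file could not be opened

  const size_t page_size = base::GetPageSize();
  const uint64_t address = reinterpret_cast<uint64_t>(region_start);
  DCHECK_EQ(address % page_size, 0u);
  DCHECK_EQ(region_length % page_size, 0u);

  uint64_t pages_in_core = 0;
  if (pagemap.GetNumberOfPagesInCore(address, region_length, &pages_in_core)) {
    LOG(INFO) << pages_in_core << " of " << region_length / page_size
              << " pages are resident";
  }
}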
//------------------------------------------------------------------------------------------------------- // Copyright (C) Microsoft Corporation and contributors. All rights reserved. // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. //------------------------------------------------------------------------------------------------------- #include "Backend.h" #if ENABLE_DEBUG_CONFIG_OPTIONS #define TESTTRACE_PHASE_INSTR(phase, instr, ...) \ if(PHASE_TESTTRACE(phase, this->func)) \ { \ char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; \ Output::Print( \ _u("Testtrace: %s function %s (%s): "), \ Js::PhaseNames[phase], \ instr->m_func->GetJITFunctionBody()->GetDisplayName(), \ instr->m_func->GetDebugNumberSet(debugStringBuffer)); \ Output::Print(__VA_ARGS__); \ Output::Flush(); \ } #else // ENABLE_DEBUG_CONFIG_OPTIONS #define TESTTRACE_PHASE_INSTR(phase, instr, ...) #endif // ENABLE_DEBUG_CONFIG_OPTIONS #if DBG_DUMP #define DO_MEMOP_TRACE() (PHASE_TRACE(Js::MemOpPhase, this->func) ||\ PHASE_TRACE(Js::MemSetPhase, this->func) ||\ PHASE_TRACE(Js::MemCopyPhase, this->func)) #define DO_MEMOP_TRACE_PHASE(phase) (PHASE_TRACE(Js::MemOpPhase, this->func) || PHASE_TRACE(Js::phase ## Phase, this->func)) #define OUTPUT_MEMOP_TRACE(loop, instr, ...) {\ char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];\ Output::Print(15, _u("Function: %s%s, Loop: %u: "), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), loop->GetLoopNumber());\ Output::Print(__VA_ARGS__);\ IR::Instr* __instr__ = instr;\ if(__instr__) __instr__->DumpByteCodeOffset();\ if(__instr__) Output::Print(_u(" (%s)"), Js::OpCodeUtil::GetOpCodeName(__instr__->m_opcode));\ Output::Print(_u("\n"));\ Output::Flush(); \ } #define TRACE_MEMOP(loop, instr, ...) \ if (DO_MEMOP_TRACE()) {\ Output::Print(_u("TRACE MemOp:"));\ OUTPUT_MEMOP_TRACE(loop, instr, __VA_ARGS__)\ } #define TRACE_MEMOP_VERBOSE(loop, instr, ...) if(CONFIG_FLAG(Verbose)) {TRACE_MEMOP(loop, instr, __VA_ARGS__)} #define TRACE_MEMOP_PHASE(phase, loop, instr, ...) \ if (DO_MEMOP_TRACE_PHASE(phase))\ {\ Output::Print(_u("TRACE ") _u(#phase) _u(":"));\ OUTPUT_MEMOP_TRACE(loop, instr, __VA_ARGS__)\ } #define TRACE_MEMOP_PHASE_VERBOSE(phase, loop, instr, ...) if(CONFIG_FLAG(Verbose)) {TRACE_MEMOP_PHASE(phase, loop, instr, __VA_ARGS__)} #else #define DO_MEMOP_TRACE() #define DO_MEMOP_TRACE_PHASE(phase) #define OUTPUT_MEMOP_TRACE(loop, instr, ...) #define TRACE_MEMOP(loop, instr, ...) #define TRACE_MEMOP_VERBOSE(loop, instr, ...) #define TRACE_MEMOP_PHASE(phase, loop, instr, ...) #define TRACE_MEMOP_PHASE_VERBOSE(phase, loop, instr, ...) 
#endif class AutoRestoreVal { private: Value *const originalValue; Value *const tempValue; Value * *const valueRef; public: AutoRestoreVal(Value *const originalValue, Value * *const tempValueRef) : originalValue(originalValue), tempValue(*tempValueRef), valueRef(tempValueRef) { } ~AutoRestoreVal() { if(*valueRef == tempValue) { *valueRef = originalValue; } } PREVENT_COPY(AutoRestoreVal); }; GlobOpt::GlobOpt(Func * func) : func(func), intConstantToStackSymMap(nullptr), intConstantToValueMap(nullptr), currentValue(FirstNewValueNumber), prePassLoop(nullptr), alloc(nullptr), isCallHelper(false), inInlinedBuiltIn(false), rootLoopPrePass(nullptr), noImplicitCallUsesToInsert(nullptr), valuesCreatedForClone(nullptr), valuesCreatedForMerge(nullptr), instrCountSinceLastCleanUp(0), isRecursiveCallOnLandingPad(false), updateInductionVariableValueNumber(false), isPerformingLoopBackEdgeCompensation(false), currentRegion(nullptr), changedSymsAfterIncBailoutCandidate(nullptr), doTypeSpec( !IsTypeSpecPhaseOff(func)), doAggressiveIntTypeSpec( doTypeSpec && DoAggressiveIntTypeSpec(func)), doAggressiveMulIntTypeSpec( doTypeSpec && !PHASE_OFF(Js::AggressiveMulIntTypeSpecPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsAggressiveMulIntTypeSpecDisabled(func->IsLoopBody()))), doDivIntTypeSpec( doAggressiveIntTypeSpec && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsDivIntTypeSpecDisabled(func->IsLoopBody()))), doLossyIntTypeSpec( doTypeSpec && DoLossyIntTypeSpec(func)), doFloatTypeSpec( doTypeSpec && DoFloatTypeSpec(func)), doArrayCheckHoist( DoArrayCheckHoist(func)), doArrayMissingValueCheckHoist( doArrayCheckHoist && DoArrayMissingValueCheckHoist(func)), doArraySegmentHoist( doArrayCheckHoist && DoArraySegmentHoist(ValueType::GetObject(ObjectType::Int32Array), func)), doJsArraySegmentHoist( doArraySegmentHoist && DoArraySegmentHoist(ValueType::GetObject(ObjectType::Array), func)), doArrayLengthHoist( doArrayCheckHoist && DoArrayLengthHoist(func)), doEliminateArrayAccessHelperCall( doArrayCheckHoist && !PHASE_OFF(Js::EliminateArrayAccessHelperCallPhase, func)), doTrackRelativeIntBounds( doAggressiveIntTypeSpec && DoPathDependentValues() && !PHASE_OFF(Js::Phase::TrackRelativeIntBoundsPhase, func)), doBoundCheckElimination( doTrackRelativeIntBounds && !PHASE_OFF(Js::Phase::BoundCheckEliminationPhase, func)), doBoundCheckHoist( doEliminateArrayAccessHelperCall && doBoundCheckElimination && DoConstFold() && !PHASE_OFF(Js::Phase::BoundCheckHoistPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsBoundCheckHoistDisabled(func->IsLoopBody()))), doLoopCountBasedBoundCheckHoist( doBoundCheckHoist && !PHASE_OFF(Js::Phase::LoopCountBasedBoundCheckHoistPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsLoopCountBasedBoundCheckHoistDisabled(func->IsLoopBody()))), doPowIntIntTypeSpec( doAggressiveIntTypeSpec && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsPowIntIntTypeSpecDisabled())), doTagChecks( (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsTagCheckDisabled())), isAsmJSFunc(func->GetJITFunctionBody()->IsAsmJsMode()) { } void GlobOpt::BackwardPass(Js::Phase tag) { BEGIN_CODEGEN_PHASE(this->func, tag); ::BackwardPass backwardPass(this->func, this, tag); backwardPass.Optimize(); END_CODEGEN_PHASE(this->func, tag); } void GlobOpt::Optimize() { this->objectTypeSyms = nullptr; this->func->argInsCount = this->func->GetInParamsCount() - 1; //Don't include "this" pointer in the count. 
if (!func->DoGlobOpt()) { this->lengthEquivBv = nullptr; this->argumentsEquivBv = nullptr; this->callerEquivBv = nullptr; // Still need to run the dead store phase to calculate the live reg on back edge this->BackwardPass(Js::DeadStorePhase); CannotAllocateArgumentsObjectOnStack(); return; } { this->lengthEquivBv = this->func->m_symTable->m_propertyEquivBvMap->Lookup(Js::PropertyIds::length, nullptr); // Used to kill live "length" properties this->argumentsEquivBv = func->m_symTable->m_propertyEquivBvMap->Lookup(Js::PropertyIds::arguments, nullptr); // Used to kill live "arguments" properties this->callerEquivBv = func->m_symTable->m_propertyEquivBvMap->Lookup(Js::PropertyIds::caller, nullptr); // Used to kill live "caller" properties // The backward phase needs the glob opt's allocator to allocate the propertyTypeValueMap // in GlobOpt::EnsurePropertyTypeValue and ranges of instructions where int overflow may be ignored. // (see BackwardPass::TrackIntUsage) PageAllocator * pageAllocator = this->func->m_alloc->GetPageAllocator(); NoRecoverMemoryJitArenaAllocator localAlloc(_u("BE-GlobOpt"), pageAllocator, Js::Throw::OutOfMemory); this->alloc = &localAlloc; NoRecoverMemoryJitArenaAllocator localTempAlloc(_u("BE-GlobOpt temp"), pageAllocator, Js::Throw::OutOfMemory); this->tempAlloc = &localTempAlloc; // The forward passes use info (upwardExposedUses) from the backward pass. This info // isn't available for some of the symbols created during the backward pass, or the forward pass. // Keep track of the last symbol for which we're guaranteed to have data. this->maxInitialSymID = this->func->m_symTable->GetMaxSymID(); this->BackwardPass(Js::BackwardPhase); this->ForwardPass(); } this->BackwardPass(Js::DeadStorePhase); this->TailDupPass(); } bool GlobOpt::ShouldExpectConventionalArrayIndexValue(IR::IndirOpnd *const indirOpnd) { Assert(indirOpnd); if(!indirOpnd->GetIndexOpnd()) { return indirOpnd->GetOffset() >= 0; } IR::RegOpnd *const indexOpnd = indirOpnd->GetIndexOpnd(); if(indexOpnd->m_sym->m_isNotInt) { // Typically, single-def or any sym-specific information for type-specialized syms should not be used because all of // their defs will not have been accounted for until after the forward pass. But m_isNotInt is only ever changed from // false to true, so it's okay in this case. return false; } StackSym *indexVarSym = indexOpnd->m_sym; if(indexVarSym->IsTypeSpec()) { indexVarSym = indexVarSym->GetVarEquivSym(nullptr); Assert(indexVarSym); } else if(!IsLoopPrePass()) { // Don't use single-def info or const flags for type-specialized syms, as all of their defs will not have been accounted // for until after the forward pass. Also, don't use the const flags in a loop prepass because the const flags may not // be up-to-date. 
StackSym *const indexSym = indexOpnd->m_sym; if(indexSym->IsIntConst()) { return indexSym->GetIntConstValue() >= 0; } } Value *const indexValue = CurrentBlockData()->FindValue(indexVarSym); if(!indexValue) { // Treat it as Uninitialized, assume it's going to be valid return true; } ValueInfo *const indexValueInfo = indexValue->GetValueInfo(); int32 indexConstantValue; if(indexValueInfo->TryGetIntConstantValue(&indexConstantValue)) { return indexConstantValue >= 0; } if(indexValueInfo->IsUninitialized()) { // Assume it's going to be valid return true; } return indexValueInfo->HasBeenNumber() && !indexValueInfo->HasBeenFloat(); } // // Either result is float or 1/x or cst1/cst2 where cst1%cst2 != 0 // ValueType GlobOpt::GetDivValueType(IR::Instr* instr, Value* src1Val, Value* src2Val, bool specialize) { ValueInfo *src1ValueInfo = (src1Val ? src1Val->GetValueInfo() : nullptr); ValueInfo *src2ValueInfo = (src2Val ? src2Val->GetValueInfo() : nullptr); if (instr->IsProfiledInstr() && instr->m_func->HasProfileInfo()) { ValueType resultType = instr->m_func->GetReadOnlyProfileInfo()->GetDivProfileInfo(static_cast<Js::ProfileId>(instr->AsProfiledInstr()->u.profileId)); if (resultType.IsLikelyInt()) { if (specialize && src1ValueInfo && src2ValueInfo && ((src1ValueInfo->IsInt() && src2ValueInfo->IsInt()) || (this->DoDivIntTypeSpec() && src1ValueInfo->IsLikelyInt() && src2ValueInfo->IsLikelyInt()))) { return ValueType::GetInt(true); } return resultType; } // Consider: Checking that the sources are numbers. if (resultType.IsLikelyFloat()) { return ValueType::Float; } return resultType; } int32 src1IntConstantValue; if(!src1ValueInfo || !src1ValueInfo->TryGetIntConstantValue(&src1IntConstantValue)) { return ValueType::Number; } if (src1IntConstantValue == 1) { return ValueType::Float; } int32 src2IntConstantValue; if(!src2Val || !src2ValueInfo->TryGetIntConstantValue(&src2IntConstantValue)) { return ValueType::Number; } if (src2IntConstantValue // Avoid divide by zero && !(src1IntConstantValue == 0x80000000 && src2IntConstantValue == -1) // Avoid integer overflow && (src1IntConstantValue % src2IntConstantValue) != 0) { return ValueType::Float; } return ValueType::Number; } void GlobOpt::ForwardPass() { BEGIN_CODEGEN_PHASE(this->func, Js::ForwardPhase); #if DBG_DUMP if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::GlobOptPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId())) { this->func->DumpHeader(); } if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::GlobOptPhase)) { this->TraceSettings(); } #endif // GetConstantCount() gives us the right size to pick for the SparseArray, but we may need more if we've inlined // functions with constants. There will be a gap in the symbol numbering between the main constants and // the inlined ones, so we'll most likely need a new array chunk. Make the min size of the array chunks be 64 // in case we have a main function with very few constants and a bunch of constants from inlined functions. 
this->byteCodeConstantValueArray = SparseArray<Value>::New(this->alloc, max(this->func->GetJITFunctionBody()->GetConstCount(), 64U)); this->byteCodeConstantValueNumbersBv = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); this->tempBv = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); this->prePassCopyPropSym = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); this->slotSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); this->byteCodeUses = nullptr; this->propertySymUse = nullptr; // changedSymsAfterIncBailoutCandidate helps track building incremental bailout in ForwardPass this->changedSymsAfterIncBailoutCandidate = JitAnew(alloc, BVSparse<JitArenaAllocator>, alloc); #if DBG this->byteCodeUsesBeforeOpt = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldCopyPropPhase) && this->DoFunctionFieldCopyProp()) { Output::Print(_u("TRACE: CanDoFieldCopyProp Func: ")); this->func->DumpFullFunctionName(); Output::Print(_u("\n")); } #endif OpndList localNoImplicitCallUsesToInsert(alloc); this->noImplicitCallUsesToInsert = &localNoImplicitCallUsesToInsert; IntConstantToStackSymMap localIntConstantToStackSymMap(alloc); this->intConstantToStackSymMap = &localIntConstantToStackSymMap; IntConstantToValueMap localIntConstantToValueMap(alloc); this->intConstantToValueMap = &localIntConstantToValueMap; Int64ConstantToValueMap localInt64ConstantToValueMap(alloc); this->int64ConstantToValueMap = &localInt64ConstantToValueMap; AddrConstantToValueMap localAddrConstantToValueMap(alloc); this->addrConstantToValueMap = &localAddrConstantToValueMap; StringConstantToValueMap localStringConstantToValueMap(alloc); this->stringConstantToValueMap = &localStringConstantToValueMap; SymIdToInstrMap localPrePassInstrMap(alloc); this->prePassInstrMap = &localPrePassInstrMap; ValueSetByValueNumber localValuesCreatedForClone(alloc, 64); this->valuesCreatedForClone = &localValuesCreatedForClone; ValueNumberPairToValueMap localValuesCreatedForMerge(alloc, 64); this->valuesCreatedForMerge = &localValuesCreatedForMerge; #if DBG BVSparse<JitArenaAllocator> localFinishedStackLiteralInitFld(alloc); this->finishedStackLiteralInitFld = &localFinishedStackLiteralInitFld; #endif FOREACH_BLOCK_IN_FUNC_EDITING(block, this->func) { this->OptBlock(block); } NEXT_BLOCK_IN_FUNC_EDITING; if (!PHASE_OFF(Js::MemOpPhase, this->func)) { ProcessMemOp(); } this->noImplicitCallUsesToInsert = nullptr; this->intConstantToStackSymMap = nullptr; this->intConstantToValueMap = nullptr; this->int64ConstantToValueMap = nullptr; this->addrConstantToValueMap = nullptr; this->stringConstantToValueMap = nullptr; #if DBG this->finishedStackLiteralInitFld = nullptr; uint freedCount = 0; uint spilledCount = 0; #endif FOREACH_BLOCK_IN_FUNC(block, this->func) { #if DBG if (block->GetDataUseCount() == 0) { freedCount++; } else { spilledCount++; } #endif block->SetDataUseCount(0); if (block->cloneStrCandidates) { JitAdelete(this->alloc, block->cloneStrCandidates); block->cloneStrCandidates = nullptr; } } NEXT_BLOCK_IN_FUNC; // Make sure we free most of them. 
Assert(freedCount >= spilledCount); // this->alloc will be freed right after return, no need to free it here this->changedSymsAfterIncBailoutCandidate = nullptr; END_CODEGEN_PHASE(this->func, Js::ForwardPhase); } void GlobOpt::OptBlock(BasicBlock *block) { if (this->func->m_fg->RemoveUnreachableBlock(block, this)) { GOPT_TRACE(_u("Removing unreachable block #%d\n"), block->GetBlockNum()); return; } Loop * loop = block->loop; if (loop && block->isLoopHeader) { if (loop != this->prePassLoop) { OptLoops(loop); if (!this->IsLoopPrePass() && DoFieldPRE(loop)) { // Note: !IsLoopPrePass means this was a root loop pre-pass. FieldPre() is called once per loop. this->FieldPRE(loop); // Re-optimize the landing pad BasicBlock *landingPad = loop->landingPad; this->isRecursiveCallOnLandingPad = true; this->OptBlock(landingPad); this->isRecursiveCallOnLandingPad = false; } } } this->currentBlock = block; PrepareLoopArrayCheckHoist(); block->MergePredBlocksValueMaps(this); this->intOverflowCurrentlyMattersInRange = true; this->intOverflowDoesNotMatterRange = this->currentBlock->intOverflowDoesNotMatterRange; if (loop && DoFieldHoisting(loop)) { if (block->isLoopHeader) { if (!this->IsLoopPrePass()) { this->PrepareFieldHoisting(loop); } else if (loop == this->rootLoopPrePass) { this->PreparePrepassFieldHoisting(loop); } } } else { Assert(!TrackHoistableFields() || !HasHoistableFields(CurrentBlockData())); if (!DoFieldCopyProp() && !DoFieldRefOpts()) { this->KillAllFields(CurrentBlockData()->liveFields); } } this->tempAlloc->Reset(); if(loop && block->isLoopHeader) { loop->firstValueNumberInLoop = this->currentValue; } GOPT_TRACE_BLOCK(block, true); FOREACH_INSTR_IN_BLOCK_EDITING(instr, instrNext, block) { GOPT_TRACE_INSTRTRACE(instr); BailOutInfo* oldBailOutInfo = nullptr; bool isCheckAuxBailoutNeeded = this->func->IsJitInDebugMode() && !this->IsLoopPrePass(); if (isCheckAuxBailoutNeeded && instr->HasAuxBailOut() && !instr->HasBailOutInfo()) { oldBailOutInfo = instr->GetBailOutInfo(); Assert(oldBailOutInfo); } bool isInstrRemoved = false; instrNext = this->OptInstr(instr, &isInstrRemoved); // If we still have instrs with only aux bail out, convert aux bail out back to regular bail out and fill it. // During OptInstr some instr can be moved out to a different block, in this case bailout info is going to be replaced // with e.g. loop bailout info which is filled as part of processing that block, thus we don't need to fill it here. if (isCheckAuxBailoutNeeded && !isInstrRemoved && instr->HasAuxBailOut() && !instr->HasBailOutInfo()) { if (instr->GetBailOutInfo() == oldBailOutInfo) { instr->PromoteAuxBailOut(); FillBailOutInfo(block, instr->GetBailOutInfo()); } else { AssertMsg(instr->GetBailOutInfo(), "With aux bailout, the bailout info should not be removed by OptInstr."); } } } NEXT_INSTR_IN_BLOCK_EDITING; GOPT_TRACE_BLOCK(block, false); if (block->loop) { if (IsLoopPrePass()) { if (DoBoundCheckHoist()) { DetectUnknownChangesToInductionVariables(&block->globOptData); } } else { isPerformingLoopBackEdgeCompensation = true; Assert(this->tempBv->IsEmpty()); BVSparse<JitArenaAllocator> tempBv2(this->tempAlloc); // On loop back-edges, we need to restore the state of the type specialized // symbols to that of the loop header. 
FOREACH_SUCCESSOR_BLOCK(succ, block) { if (succ->isLoopHeader && succ->loop->IsDescendentOrSelf(block->loop)) { BVSparse<JitArenaAllocator> *liveOnBackEdge = block->loop->regAlloc.liveOnBackEdgeSyms; this->tempBv->Minus(block->loop->varSymsOnEntry, block->globOptData.liveVarSyms); this->tempBv->And(liveOnBackEdge); this->ToVar(this->tempBv, block); // Lossy int in the loop header, and no int on the back-edge - need a lossy conversion to int this->tempBv->Minus(block->loop->lossyInt32SymsOnEntry, block->globOptData.liveInt32Syms); this->tempBv->And(liveOnBackEdge); this->ToInt32(this->tempBv, block, true /* lossy */); // Lossless int in the loop header, and no lossless int on the back-edge - need a lossless conversion to int this->tempBv->Minus(block->loop->int32SymsOnEntry, block->loop->lossyInt32SymsOnEntry); tempBv2.Minus(block->globOptData.liveInt32Syms, block->globOptData.liveLossyInt32Syms); this->tempBv->Minus(&tempBv2); this->tempBv->And(liveOnBackEdge); this->ToInt32(this->tempBv, block, false /* lossy */); this->tempBv->Minus(block->loop->float64SymsOnEntry, block->globOptData.liveFloat64Syms); this->tempBv->And(liveOnBackEdge); this->ToFloat64(this->tempBv, block); #ifdef ENABLE_SIMDJS // SIMD_JS // Compensate on backedge if sym is live on loop entry but not on backedge this->tempBv->Minus(block->loop->simd128F4SymsOnEntry, block->globOptData.liveSimd128F4Syms); this->tempBv->And(liveOnBackEdge); this->ToTypeSpec(this->tempBv, block, TySimd128F4, IR::BailOutSimd128F4Only); this->tempBv->Minus(block->loop->simd128I4SymsOnEntry, block->globOptData.liveSimd128I4Syms); this->tempBv->And(liveOnBackEdge); this->ToTypeSpec(this->tempBv, block, TySimd128I4, IR::BailOutSimd128I4Only); #endif // For ints and floats, go aggressive and type specialize in the landing pad any symbol which was specialized on // entry to the loop body (in the loop header), and is still specialized on this tail, but wasn't specialized in // the landing pad. // Lossy int in the loop header and no int in the landing pad - need a lossy conversion to int // (entry.lossyInt32 - landingPad.int32) this->tempBv->Minus(block->loop->lossyInt32SymsOnEntry, block->loop->landingPad->globOptData.liveInt32Syms); this->tempBv->And(liveOnBackEdge); this->ToInt32(this->tempBv, block->loop->landingPad, true /* lossy */); // Lossless int in the loop header, and no lossless int in the landing pad - need a lossless conversion to int // ((entry.int32 - entry.lossyInt32) - (landingPad.int32 - landingPad.lossyInt32)) this->tempBv->Minus(block->loop->int32SymsOnEntry, block->loop->lossyInt32SymsOnEntry); tempBv2.Minus( block->loop->landingPad->globOptData.liveInt32Syms, block->loop->landingPad->globOptData.liveLossyInt32Syms); this->tempBv->Minus(&tempBv2); this->tempBv->And(liveOnBackEdge); this->ToInt32(this->tempBv, block->loop->landingPad, false /* lossy */); // ((entry.float64 - landingPad.float64) & block.float64) this->tempBv->Minus(block->loop->float64SymsOnEntry, block->loop->landingPad->globOptData.liveFloat64Syms); this->tempBv->And(block->globOptData.liveFloat64Syms); this->tempBv->And(liveOnBackEdge); this->ToFloat64(this->tempBv, block->loop->landingPad); #ifdef ENABLE_SIMDJS // SIMD_JS // compensate on landingpad if live on loopEntry and Backedge. 
this->tempBv->Minus(block->loop->simd128F4SymsOnEntry, block->loop->landingPad->globOptData.liveSimd128F4Syms); this->tempBv->And(block->globOptData.liveSimd128F4Syms); this->tempBv->And(liveOnBackEdge); this->ToTypeSpec(this->tempBv, block->loop->landingPad, TySimd128F4, IR::BailOutSimd128F4Only); this->tempBv->Minus(block->loop->simd128I4SymsOnEntry, block->loop->landingPad->globOptData.liveSimd128I4Syms); this->tempBv->And(block->globOptData.liveSimd128I4Syms); this->tempBv->And(liveOnBackEdge); this->ToTypeSpec(this->tempBv, block->loop->landingPad, TySimd128I4, IR::BailOutSimd128I4Only); #endif // Now that we're done with the liveFields within this loop, trim the set to those syms // that the backward pass told us were live out of the loop. // This assumes we have no further need of the liveFields within the loop. if (block->loop->liveOutFields) { block->globOptData.liveFields->And(block->loop->liveOutFields); } } } NEXT_SUCCESSOR_BLOCK; this->tempBv->ClearAll(); isPerformingLoopBackEdgeCompensation = false; } } #if DBG // The set of live lossy int32 syms should be a subset of all live int32 syms this->tempBv->And(block->globOptData.liveInt32Syms, block->globOptData.liveLossyInt32Syms); Assert(this->tempBv->Count() == block->globOptData.liveLossyInt32Syms->Count()); // The set of live lossy int32 syms should be a subset of live var or float syms (var or float sym containing the lossless // value of the sym should be live) this->tempBv->Or(block->globOptData.liveVarSyms, block->globOptData.liveFloat64Syms); this->tempBv->And(block->globOptData.liveLossyInt32Syms); Assert(this->tempBv->Count() == block->globOptData.liveLossyInt32Syms->Count()); this->tempBv->ClearAll(); Assert(this->currentBlock == block); #endif } void GlobOpt::OptLoops(Loop *loop) { Assert(loop != nullptr); #if DBG if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldCopyPropPhase) && !DoFunctionFieldCopyProp() && DoFieldCopyProp(loop)) { Output::Print(_u("TRACE: CanDoFieldCopyProp Loop: ")); this->func->DumpFullFunctionName(); uint loopNumber = loop->GetLoopNumber(); Assert(loopNumber != Js::LoopHeader::NoLoop); Output::Print(_u(" Loop: %d\n"), loopNumber); } #endif Loop *previousLoop = this->prePassLoop; this->prePassLoop = loop; if (previousLoop == nullptr) { Assert(this->rootLoopPrePass == nullptr); this->rootLoopPrePass = loop; this->prePassInstrMap->Clear(); if (loop->parent == nullptr) { // Outer most loop... 
this->prePassCopyPropSym->ClearAll(); } } if (loop->symsUsedBeforeDefined == nullptr) { loop->symsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->likelyIntSymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->likelyNumberSymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->forceFloat64SymsOnEntry = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); #ifdef ENABLE_SIMDJS loop->likelySimd128F4SymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->likelySimd128I4SymsUsedBeforeDefined = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->forceSimd128F4SymsOnEntry = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->forceSimd128I4SymsOnEntry = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); #endif loop->symsDefInLoop = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->fieldKilled = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->fieldPRESymStore = JitAnew(alloc, BVSparse<JitArenaAllocator>, this->alloc); loop->allFieldsKilled = false; } else { loop->symsUsedBeforeDefined->ClearAll(); loop->likelyIntSymsUsedBeforeDefined->ClearAll(); loop->likelyNumberSymsUsedBeforeDefined->ClearAll(); loop->forceFloat64SymsOnEntry->ClearAll(); #ifdef ENABLE_SIMDJS loop->likelySimd128F4SymsUsedBeforeDefined->ClearAll(); loop->likelySimd128I4SymsUsedBeforeDefined->ClearAll(); loop->forceSimd128F4SymsOnEntry->ClearAll(); loop->forceSimd128I4SymsOnEntry->ClearAll(); #endif loop->symsDefInLoop->ClearAll(); loop->fieldKilled->ClearAll(); loop->allFieldsKilled = false; loop->initialValueFieldMap.Reset(); } FOREACH_BLOCK_IN_LOOP(block, loop) { block->SetDataUseCount(block->GetSuccList()->Count()); OptBlock(block); } NEXT_BLOCK_IN_LOOP; if (previousLoop == nullptr) { Assert(this->rootLoopPrePass == loop); this->rootLoopPrePass = nullptr; } this->prePassLoop = previousLoop; } void GlobOpt::TailDupPass() { FOREACH_LOOP_IN_FUNC_EDITING(loop, this->func) { BasicBlock* header = loop->GetHeadBlock(); BasicBlock* loopTail = nullptr; FOREACH_PREDECESSOR_BLOCK(pred, header) { if (loop->IsDescendentOrSelf(pred->loop)) { loopTail = pred; break; } } NEXT_PREDECESSOR_BLOCK; if (loopTail) { AssertMsg(loopTail->GetLastInstr()->IsBranchInstr(), "LastInstr of loop should always be a branch no?"); if (!loopTail->GetPredList()->HasOne()) { TryTailDup(loopTail->GetLastInstr()->AsBranchInstr()); } } } NEXT_LOOP_IN_FUNC_EDITING; } bool GlobOpt::TryTailDup(IR::BranchInstr *tailBranch) { if (PHASE_OFF(Js::TailDupPhase, tailBranch->m_func->GetTopFunc())) { return false; } if (tailBranch->IsConditional()) { return false; } IR::Instr *instr; uint instrCount = 0; for (instr = tailBranch->GetPrevRealInstrOrLabel(); !instr->IsLabelInstr(); instr = instr->GetPrevRealInstrOrLabel()) { if (instr->HasBailOutInfo()) { break; } if (!OpCodeAttr::CanCSE(instr->m_opcode)) { // Consider: We could be more aggressive here break; } instrCount++; if (instrCount > 1) { // Consider: If copy handled single-def tmps renaming, we could do more instrs break; } } if (!instr->IsLabelInstr()) { return false; } IR::LabelInstr *mergeLabel = instr->AsLabelInstr(); IR::Instr *mergeLabelPrev = mergeLabel->m_prev; // Skip unreferenced labels while (mergeLabelPrev->IsLabelInstr() && mergeLabelPrev->AsLabelInstr()->labelRefs.Empty()) { mergeLabelPrev = mergeLabelPrev->m_prev; } BasicBlock* labelBlock = mergeLabel->GetBasicBlock(); uint origPredCount = 
labelBlock->GetPredList()->Count(); uint dupCount = 0; // We are good to go. Let's do the tail duplication. FOREACH_SLISTCOUNTED_ENTRY_EDITING(IR::BranchInstr*, branchEntry, &mergeLabel->labelRefs, iter) { if (branchEntry->IsUnconditional() && !branchEntry->IsMultiBranch() && branchEntry != mergeLabelPrev && branchEntry != tailBranch) { for (instr = mergeLabel->m_next; instr != tailBranch; instr = instr->m_next) { branchEntry->InsertBefore(instr->Copy()); } instr = branchEntry; branchEntry->ReplaceTarget(mergeLabel, tailBranch->GetTarget()); while(!instr->IsLabelInstr()) { instr = instr->m_prev; } BasicBlock* branchBlock = instr->AsLabelInstr()->GetBasicBlock(); labelBlock->RemovePred(branchBlock, func->m_fg); func->m_fg->AddEdge(branchBlock, tailBranch->GetTarget()->GetBasicBlock()); dupCount++; } } NEXT_SLISTCOUNTED_ENTRY_EDITING; // If we've duplicated everywhere, tail block is dead and should be removed. if (dupCount == origPredCount) { AssertMsg(mergeLabel->labelRefs.Empty(), "Should not remove block with referenced label."); func->m_fg->RemoveBlock(labelBlock, nullptr, true); } return true; } void GlobOpt::ToVar(BVSparse<JitArenaAllocator> *bv, BasicBlock *block) { FOREACH_BITSET_IN_SPARSEBV(id, bv) { StackSym *stackSym = this->func->m_symTable->FindStackSym(id); IR::RegOpnd *newOpnd = IR::RegOpnd::New(stackSym, TyVar, this->func); IR::Instr *lastInstr = block->GetLastInstr(); if (lastInstr->IsBranchInstr() || lastInstr->m_opcode == Js::OpCode::BailTarget) { // If branch is using this symbol, hoist the operand as the ToVar load will get // inserted right before the branch. IR::Opnd *src1 = lastInstr->GetSrc1(); if (src1) { if (src1->IsRegOpnd() && src1->AsRegOpnd()->m_sym == stackSym) { lastInstr->HoistSrc1(Js::OpCode::Ld_A); } IR::Opnd *src2 = lastInstr->GetSrc2(); if (src2) { if (src2->IsRegOpnd() && src2->AsRegOpnd()->m_sym == stackSym) { lastInstr->HoistSrc2(Js::OpCode::Ld_A); } } } this->ToVar(lastInstr, newOpnd, block, nullptr, false); } else { IR::Instr *lastNextInstr = lastInstr->m_next; this->ToVar(lastNextInstr, newOpnd, block, nullptr, false); } } NEXT_BITSET_IN_SPARSEBV; } void GlobOpt::ToInt32(BVSparse<JitArenaAllocator> *bv, BasicBlock *block, bool lossy, IR::Instr *insertBeforeInstr) { return this->ToTypeSpec(bv, block, TyInt32, IR::BailOutIntOnly, lossy, insertBeforeInstr); } void GlobOpt::ToFloat64(BVSparse<JitArenaAllocator> *bv, BasicBlock *block) { return this->ToTypeSpec(bv, block, TyFloat64, IR::BailOutNumberOnly); } void GlobOpt::ToTypeSpec(BVSparse<JitArenaAllocator> *bv, BasicBlock *block, IRType toType, IR::BailOutKind bailOutKind, bool lossy, IR::Instr *insertBeforeInstr) { FOREACH_BITSET_IN_SPARSEBV(id, bv) { StackSym *stackSym = this->func->m_symTable->FindStackSym(id); IRType fromType = TyIllegal; // Win8 bug: 757126. If we are trying to type specialize the arguments object, // let's make sure stack args optimization is not enabled. This is a problem, particularly, // if the instruction comes from an unreachable block. In other cases, the pass on the // instruction itself should disable arguments object optimization. 
if(block->globOptData.argObjSyms && block->globOptData.IsArgumentsSymID(id)) { CannotAllocateArgumentsObjectOnStack(); } if (block->globOptData.liveVarSyms->Test(id)) { fromType = TyVar; } else if (block->globOptData.liveInt32Syms->Test(id) && !block->globOptData.liveLossyInt32Syms->Test(id)) { fromType = TyInt32; stackSym = stackSym->GetInt32EquivSym(this->func); } else if (block->globOptData.liveFloat64Syms->Test(id)) { fromType = TyFloat64; stackSym = stackSym->GetFloat64EquivSym(this->func); } else { #ifdef ENABLE_SIMDJS Assert(block->globOptData.IsLiveAsSimd128(stackSym)); if (block->globOptData.IsLiveAsSimd128F4(stackSym)) { fromType = TySimd128F4; stackSym = stackSym->GetSimd128F4EquivSym(this->func); } else { fromType = TySimd128I4; stackSym = stackSym->GetSimd128I4EquivSym(this->func); } #else Assert(UNREACHED); #endif } IR::RegOpnd *newOpnd = IR::RegOpnd::New(stackSym, fromType, this->func); IR::Instr *lastInstr = block->GetLastInstr(); if (!insertBeforeInstr && lastInstr->IsBranchInstr()) { // If branch is using this symbol, hoist the operand as the ToInt32 load will get // inserted right before the branch. IR::Instr *instrPrev = lastInstr->m_prev; IR::Opnd *src1 = lastInstr->GetSrc1(); if (src1) { if (src1->IsRegOpnd() && src1->AsRegOpnd()->m_sym == stackSym) { lastInstr->HoistSrc1(Js::OpCode::Ld_A); } IR::Opnd *src2 = lastInstr->GetSrc2(); if (src2) { if (src2->IsRegOpnd() && src2->AsRegOpnd()->m_sym == stackSym) { lastInstr->HoistSrc2(Js::OpCode::Ld_A); } } // Did we insert anything? if (lastInstr->m_prev != instrPrev) { // If we had ByteCodeUses right before the branch, move them back down. IR::Instr *insertPoint = lastInstr; for (IR::Instr *instrBytecode = instrPrev; instrBytecode->m_opcode == Js::OpCode::ByteCodeUses; instrBytecode = instrBytecode->m_prev) { instrBytecode->Unlink(); insertPoint->InsertBefore(instrBytecode); insertPoint = instrBytecode; } } } } this->ToTypeSpecUse(nullptr, newOpnd, block, nullptr, nullptr, toType, bailOutKind, lossy, insertBeforeInstr); } NEXT_BITSET_IN_SPARSEBV; } PRECandidatesList * GlobOpt::FindPossiblePRECandidates(Loop *loop, JitArenaAllocator *alloc) { // Find the set of PRE candidates BasicBlock *loopHeader = loop->GetHeadBlock(); PRECandidatesList *candidates = nullptr; bool firstBackEdge = true; FOREACH_PREDECESSOR_BLOCK(blockPred, loopHeader) { if (!loop->IsDescendentOrSelf(blockPred->loop)) { // Not a loop back-edge continue; } if (firstBackEdge) { candidates = this->FindBackEdgePRECandidates(blockPred, alloc); } else { blockPred->globOptData.RemoveUnavailableCandidates(candidates); } } NEXT_PREDECESSOR_BLOCK; return candidates; } BOOL GlobOpt::PreloadPRECandidate(Loop *loop, GlobHashBucket* candidate) { // Insert a load for each field PRE candidate. PropertySym *propertySym = candidate->value->AsPropertySym(); StackSym *objPtrSym = propertySym->m_stackSym; // If objPtr isn't live, we'll retry later. // Another PRE candidate may insert a load for it. 
if (!loop->landingPad->globOptData.IsLive(objPtrSym)) { return false; } BasicBlock *landingPad = loop->landingPad; Value *value = candidate->element; Sym *symStore = value->GetValueInfo()->GetSymStore(); // The symStore can't be live into the loop // The symStore needs to still have the same value Assert(symStore && symStore->IsStackSym()); if (loop->landingPad->globOptData.IsLive(symStore)) { // May have already been hoisted: // o.x = t1; // o.y = t1; return false; } Value *landingPadValue = landingPad->globOptData.FindValue(propertySym); // Value should be added as initial value or already be there. Assert(landingPadValue); IR::Instr * ldInstr = this->prePassInstrMap->Lookup(propertySym->m_id, nullptr); Assert(ldInstr); // Create instr to put in landing pad for compensation Assert(IsPREInstrCandidateLoad(ldInstr->m_opcode)); IR::SymOpnd *ldSrc = ldInstr->GetSrc1()->AsSymOpnd(); if (ldSrc->m_sym != propertySym) { // It's possible that the propertySym but have equivalent objPtrs. Verify their values. Value *val1 = CurrentBlockData()->FindValue(ldSrc->m_sym->AsPropertySym()->m_stackSym); Value *val2 = CurrentBlockData()->FindValue(propertySym->m_stackSym); if (!val1 || !val2 || val1->GetValueNumber() != val2->GetValueNumber()) { return false; } } ldInstr = ldInstr->Copy(); // Consider: Shouldn't be necessary once we have copy-prop in prepass... ldInstr->GetSrc1()->AsSymOpnd()->m_sym = propertySym; ldSrc = ldInstr->GetSrc1()->AsSymOpnd(); if (ldSrc->IsPropertySymOpnd()) { IR::PropertySymOpnd *propSymOpnd = ldSrc->AsPropertySymOpnd(); IR::PropertySymOpnd *newPropSymOpnd; newPropSymOpnd = propSymOpnd->AsPropertySymOpnd()->CopyWithoutFlowSensitiveInfo(this->func); ldInstr->ReplaceSrc1(newPropSymOpnd); } if (ldInstr->GetDst()->AsRegOpnd()->m_sym != symStore) { ldInstr->ReplaceDst(IR::RegOpnd::New(symStore->AsStackSym(), TyVar, this->func)); } ldInstr->GetSrc1()->SetIsJITOptimizedReg(true); ldInstr->GetDst()->SetIsJITOptimizedReg(true); landingPad->globOptData.liveVarSyms->Set(symStore->m_id); loop->fieldPRESymStore->Set(symStore->m_id); ValueType valueType(ValueType::Uninitialized); Value *initialValue = nullptr; if (loop->initialValueFieldMap.TryGetValue(propertySym, &initialValue)) { if (ldInstr->IsProfiledInstr()) { if (initialValue->GetValueNumber() == value->GetValueNumber()) { if (value->GetValueInfo()->IsUninitialized()) { valueType = ldInstr->AsProfiledInstr()->u.FldInfo().valueType; } else { valueType = value->GetValueInfo()->Type(); } } else { valueType = ValueType::Uninitialized; } ldInstr->AsProfiledInstr()->u.FldInfo().valueType = valueType; } } else { valueType = landingPadValue->GetValueInfo()->Type(); } loop->symsUsedBeforeDefined->Set(symStore->m_id); if (valueType.IsLikelyNumber()) { loop->likelyNumberSymsUsedBeforeDefined->Set(symStore->m_id); if (DoAggressiveIntTypeSpec() ? 
valueType.IsLikelyInt() : valueType.IsInt()) { // Can only force int conversions in the landing pad based on likely-int values if aggressive int type // specialization is enabled loop->likelyIntSymsUsedBeforeDefined->Set(symStore->m_id); } } // Insert in landing pad if (ldInstr->HasAnyImplicitCalls()) { IR::Instr * bailInstr = EnsureDisableImplicitCallRegion(loop); bailInstr->InsertBefore(ldInstr); } else if (loop->endDisableImplicitCall) { loop->endDisableImplicitCall->InsertBefore(ldInstr); } else { loop->landingPad->InsertAfter(ldInstr); } ldInstr->ClearByteCodeOffset(); ldInstr->SetByteCodeOffset(landingPad->GetFirstInstr()); #if DBG_DUMP if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldPREPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId())) { Output::Print(_u("** TRACE: Field PRE: field pre-loaded in landing pad of loop head #%-3d: "), loop->GetHeadBlock()->GetBlockNum()); ldInstr->Dump(); Output::Print(_u("\n")); } #endif return true; } void GlobOpt::PreloadPRECandidates(Loop *loop, PRECandidatesList *candidates) { // Insert loads in landing pad for field PRE candidates. Iterate while(changed) // for the o.x.y cases. BOOL changed = true; if (!candidates) { return; } Assert(loop->landingPad->GetFirstInstr() == loop->landingPad->GetLastInstr()); while (changed) { changed = false; FOREACH_SLIST_ENTRY_EDITING(GlobHashBucket*, candidate, (SList<GlobHashBucket*>*)candidates, iter) { if (this->PreloadPRECandidate(loop, candidate)) { changed = true; iter.RemoveCurrent(); } } NEXT_SLIST_ENTRY_EDITING; } } void GlobOpt::FieldPRE(Loop *loop) { if (!DoFieldPRE(loop)) { return; } PRECandidatesList *candidates; JitArenaAllocator *alloc = this->tempAlloc; candidates = this->FindPossiblePRECandidates(loop, alloc); this->PreloadPRECandidates(loop, candidates); } void GlobOpt::InsertValueCompensation( BasicBlock *const predecessor, const SymToValueInfoMap &symsRequiringCompensationToMergedValueInfoMap) { Assert(predecessor); Assert(symsRequiringCompensationToMergedValueInfoMap.Count() != 0); IR::Instr *insertBeforeInstr = predecessor->GetLastInstr(); Func *const func = insertBeforeInstr->m_func; bool setLastInstrInPredecessor; if(insertBeforeInstr->IsBranchInstr() || insertBeforeInstr->m_opcode == Js::OpCode::BailTarget) { // Don't insert code between the branch and the corresponding ByteCodeUses instructions while(insertBeforeInstr->m_prev->m_opcode == Js::OpCode::ByteCodeUses) { insertBeforeInstr = insertBeforeInstr->m_prev; } setLastInstrInPredecessor = false; } else { // Insert at the end of the block and set the last instruction Assert(insertBeforeInstr->m_next); insertBeforeInstr = insertBeforeInstr->m_next; // Instruction after the last instruction in the predecessor setLastInstrInPredecessor = true; } GlobOptBlockData &predecessorBlockData = predecessor->globOptData; GlobOptBlockData &successorBlockData = *CurrentBlockData(); struct DelayChangeValueInfo { Value* predecessorValue; ArrayValueInfo* valueInfo; void ChangeValueInfo(BasicBlock* predecessor, GlobOpt* g) { g->ChangeValueInfo( predecessor, predecessorValue, valueInfo, false /*allowIncompatibleType*/, true /*compensated*/); } }; JsUtil::List<DelayChangeValueInfo, ArenaAllocator> delayChangeValueInfo(alloc); for(auto it = symsRequiringCompensationToMergedValueInfoMap.GetIterator(); it.IsValid(); it.MoveNext()) { const auto &entry = it.Current(); Sym *const sym = entry.Key(); Value *const predecessorValue = predecessorBlockData.FindValue(sym); Assert(predecessorValue); ValueInfo *const 
predecessorValueInfo = predecessorValue->GetValueInfo(); // Currently, array value infos are the only ones that require compensation based on values Assert(predecessorValueInfo->IsAnyOptimizedArray()); const ArrayValueInfo *const predecessorArrayValueInfo = predecessorValueInfo->AsArrayValueInfo(); StackSym *const predecessorHeadSegmentSym = predecessorArrayValueInfo->HeadSegmentSym(); StackSym *const predecessorHeadSegmentLengthSym = predecessorArrayValueInfo->HeadSegmentLengthSym(); StackSym *const predecessorLengthSym = predecessorArrayValueInfo->LengthSym(); ValueInfo *const mergedValueInfo = entry.Value(); const ArrayValueInfo *const mergedArrayValueInfo = mergedValueInfo->AsArrayValueInfo(); StackSym *const mergedHeadSegmentSym = mergedArrayValueInfo->HeadSegmentSym(); StackSym *const mergedHeadSegmentLengthSym = mergedArrayValueInfo->HeadSegmentLengthSym(); StackSym *const mergedLengthSym = mergedArrayValueInfo->LengthSym(); Assert(!mergedHeadSegmentSym || predecessorHeadSegmentSym); Assert(!mergedHeadSegmentLengthSym || predecessorHeadSegmentLengthSym); Assert(!mergedLengthSym || predecessorLengthSym); bool compensated = false; if(mergedHeadSegmentSym && predecessorHeadSegmentSym != mergedHeadSegmentSym) { IR::Instr *const newInstr = IR::Instr::New( Js::OpCode::Ld_A, IR::RegOpnd::New(mergedHeadSegmentSym, mergedHeadSegmentSym->GetType(), func), IR::RegOpnd::New(predecessorHeadSegmentSym, predecessorHeadSegmentSym->GetType(), func), func); newInstr->GetDst()->SetIsJITOptimizedReg(true); newInstr->GetSrc1()->SetIsJITOptimizedReg(true); newInstr->SetByteCodeOffset(insertBeforeInstr); insertBeforeInstr->InsertBefore(newInstr); compensated = true; } if(mergedHeadSegmentLengthSym && predecessorHeadSegmentLengthSym != mergedHeadSegmentLengthSym) { IR::Instr *const newInstr = IR::Instr::New( Js::OpCode::Ld_I4, IR::RegOpnd::New(mergedHeadSegmentLengthSym, mergedHeadSegmentLengthSym->GetType(), func), IR::RegOpnd::New(predecessorHeadSegmentLengthSym, predecessorHeadSegmentLengthSym->GetType(), func), func); newInstr->GetDst()->SetIsJITOptimizedReg(true); newInstr->GetSrc1()->SetIsJITOptimizedReg(true); newInstr->SetByteCodeOffset(insertBeforeInstr); insertBeforeInstr->InsertBefore(newInstr); compensated = true; // Merge the head segment length value Assert(predecessorBlockData.liveVarSyms->Test(predecessorHeadSegmentLengthSym->m_id)); predecessorBlockData.liveVarSyms->Set(mergedHeadSegmentLengthSym->m_id); successorBlockData.liveVarSyms->Set(mergedHeadSegmentLengthSym->m_id); Value *const predecessorHeadSegmentLengthValue = predecessorBlockData.FindValue(predecessorHeadSegmentLengthSym); Assert(predecessorHeadSegmentLengthValue); predecessorBlockData.SetValue(predecessorHeadSegmentLengthValue, mergedHeadSegmentLengthSym); Value *const mergedHeadSegmentLengthValue = successorBlockData.FindValue(mergedHeadSegmentLengthSym); if(mergedHeadSegmentLengthValue) { Assert(mergedHeadSegmentLengthValue->GetValueNumber() != predecessorHeadSegmentLengthValue->GetValueNumber()); if(predecessorHeadSegmentLengthValue->GetValueInfo() != mergedHeadSegmentLengthValue->GetValueInfo()) { mergedHeadSegmentLengthValue->SetValueInfo( ValueInfo::MergeLikelyIntValueInfo( this->alloc, mergedHeadSegmentLengthValue, predecessorHeadSegmentLengthValue, mergedHeadSegmentLengthValue->GetValueInfo()->Type() .Merge(predecessorHeadSegmentLengthValue->GetValueInfo()->Type()))); } } else { successorBlockData.SetValue(CopyValue(predecessorHeadSegmentLengthValue), mergedHeadSegmentLengthSym); } } if(mergedLengthSym && 
predecessorLengthSym != mergedLengthSym) { IR::Instr *const newInstr = IR::Instr::New( Js::OpCode::Ld_I4, IR::RegOpnd::New(mergedLengthSym, mergedLengthSym->GetType(), func), IR::RegOpnd::New(predecessorLengthSym, predecessorLengthSym->GetType(), func), func); newInstr->GetDst()->SetIsJITOptimizedReg(true); newInstr->GetSrc1()->SetIsJITOptimizedReg(true); newInstr->SetByteCodeOffset(insertBeforeInstr); insertBeforeInstr->InsertBefore(newInstr); compensated = true; // Merge the length value Assert(predecessorBlockData.liveVarSyms->Test(predecessorLengthSym->m_id)); predecessorBlockData.liveVarSyms->Set(mergedLengthSym->m_id); successorBlockData.liveVarSyms->Set(mergedLengthSym->m_id); Value *const predecessorLengthValue = predecessorBlockData.FindValue(predecessorLengthSym); Assert(predecessorLengthValue); predecessorBlockData.SetValue(predecessorLengthValue, mergedLengthSym); Value *const mergedLengthValue = successorBlockData.FindValue(mergedLengthSym); if(mergedLengthValue) { Assert(mergedLengthValue->GetValueNumber() != predecessorLengthValue->GetValueNumber()); if(predecessorLengthValue->GetValueInfo() != mergedLengthValue->GetValueInfo()) { mergedLengthValue->SetValueInfo( ValueInfo::MergeLikelyIntValueInfo( this->alloc, mergedLengthValue, predecessorLengthValue, mergedLengthValue->GetValueInfo()->Type().Merge(predecessorLengthValue->GetValueInfo()->Type()))); } } else { successorBlockData.SetValue(CopyValue(predecessorLengthValue), mergedLengthSym); } } if(compensated) { // Save the new ValueInfo for later. // We don't want other symbols needing compensation to see this new one delayChangeValueInfo.Add({ predecessorValue, ArrayValueInfo::New( alloc, predecessorValueInfo->Type(), mergedHeadSegmentSym ? mergedHeadSegmentSym : predecessorHeadSegmentSym, mergedHeadSegmentLengthSym ? mergedHeadSegmentLengthSym : predecessorHeadSegmentLengthSym, mergedLengthSym ? mergedLengthSym : predecessorLengthSym, predecessorValueInfo->GetSymStore()) }); } } // Once we've compensated all the symbols, update the new ValueInfo. delayChangeValueInfo.Map([predecessor, this](int, DelayChangeValueInfo d) { d.ChangeValueInfo(predecessor, this); }); if(setLastInstrInPredecessor) { predecessor->SetLastInstr(insertBeforeInstr->m_prev); } } bool GlobOpt::AreFromSameBytecodeFunc(IR::RegOpnd const* src1, IR::RegOpnd const* dst) const { Assert(this->func->m_symTable->FindStackSym(src1->m_sym->m_id) == src1->m_sym); Assert(this->func->m_symTable->FindStackSym(dst->m_sym->m_id) == dst->m_sym); if (dst->m_sym->HasByteCodeRegSlot() && src1->m_sym->HasByteCodeRegSlot()) { return src1->m_sym->GetByteCodeFunc() == dst->m_sym->GetByteCodeFunc(); } return false; } /* * This is for scope object removal along with Heap Arguments optimization. * We track several instructions to facilitate the removal of scope object. * - LdSlotArr - This instr is tracked to keep track of the formals array (the dest) * - InlineeStart - To keep track of the stack syms for the formals of the inlinee. 
*/ void GlobOpt::TrackInstrsForScopeObjectRemoval(IR::Instr * instr) { IR::Opnd* dst = instr->GetDst(); IR::Opnd* src1 = instr->GetSrc1(); if (instr->m_opcode == Js::OpCode::Ld_A && src1->IsRegOpnd()) { AssertMsg(!instr->m_func->IsStackArgsEnabled() || !src1->IsScopeObjOpnd(instr->m_func), "There can be no aliasing for scope object."); } // The following is to track the formals array for the Stack Arguments optimization with formals if (instr->m_func->IsStackArgsEnabled() && !this->IsLoopPrePass()) { if (instr->m_opcode == Js::OpCode::LdSlotArr) { if (instr->GetSrc1()->IsScopeObjOpnd(instr->m_func)) { AssertMsg(!instr->m_func->GetJITFunctionBody()->HasImplicitArgIns(), "No mapping is required in this case. So it should already be generating ArgIns."); instr->m_func->TrackFormalsArraySym(dst->GetStackSym()->m_id); } } else if (instr->m_opcode == Js::OpCode::InlineeStart) { Assert(instr->m_func->IsInlined()); Js::ArgSlot actualsCount = instr->m_func->actualCount - 1; Js::ArgSlot formalsCount = instr->m_func->GetJITFunctionBody()->GetInParamsCount() - 1; Func * func = instr->m_func; Func * inlinerFunc = func->GetParentFunc(); // Inliner's func IR::Instr * argOutInstr = instr->GetSrc2()->GetStackSym()->GetInstrDef(); // The argout immediately before the InlineeStart will be the ArgOut for NewScObject, // so we don't want to track the stack sym for that argout; skip it here. if (instr->m_func->IsInlinedConstructor()) { // PRE might introduce a second definition for the Src1, so assert on the opcode only when it has a single definition. Assert(argOutInstr->GetSrc1()->GetStackSym()->GetInstrDef() == nullptr || argOutInstr->GetSrc1()->GetStackSym()->GetInstrDef()->m_opcode == Js::OpCode::NewScObjectNoCtor); argOutInstr = argOutInstr->GetSrc2()->GetStackSym()->GetInstrDef(); } if (formalsCount < actualsCount) { Js::ArgSlot extraActuals = actualsCount - formalsCount; // Skip the extra actuals that were passed for (Js::ArgSlot i = 0; i < extraActuals; i++) { argOutInstr = argOutInstr->GetSrc2()->GetStackSym()->GetInstrDef(); } } StackSym * undefinedSym = nullptr; for (Js::ArgSlot param = formalsCount; param > 0; param--) { StackSym * argOutSym = nullptr; if (argOutInstr->GetSrc1()) { if (argOutInstr->GetSrc1()->IsRegOpnd()) { argOutSym = argOutInstr->GetSrc1()->GetStackSym(); } else { // We will always have an ArgOut instr, so the source operand will not be removed. argOutSym = StackSym::New(inlinerFunc); IR::Opnd * srcOpnd = argOutInstr->GetSrc1(); IR::Opnd * dstOpnd = IR::RegOpnd::New(argOutSym, TyVar, inlinerFunc); IR::Instr * assignInstr = IR::Instr::New(Js::OpCode::Ld_A, dstOpnd, srcOpnd, inlinerFunc); instr->InsertBefore(assignInstr); } } Assert(!func->HasStackSymForFormal(param - 1)); if (param <= actualsCount) { Assert(argOutSym); func->TrackStackSymForFormalIndex(param - 1, argOutSym); argOutInstr = argOutInstr->GetSrc2()->GetStackSym()->GetInstrDef(); } else { /* When param is out of range of the actuals count, load undefined */ // TODO (saravind): This inserts an undefined load for each formal that has no matching actual. Clean this up by keeping a single undefined sym on the func?
Assert(formalsCount > actualsCount); if (undefinedSym == nullptr) { undefinedSym = StackSym::New(inlinerFunc); IR::Opnd * srcOpnd = IR::AddrOpnd::New(inlinerFunc->GetScriptContextInfo()->GetUndefinedAddr(), IR::AddrOpndKindDynamicMisc, inlinerFunc); IR::Opnd * dstOpnd = IR::RegOpnd::New(undefinedSym, TyVar, inlinerFunc); IR::Instr * assignUndefined = IR::Instr::New(Js::OpCode::Ld_A, dstOpnd, srcOpnd, inlinerFunc); instr->InsertBefore(assignUndefined); } func->TrackStackSymForFormalIndex(param - 1, undefinedSym); } } } } } void GlobOpt::OptArguments(IR::Instr *instr) { IR::Opnd* dst = instr->GetDst(); IR::Opnd* src1 = instr->GetSrc1(); IR::Opnd* src2 = instr->GetSrc2(); TrackInstrsForScopeObjectRemoval(instr); if (!TrackArgumentsObject()) { return; } if (instr->HasAnyLoadHeapArgsOpCode()) { #ifdef ENABLE_DEBUG_CONFIG_OPTIONS if (instr->m_func->IsStackArgsEnabled()) { if (instr->GetSrc1()->IsRegOpnd() && instr->m_func->GetJITFunctionBody()->GetInParamsCount() > 1) { StackSym * scopeObjSym = instr->GetSrc1()->GetStackSym(); Assert(scopeObjSym); Assert(scopeObjSym->GetInstrDef()->m_opcode == Js::OpCode::InitCachedScope || scopeObjSym->GetInstrDef()->m_opcode == Js::OpCode::NewScopeObject); Assert(instr->m_func->GetScopeObjSym() == scopeObjSym); if (PHASE_VERBOSE_TRACE1(Js::StackArgFormalsOptPhase)) { Output::Print(_u("StackArgFormals : %s (%d) :Setting scopeObjSym in forward pass. \n"), instr->m_func->GetJITFunctionBody()->GetDisplayName(), instr->m_func->GetJITFunctionBody()->GetFunctionNumber()); Output::Flush(); } } } #endif if (instr->m_func->GetJITFunctionBody()->GetInParamsCount() != 1 && !instr->m_func->IsStackArgsEnabled()) { CannotAllocateArgumentsObjectOnStack(); } else { CurrentBlockData()->TrackArgumentsSym(dst->AsRegOpnd()); } return; } // Keep track of arguments objects and its aliases // LdHeapArguments loads the arguments object and Ld_A tracks the aliases. if ((instr->m_opcode == Js::OpCode::Ld_A || instr->m_opcode == Js::OpCode::BytecodeArgOutCapture) && (src1->IsRegOpnd() && CurrentBlockData()->IsArgumentsOpnd(src1))) { // In the debug mode, we don't want to optimize away the aliases. Since we may have to show them on the inspection. if (((!AreFromSameBytecodeFunc(src1->AsRegOpnd(), dst->AsRegOpnd()) || this->currentBlock->loop) && instr->m_opcode != Js::OpCode::BytecodeArgOutCapture) || this->func->IsJitInDebugMode()) { CannotAllocateArgumentsObjectOnStack(); return; } if(!dst->AsRegOpnd()->GetStackSym()->m_nonEscapingArgObjAlias) { CurrentBlockData()->TrackArgumentsSym(dst->AsRegOpnd()); } return; } if (!CurrentBlockData()->TestAnyArgumentsSym()) { // There are no syms to track yet, don't start tracking arguments sym. 
return; } // Avoid loop prepass if (this->currentBlock->loop && this->IsLoopPrePass()) { return; } SymID id = 0; switch(instr->m_opcode) { case Js::OpCode::LdElemI_A: case Js::OpCode::TypeofElem: { Assert(src1->IsIndirOpnd()); IR::RegOpnd *indexOpnd = src1->AsIndirOpnd()->GetIndexOpnd(); if (indexOpnd && CurrentBlockData()->IsArgumentsSymID(indexOpnd->m_sym->m_id)) { // Pathological test cases such as a[arguments] CannotAllocateArgumentsObjectOnStack(); return; } IR::RegOpnd *baseOpnd = src1->AsIndirOpnd()->GetBaseOpnd(); id = baseOpnd->m_sym->m_id; if (CurrentBlockData()->IsArgumentsSymID(id)) { instr->usesStackArgumentsObject = true; } break; } case Js::OpCode::LdLen_A: { Assert(src1->IsRegOpnd()); if(CurrentBlockData()->IsArgumentsOpnd(src1)) { instr->usesStackArgumentsObject = true; } break; } case Js::OpCode::ArgOut_A_InlineBuiltIn: { if (CurrentBlockData()->IsArgumentsOpnd(src1)) { instr->usesStackArgumentsObject = true; } if (CurrentBlockData()->IsArgumentsOpnd(src1) && src1->AsRegOpnd()->m_sym->GetInstrDef()->m_opcode == Js::OpCode::BytecodeArgOutCapture) { // Apply inlining results in such usage - this is to ignore this sym that is def'd by ByteCodeArgOutCapture // It's needed because we do not have block level merging of arguments object and this def due to inlining can turn off stack args opt. IR::Instr* builtinStart = instr->GetNextRealInstr(); if (builtinStart->m_opcode == Js::OpCode::InlineBuiltInStart) { IR::Opnd* builtinOpnd = builtinStart->GetSrc1(); if (builtinStart->GetSrc1()->IsAddrOpnd()) { Assert(builtinOpnd->AsAddrOpnd()->m_isFunction); Js::BuiltinFunction builtinFunction = Js::JavascriptLibrary::GetBuiltInForFuncInfo(((FixedFieldInfo*)builtinOpnd->AsAddrOpnd()->m_metadata)->GetFuncInfoAddr(), func->GetThreadContextInfo()); if (builtinFunction == Js::BuiltinFunction::JavascriptFunction_Apply) { CurrentBlockData()->ClearArgumentsSym(src1->AsRegOpnd()); } } else if (builtinOpnd->IsRegOpnd()) { if (builtinOpnd->AsRegOpnd()->m_sym->m_builtInIndex == Js::BuiltinFunction::JavascriptFunction_Apply) { CurrentBlockData()->ClearArgumentsSym(src1->AsRegOpnd()); } } } } break; } case Js::OpCode::BailOnNotStackArgs: case Js::OpCode::ArgOut_A_FromStackArgs: case Js::OpCode::BytecodeArgOutUse: { if (src1 && CurrentBlockData()->IsArgumentsOpnd(src1)) { instr->usesStackArgumentsObject = true; } break; } default: { // Super conservative here, if we see the arguments or any of its alias being used in any // other opcode just don't do this optimization. Revisit this to optimize further if we see any common // case is missed. if (src1) { if (src1->IsRegOpnd() || src1->IsSymOpnd() || src1->IsIndirOpnd()) { if (CurrentBlockData()->IsArgumentsOpnd(src1)) { #ifdef PERF_HINT if (PHASE_TRACE1(Js::PerfHintPhase)) { WritePerfHint(PerfHints::HeapArgumentsCreated, instr->m_func, instr->GetByteCodeOffset()); } #endif CannotAllocateArgumentsObjectOnStack(); return; } } } if (src2) { if (src2->IsRegOpnd() || src2->IsSymOpnd() || src2->IsIndirOpnd()) { if (CurrentBlockData()->IsArgumentsOpnd(src2)) { #ifdef PERF_HINT if (PHASE_TRACE1(Js::PerfHintPhase)) { WritePerfHint(PerfHints::HeapArgumentsCreated, instr->m_func, instr->GetByteCodeOffset()); } #endif CannotAllocateArgumentsObjectOnStack(); return; } } } // We should look at dst last to correctly handle cases where it's the same as one of the src operands. 
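// (Clarifying note: if dst happens to be the same reg as one of the srcs, clearing its arguments-sym tracking first would hide the src use from the checks above, so the dst handling below intentionally runs last.)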
if (dst) { if (dst->IsIndirOpnd() || dst->IsSymOpnd()) { if (CurrentBlockData()->IsArgumentsOpnd(dst)) { #ifdef PERF_HINT if (PHASE_TRACE1(Js::PerfHintPhase)) { WritePerfHint(PerfHints::HeapArgumentsModification, instr->m_func, instr->GetByteCodeOffset()); } #endif CannotAllocateArgumentsObjectOnStack(); return; } } else if (dst->IsRegOpnd()) { if (this->currentBlock->loop && CurrentBlockData()->IsArgumentsOpnd(dst)) { #ifdef PERF_HINT if (PHASE_TRACE1(Js::PerfHintPhase)) { WritePerfHint(PerfHints::HeapArgumentsModification, instr->m_func, instr->GetByteCodeOffset()); } #endif CannotAllocateArgumentsObjectOnStack(); return; } CurrentBlockData()->ClearArgumentsSym(dst->AsRegOpnd()); } } } break; } return; } void GlobOpt::MarkArgumentsUsedForBranch(IR::Instr * instr) { // If it's a conditional branch instruction and the operand used for branching is one of the arguments // to the function, tag the m_argUsedForBranch of the functionBody so that it can be used later for inlining decisions. if (instr->IsBranchInstr() && !instr->AsBranchInstr()->IsUnconditional()) { IR::BranchInstr * bInstr = instr->AsBranchInstr(); IR::Opnd *src1 = bInstr->GetSrc1(); IR::Opnd *src2 = bInstr->GetSrc2(); // These are used because we don't want to rely on src1 or src2 to always be the register/constant IR::RegOpnd *regOpnd = nullptr; if (!src2 && (instr->m_opcode == Js::OpCode::BrFalse_A || instr->m_opcode == Js::OpCode::BrTrue_A) && src1->IsRegOpnd()) { regOpnd = src1->AsRegOpnd(); } // We need to check for (0===arg) and (arg===0); this is especially important since some minifiers // change all instances of one to the other. else if (src2 && src2->IsConstOpnd() && src1->IsRegOpnd()) { regOpnd = src1->AsRegOpnd(); } else if (src2 && src2->IsRegOpnd() && src1->IsConstOpnd()) { regOpnd = src2->AsRegOpnd(); } if (regOpnd != nullptr) { if (regOpnd->m_sym->IsSingleDef()) { IR::Instr * defInst = regOpnd->m_sym->GetInstrDef(); IR::Opnd *defSym = defInst->GetSrc1(); if (defSym && defSym->IsSymOpnd() && defSym->AsSymOpnd()->m_sym->IsStackSym() && defSym->AsSymOpnd()->m_sym->AsStackSym()->IsParamSlotSym()) { uint16 param = defSym->AsSymOpnd()->m_sym->AsStackSym()->GetParamSlotNum(); // We only support functions with 13 arguments to ensure optimal size of callSiteInfo if (param < Js::Constants::MaximumArgumentCountForConstantArgumentInlining) { this->func->GetJITOutput()->SetArgUsedForBranch((uint8)param); } } } } } } const InductionVariable* GlobOpt::GetInductionVariable(SymID sym, Loop *loop) { if (loop->inductionVariables) { for (auto it = loop->inductionVariables->GetIterator(); it.IsValid(); it.MoveNext()) { InductionVariable* iv = &it.CurrentValueReference(); if (!iv->IsChangeDeterminate() || !iv->IsChangeUnidirectional()) { continue; } if (iv->Sym()->m_id == sym) { return iv; } } } return nullptr; } bool GlobOpt::IsSymIDInductionVariable(SymID sym, Loop *loop) { return GetInductionVariable(sym, loop) != nullptr; } SymID GlobOpt::GetVarSymID(StackSym *sym) { if (sym && sym->m_type != TyVar) { sym = sym->GetVarEquivSym(nullptr); } if (!sym) { return Js::Constants::InvalidSymID; } return sym->m_id; } bool GlobOpt::IsAllowedForMemOpt(IR::Instr* instr, bool isMemset, IR::RegOpnd *baseOpnd, IR::Opnd *indexOpnd) { Assert(instr); if (!baseOpnd || !indexOpnd) { return false; } Loop* loop = this->currentBlock->loop; const ValueType baseValueType(baseOpnd->GetValueType()); const ValueType indexValueType(indexOpnd->GetValueType()); // Validate the array and index types if ( !indexValueType.IsInt() || !( 
baseValueType.IsTypedIntOrFloatArray() || baseValueType.IsArray() ) ) { #if DBG_DUMP wchar indexValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; indexValueType.ToString(indexValueTypeStr); wchar baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); TRACE_MEMOP_VERBOSE(loop, instr, _u("Index[%s] or Array[%s] value type is invalid"), indexValueTypeStr, baseValueTypeStr); #endif return false; } // The following is conservative and works around a bug in induction variable analysis. if (baseOpnd->IsArrayRegOpnd()) { IR::ArrayRegOpnd *baseArrayOp = baseOpnd->AsArrayRegOpnd(); bool hasBoundChecksRemoved = ( baseArrayOp->EliminatedLowerBoundCheck() && baseArrayOp->EliminatedUpperBoundCheck() && !instr->extractedUpperBoundCheckWithoutHoisting && !instr->loadedArrayHeadSegment && !instr->loadedArrayHeadSegmentLength ); if (!hasBoundChecksRemoved) { TRACE_MEMOP_VERBOSE(loop, instr, _u("Missing bounds check optimization")); return false; } } if (!baseValueType.IsTypedArray()) { // Check if the instr can kill the value type of the array JsArrayKills arrayKills = CheckJsArrayKills(instr); if (arrayKills.KillsValueType(baseValueType)) { TRACE_MEMOP_VERBOSE(loop, instr, _u("The array (s%d) can lose its value type"), GetVarSymID(baseOpnd->GetStackSym())); return false; } } // Process the Index Operand if (!this->OptIsInvariant(baseOpnd, this->currentBlock, loop, CurrentBlockData()->FindValue(baseOpnd->m_sym), false, true)) { TRACE_MEMOP_VERBOSE(loop, instr, _u("Base (s%d) is not invariant"), GetVarSymID(baseOpnd->GetStackSym())); return false; } // Validate the index Assert(indexOpnd->GetStackSym()); SymID indexSymID = GetVarSymID(indexOpnd->GetStackSym()); const InductionVariable* iv = GetInductionVariable(indexSymID, loop); if (!iv) { // If the index is not an induction variable return TRACE_MEMOP_VERBOSE(loop, instr, _u("Index (s%d) is not an induction variable"), indexSymID); return false; } Assert(iv->IsChangeDeterminate() && iv->IsChangeUnidirectional()); const IntConstantBounds & bounds = iv->ChangeBounds(); if (loop->memOpInfo) { // Only accept induction variables that increments by 1 Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 0, 0 }; inductionVariableChangeInfo = loop->memOpInfo->inductionVariableChangeInfoMap->Lookup(indexSymID, inductionVariableChangeInfo); if ( (bounds.LowerBound() != 1 && bounds.LowerBound() != -1) || (bounds.UpperBound() != bounds.LowerBound()) || inductionVariableChangeInfo.unroll > 1 // Must be 0 (not seen yet) or 1 (already seen) ) { TRACE_MEMOP_VERBOSE(loop, instr, _u("The index does not change by 1: %d><%d, unroll=%d"), bounds.LowerBound(), bounds.UpperBound(), inductionVariableChangeInfo.unroll); return false; } // Check if the index is the same in all MemOp optimization in this loop if (!loop->memOpInfo->candidates->Empty()) { Loop::MemOpCandidate* previousCandidate = loop->memOpInfo->candidates->Head(); // All MemOp operations within the same loop must use the same index if (previousCandidate->index != indexSymID) { TRACE_MEMOP_VERBOSE(loop, instr, _u("The index is not the same as other MemOp in the loop")); return false; } } } return true; } bool GlobOpt::CollectMemcopyLdElementI(IR::Instr *instr, Loop *loop) { Assert(instr->GetSrc1()->IsIndirOpnd()); IR::IndirOpnd *src1 = instr->GetSrc1()->AsIndirOpnd(); IR::Opnd *indexOpnd = src1->GetIndexOpnd(); IR::RegOpnd *baseOpnd = src1->GetBaseOpnd()->AsRegOpnd(); SymID baseSymID = GetVarSymID(baseOpnd->GetStackSym()); if (!IsAllowedForMemOpt(instr, false, baseOpnd, 
indexOpnd)) { return false; } SymID inductionSymID = GetVarSymID(indexOpnd->GetStackSym()); Assert(IsSymIDInductionVariable(inductionSymID, loop)); loop->EnsureMemOpVariablesInitialized(); bool isIndexPreIncr = loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID); IR::Opnd * dst = instr->GetDst(); if (!dst->IsRegOpnd() || !dst->AsRegOpnd()->GetStackSym()->IsSingleDef()) { return false; } Loop::MemCopyCandidate* memcopyInfo = JitAnewStruct(this->func->GetTopFunc()->m_fg->alloc, Loop::MemCopyCandidate); memcopyInfo->ldBase = baseSymID; memcopyInfo->ldCount = 1; memcopyInfo->count = 0; memcopyInfo->bIndexAlreadyChanged = isIndexPreIncr; memcopyInfo->base = Js::Constants::InvalidSymID; // need to find the stElem first memcopyInfo->index = inductionSymID; memcopyInfo->transferSym = dst->AsRegOpnd()->GetStackSym(); loop->memOpInfo->candidates->Prepend(memcopyInfo); return true; } bool GlobOpt::CollectMemsetStElementI(IR::Instr *instr, Loop *loop) { Assert(instr->GetDst()->IsIndirOpnd()); IR::IndirOpnd *dst = instr->GetDst()->AsIndirOpnd(); IR::Opnd *indexOp = dst->GetIndexOpnd(); IR::RegOpnd *baseOp = dst->GetBaseOpnd()->AsRegOpnd(); if (!IsAllowedForMemOpt(instr, true, baseOp, indexOp)) { return false; } SymID baseSymID = GetVarSymID(baseOp->GetStackSym()); IR::Opnd *srcDef = instr->GetSrc1(); StackSym *srcSym = nullptr; if (srcDef->IsRegOpnd()) { IR::RegOpnd* opnd = srcDef->AsRegOpnd(); if (this->OptIsInvariant(opnd, this->currentBlock, loop, CurrentBlockData()->FindValue(opnd->m_sym), true, true)) { srcSym = opnd->GetStackSym(); } } BailoutConstantValue constant = {TyIllegal, 0}; if (srcDef->IsFloatConstOpnd()) { constant.InitFloatConstValue(srcDef->AsFloatConstOpnd()->m_value); } else if (srcDef->IsIntConstOpnd()) { constant.InitIntConstValue(srcDef->AsIntConstOpnd()->GetValue(), srcDef->AsIntConstOpnd()->GetType()); } else if (srcDef->IsAddrOpnd()) { constant.InitVarConstValue(srcDef->AsAddrOpnd()->m_address); } else if(!srcSym) { TRACE_MEMOP_PHASE_VERBOSE(MemSet, loop, instr, _u("Source is not an invariant")); return false; } // Process the Index Operand Assert(indexOp->GetStackSym()); SymID inductionSymID = GetVarSymID(indexOp->GetStackSym()); Assert(IsSymIDInductionVariable(inductionSymID, loop)); loop->EnsureMemOpVariablesInitialized(); bool isIndexPreIncr = loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID); Loop::MemSetCandidate* memsetInfo = JitAnewStruct(this->func->GetTopFunc()->m_fg->alloc, Loop::MemSetCandidate); memsetInfo->base = baseSymID; memsetInfo->index = inductionSymID; memsetInfo->constant = constant; memsetInfo->srcSym = srcSym; memsetInfo->count = 1; memsetInfo->bIndexAlreadyChanged = isIndexPreIncr; loop->memOpInfo->candidates->Prepend(memsetInfo); return true; } bool GlobOpt::CollectMemcopyStElementI(IR::Instr *instr, Loop *loop) { if (!loop->memOpInfo || loop->memOpInfo->candidates->Empty()) { // There is no ldElem matching this stElem return false; } Assert(instr->GetDst()->IsIndirOpnd()); IR::IndirOpnd *dst = instr->GetDst()->AsIndirOpnd(); IR::Opnd *indexOp = dst->GetIndexOpnd(); IR::RegOpnd *baseOp = dst->GetBaseOpnd()->AsRegOpnd(); SymID baseSymID = GetVarSymID(baseOp->GetStackSym()); if (!instr->GetSrc1()->IsRegOpnd()) { return false; } IR::RegOpnd* src1 = instr->GetSrc1()->AsRegOpnd(); if (!src1->GetIsDead()) { // This must be the last use of the register. // It will invalidate `var m = a[i]; b[i] = m;` but this is not a very interesting case. 
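// (Illustrative note: the memcopy pattern being matched is the pair "tmp = LdElemI a[i]" / "StElemI b[i] = tmp" produced by a loop such as "for (i = 0; i < n; i++) { b[i] = a[i]; }"; requiring tmp to be dead at the store is what allows the load/store pair to collapse into a single memcopy.)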
TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Source (s%d) is still alive after StElemI"), baseSymID); return false; } if (!IsAllowedForMemOpt(instr, false, baseOp, indexOp)) { return false; } SymID srcSymID = GetVarSymID(src1->GetStackSym()); // Prepare the memcopyCandidate entry Loop::MemOpCandidate* previousCandidate = loop->memOpInfo->candidates->Head(); if (!previousCandidate->IsMemCopy()) { return false; } Loop::MemCopyCandidate* memcopyInfo = previousCandidate->AsMemCopy(); // The previous candidate has to have been created by the matching ldElem if ( memcopyInfo->base != Js::Constants::InvalidSymID || GetVarSymID(memcopyInfo->transferSym) != srcSymID ) { TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("No matching LdElem found (s%d)"), baseSymID); return false; } Assert(indexOp->GetStackSym()); SymID inductionSymID = GetVarSymID(indexOp->GetStackSym()); Assert(IsSymIDInductionVariable(inductionSymID, loop)); bool isIndexPreIncr = loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID); if (isIndexPreIncr != memcopyInfo->bIndexAlreadyChanged) { // The index changed between the load and the store TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Index value changed between ldElem and stElem")); return false; } // Consider: Can we remove the count field? memcopyInfo->count++; memcopyInfo->base = baseSymID; return true; } bool GlobOpt::CollectMemOpLdElementI(IR::Instr *instr, Loop *loop) { Assert(instr->m_opcode == Js::OpCode::LdElemI_A); return (!PHASE_OFF(Js::MemCopyPhase, this->func) && CollectMemcopyLdElementI(instr, loop)); } bool GlobOpt::CollectMemOpStElementI(IR::Instr *instr, Loop *loop) { Assert(instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict); Assert(instr->GetSrc1()); return (!PHASE_OFF(Js::MemSetPhase, this->func) && CollectMemsetStElementI(instr, loop)) || (!PHASE_OFF(Js::MemCopyPhase, this->func) && CollectMemcopyStElementI(instr, loop)); } bool GlobOpt::CollectMemOpInfo(IR::Instr *instrBegin, IR::Instr *instr, Value *src1Val, Value *src2Val) { Assert(this->currentBlock->loop); Loop *loop = this->currentBlock->loop; if (!loop->blockList.HasTwo()) { // We support memcopy and memset for loops which have only two blocks. 
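// (Hedged note: the two-block requirement presumably corresponds to the simplest loop shape, a single body block plus its back-edge block, which is the only shape the memop rewrite currently recognizes.)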
return false; } if (loop->GetLoopFlags().isInterpreted && !loop->GetLoopFlags().memopMinCountReached) { TRACE_MEMOP_VERBOSE(loop, instr, _u("minimum loop count not reached")) loop->doMemOp = false; return false; } Assert(loop->doMemOp); bool isIncr = true, isChangedByOne = false; switch (instr->m_opcode) { case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: if (!CollectMemOpStElementI(instr, loop)) { loop->doMemOp = false; return false; } break; case Js::OpCode::LdElemI_A: if (!CollectMemOpLdElementI(instr, loop)) { loop->doMemOp = false; return false; } break; case Js::OpCode::Decr_A: isIncr = false; case Js::OpCode::Incr_A: isChangedByOne = true; goto MemOpCheckInductionVariable; case Js::OpCode::Sub_I4: case Js::OpCode::Sub_A: isIncr = false; case Js::OpCode::Add_A: case Js::OpCode::Add_I4: { MemOpCheckInductionVariable: StackSym *sym = instr->GetSrc1()->GetStackSym(); if (!sym) { sym = instr->GetSrc2()->GetStackSym(); } SymID inductionSymID = GetVarSymID(sym); if (IsSymIDInductionVariable(inductionSymID, this->currentBlock->loop)) { if (!isChangedByOne) { IR::Opnd *src1, *src2; src1 = instr->GetSrc1(); src2 = instr->GetSrc2(); if (src2->IsRegOpnd()) { Value *val = CurrentBlockData()->FindValue(src2->AsRegOpnd()->m_sym); if (val) { ValueInfo *vi = val->GetValueInfo(); int constValue; if (vi && vi->TryGetIntConstantValue(&constValue)) { if (constValue == 1) { isChangedByOne = true; } } } } else if (src2->IsIntConstOpnd()) { if (src2->AsIntConstOpnd()->GetValue() == 1) { isChangedByOne = true; } } } loop->EnsureMemOpVariablesInitialized(); if (!isChangedByOne) { Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { Js::Constants::InvalidLoopUnrollFactor, 0 }; if (!loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID)) { loop->memOpInfo->inductionVariableChangeInfoMap->Add(inductionSymID, inductionVariableChangeInfo); } else { loop->memOpInfo->inductionVariableChangeInfoMap->Item(inductionSymID, inductionVariableChangeInfo); } } else { if (!loop->memOpInfo->inductionVariableChangeInfoMap->ContainsKey(inductionSymID)) { Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 1, isIncr }; loop->memOpInfo->inductionVariableChangeInfoMap->Add(inductionSymID, inductionVariableChangeInfo); } else { Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 0, 0 }; inductionVariableChangeInfo = loop->memOpInfo->inductionVariableChangeInfoMap->Lookup(inductionSymID, inductionVariableChangeInfo); inductionVariableChangeInfo.unroll++; inductionVariableChangeInfo.isIncremental = isIncr; loop->memOpInfo->inductionVariableChangeInfoMap->Item(inductionSymID, inductionVariableChangeInfo); } } break; } // Fallthrough if not an induction variable } default: FOREACH_INSTR_IN_RANGE(chkInstr, instrBegin->m_next, instr) { if (IsInstrInvalidForMemOp(chkInstr, loop, src1Val, src2Val)) { loop->doMemOp = false; return false; } // Make sure this instruction doesn't use the memcopy transfer sym before it is checked by StElemI if (loop->memOpInfo && !loop->memOpInfo->candidates->Empty()) { Loop::MemOpCandidate* prevCandidate = loop->memOpInfo->candidates->Head(); if (prevCandidate->IsMemCopy()) { Loop::MemCopyCandidate* memcopyCandidate = prevCandidate->AsMemCopy(); if (memcopyCandidate->base == Js::Constants::InvalidSymID) { if (chkInstr->HasSymUse(memcopyCandidate->transferSym)) { loop->doMemOp = false; TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, chkInstr, _u("Found illegal use of LdElemI value(s%d)"), GetVarSymID(memcopyCandidate->transferSym)); return 
false; } } } } } NEXT_INSTR_IN_RANGE; } return true; } bool GlobOpt::IsInstrInvalidForMemOp(IR::Instr *instr, Loop *loop, Value *src1Val, Value *src2Val) { // List of instruction that are valid with memop (ie: instr that gets removed if memop is emitted) if ( this->currentBlock != loop->GetHeadBlock() && !instr->IsLabelInstr() && instr->IsRealInstr() && instr->m_opcode != Js::OpCode::IncrLoopBodyCount && instr->m_opcode != Js::OpCode::StLoopBodyCount && instr->m_opcode != Js::OpCode::Ld_A && instr->m_opcode != Js::OpCode::Ld_I4 && !(instr->IsBranchInstr() && instr->AsBranchInstr()->IsUnconditional()) ) { TRACE_MEMOP_VERBOSE(loop, instr, _u("Instruction not accepted for memop")); return true; } // Check prev instr because it could have been added by an optimization and we won't see it here. if (OpCodeAttr::FastFldInstr(instr->m_opcode) || (instr->m_prev && OpCodeAttr::FastFldInstr(instr->m_prev->m_opcode))) { // Refuse any operations interacting with Fields TRACE_MEMOP_VERBOSE(loop, instr, _u("Field interaction detected")); return true; } if (Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::ElementSlot) { // Refuse any operations interacting with slots TRACE_MEMOP_VERBOSE(loop, instr, _u("Slot interaction detected")); return true; } if (this->MayNeedBailOnImplicitCall(instr, src1Val, src2Val)) { TRACE_MEMOP_VERBOSE(loop, instr, _u("Implicit call bailout detected")); return true; } return false; } void GlobOpt::TryReplaceLdLen(IR::Instr *& instr) { // Change LdFld on arrays, strings, and 'arguments' to LdLen when we're accessing the .length field if ((instr->GetSrc1() && instr->GetSrc1()->IsSymOpnd() && instr->m_opcode == Js::OpCode::ProfiledLdFld) || instr->m_opcode == Js::OpCode::LdFld || instr->m_opcode == Js::OpCode::ScopedLdFld) { IR::SymOpnd * opnd = instr->GetSrc1()->AsSymOpnd(); Sym *sym = opnd->m_sym; if (sym->IsPropertySym()) { PropertySym *originalPropertySym = sym->AsPropertySym(); // only on .length if (this->lengthEquivBv != nullptr && this->lengthEquivBv->Test(originalPropertySym->m_id)) { IR::RegOpnd* newopnd = IR::RegOpnd::New(originalPropertySym->m_stackSym, IRType::TyVar, instr->m_func); ValueInfo *const objectValueInfo = CurrentBlockData()->FindValue(originalPropertySym->m_stackSym)->GetValueInfo(); // Only for things we'd emit a fast path for if ( objectValueInfo->IsLikelyAnyArray() || objectValueInfo->HasHadStringTag() || objectValueInfo->IsLikelyString() || newopnd->IsArgumentsObject() || (CurrentBlockData()->argObjSyms && CurrentBlockData()->IsArgumentsOpnd(newopnd)) ) { // We need to properly transfer over the information from the old operand, which is // a SymOpnd, to the new one, which is a RegOpnd. Unfortunately, the types mean the // normal copy methods won't work here, so we're going to directly copy data. newopnd->SetIsJITOptimizedReg(opnd->GetIsJITOptimizedReg()); newopnd->SetValueType(objectValueInfo->Type()); newopnd->SetIsDead(opnd->GetIsDead()); // Now that we have the operand we need, we can go ahead and make the new instr. 
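// (In effect the rewrite below replaces "dst = LdFld o.length" with "dst = LdLen_A o": TransferTo moves everything from the original instruction onto the new one, and the RegOpnd built above becomes its source.)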
IR::Instr *newinstr = IR::Instr::New(Js::OpCode::LdLen_A, instr->m_func); instr->TransferTo(newinstr); newinstr->UnlinkSrc1(); newinstr->SetSrc1(newopnd); instr->InsertAfter(newinstr); instr->Remove(); instr = newinstr; } } } } } IR::Instr * GlobOpt::OptInstr(IR::Instr *&instr, bool* isInstrRemoved) { Assert(instr->m_func->IsTopFunc() || instr->m_func->isGetterSetter || instr->m_func->callSiteIdInParentFunc != UINT16_MAX); IR::Opnd *src1, *src2; Value *src1Val = nullptr, *src2Val = nullptr, *dstVal = nullptr; Value *src1IndirIndexVal = nullptr, *dstIndirIndexVal = nullptr; IR::Instr *instrPrev = instr->m_prev; IR::Instr *instrNext = instr->m_next; if (instr->IsLabelInstr() && this->func->HasTry() && this->func->DoOptimizeTry()) { this->currentRegion = instr->AsLabelInstr()->GetRegion(); Assert(this->currentRegion); } if(PrepareForIgnoringIntOverflow(instr)) { if(!IsLoopPrePass()) { *isInstrRemoved = true; currentBlock->RemoveInstr(instr); } return instrNext; } if (!instr->IsRealInstr() || instr->IsByteCodeUsesInstr() || instr->m_opcode == Js::OpCode::Conv_Bool) { return instrNext; } if (instr->m_opcode == Js::OpCode::Yield) { // TODO[generators][ianhall]: Can this and the FillBailOutInfo call below be moved to after Src1 and Src2 so that Yield can be optimized right up to the actual yield? CurrentBlockData()->KillStateForGeneratorYield(); } // Change LdFld on arrays, strings, and 'arguments' to LdLen when we're accessing the .length field this->TryReplaceLdLen(instr); // Consider: Do we ever get post-op bailout here, and if so is the FillBailOutInfo call in the right place? if (instr->HasBailOutInfo() && !this->IsLoopPrePass()) { this->FillBailOutInfo(this->currentBlock, instr->GetBailOutInfo()); } this->instrCountSinceLastCleanUp++; instr = this->PreOptPeep(instr); this->OptArguments(instr); //StackArguments Optimization - We bail out if the index is out of range of actuals. 
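// (Illustrative reading: for something like "arguments[10]" in a call that passed only three actuals, the stack-args fast path cannot produce the out-of-range "undefined" result, so the bailout below routes that case to the slow path.)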
if ((instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::TypeofElem) && instr->DoStackArgsOpt(this->func) && !this->IsLoopPrePass()) { GenerateBailAtOperation(&instr, IR::BailOnStackArgsOutOfActualsRange); } #if DBG PropertySym *propertySymUseBefore = nullptr; Assert(this->byteCodeUses == nullptr); this->byteCodeUsesBeforeOpt->ClearAll(); GlobOpt::TrackByteCodeSymUsed(instr, this->byteCodeUsesBeforeOpt, &propertySymUseBefore); Assert(noImplicitCallUsesToInsert->Count() == 0); #endif this->ignoredIntOverflowForCurrentInstr = false; this->ignoredNegativeZeroForCurrentInstr = false; src1 = instr->GetSrc1(); src2 = instr->GetSrc2(); if (src1) { src1Val = this->OptSrc(src1, &instr, &src1IndirIndexVal); instr = this->SetTypeCheckBailOut(instr->GetSrc1(), instr, nullptr); if (src2) { src2Val = this->OptSrc(src2, &instr); } } if(instr->GetDst() && instr->GetDst()->IsIndirOpnd()) { this->OptSrc(instr->GetDst(), &instr, &dstIndirIndexVal); } MarkArgumentsUsedForBranch(instr); CSEOptimize(this->currentBlock, &instr, &src1Val, &src2Val, &src1IndirIndexVal); OptimizeChecks(instr); OptArraySrc(&instr); OptNewScObject(&instr, src1Val); instr = this->OptPeep(instr, src1Val, src2Val); if (instr->m_opcode == Js::OpCode::Nop || (instr->m_opcode == Js::OpCode::CheckThis && instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isSafeThis)) { instrNext = instr->m_next; InsertNoImplicitCallUses(instr); if (this->byteCodeUses) { this->InsertByteCodeUses(instr); } *isInstrRemoved = true; this->currentBlock->RemoveInstr(instr); return instrNext; } else if (instr->m_opcode == Js::OpCode::GetNewScObject && !this->IsLoopPrePass() && src1Val->GetValueInfo()->IsPrimitive()) { // Constructor returned (src1) a primitive value, so fold this into "dst = Ld_A src2", where src2 is the new object that // was passed into the constructor as its 'this' parameter instr->FreeSrc1(); instr->SetSrc1(instr->UnlinkSrc2()); instr->m_opcode = Js::OpCode::Ld_A; src1Val = src2Val; src2Val = nullptr; } else if ((instr->m_opcode == Js::OpCode::TryCatch && this->func->DoOptimizeTry()) || (instr->m_opcode == Js::OpCode::TryFinally && this->func->DoOptimizeTry())) { ProcessTryHandler(instr); } else if (instr->m_opcode == Js::OpCode::BrOnException || instr->m_opcode == Js::OpCode::BrOnNoException) { if (this->ProcessExceptionHandlingEdges(instr)) { *isInstrRemoved = true; return instrNext; } } bool isAlreadyTypeSpecialized = false; if (!IsLoopPrePass() && instr->HasBailOutInfo()) { if (instr->GetBailOutKind() == IR::BailOutExpectingInteger) { isAlreadyTypeSpecialized = TypeSpecializeBailoutExpectedInteger(instr, src1Val, &dstVal); } else if (instr->GetBailOutKind() == IR::BailOutExpectingString) { if (instr->GetSrc1()->IsRegOpnd()) { if (!src1Val || !src1Val->GetValueInfo()->IsLikelyString()) { // Disable SwitchOpt if the source is definitely not a string - This may be realized only in Globopt Assert(IsSwitchOptEnabled()); throw Js::RejitException(RejitReason::DisableSwitchOptExpectingString); } } } } bool forceInvariantHoisting = false; const bool ignoreIntOverflowInRangeForInstr = instr->ignoreIntOverflowInRange; // Save it since the instr can change if (!isAlreadyTypeSpecialized) { bool redoTypeSpec; instr = this->TypeSpecialization(instr, &src1Val, &src2Val, &dstVal, &redoTypeSpec, &forceInvariantHoisting); if(redoTypeSpec && instr->m_opcode != Js::OpCode::Nop) { forceInvariantHoisting = false; instr = this->TypeSpecialization(instr, &src1Val, &src2Val, &dstVal, &redoTypeSpec, 
&forceInvariantHoisting); Assert(!redoTypeSpec); } if (instr->m_opcode == Js::OpCode::Nop) { InsertNoImplicitCallUses(instr); if (this->byteCodeUses) { this->InsertByteCodeUses(instr); } instrNext = instr->m_next; *isInstrRemoved = true; this->currentBlock->RemoveInstr(instr); return instrNext; } } if (ignoreIntOverflowInRangeForInstr) { VerifyIntSpecForIgnoringIntOverflow(instr); } // Track calls after any pre-op bailouts have been inserted before the call, because they will need to restore out params. this->TrackCalls(instr); if (instr->GetSrc1()) { this->UpdateObjPtrValueType(instr->GetSrc1(), instr); } IR::Opnd *dst = instr->GetDst(); if (dst) { // Copy prop dst uses and mark live/available type syms before tracking kills. CopyPropDstUses(dst, instr, src1Val); } // Track mark temp object before we process the dst so we can generate pre-op bailout instr = this->TrackMarkTempObject(instrPrev->m_next, instr); bool removed = OptTagChecks(instr); if (removed) { *isInstrRemoved = true; return instrNext; } dstVal = this->OptDst(&instr, dstVal, src1Val, src2Val, dstIndirIndexVal, src1IndirIndexVal); dst = instr->GetDst(); instrNext = instr->m_next; if (dst) { if (this->func->HasTry() && this->func->DoOptimizeTry()) { this->InsertToVarAtDefInTryRegion(instr, dst); } instr = this->SetTypeCheckBailOut(dst, instr, nullptr); this->UpdateObjPtrValueType(dst, instr); } BVSparse<JitArenaAllocator> instrByteCodeStackSymUsedAfter(this->alloc); PropertySym *propertySymUseAfter = nullptr; if (this->byteCodeUses != nullptr) { GlobOpt::TrackByteCodeSymUsed(instr, &instrByteCodeStackSymUsedAfter, &propertySymUseAfter); } #if DBG else { GlobOpt::TrackByteCodeSymUsed(instr, &instrByteCodeStackSymUsedAfter, &propertySymUseAfter); instrByteCodeStackSymUsedAfter.Equal(this->byteCodeUsesBeforeOpt); Assert(propertySymUseAfter == propertySymUseBefore); } #endif bool isHoisted = false; if (this->currentBlock->loop && !this->IsLoopPrePass()) { isHoisted = this->TryHoistInvariant(instr, this->currentBlock, dstVal, src1Val, src2Val, true, false, forceInvariantHoisting); } src1 = instr->GetSrc1(); if (!this->IsLoopPrePass() && src1) { // instr const, nonConst => canonicalize by swapping operands // This simplifies lowering. (somewhat machine dependent) // Note that because of Var overflows, src1 may not have been constant prop'd to an IntConst this->PreLowerCanonicalize(instr, &src1Val, &src2Val); } if (!PHASE_OFF(Js::MemOpPhase, this->func) && !isHoisted && !(instr->IsJitProfilingInstr()) && this->currentBlock->loop && !IsLoopPrePass() && !func->IsJitInDebugMode() && (func->HasProfileInfo() && !func->GetReadOnlyProfileInfo()->IsMemOpDisabled()) && this->currentBlock->loop->doMemOp) { CollectMemOpInfo(instrPrev, instr, src1Val, src2Val); } InsertNoImplicitCallUses(instr); if (this->byteCodeUses != nullptr) { // Optimization removed some uses from the instruction. // Need to insert fake uses so we can get the correct live register to restore in bailout. 
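// (The set difference below leaves exactly the byte-code syms whose uses the optimizer dropped from this instruction; InsertByteCodeUses then materializes them as fake uses so bailout can still restore those registers.)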
this->byteCodeUses->Minus(&instrByteCodeStackSymUsedAfter); if (this->propertySymUse == propertySymUseAfter) { this->propertySymUse = nullptr; } this->InsertByteCodeUses(instr); } if (!this->IsLoopPrePass() && !isHoisted && this->IsImplicitCallBailOutCurrentlyNeeded(instr, src1Val, src2Val)) { IR::BailOutKind kind = IR::BailOutOnImplicitCalls; if(instr->HasBailOutInfo()) { Assert(instr->GetBailOutInfo()->bailOutOffset == instr->GetByteCodeOffset()); const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); if((bailOutKind & ~IR::BailOutKindBits) != IR::BailOutOnImplicitCallsPreOp) { Assert(!(bailOutKind & ~IR::BailOutKindBits)); instr->SetBailOutKind(bailOutKind + IR::BailOutOnImplicitCallsPreOp); } } else if (instr->forcePreOpBailOutIfNeeded || this->isRecursiveCallOnLandingPad) { // We can't have a byte code reg slot as dst to generate a // pre-op implicit call after we have processed the dst. // Consider: This might miss an opportunity to use a copy prop sym to restore // some other byte code reg if the dst is that copy prop that we already killed. Assert(!instr->GetDst() || !instr->GetDst()->IsRegOpnd() || instr->GetDst()->AsRegOpnd()->GetIsJITOptimizedReg() || !instr->GetDst()->AsRegOpnd()->m_sym->HasByteCodeRegSlot()); this->GenerateBailAtOperation(&instr, IR::BailOutOnImplicitCallsPreOp); } else { // Capture value of the bailout after the operation is done. this->GenerateBailAfterOperation(&instr, kind); } } if (CurrentBlockData()->capturedValuesCandidate && !this->IsLoopPrePass()) { this->CommitCapturedValuesCandidate(); } return instrNext; } bool GlobOpt::OptTagChecks(IR::Instr *instr) { if (PHASE_OFF(Js::OptTagChecksPhase, this->func) || !this->DoTagChecks()) { return false; } StackSym *stackSym = nullptr; IR::SymOpnd *symOpnd = nullptr; IR::RegOpnd *regOpnd = nullptr; switch(instr->m_opcode) { case Js::OpCode::LdFld: case Js::OpCode::LdMethodFld: case Js::OpCode::CheckFixedFld: case Js::OpCode::CheckPropertyGuardAndLoadType: symOpnd = instr->GetSrc1()->AsSymOpnd(); stackSym = symOpnd->m_sym->AsPropertySym()->m_stackSym; break; case Js::OpCode::BailOnNotObject: case Js::OpCode::BailOnNotArray: if (instr->GetSrc1()->IsRegOpnd()) { regOpnd = instr->GetSrc1()->AsRegOpnd(); stackSym = regOpnd->m_sym; } break; case Js::OpCode::StFld: symOpnd = instr->GetDst()->AsSymOpnd(); stackSym = symOpnd->m_sym->AsPropertySym()->m_stackSym; break; } if (stackSym) { Value *value = CurrentBlockData()->FindValue(stackSym); if (value) { ValueInfo *valInfo = value->GetValueInfo(); if (valInfo->GetSymStore() && valInfo->GetSymStore()->IsStackSym() && valInfo->GetSymStore()->AsStackSym()->IsFromByteCodeConstantTable()) { return false; } ValueType valueType = value->GetValueInfo()->Type(); if (instr->m_opcode == Js::OpCode::BailOnNotObject) { if (valueType.CanBeTaggedValue()) { // We're not adding new information to the value other than changing the value type. Preserve any existing // information and just change the value type. ChangeValueType(nullptr, value, valueType.SetCanBeTaggedValue(false), true /*preserveSubClassInfo*/); return false; } if (this->byteCodeUses) { this->InsertByteCodeUses(instr); } this->currentBlock->RemoveInstr(instr); return true; } if (valueType.CanBeTaggedValue() && !valueType.HasBeenNumber() && !this->IsLoopPrePass()) { ValueType newValueType = valueType.SetCanBeTaggedValue(false); // Split out the tag check as a separate instruction. 
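// (Hedged explanation: a value that may still be tagged, e.g. a tagged int, cannot be the object this operation expects, so a standalone BailOnNotObject guarded by BailOutOnTaggedValue is inserted up front; after it passes, the value type is narrowed via SetCanBeTaggedValue(false) so later checks on the same value can be skipped.)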
IR::Instr *bailOutInstr; bailOutInstr = IR::BailOutInstr::New(Js::OpCode::BailOnNotObject, IR::BailOutOnTaggedValue, instr, instr->m_func); if (!this->IsLoopPrePass()) { FillBailOutInfo(this->currentBlock, bailOutInstr->GetBailOutInfo()); } IR::RegOpnd *srcOpnd = regOpnd; if (!srcOpnd) { srcOpnd = IR::RegOpnd::New(stackSym, stackSym->GetType(), instr->m_func); AnalysisAssert(symOpnd); if (symOpnd->GetIsJITOptimizedReg()) { srcOpnd->SetIsJITOptimizedReg(true); } } bailOutInstr->SetSrc1(srcOpnd); bailOutInstr->GetSrc1()->SetValueType(valueType); instr->InsertBefore(bailOutInstr); if (this->currentBlock->loop) { // Try hoisting the BailOnNotObject instr. // But since this isn't the current instr being optimized, we need to play tricks with // the byteCodeUse fields... TrackByteCodeUsesForInstrAddedInOptInstr(bailOutInstr, [&]() { TryHoistInvariant(bailOutInstr, this->currentBlock, nullptr, value, nullptr, true, false, false, IR::BailOutOnTaggedValue); }); } if (symOpnd) { symOpnd->SetPropertyOwnerValueType(newValueType); } else { regOpnd->SetValueType(newValueType); } ChangeValueType(nullptr, value, newValueType, false); } } } return false; } bool GlobOpt::TypeSpecializeBailoutExpectedInteger(IR::Instr* instr, Value* src1Val, Value** dstVal) { bool isAlreadyTypeSpecialized = false; if(instr->GetSrc1()->IsRegOpnd()) { if (!src1Val || !src1Val->GetValueInfo()->IsLikelyInt() || instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt) { Assert(IsSwitchOptEnabledForIntTypeSpec()); throw Js::RejitException(RejitReason::DisableSwitchOptExpectingInteger); } // Attach the BailOutExpectingInteger to FromVar and Remove the bail out info on the Ld_A (Begin Switch) instr. this->ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, src1Val, nullptr, TyInt32, IR::BailOutExpectingInteger, false, instr); //TypeSpecialize the dst of Ld_A TypeSpecializeIntDst(instr, instr->m_opcode, src1Val, src1Val, nullptr, IR::BailOutInvalid, INT32_MIN, INT32_MAX, dstVal); isAlreadyTypeSpecialized = true; } instr->ClearBailOutInfo(); return isAlreadyTypeSpecialized; } Value* GlobOpt::OptDst( IR::Instr ** pInstr, Value *dstVal, Value *src1Val, Value *src2Val, Value *dstIndirIndexVal, Value *src1IndirIndexVal) { IR::Instr *&instr = *pInstr; IR::Opnd *opnd = instr->GetDst(); if (opnd) { if (opnd->IsSymOpnd() && opnd->AsSymOpnd()->IsPropertySymOpnd()) { this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd()); } else if (instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict || instr->m_opcode == Js::OpCode::InitComputedProperty) { this->KillObjectHeaderInlinedTypeSyms(this->currentBlock, false); } if (opnd->IsIndirOpnd() && !this->IsLoopPrePass()) { IR::RegOpnd *baseOpnd = opnd->AsIndirOpnd()->GetBaseOpnd(); const ValueType baseValueType(baseOpnd->GetValueType()); if (( baseValueType.IsLikelyNativeArray() || #ifdef _M_IX86 ( !AutoSystemInfo::Data.SSE2Available() && baseValueType.IsLikelyObject() && ( baseValueType.GetObjectType() == ObjectType::Float32Array || baseValueType.GetObjectType() == ObjectType::Float64Array ) ) #else false #endif ) && instr->GetSrc1()->IsVar()) { if(instr->m_opcode == Js::OpCode::StElemC) { // StElemC has different code that handles native array conversion or missing value stores. Add a bailout // for those cases. 
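// (Informal reading of the comment above: a StElemC store into a likely-native array may convert it to a Var array or create a missing value; those cases are not handled by the fast path being generated here, so force the conventional-access bailout.)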
Assert(baseValueType.IsLikelyNativeArray()); Assert(!instr->HasBailOutInfo()); GenerateBailAtOperation(&instr, IR::BailOutConventionalNativeArrayAccessOnly); } else if(instr->HasBailOutInfo()) { // The lowerer is not going to generate a fast path for this case. Remove any bailouts that require the fast // path. Note that the removed bailouts should not be necessary for correctness. Bailout on native array // conversion will be handled automatically as normal. IR::BailOutKind bailOutKind = instr->GetBailOutKind(); if(bailOutKind & IR::BailOutOnArrayAccessHelperCall) { bailOutKind -= IR::BailOutOnArrayAccessHelperCall; } if(bailOutKind == IR::BailOutOnImplicitCallsPreOp) { bailOutKind -= IR::BailOutOnImplicitCallsPreOp; } if(bailOutKind) { instr->SetBailOutKind(bailOutKind); } else { instr->ClearBailOutInfo(); } } } } } this->ProcessKills(instr); if (opnd) { if (dstVal == nullptr) { dstVal = ValueNumberDst(pInstr, src1Val, src2Val); } if (this->IsLoopPrePass()) { // Keep track of symbols defined in the loop. if (opnd->IsRegOpnd()) { StackSym *symDst = opnd->AsRegOpnd()->m_sym; rootLoopPrePass->symsDefInLoop->Set(symDst->m_id); } } else if (dstVal) { opnd->SetValueType(dstVal->GetValueInfo()->Type()); if(currentBlock->loop && !IsLoopPrePass() && (instr->m_opcode == Js::OpCode::Ld_A || instr->m_opcode == Js::OpCode::Ld_I4) && instr->GetSrc1()->IsRegOpnd() && !func->IsJitInDebugMode() && func->DoGlobOptsForGeneratorFunc()) { // Look for the following patterns: // // Pattern 1: // s1[liveOnBackEdge] = s3[dead] // // Pattern 2: // s3 = operation(s1[liveOnBackEdge], s2) // s1[liveOnBackEdge] = s3 // // In both patterns, s1 and s3 have the same value by the end. Prefer to use s1 as the sym store instead of s3 // since s1 is live on back-edge, as otherwise, their lifetimes overlap, requiring two registers to hold the // value instead of one. do { IR::RegOpnd *const src = instr->GetSrc1()->AsRegOpnd(); StackSym *srcVarSym = src->m_sym; if(srcVarSym->IsTypeSpec()) { srcVarSym = srcVarSym->GetVarEquivSym(nullptr); Assert(srcVarSym); } if(dstVal->GetValueInfo()->GetSymStore() != srcVarSym) { break; } IR::RegOpnd *const dst = opnd->AsRegOpnd(); StackSym *dstVarSym = dst->m_sym; if(dstVarSym->IsTypeSpec()) { dstVarSym = dstVarSym->GetVarEquivSym(nullptr); Assert(dstVarSym); } if(!currentBlock->loop->regAlloc.liveOnBackEdgeSyms->Test(dstVarSym->m_id)) { break; } Value *const srcValue = CurrentBlockData()->FindValue(srcVarSym); if(srcValue->GetValueNumber() != dstVal->GetValueNumber()) { break; } if(!src->GetIsDead()) { IR::Instr *const prevInstr = instr->GetPrevRealInstrOrLabel(); IR::Opnd *const prevDst = prevInstr->GetDst(); if(!prevDst || !src->IsEqualInternal(prevDst) || !( (prevInstr->GetSrc1() && dst->IsEqual(prevInstr->GetSrc1())) || (prevInstr->GetSrc2() && dst->IsEqual(prevInstr->GetSrc2())) )) { break; } } this->SetSymStoreDirect(dstVal->GetValueInfo(), dstVarSym); } while(false); } } this->ValueNumberObjectType(opnd, instr); } this->CSEAddInstr(this->currentBlock, *pInstr, dstVal, src1Val, src2Val, dstIndirIndexVal, src1IndirIndexVal); return dstVal; } void GlobOpt::CopyPropDstUses(IR::Opnd *opnd, IR::Instr *instr, Value *src1Val) { if (opnd->IsSymOpnd()) { IR::SymOpnd *symOpnd = opnd->AsSymOpnd(); if (symOpnd->m_sym->IsPropertySym()) { PropertySym * originalPropertySym = symOpnd->m_sym->AsPropertySym(); Value *const objectValue = CurrentBlockData()->FindValue(originalPropertySym->m_stackSym); symOpnd->SetPropertyOwnerValueType(objectValue ? 
objectValue->GetValueInfo()->Type() : ValueType::Uninitialized); this->FieldHoistOptDst(instr, originalPropertySym, src1Val); PropertySym * sym = this->CopyPropPropertySymObj(symOpnd, instr); if (sym != originalPropertySym && !this->IsLoopPrePass()) { // Consider: This doesn't detect hoistability of a property sym after object pointer copy prop // on loop prepass. But if it so happened that the property sym is hoisted, we might as well do so. this->FieldHoistOptDst(instr, sym, src1Val); } } } } void GlobOpt::SetLoopFieldInitialValue(Loop *loop, IR::Instr *instr, PropertySym *propertySym, PropertySym *originalPropertySym) { Value *initialValue = nullptr; StackSym *symStore; if (loop->allFieldsKilled || loop->fieldKilled->Test(originalPropertySym->m_id)) { return; } Assert(!loop->fieldKilled->Test(propertySym->m_id)); // Value already exists if (CurrentBlockData()->FindValue(propertySym)) { return; } // If this initial value was already added, we would find in the current value table. Assert(!loop->initialValueFieldMap.TryGetValue(propertySym, &initialValue)); // If propertySym is live in landingPad, we don't need an initial value. if (loop->landingPad->globOptData.liveFields->Test(propertySym->m_id)) { return; } Value *landingPadObjPtrVal, *currentObjPtrVal; landingPadObjPtrVal = loop->landingPad->globOptData.FindValue(propertySym->m_stackSym); currentObjPtrVal = CurrentBlockData()->FindValue(propertySym->m_stackSym); if (!currentObjPtrVal || !landingPadObjPtrVal || currentObjPtrVal->GetValueNumber() != landingPadObjPtrVal->GetValueNumber()) { // objPtr has a different value in the landing pad. return; } // The opnd's value type has not yet been initialized. Since the property sym doesn't have a value, it effectively has an // Uninitialized value type. Use the profiled value type from the instruction. const ValueType profiledValueType = instr->IsProfiledInstr() ? instr->AsProfiledInstr()->u.FldInfo().valueType : ValueType::Uninitialized; Assert(!profiledValueType.IsDefinite()); // Hence the values created here don't need to be tracked for kills initialValue = this->NewGenericValue(profiledValueType, propertySym); symStore = StackSym::New(this->func); initialValue->GetValueInfo()->SetSymStore(symStore); loop->initialValueFieldMap.Add(propertySym, initialValue->Copy(this->alloc, initialValue->GetValueNumber())); // Copy the initial value into the landing pad, but without a symStore Value *landingPadInitialValue = Value::New(this->alloc, initialValue->GetValueNumber(), ValueInfo::New(this->alloc, initialValue->GetValueInfo()->Type())); loop->landingPad->globOptData.SetValue(landingPadInitialValue, propertySym); loop->landingPad->globOptData.liveFields->Set(propertySym->m_id); #if DBG_DUMP if (PHASE_TRACE(Js::FieldPREPhase, this->func)) { Output::Print(_u("** TRACE: Field PRE initial value for loop head #%d. Val:%d symStore:"), loop->GetHeadBlock()->GetBlockNum(), initialValue->GetValueNumber()); symStore->Dump(); Output::Print(_u("\n Instr: ")); instr->Dump(); } #endif // Add initial value to all the previous blocks in the loop. FOREACH_BLOCK_BACKWARD_IN_RANGE(block, this->currentBlock->GetPrev(), loop->GetHeadBlock()) { if (block->GetDataUseCount() == 0) { // All successor blocks have been processed, no point in adding the value. 
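// (Hedged note: a data-use count of zero is taken to mean this block's globOptData has already been merged into all of its successors, so seeding the initial value here could no longer reach the current block.)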
continue; } Value *newValue = initialValue->Copy(this->alloc, initialValue->GetValueNumber()); block->globOptData.SetValue(newValue, propertySym); block->globOptData.liveFields->Set(propertySym->m_id); block->globOptData.SetValue(newValue, symStore); block->globOptData.liveVarSyms->Set(symStore->m_id); } NEXT_BLOCK_BACKWARD_IN_RANGE; CurrentBlockData()->SetValue(initialValue, symStore); CurrentBlockData()->liveVarSyms->Set(symStore->m_id); CurrentBlockData()->liveFields->Set(propertySym->m_id); } // Examine src, apply copy prop and value number it Value* GlobOpt::OptSrc(IR::Opnd *opnd, IR::Instr * *pInstr, Value **indirIndexValRef, IR::IndirOpnd *parentIndirOpnd) { IR::Instr * &instr = *pInstr; Assert(!indirIndexValRef || !*indirIndexValRef); Assert( parentIndirOpnd ? opnd == parentIndirOpnd->GetBaseOpnd() || opnd == parentIndirOpnd->GetIndexOpnd() : opnd == instr->GetSrc1() || opnd == instr->GetSrc2() || opnd == instr->GetDst() && opnd->IsIndirOpnd()); Sym *sym; Value *val; PropertySym *originalPropertySym = nullptr; switch(opnd->GetKind()) { case IR::OpndKindIntConst: val = this->GetIntConstantValue(opnd->AsIntConstOpnd()->AsInt32(), instr); opnd->SetValueType(val->GetValueInfo()->Type()); return val; case IR::OpndKindInt64Const: val = this->GetIntConstantValue(opnd->AsInt64ConstOpnd()->GetValue(), instr); opnd->SetValueType(val->GetValueInfo()->Type()); return val; case IR::OpndKindFloatConst: { const FloatConstType floatValue = opnd->AsFloatConstOpnd()->m_value; int32 int32Value; if(Js::JavascriptNumber::TryGetInt32Value(floatValue, &int32Value)) { val = GetIntConstantValue(int32Value, instr); } else { val = NewFloatConstantValue(floatValue); } opnd->SetValueType(val->GetValueInfo()->Type()); return val; } case IR::OpndKindAddr: { IR::AddrOpnd *addrOpnd = opnd->AsAddrOpnd(); if (addrOpnd->m_isFunction) { AssertMsg(!PHASE_OFF(Js::FixedMethodsPhase, instr->m_func), "Fixed function address operand with fixed method calls phase disabled?"); val = NewFixedFunctionValue((Js::JavascriptFunction *)addrOpnd->m_address, addrOpnd); opnd->SetValueType(val->GetValueInfo()->Type()); return val; } else if (addrOpnd->IsVar() && Js::TaggedInt::Is(addrOpnd->m_address)) { val = this->GetIntConstantValue(Js::TaggedInt::ToInt32(addrOpnd->m_address), instr); opnd->SetValueType(val->GetValueInfo()->Type()); return val; } val = this->GetVarConstantValue(addrOpnd); return val; } case IR::OpndKindSym: { // Clear the opnd's value type up-front, so that this code cannot accidentally use the value type set from a previous // OptSrc on the same instruction (for instance, from an earlier loop prepass). The value type will be set from the // value if available, before returning from this function. opnd->SetValueType(ValueType::Uninitialized); sym = opnd->AsSymOpnd()->m_sym; // Don't create a new value for ArgSlots and don't copy prop them away. if (sym->IsStackSym() && sym->AsStackSym()->IsArgSlotSym()) { return nullptr; } // Unless we have profile info, don't create a new value for ArgSlots and don't copy prop them away. if (sym->IsStackSym() && sym->AsStackSym()->IsParamSlotSym()) { if (!instr->m_func->IsLoopBody() && instr->m_func->HasProfileInfo()) { // Skip "this" pointer. 
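// (Assumed slot numbering: param slot 1 is the implicit "this" and slot 2 is the first user-declared formal, so subtracting 2 below yields the 0-based index that the profile's parameter info is keyed by.)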
int paramSlotNum = sym->AsStackSym()->GetParamSlotNum() - 2; if (paramSlotNum >= 0) { const auto parameterType = instr->m_func->GetReadOnlyProfileInfo()->GetParameterInfo(static_cast<Js::ArgSlot>(paramSlotNum)); val = NewGenericValue(parameterType); opnd->SetValueType(val->GetValueInfo()->Type()); return val; } } return nullptr; } if (!sym->IsPropertySym()) { break; } originalPropertySym = sym->AsPropertySym(); // Don't give a value to the 'arguments' property sym, to prevent field copy prop of 'arguments' if (originalPropertySym->AsPropertySym()->m_propertyId == Js::PropertyIds::arguments && originalPropertySym->AsPropertySym()->m_fieldKind == PropertyKindData) { return nullptr; } Value *const objectValue = CurrentBlockData()->FindValue(originalPropertySym->m_stackSym); opnd->AsSymOpnd()->SetPropertyOwnerValueType( objectValue ? objectValue->GetValueInfo()->Type() : ValueType::Uninitialized); if (!FieldHoistOptSrc(opnd->AsSymOpnd(), instr, originalPropertySym)) { sym = this->CopyPropPropertySymObj(opnd->AsSymOpnd(), instr); // Consider: This doesn't detect hoistability of a property sym after object pointer copy prop // on loop prepass. But if it so happened that the property sym is hoisted, we might as well do so. if (originalPropertySym == sym || this->IsLoopPrePass() || !FieldHoistOptSrc(opnd->AsSymOpnd(), instr, sym->AsPropertySym())) { if (!DoFieldCopyProp()) { if (opnd->AsSymOpnd()->IsPropertySymOpnd()) { this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd()); } return nullptr; } switch (instr->m_opcode) { // These need the symbolic reference to the field, don't copy prop the value of the field case Js::OpCode::DeleteFld: case Js::OpCode::DeleteRootFld: case Js::OpCode::DeleteFldStrict: case Js::OpCode::DeleteRootFldStrict: case Js::OpCode::ScopedDeleteFld: case Js::OpCode::ScopedDeleteFldStrict: case Js::OpCode::LdMethodFromFlags: case Js::OpCode::BrOnNoProperty: case Js::OpCode::BrOnHasProperty: case Js::OpCode::LdMethodFldPolyInlineMiss: case Js::OpCode::StSlotChkUndecl: return nullptr; }; if (instr->CallsGetter()) { return nullptr; } if (this->IsLoopPrePass() && this->DoFieldPRE(this->rootLoopPrePass)) { if (!this->prePassLoop->allFieldsKilled && !this->prePassLoop->fieldKilled->Test(sym->m_id)) { this->SetLoopFieldInitialValue(this->rootLoopPrePass, instr, sym->AsPropertySym(), originalPropertySym); } if (this->IsPREInstrCandidateLoad(instr->m_opcode)) { // For each property sym, remember the first instruction that loads it. // Can this be done in one call? if (!this->prePassInstrMap->ContainsKey(sym->m_id)) { this->prePassInstrMap->AddNew(sym->m_id, instr); } } } break; } } // We field hoisted, so we can continue as a reg. opnd = instr->GetSrc1(); } case IR::OpndKindReg: // Clear the opnd's value type up-front, so that this code cannot accidentally use the value type set from a previous // OptSrc on the same instruction (for instance, from an earlier loop prepass). The value type will be set from the // value if available, before returning from this function. 
opnd->SetValueType(ValueType::Uninitialized); sym = opnd->AsRegOpnd()->m_sym; CurrentBlockData()->MarkTempLastUse(instr, opnd->AsRegOpnd()); if (sym->AsStackSym()->IsTypeSpec()) { sym = sym->AsStackSym()->GetVarEquivSym(this->func); } break; case IR::OpndKindIndir: this->OptimizeIndirUses(opnd->AsIndirOpnd(), &instr, indirIndexValRef); return nullptr; default: return nullptr; } val = CurrentBlockData()->FindValue(sym); if (val) { Assert(CurrentBlockData()->IsLive(sym) || (sym->IsPropertySym())); if (instr) { opnd = this->CopyProp(opnd, instr, val, parentIndirOpnd); } // Check if we freed the operand. if (opnd == nullptr) { return nullptr; } // In a loop prepass, determine stack syms that are used before they are defined in the root loop for which the prepass // is being done. This information is used to do type specialization conversions in the landing pad where appropriate. if(IsLoopPrePass() && sym->IsStackSym() && !rootLoopPrePass->symsUsedBeforeDefined->Test(sym->m_id) && rootLoopPrePass->landingPad->globOptData.IsLive(sym) && !isAsmJSFunc) // no typespec in asmjs and hence skipping this { Value *const landingPadValue = rootLoopPrePass->landingPad->globOptData.FindValue(sym); if(landingPadValue && val->GetValueNumber() == landingPadValue->GetValueNumber()) { rootLoopPrePass->symsUsedBeforeDefined->Set(sym->m_id); ValueInfo *landingPadValueInfo = landingPadValue->GetValueInfo(); if(landingPadValueInfo->IsLikelyNumber()) { rootLoopPrePass->likelyNumberSymsUsedBeforeDefined->Set(sym->m_id); if(DoAggressiveIntTypeSpec() ? landingPadValueInfo->IsLikelyInt() : landingPadValueInfo->IsInt()) { // Can only force int conversions in the landing pad based on likely-int values if aggressive int type // specialization is enabled. rootLoopPrePass->likelyIntSymsUsedBeforeDefined->Set(sym->m_id); } } #ifdef ENABLE_SIMDJS // SIMD_JS // For uses before defs, we set likelySimd128*SymsUsedBeforeDefined bits for syms that have landing pad value info that allow type-spec to happen in the loop body. // The BV will be added to loop header if the backedge has a live matching type-spec value. We then compensate in the loop header to unbox the value. // This allows type-spec in the landing pad instead of boxing/unboxing on each iteration. if (Js::IsSimd128Opcode(instr->m_opcode)) { // Simd ops are strongly typed. We type-spec only if the type is likely/Definitely the expected type or if we have object which can come from merging different Simd types. // Simd value must be initialized properly on all paths before the loop entry. Cannot be merged with Undefined/Null. ThreadContext::SimdFuncSignature funcSignature; instr->m_func->GetScriptContext()->GetThreadContext()->GetSimdFuncSignatureFromOpcode(instr->m_opcode, funcSignature); Assert(funcSignature.valid); ValueType expectedType = funcSignature.args[opnd == instr->GetSrc1() ? 
0 : 1]; if (expectedType.IsSimd128Float32x4()) { if ( (landingPadValueInfo->IsLikelySimd128Float32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object)) && !landingPadValueInfo->HasBeenUndefined() && !landingPadValueInfo->HasBeenNull() ) { rootLoopPrePass->likelySimd128F4SymsUsedBeforeDefined->Set(sym->m_id); } } else if (expectedType.IsSimd128Int32x4()) { if ( (landingPadValueInfo->IsLikelySimd128Int32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object)) && !landingPadValueInfo->HasBeenUndefined() && !landingPadValueInfo->HasBeenNull() ) { rootLoopPrePass->likelySimd128I4SymsUsedBeforeDefined->Set(sym->m_id); } } } else if (instr->m_opcode == Js::OpCode::ExtendArg_A && opnd == instr->GetSrc1() && instr->GetDst()->GetValueType().IsSimd128()) { // Extended_Args for Simd ops are annotated with the expected type by the inliner. Use this info to find out if type-spec is supposed to happen. ValueType expectedType = instr->GetDst()->GetValueType(); if ((landingPadValueInfo->IsLikelySimd128Float32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object)) && expectedType.IsSimd128Float32x4()) { rootLoopPrePass->likelySimd128F4SymsUsedBeforeDefined->Set(sym->m_id); } else if ((landingPadValueInfo->IsLikelySimd128Int32x4() || (landingPadValueInfo->IsLikelyObject() && landingPadValueInfo->GetObjectType() == ObjectType::Object)) && expectedType.IsSimd128Int32x4()) { rootLoopPrePass->likelySimd128I4SymsUsedBeforeDefined->Set(sym->m_id); } } #endif } } } else if ((instr->TransfersSrcValue() || OpCodeAttr::CanCSE(instr->m_opcode)) && (opnd == instr->GetSrc1() || opnd == instr->GetSrc2())) { if (sym->IsPropertySym()) { val = this->CreateFieldSrcValue(sym->AsPropertySym(), originalPropertySym, &opnd, instr); } else { val = this->NewGenericValue(ValueType::Uninitialized, opnd); } } if (opnd->IsSymOpnd() && opnd->AsSymOpnd()->IsPropertySymOpnd()) { TryOptimizeInstrWithFixedDataProperty(&instr); this->FinishOptPropOp(instr, opnd->AsPropertySymOpnd()); } if (val) { ValueType valueType(val->GetValueInfo()->Type()); // This block uses local profiling data to optimize the case of a native array being passed to a function that fills it with other types. When the function is inlined // into different call paths which use different types this can cause a perf hit by performing unnecessary array conversions, so only perform this optimization when // the function is not inlined. 
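        // The profiled array type comes from the ld/st element (or LdLen) profile info and is merged into the tracked
        // value type only when it widens the element kind (var elements, or float elements over int elements).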
if (valueType.IsLikelyNativeArray() && !valueType.IsObject() && instr->IsProfiledInstr() && !instr->m_func->IsInlined()) { // See if we have profile data for the array type IR::ProfiledInstr *const profiledInstr = instr->AsProfiledInstr(); ValueType profiledArrayType; switch(instr->m_opcode) { case Js::OpCode::LdElemI_A: if(instr->GetSrc1()->IsIndirOpnd() && opnd == instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()) { profiledArrayType = profiledInstr->u.ldElemInfo->GetArrayType(); } break; case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: case Js::OpCode::StElemC: if(instr->GetDst()->IsIndirOpnd() && opnd == instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()) { profiledArrayType = profiledInstr->u.stElemInfo->GetArrayType(); } break; case Js::OpCode::LdLen_A: if(instr->GetSrc1()->IsRegOpnd() && opnd == instr->GetSrc1()) { profiledArrayType = profiledInstr->u.ldElemInfo->GetArrayType(); } break; } if(profiledArrayType.IsLikelyObject() && profiledArrayType.GetObjectType() == valueType.GetObjectType() && (profiledArrayType.HasVarElements() || (valueType.HasIntElements() && profiledArrayType.HasFloatElements()))) { // Merge array type we pulled from profile with type propagated by dataflow. valueType = valueType.Merge(profiledArrayType).SetHasNoMissingValues(valueType.HasNoMissingValues()); ChangeValueType(this->currentBlock, CurrentBlockData()->FindValue(opnd->AsRegOpnd()->m_sym), valueType, false); } } opnd->SetValueType(valueType); if(!IsLoopPrePass() && opnd->IsSymOpnd() && valueType.IsDefinite()) { if (opnd->AsSymOpnd()->m_sym->IsPropertySym()) { // A property sym can only be guaranteed to have a definite value type when implicit calls are disabled from the // point where the sym was defined with the definite value type. Insert an instruction to indicate to the // dead-store pass that implicit calls need to be kept disabled until after this instruction. Assert(DoFieldCopyProp()); CaptureNoImplicitCallUses(opnd, false, instr); } } } else { opnd->SetValueType(ValueType::Uninitialized); } return val; } /* * GlobOpt::TryOptimizeInstrWithFixedDataProperty * Converts Ld[Root]Fld instr to * * CheckFixedFld * * Dst = Ld_A <int Constant value> * This API assumes that the source operand is a Sym/PropertySym kind. */ void GlobOpt::TryOptimizeInstrWithFixedDataProperty(IR::Instr ** const pInstr) { Assert(pInstr); IR::Instr * &instr = *pInstr; IR::Opnd * src1 = instr->GetSrc1(); Assert(src1 && src1->IsSymOpnd() && src1->AsSymOpnd()->IsPropertySymOpnd()); if(PHASE_OFF(Js::UseFixedDataPropsPhase, instr->m_func)) { return; } if (!this->IsLoopPrePass() && !this->isRecursiveCallOnLandingPad && OpCodeAttr::CanLoadFixedFields(instr->m_opcode)) { instr->TryOptimizeInstrWithFixedDataProperty(&instr, this); } } // Constant prop if possible, otherwise if this value already resides in another // symbol, reuse this previous symbol. This should help register allocation. IR::Opnd * GlobOpt::CopyProp(IR::Opnd *opnd, IR::Instr *instr, Value *val, IR::IndirOpnd *parentIndirOpnd) { Assert( parentIndirOpnd ? opnd == parentIndirOpnd->GetBaseOpnd() || opnd == parentIndirOpnd->GetIndexOpnd() : opnd == instr->GetSrc1() || opnd == instr->GetSrc2() || opnd == instr->GetDst() && opnd->IsIndirOpnd()); if (this->IsLoopPrePass()) { // Transformations are not legal in prepass... 
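        // (The loop prepass only collects value information for the loop; actual IR rewrites such as copy prop and
        // constant prop are deferred to the real optimization pass.)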
return opnd; } if (!this->func->DoGlobOptsForGeneratorFunc()) { // Don't copy prop in generator functions because non-bytecode temps that span a yield // cannot be saved and restored by the current bail-out mechanics utilized by generator // yield/resume. // TODO[generators][ianhall]: Enable copy-prop at least for in between yields. return opnd; } if (instr->m_opcode == Js::OpCode::CheckFixedFld || instr->m_opcode == Js::OpCode::CheckPropertyGuardAndLoadType) { // Don't copy prop into CheckFixedFld or CheckPropertyGuardAndLoadType return opnd; } // Don't copy-prop link operands of ExtendedArgs if (instr->m_opcode == Js::OpCode::ExtendArg_A && opnd == instr->GetSrc2()) { return opnd; } // Don't copy-prop operand of SIMD instr with ExtendedArg operands. Each instr should have its exclusive EA sequence. if ( Js::IsSimd128Opcode(instr->m_opcode) && instr->GetSrc1() != nullptr && instr->GetSrc1()->IsRegOpnd() && instr->GetSrc2() == nullptr ) { StackSym *sym = instr->GetSrc1()->GetStackSym(); if (sym && sym->IsSingleDef() && sym->GetInstrDef()->m_opcode == Js::OpCode::ExtendArg_A) { return opnd; } } ValueInfo *valueInfo = val->GetValueInfo(); if (this->func->HasFinally()) { // s0 = undefined was added on functions with early exit in try-finally functions, that can get copy-proped and case incorrect results if (instr->m_opcode == Js::OpCode::ArgOut_A_Inline && valueInfo->GetSymStore() && valueInfo->GetSymStore()->m_id == 0) { // We don't want to copy-prop s0 (return symbol) into inlinee code return opnd; } } // Constant prop? int32 intConstantValue; int64 int64ConstantValue; if (valueInfo->TryGetIntConstantValue(&intConstantValue)) { if (PHASE_OFF(Js::ConstPropPhase, this->func)) { return opnd; } if (( instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict || instr->m_opcode == Js::OpCode::StElemC ) && instr->GetSrc1() == opnd) { // Disabling prop to src of native array store, because we were losing the chance to type specialize. // Is it possible to type specialize this src if we allow constants, etc., to be prop'd here? if (instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyNativeArray()) { return opnd; } } if(opnd != instr->GetSrc1() && opnd != instr->GetSrc2()) { if(PHASE_OFF(Js::IndirCopyPropPhase, instr->m_func)) { return opnd; } // Const-prop an indir opnd's constant index into its offset IR::Opnd *srcs[] = { instr->GetSrc1(), instr->GetSrc2(), instr->GetDst() }; for(int i = 0; i < sizeof(srcs) / sizeof(srcs[0]); ++i) { const auto src = srcs[i]; if(!src || !src->IsIndirOpnd()) { continue; } const auto indir = src->AsIndirOpnd(); if ((int64)indir->GetOffset() + intConstantValue > INT32_MAX) { continue; } if(opnd == indir->GetIndexOpnd()) { Assert(indir->GetScale() == 0); GOPT_TRACE_OPND(opnd, _u("Constant prop indir index into offset (value: %d)\n"), intConstantValue); this->CaptureByteCodeSymUses(instr); indir->SetOffset(indir->GetOffset() + intConstantValue); indir->SetIndexOpnd(nullptr); } } return opnd; } if (Js::TaggedInt::IsOverflow(intConstantValue)) { return opnd; } IR::Opnd *constOpnd; if (opnd->IsVar()) { IR::AddrOpnd *addrOpnd = IR::AddrOpnd::New(Js::TaggedInt::ToVarUnchecked((int)intConstantValue), IR::AddrOpndKindConstantVar, instr->m_func); GOPT_TRACE_OPND(opnd, _u("Constant prop %d (value:%d)\n"), addrOpnd->m_address, intConstantValue); constOpnd = addrOpnd; } else { // Note: Jit loop body generates some i32 operands... 
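            // For those machine-typed operands the constant is re-emitted below as an IntConstOpnd (TyUint32 when the
            // operand is uint32, to avoid sign extension, TyInt32 otherwise) instead of a tagged-int AddrOpnd.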
Assert(opnd->IsInt32() || opnd->IsInt64() || opnd->IsUInt32()); IRType opndType; IntConstType constVal; if (opnd->IsUInt32()) { // avoid sign extension constVal = (uint32)intConstantValue; opndType = TyUint32; } else { constVal = intConstantValue; opndType = TyInt32; } IR::IntConstOpnd *intOpnd = IR::IntConstOpnd::New(constVal, opndType, instr->m_func); GOPT_TRACE_OPND(opnd, _u("Constant prop %d (value:%d)\n"), intOpnd->GetImmediateValue(instr->m_func), intConstantValue); constOpnd = intOpnd; } #if ENABLE_DEBUG_CONFIG_OPTIONS //Need to update DumpFieldCopyPropTestTrace for every new opcode that is added for fieldcopyprop if(Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FieldCopyPropPhase)) { instr->DumpFieldCopyPropTestTrace(); } #endif this->CaptureByteCodeSymUses(instr); opnd = instr->ReplaceSrc(opnd, constOpnd); switch (instr->m_opcode) { case Js::OpCode::LdSlot: case Js::OpCode::LdSlotArr: case Js::OpCode::LdFld: case Js::OpCode::LdFldForTypeOf: case Js::OpCode::LdRootFldForTypeOf: case Js::OpCode::LdFldForCallApplyTarget: case Js::OpCode::LdRootFld: case Js::OpCode::LdMethodFld: case Js::OpCode::LdRootMethodFld: case Js::OpCode::LdMethodFromFlags: case Js::OpCode::ScopedLdMethodFld: instr->m_opcode = Js::OpCode::Ld_A; case Js::OpCode::Ld_A: { IR::Opnd * dst = instr->GetDst(); if (dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->IsSingleDef()) { dst->AsRegOpnd()->m_sym->SetIsIntConst((int)intConstantValue); } break; } case Js::OpCode::ArgOut_A: case Js::OpCode::ArgOut_A_Inline: case Js::OpCode::ArgOut_A_FixupForStackArgs: case Js::OpCode::ArgOut_A_InlineBuiltIn: if (instr->GetDst()->IsRegOpnd()) { Assert(instr->GetDst()->AsRegOpnd()->m_sym->m_isSingleDef); instr->GetDst()->AsRegOpnd()->m_sym->AsStackSym()->SetIsIntConst((int)intConstantValue); } else { instr->GetDst()->AsSymOpnd()->m_sym->AsStackSym()->SetIsIntConst((int)intConstantValue); } break; case Js::OpCode::TypeofElem: instr->m_opcode = Js::OpCode::Typeof; break; case Js::OpCode::StSlotChkUndecl: if (instr->GetSrc2() == opnd) { // Src2 here should refer to the same location as the Dst operand, which we need to keep live // due to the implicit read for ChkUndecl. 
instr->m_opcode = Js::OpCode::StSlot; instr->FreeSrc2(); opnd = nullptr; } break; } return opnd; } else if (valueInfo->TryGetIntConstantValue(&int64ConstantValue, false)) { if (PHASE_OFF(Js::ConstPropPhase, this->func) || !PHASE_ON(Js::Int64ConstPropPhase, this->func)) { return opnd; } Assert(this->func->GetJITFunctionBody()->IsWasmFunction()); if (this->func->GetJITFunctionBody()->IsWasmFunction() && opnd->IsInt64()) { IR::Int64ConstOpnd *intOpnd = IR::Int64ConstOpnd::New(int64ConstantValue, opnd->GetType(), instr->m_func); GOPT_TRACE_OPND(opnd, _u("Constant prop %lld (value:%lld)\n"), intOpnd->GetImmediateValue(instr->m_func), int64ConstantValue); this->CaptureByteCodeSymUses(instr); opnd = instr->ReplaceSrc(opnd, intOpnd); } return opnd; } Sym *opndSym = nullptr; if (opnd->IsRegOpnd()) { IR::RegOpnd *regOpnd = opnd->AsRegOpnd(); opndSym = regOpnd->m_sym; } else if (opnd->IsSymOpnd()) { IR::SymOpnd *symOpnd = opnd->AsSymOpnd(); opndSym = symOpnd->m_sym; } if (!opndSym) { return opnd; } if (PHASE_OFF(Js::CopyPropPhase, this->func)) { this->SetSymStoreDirect(valueInfo, opndSym); return opnd; } // We should have dealt with field hoist already Assert(!instr->TransfersSrcValue() || !opndSym->IsPropertySym() || !this->IsHoistedPropertySym(opndSym->AsPropertySym())); StackSym *copySym = CurrentBlockData()->GetCopyPropSym(opndSym, val); if (copySym != nullptr) { // Copy prop. return CopyPropReplaceOpnd(instr, opnd, copySym, parentIndirOpnd); } else { if (valueInfo->GetSymStore() && instr->m_opcode == Js::OpCode::Ld_A && instr->GetDst()->IsRegOpnd() && valueInfo->GetSymStore() == instr->GetDst()->AsRegOpnd()->m_sym) { // Avoid resetting symStore after fieldHoisting: // t1 = LdFld field <- set symStore to fieldHoistSym // fieldHoistSym = Ld_A t1 <- we're looking at t1 now, but want to copy-prop fieldHoistSym forward return opnd; } this->SetSymStoreDirect(valueInfo, opndSym); } return opnd; } IR::Opnd * GlobOpt::CopyPropReplaceOpnd(IR::Instr * instr, IR::Opnd * opnd, StackSym * copySym, IR::IndirOpnd *parentIndirOpnd) { Assert( parentIndirOpnd ? opnd == parentIndirOpnd->GetBaseOpnd() || opnd == parentIndirOpnd->GetIndexOpnd() : opnd == instr->GetSrc1() || opnd == instr->GetSrc2() || opnd == instr->GetDst() && opnd->IsIndirOpnd()); Assert(CurrentBlockData()->IsLive(copySym)); IR::RegOpnd *regOpnd; StackSym *newSym = copySym; GOPT_TRACE_OPND(opnd, _u("Copy prop s%d\n"), newSym->m_id); #if ENABLE_DEBUG_CONFIG_OPTIONS //Need to update DumpFieldCopyPropTestTrace for every new opcode that is added for fieldcopyprop if(Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FieldCopyPropPhase)) { instr->DumpFieldCopyPropTestTrace(); } #endif this->CaptureByteCodeSymUses(instr); if (opnd->IsRegOpnd()) { regOpnd = opnd->AsRegOpnd(); regOpnd->m_sym = newSym; regOpnd->SetIsJITOptimizedReg(true); // The dead bit on the opnd is specific to the sym it is referencing. Since we replaced the sym, the bit is reset. regOpnd->SetIsDead(false); if(parentIndirOpnd) { return regOpnd; } } else { // If this is an object type specialized field load inside a loop, and it produces a type value which wasn't live // before, make sure the type check is left in the loop, because it may be the last type check in the loop protecting // other fields which are not hoistable and are lexically upstream in the loop. If the check is not ultimately // needed, the dead store pass will remove it. 
if (this->currentBlock->loop != nullptr && opnd->IsSymOpnd() && opnd->AsSymOpnd()->IsPropertySymOpnd()) { IR::PropertySymOpnd* propertySymOpnd = opnd->AsPropertySymOpnd(); if (CheckIfPropOpEmitsTypeCheck(instr, propertySymOpnd)) { // We only set guarded properties in the dead store pass, so they shouldn't be set here yet. If they were // we would need to move them from this operand to the operand which is being copy propagated. Assert(propertySymOpnd->GetGuardedPropOps() == nullptr); // We're creating a copy of this operand to be reused in the same spot in the flow, so we can copy all // flow sensitive fields. However, we will do only a type check here (no property access) and only for // the sake of downstream instructions, so the flags pertaining to this property access are irrelevant. IR::PropertySymOpnd* checkObjTypeOpnd = CreateOpndForTypeCheckOnly(propertySymOpnd, instr->m_func); IR::Instr* checkObjTypeInstr = IR::Instr::New(Js::OpCode::CheckObjType, instr->m_func); checkObjTypeInstr->SetSrc1(checkObjTypeOpnd); checkObjTypeInstr->SetByteCodeOffset(instr); instr->InsertBefore(checkObjTypeInstr); // Since we inserted this instruction before the one that is being processed in natural flow, we must process // it for object type spec explicitly here. FinishOptPropOp(checkObjTypeInstr, checkObjTypeOpnd); Assert(!propertySymOpnd->IsTypeChecked()); checkObjTypeInstr = this->SetTypeCheckBailOut(checkObjTypeOpnd, checkObjTypeInstr, nullptr); Assert(checkObjTypeInstr->HasBailOutInfo()); if (this->currentBlock->loop && !this->IsLoopPrePass()) { // Try hoisting this checkObjType. // But since this isn't the current instr being optimized, we need to play tricks with // the byteCodeUse fields... TrackByteCodeUsesForInstrAddedInOptInstr(checkObjTypeInstr, [&]() { TryHoistInvariant(checkObjTypeInstr, this->currentBlock, NULL, CurrentBlockData()->FindValue(copySym), NULL, true); }); } } } if (opnd->IsSymOpnd() && opnd->GetIsDead()) { // Take the property sym out of the live fields set this->EndFieldLifetime(opnd->AsSymOpnd()); } regOpnd = IR::RegOpnd::New(newSym, opnd->GetType(), instr->m_func); regOpnd->SetIsJITOptimizedReg(true); instr->ReplaceSrc(opnd, regOpnd); } switch (instr->m_opcode) { case Js::OpCode::Ld_A: if (instr->GetDst()->IsRegOpnd() && instr->GetSrc1()->IsRegOpnd() && instr->GetDst()->AsRegOpnd()->GetStackSym() == instr->GetSrc1()->AsRegOpnd()->GetStackSym()) { this->InsertByteCodeUses(instr, true); instr->m_opcode = Js::OpCode::Nop; } break; case Js::OpCode::LdSlot: case Js::OpCode::LdSlotArr: if (instr->GetDst()->IsRegOpnd() && instr->GetSrc1()->IsRegOpnd() && instr->GetDst()->AsRegOpnd()->GetStackSym() == instr->GetSrc1()->AsRegOpnd()->GetStackSym()) { this->InsertByteCodeUses(instr, true); instr->m_opcode = Js::OpCode::Nop; } else { instr->m_opcode = Js::OpCode::Ld_A; } break; case Js::OpCode::StSlotChkUndecl: if (instr->GetSrc2()->IsRegOpnd()) { // Src2 here should refer to the same location as the Dst operand, which we need to keep live // due to the implicit read for ChkUndecl. 
instr->m_opcode = Js::OpCode::StSlot; instr->FreeSrc2(); return nullptr; } break; case Js::OpCode::LdFld: case Js::OpCode::LdFldForTypeOf: case Js::OpCode::LdRootFldForTypeOf: case Js::OpCode::LdFldForCallApplyTarget: case Js::OpCode::LdRootFld: case Js::OpCode::LdMethodFld: case Js::OpCode::LdRootMethodFld: case Js::OpCode::ScopedLdMethodFld: instr->m_opcode = Js::OpCode::Ld_A; break; case Js::OpCode::LdMethodFromFlags: // The bailout is checked on the loop top and we don't need to check bailout again in loop. instr->m_opcode = Js::OpCode::Ld_A; instr->ClearBailOutInfo(); break; case Js::OpCode::TypeofElem: instr->m_opcode = Js::OpCode::Typeof; break; } CurrentBlockData()->MarkTempLastUse(instr, regOpnd); return regOpnd; } ValueNumber GlobOpt::NewValueNumber() { ValueNumber valueNumber = this->currentValue++; if (valueNumber == 0) { Js::Throw::OutOfMemory(); } return valueNumber; } Value *GlobOpt::NewValue(ValueInfo *const valueInfo) { return NewValue(NewValueNumber(), valueInfo); } Value *GlobOpt::NewValue(const ValueNumber valueNumber, ValueInfo *const valueInfo) { Assert(valueInfo); return Value::New(alloc, valueNumber, valueInfo); } Value *GlobOpt::CopyValue(Value const *const value) { return CopyValue(value, NewValueNumber()); } Value *GlobOpt::CopyValue(Value const *const value, const ValueNumber valueNumber) { Assert(value); return value->Copy(alloc, valueNumber); } Value * GlobOpt::NewGenericValue(const ValueType valueType) { return NewGenericValue(valueType, static_cast<IR::Opnd *>(nullptr)); } Value * GlobOpt::NewGenericValue(const ValueType valueType, IR::Opnd *const opnd) { // Shouldn't assign a likely-int value to something that is definitely not an int Assert(!(valueType.IsLikelyInt() && opnd && opnd->IsRegOpnd() && opnd->AsRegOpnd()->m_sym->m_isNotInt)); ValueInfo *valueInfo = ValueInfo::New(this->alloc, valueType); Value *val = NewValue(valueInfo); TrackNewValueForKills(val); CurrentBlockData()->InsertNewValue(val, opnd); return val; } Value * GlobOpt::NewGenericValue(const ValueType valueType, Sym *const sym) { ValueInfo *valueInfo = ValueInfo::New(this->alloc, valueType); Value *val = NewValue(valueInfo); TrackNewValueForKills(val); CurrentBlockData()->SetValue(val, sym); return val; } Value * GlobOpt::GetIntConstantValue(const int32 intConst, IR::Instr * instr, IR::Opnd *const opnd) { Value *value = nullptr; Value *const cachedValue = this->intConstantToValueMap->Lookup(intConst, nullptr); if(cachedValue) { // The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since // values are cloned for each block, we can't use the same value object. We also can't have two values with the same // number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast // way to determine if a value with the same value number exists for this block. So the best we can do with a global // cache is to check the sym-store's value in the current block to see if it has a value with the same number. // Otherwise, we have to create a new value with a new value number. 
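        // In other words, reuse the cached value only if its sym-store is still live in this block and still maps to a
        // value with the same value number and the same integer constant; otherwise a fresh value is created below.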
Sym *const symStore = cachedValue->GetValueInfo()->GetSymStore(); if (symStore && CurrentBlockData()->IsLive(symStore)) { Value *const symStoreValue = CurrentBlockData()->FindValue(symStore); int32 symStoreIntConstantValue; if (symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber() && symStoreValue->GetValueInfo()->TryGetIntConstantValue(&symStoreIntConstantValue) && symStoreIntConstantValue == intConst) { value = symStoreValue; } } } if (!value) { value = NewIntConstantValue(intConst, instr, !Js::TaggedInt::IsOverflow(intConst)); } return CurrentBlockData()->InsertNewValue(value, opnd); } Value * GlobOpt::GetIntConstantValue(const int64 intConst, IR::Instr * instr, IR::Opnd *const opnd) { Assert(instr->m_func->GetJITFunctionBody()->IsWasmFunction()); Value *value = nullptr; Value *const cachedValue = this->int64ConstantToValueMap->Lookup(intConst, nullptr); if (cachedValue) { // The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since // values are cloned for each block, we can't use the same value object. We also can't have two values with the same // number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast // way to determine if a value with the same value number exists for this block. So the best we can do with a global // cache is to check the sym-store's value in the current block to see if it has a value with the same number. // Otherwise, we have to create a new value with a new value number. Sym *const symStore = cachedValue->GetValueInfo()->GetSymStore(); if (symStore && this->currentBlock->globOptData.IsLive(symStore)) { Value *const symStoreValue = this->currentBlock->globOptData.FindValue(symStore); int64 symStoreIntConstantValue; if (symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber() && symStoreValue->GetValueInfo()->TryGetInt64ConstantValue(&symStoreIntConstantValue, false) && symStoreIntConstantValue == intConst) { value = symStoreValue; } } } if (!value) { value = NewInt64ConstantValue(intConst, instr); } return this->currentBlock->globOptData.InsertNewValue(value, opnd); } Value * GlobOpt::NewInt64ConstantValue(const int64 intConst, IR::Instr* instr) { Value * value = NewValue(Int64ConstantValueInfo::New(this->alloc, intConst)); this->int64ConstantToValueMap->Item(intConst, value); if (!value->GetValueInfo()->GetSymStore() && (instr->m_opcode == Js::OpCode::LdC_A_I4 || instr->m_opcode == Js::OpCode::Ld_I4)) { StackSym * sym = instr->GetDst()->GetStackSym(); Assert(sym && !sym->IsTypeSpec()); this->currentBlock->globOptData.SetValue(value, sym); this->currentBlock->globOptData.liveVarSyms->Set(sym->m_id); } return value; } Value * GlobOpt::NewIntConstantValue(const int32 intConst, IR::Instr * instr, bool isTaggable) { Value * value = NewValue(IntConstantValueInfo::New(this->alloc, intConst)); this->intConstantToValueMap->Item(intConst, value); if (isTaggable && !PHASE_OFF(Js::HoistConstIntPhase, this->func)) { // When creating a new int constant value, make sure it gets a symstore. If the int const doesn't have a symstore, // any downstream instruction using the same int will have to create a new value (object) for the int. // This gets in the way of CSE. 
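        // Hoisting the constant load to the function entry gives the value a sym-store that every downstream block can see.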
value = HoistConstantLoadAndPropagateValueBackward(Js::TaggedInt::ToVarUnchecked(intConst), instr, value); if (!value->GetValueInfo()->GetSymStore() && (instr->m_opcode == Js::OpCode::LdC_A_I4 || instr->m_opcode == Js::OpCode::Ld_I4)) { StackSym * sym = instr->GetDst()->GetStackSym(); Assert(sym); if (sym->IsTypeSpec()) { Assert(sym->IsInt32()); StackSym * varSym = sym->GetVarEquivSym(instr->m_func); CurrentBlockData()->SetValue(value, varSym); CurrentBlockData()->liveInt32Syms->Set(varSym->m_id); } else { CurrentBlockData()->SetValue(value, sym); CurrentBlockData()->liveVarSyms->Set(sym->m_id); } } } return value; } ValueInfo * GlobOpt::NewIntRangeValueInfo(const int32 min, const int32 max, const bool wasNegativeZeroPreventedByBailout) { return ValueInfo::NewIntRangeValueInfo(this->alloc, min, max, wasNegativeZeroPreventedByBailout); } ValueInfo *GlobOpt::NewIntRangeValueInfo( const ValueInfo *const originalValueInfo, const int32 min, const int32 max) const { Assert(originalValueInfo); ValueInfo *valueInfo; if(min == max) { // Since int constant values are const-propped, negative zero tracking does not track them, and so it's okay to ignore // 'wasNegativeZeroPreventedByBailout' valueInfo = IntConstantValueInfo::New(alloc, min); } else { valueInfo = IntRangeValueInfo::New( alloc, min, max, min <= 0 && max >= 0 && originalValueInfo->WasNegativeZeroPreventedByBailout()); } valueInfo->SetSymStore(originalValueInfo->GetSymStore()); return valueInfo; } Value * GlobOpt::NewIntRangeValue( const int32 min, const int32 max, const bool wasNegativeZeroPreventedByBailout, IR::Opnd *const opnd) { ValueInfo *valueInfo = this->NewIntRangeValueInfo(min, max, wasNegativeZeroPreventedByBailout); Value *val = NewValue(valueInfo); if (opnd) { GOPT_TRACE_OPND(opnd, _u("Range %d (0x%X) to %d (0x%X)\n"), min, min, max, max); } CurrentBlockData()->InsertNewValue(val, opnd); return val; } IntBoundedValueInfo *GlobOpt::NewIntBoundedValueInfo( const ValueInfo *const originalValueInfo, const IntBounds *const bounds) const { Assert(originalValueInfo); bounds->Verify(); IntBoundedValueInfo *const valueInfo = IntBoundedValueInfo::New( originalValueInfo->Type(), bounds, ( bounds->ConstantLowerBound() <= 0 && bounds->ConstantUpperBound() >= 0 && originalValueInfo->WasNegativeZeroPreventedByBailout() ), alloc); valueInfo->SetSymStore(originalValueInfo->GetSymStore()); return valueInfo; } Value *GlobOpt::NewIntBoundedValue( const ValueType valueType, const IntBounds *const bounds, const bool wasNegativeZeroPreventedByBailout, IR::Opnd *const opnd) { Value *const value = NewValue(IntBoundedValueInfo::New(valueType, bounds, wasNegativeZeroPreventedByBailout, alloc)); CurrentBlockData()->InsertNewValue(value, opnd); return value; } Value * GlobOpt::NewFloatConstantValue(const FloatConstType floatValue, IR::Opnd *const opnd) { FloatConstantValueInfo *valueInfo = FloatConstantValueInfo::New(this->alloc, floatValue); Value *val = NewValue(valueInfo); CurrentBlockData()->InsertNewValue(val, opnd); return val; } Value * GlobOpt::GetVarConstantValue(IR::AddrOpnd *addrOpnd) { bool isVar = addrOpnd->IsVar(); bool isString = isVar && addrOpnd->m_localAddress && JITJavascriptString::Is(addrOpnd->m_localAddress); Value *val = nullptr; Value *cachedValue = nullptr; if(this->addrConstantToValueMap->TryGetValue(addrOpnd->m_address, &cachedValue)) { // The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since // values are cloned for each block, we can't use the same value object. 
We also can't have two values with the same // number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast // way to determine if a value with the same value number exists for this block. So the best we can do with a global // cache is to check the sym-store's value in the current block to see if it has a value with the same number. // Otherwise, we have to create a new value with a new value number. Sym *symStore = cachedValue->GetValueInfo()->GetSymStore(); if(symStore && CurrentBlockData()->IsLive(symStore)) { Value *const symStoreValue = CurrentBlockData()->FindValue(symStore); if(symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber()) { ValueInfo *const symStoreValueInfo = symStoreValue->GetValueInfo(); if(symStoreValueInfo->IsVarConstant() && symStoreValueInfo->AsVarConstant()->VarValue() == addrOpnd->m_address) { val = symStoreValue; } } } } else if (isString) { JITJavascriptString* jsString = JITJavascriptString::FromVar(addrOpnd->m_localAddress); Js::InternalString internalString(jsString->GetString(), jsString->GetLength()); if (this->stringConstantToValueMap->TryGetValue(internalString, &cachedValue)) { Sym *symStore = cachedValue->GetValueInfo()->GetSymStore(); if (symStore && CurrentBlockData()->IsLive(symStore)) { Value *const symStoreValue = CurrentBlockData()->FindValue(symStore); if (symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber()) { ValueInfo *const symStoreValueInfo = symStoreValue->GetValueInfo(); if (symStoreValueInfo->IsVarConstant()) { JITJavascriptString * cachedString = JITJavascriptString::FromVar(symStoreValue->GetValueInfo()->AsVarConstant()->VarValue(true)); Js::InternalString cachedInternalString(cachedString->GetString(), cachedString->GetLength()); if (Js::InternalStringComparer::Equals(internalString, cachedInternalString)) { val = symStoreValue; } } } } } } if(!val) { val = NewVarConstantValue(addrOpnd, isString); } addrOpnd->SetValueType(val->GetValueInfo()->Type()); return val; } Value * GlobOpt::NewVarConstantValue(IR::AddrOpnd *addrOpnd, bool isString) { VarConstantValueInfo *valueInfo = VarConstantValueInfo::New(this->alloc, addrOpnd->m_address, addrOpnd->GetValueType(), false, addrOpnd->m_localAddress); Value * value = NewValue(valueInfo); this->addrConstantToValueMap->Item(addrOpnd->m_address, value); if (isString) { JITJavascriptString* jsString = JITJavascriptString::FromVar(addrOpnd->m_localAddress); Js::InternalString internalString(jsString->GetString(), jsString->GetLength()); this->stringConstantToValueMap->Item(internalString, value); } return value; } Value * GlobOpt::HoistConstantLoadAndPropagateValueBackward(Js::Var varConst, IR::Instr * origInstr, Value * value) { if (this->IsLoopPrePass() || ((this->currentBlock == this->func->m_fg->blockList) && origInstr->TransfersSrcValue())) { return value; } // Only hoisting taggable int const loads for now. Could be extended to other constants (floats, strings, addr opnds) if we see some benefit. 
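    // The assert below enforces that restriction (this path is currently only reached from NewIntConstantValue with a
    // taggable int constant).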
Assert(Js::TaggedInt::Is(varConst)); // Insert a load of the constant at the top of the function StackSym * dstSym = StackSym::New(this->func); IR::RegOpnd * constRegOpnd = IR::RegOpnd::New(dstSym, TyVar, this->func); IR::Instr * loadInstr = IR::Instr::NewConstantLoad(constRegOpnd, (intptr_t)varConst, ValueType::GetInt(true), this->func); this->func->m_fg->blockList->GetFirstInstr()->InsertAfter(loadInstr); // Type-spec the load (Support for floats needs to be added when we start hoisting float constants). bool typeSpecedToInt = false; if (Js::TaggedInt::Is(varConst) && !IsTypeSpecPhaseOff(this->func)) { typeSpecedToInt = true; loadInstr->m_opcode = Js::OpCode::Ld_I4; ToInt32Dst(loadInstr, loadInstr->GetDst()->AsRegOpnd(), this->currentBlock); loadInstr->GetDst()->GetStackSym()->SetIsConst(); } else { CurrentBlockData()->liveVarSyms->Set(dstSym->m_id); } // Add the value (object) to the current block's symToValueMap and propagate the value backward to all relevant blocks so it is available on merges. value = CurrentBlockData()->InsertNewValue(value, constRegOpnd); BVSparse<JitArenaAllocator>* GlobOptBlockData::*bv; bv = typeSpecedToInt ? &GlobOptBlockData::liveInt32Syms : &GlobOptBlockData::liveVarSyms; // Will need to be expanded when we start hoisting float constants. if (this->currentBlock != this->func->m_fg->blockList) { for (InvariantBlockBackwardIterator it(this, this->currentBlock, this->func->m_fg->blockList, nullptr); it.IsValid(); it.MoveNext()) { BasicBlock * block = it.Block(); (block->globOptData.*bv)->Set(dstSym->m_id); Assert(!block->globOptData.FindValue(dstSym)); Value *const valueCopy = CopyValue(value, value->GetValueNumber()); block->globOptData.SetValue(valueCopy, dstSym); } } return value; } Value * GlobOpt::NewFixedFunctionValue(Js::JavascriptFunction *function, IR::AddrOpnd *addrOpnd) { Assert(function != nullptr); Value *val = nullptr; Value *cachedValue = nullptr; if(this->addrConstantToValueMap->TryGetValue(addrOpnd->m_address, &cachedValue)) { // The cached value could be from a different block since this is a global (as opposed to a per-block) cache. Since // values are cloned for each block, we can't use the same value object. We also can't have two values with the same // number in one block, so we can't simply copy the cached value either. And finally, there is no deterministic and fast // way to determine if a value with the same value number exists for this block. So the best we can do with a global // cache is to check the sym-store's value in the current block to see if it has a value with the same number. // Otherwise, we have to create a new value with a new value number. 
Sym *symStore = cachedValue->GetValueInfo()->GetSymStore(); if(symStore && CurrentBlockData()->IsLive(symStore)) { Value *const symStoreValue = CurrentBlockData()->FindValue(symStore); if(symStoreValue && symStoreValue->GetValueNumber() == cachedValue->GetValueNumber()) { ValueInfo *const symStoreValueInfo = symStoreValue->GetValueInfo(); if(symStoreValueInfo->IsVarConstant()) { VarConstantValueInfo *const symStoreVarConstantValueInfo = symStoreValueInfo->AsVarConstant(); if(symStoreVarConstantValueInfo->VarValue() == addrOpnd->m_address && symStoreVarConstantValueInfo->IsFunction()) { val = symStoreValue; } } } } } if(!val) { VarConstantValueInfo *valueInfo = VarConstantValueInfo::New(this->alloc, function, addrOpnd->GetValueType(), true, addrOpnd->m_localAddress); val = NewValue(valueInfo); this->addrConstantToValueMap->AddNew(addrOpnd->m_address, val); } CurrentBlockData()->InsertNewValue(val, addrOpnd); return val; } StackSym *GlobOpt::GetTaggedIntConstantStackSym(const int32 intConstantValue) const { Assert(!Js::TaggedInt::IsOverflow(intConstantValue)); return intConstantToStackSymMap->Lookup(intConstantValue, nullptr); } StackSym *GlobOpt::GetOrCreateTaggedIntConstantStackSym(const int32 intConstantValue) const { StackSym *stackSym = GetTaggedIntConstantStackSym(intConstantValue); if(stackSym) { return stackSym; } stackSym = StackSym::New(TyVar,func); intConstantToStackSymMap->Add(intConstantValue, stackSym); return stackSym; } Sym * GlobOpt::SetSymStore(ValueInfo *valueInfo, Sym *sym) { if (sym->IsStackSym()) { StackSym *stackSym = sym->AsStackSym(); if (stackSym->IsTypeSpec()) { stackSym = stackSym->GetVarEquivSym(this->func); sym = stackSym; } } if (valueInfo->GetSymStore() == nullptr || valueInfo->GetSymStore()->IsPropertySym()) { SetSymStoreDirect(valueInfo, sym); } return sym; } void GlobOpt::SetSymStoreDirect(ValueInfo * valueInfo, Sym * sym) { Sym * prevSymStore = valueInfo->GetSymStore(); CurrentBlockData()->SetChangedSym(prevSymStore); valueInfo->SetSymStore(sym); } // Figure out the Value of this dst. Value * GlobOpt::ValueNumberDst(IR::Instr **pInstr, Value *src1Val, Value *src2Val) { IR::Instr *&instr = *pInstr; IR::Opnd *dst = instr->GetDst(); Value *dstVal = nullptr; Sym *sym; if (instr->CallsSetter()) { return nullptr; } if (dst == nullptr) { return nullptr; } switch (dst->GetKind()) { case IR::OpndKindSym: sym = dst->AsSymOpnd()->m_sym; break; case IR::OpndKindReg: sym = dst->AsRegOpnd()->m_sym; if (OpCodeAttr::TempNumberProducing(instr->m_opcode)) { CurrentBlockData()->isTempSrc->Set(sym->m_id); } else if (OpCodeAttr::TempNumberTransfer(instr->m_opcode)) { IR::Opnd *src1 = instr->GetSrc1(); if (src1->IsRegOpnd() && CurrentBlockData()->isTempSrc->Test(src1->AsRegOpnd()->m_sym->m_id)) { StackSym *src1Sym = src1->AsRegOpnd()->m_sym; // isTempSrc is used for marking isTempLastUse, which is used to generate AddLeftDead() // calls instead of the normal Add helpers. It tells the runtime that concats can use string // builders. // We need to be careful in the case where src1 points to a string builder and is getting aliased. // Clear the bit on src and dst of the transfer instr in this case, unless we can prove src1 // isn't pointing at a string builder, like if it is single def and the def instr is not an Add, // but TempProducing. 
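                // i.e. propagate the temp bit to the dst only when src1 provably isn't pointing at a string builder
                // (its single def is a temp-number-producing instruction other than Add_A); otherwise conservatively
                // clear the bit on both src1 and the dst.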
if (src1Sym->IsSingleDef() && src1Sym->m_instrDef->m_opcode != Js::OpCode::Add_A && OpCodeAttr::TempNumberProducing(src1Sym->m_instrDef->m_opcode)) { CurrentBlockData()->isTempSrc->Set(sym->m_id); } else { CurrentBlockData()->isTempSrc->Clear(src1->AsRegOpnd()->m_sym->m_id); CurrentBlockData()->isTempSrc->Clear(sym->m_id); } } else { CurrentBlockData()->isTempSrc->Clear(sym->m_id); } } else { CurrentBlockData()->isTempSrc->Clear(sym->m_id); } break; case IR::OpndKindIndir: return nullptr; default: return nullptr; } int32 min1, max1, min2, max2, newMin, newMax; ValueInfo *src1ValueInfo = (src1Val ? src1Val->GetValueInfo() : nullptr); ValueInfo *src2ValueInfo = (src2Val ? src2Val->GetValueInfo() : nullptr); switch (instr->m_opcode) { case Js::OpCode::Conv_PrimStr: AssertMsg(instr->GetDst()->GetValueType().IsString(), "Creator of this instruction should have set the type"); if (this->IsLoopPrePass() || src1ValueInfo == nullptr || !src1ValueInfo->IsPrimitive()) { break; } instr->m_opcode = Js::OpCode::Conv_Str; // fall-through case Js::OpCode::Conv_Str: // This opcode is commented out since we don't track regex information in GlobOpt now. //case Js::OpCode::Coerce_Regex: case Js::OpCode::Coerce_Str: AssertMsg(instr->GetDst()->GetValueType().IsString(), "Creator of this instruction should have set the type"); // fall-through case Js::OpCode::Coerce_StrOrRegex: // We don't set the ValueType of src1 for Coerce_StrOrRegex, hence skip the ASSERT if (this->IsLoopPrePass() || src1ValueInfo == nullptr || !src1ValueInfo->IsString()) { break; } instr->m_opcode = Js::OpCode::Ld_A; // fall-through case Js::OpCode::BytecodeArgOutCapture: case Js::OpCode::InitConst: case Js::OpCode::LdAsmJsFunc: case Js::OpCode::Ld_A: case Js::OpCode::Ld_I4: // Propagate sym attributes across the reg copy. if (!this->IsLoopPrePass() && instr->GetSrc1()->IsRegOpnd()) { if (dst->AsRegOpnd()->m_sym->IsSingleDef()) { dst->AsRegOpnd()->m_sym->CopySymAttrs(instr->GetSrc1()->AsRegOpnd()->m_sym); } } if (instr->IsProfiledInstr()) { const ValueType profiledValueType(instr->AsProfiledInstr()->u.FldInfo().valueType); if(!( profiledValueType.IsLikelyInt() && ( (dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt) || (instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt) ) )) { if(!src1ValueInfo) { dstVal = this->NewGenericValue(profiledValueType, dst); } else if(src1ValueInfo->IsUninitialized()) { if(IsLoopPrePass()) { dstVal = this->NewGenericValue(profiledValueType, dst); } else { // Assuming the profile data gives more precise value types based on the path it took at runtime, we // can improve the original value type. src1ValueInfo->Type() = profiledValueType; instr->GetSrc1()->SetValueType(profiledValueType); } } } } if (dstVal == nullptr) { // Ld_A is just transferring the value dstVal = this->ValueNumberTransferDst(instr, src1Val); } break; case Js::OpCode::ExtendArg_A: { // SIMD_JS // We avoid transforming EAs to Lds to keep the IR shape consistent and avoid CSEing of EAs. // CSEOptimize only assigns a Value to the EA dst, and doesn't turn it to a Ld. If this happened, we shouldn't assign a new Value here. 
if (DoCSE()) { IR::Opnd * currDst = instr->GetDst(); Value * currDstVal = CurrentBlockData()->FindValue(currDst->GetStackSym()); if (currDstVal != nullptr) { return currDstVal; } } break; } case Js::OpCode::CheckFixedFld: AssertMsg(false, "CheckFixedFld doesn't have a dst, so we should never get here"); break; case Js::OpCode::LdSlot: case Js::OpCode::LdSlotArr: case Js::OpCode::LdFld: case Js::OpCode::LdFldForTypeOf: case Js::OpCode::LdFldForCallApplyTarget: // Do not transfer value type on ldFldForTypeOf to prevent copy-prop to LdRootFld in case the field doesn't exist since LdRootFldForTypeOf does not throw //case Js::OpCode::LdRootFldForTypeOf: case Js::OpCode::LdRootFld: case Js::OpCode::LdMethodFld: case Js::OpCode::LdRootMethodFld: case Js::OpCode::ScopedLdMethodFld: case Js::OpCode::LdMethodFromFlags: if (instr->IsProfiledInstr()) { ValueType profiledValueType(instr->AsProfiledInstr()->u.FldInfo().valueType); if(!(profiledValueType.IsLikelyInt() && dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt)) { if(!src1ValueInfo) { dstVal = this->NewGenericValue(profiledValueType, dst); } else if(src1ValueInfo->IsUninitialized()) { if(IsLoopPrePass() && (!dst->IsRegOpnd() || !dst->AsRegOpnd()->m_sym->IsSingleDef() || DoFieldHoisting())) { dstVal = this->NewGenericValue(profiledValueType, dst); } else { // Assuming the profile data gives more precise value types based on the path it took at runtime, we // can improve the original value type. src1ValueInfo->Type() = profiledValueType; instr->GetSrc1()->SetValueType(profiledValueType); } } } } if (dstVal == nullptr) { dstVal = this->ValueNumberTransferDst(instr, src1Val); } if(!this->IsLoopPrePass()) { // We cannot transfer value if the field hasn't been copy prop'd because we don't generate // an implicit call bailout between those values if we don't have "live fields" unless, we are hoisting the field. PropertySym *propertySym = instr->GetSrc1()->AsSymOpnd()->m_sym->AsPropertySym(); StackSym * fieldHoistSym; Loop * loop = this->FindFieldHoistStackSym(this->currentBlock->loop, propertySym->m_id, &fieldHoistSym, instr); ValueInfo *dstValueInfo = (dstVal ? dstVal->GetValueInfo() : nullptr); // Update symStore for field hoisting if (loop != nullptr && (dstValueInfo != nullptr)) { this->SetSymStoreDirect(dstValueInfo, fieldHoistSym); } // Update symStore if it isn't a stackSym if (dstVal && (!dstValueInfo->GetSymStore() || !dstValueInfo->GetSymStore()->IsStackSym())) { Assert(dst->IsRegOpnd()); this->SetSymStoreDirect(dstValueInfo, dst->AsRegOpnd()->m_sym); } if (src1Val != dstVal) { CurrentBlockData()->SetValue(dstVal, instr->GetSrc1()); } } break; case Js::OpCode::LdC_A_R8: case Js::OpCode::LdC_A_I4: case Js::OpCode::ArgIn_A: dstVal = src1Val; break; case Js::OpCode::LdStr: if (src1Val == nullptr) { src1Val = NewGenericValue(ValueType::String, dst); } dstVal = src1Val; break; // LdElemUndef only assign undef if the field doesn't exist. // So we don't actually know what the value is, so we can't really copy prop it. //case Js::OpCode::LdElemUndef: case Js::OpCode::StSlot: case Js::OpCode::StSlotChkUndecl: case Js::OpCode::StFld: case Js::OpCode::StRootFld: case Js::OpCode::StFldStrict: case Js::OpCode::StRootFldStrict: case Js::OpCode::InitFld: case Js::OpCode::InitComputedProperty: if (DoFieldCopyProp()) { if (src1Val == nullptr) { // src1 may have no value if it's not a valid var, e.g., NULL for let/const initialization. // Consider creating generic values for such things. 
return nullptr; } AssertMsg(!src2Val, "Bad src Values..."); Assert(sym->IsPropertySym()); SymID symId = sym->m_id; Assert(instr->m_opcode == Js::OpCode::StSlot || instr->m_opcode == Js::OpCode::StSlotChkUndecl || !CurrentBlockData()->liveFields->Test(symId)); if (IsHoistablePropertySym(symId)) { // We have changed the value of a hoistable field, load afterwards shouldn't get hoisted, // but we will still copy prop the pre-assign sym to it if we have a live value. Assert((instr->m_opcode == Js::OpCode::StSlot || instr->m_opcode == Js::OpCode::StSlotChkUndecl) && CurrentBlockData()->liveFields->Test(symId)); CurrentBlockData()->hoistableFields->Clear(symId); } CurrentBlockData()->liveFields->Set(symId); if (!this->IsLoopPrePass() && dst->GetIsDead()) { // Take the property sym out of the live fields set (with special handling for loops). this->EndFieldLifetime(dst->AsSymOpnd()); } dstVal = this->ValueNumberTransferDst(instr, src1Val); } else { return nullptr; } break; case Js::OpCode::Conv_Num: if(src1ValueInfo->IsNumber()) { dstVal = ValueNumberTransferDst(instr, src1Val); } else { return NewGenericValue(src1ValueInfo->Type().ToDefiniteAnyNumber(), dst); } break; case Js::OpCode::Not_A: { if (!src1Val || !src1ValueInfo->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec())) { min1 = INT32_MIN; max1 = INT32_MAX; } this->PropagateIntRangeForNot(min1, max1, &newMin, &newMax); return CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, src2Val); } case Js::OpCode::Xor_A: case Js::OpCode::Or_A: case Js::OpCode::And_A: case Js::OpCode::Shl_A: case Js::OpCode::Shr_A: case Js::OpCode::ShrU_A: { if (!src1Val || !src1ValueInfo->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec())) { min1 = INT32_MIN; max1 = INT32_MAX; } if (!src2Val || !src2ValueInfo->GetIntValMinMax(&min2, &max2, this->DoAggressiveIntTypeSpec())) { min2 = INT32_MIN; max2 = INT32_MAX; } if (instr->m_opcode == Js::OpCode::ShrU_A && min1 < 0 && IntConstantBounds(min2, max2).And_0x1f().Contains(0)) { // Src1 may be too large to represent as a signed int32, and src2 may be zero. // Since the result can therefore be too large to represent as a signed int32, // include Number in the value type. return CreateDstUntransferredValue( ValueType::AnyNumber.SetCanBeTaggedValue(true), instr, src1Val, src2Val); } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); return CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, src2Val); } case Js::OpCode::Incr_A: case Js::OpCode::Decr_A: { ValueType valueType; if(src1Val) { valueType = src1Val->GetValueInfo()->Type().ToDefiniteAnyNumber(); } else { valueType = ValueType::Number; } return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val); } case Js::OpCode::Add_A: { ValueType valueType; if (src1Val && src1ValueInfo->IsLikelyNumber() && src2Val && src2ValueInfo->IsLikelyNumber()) { if(src1ValueInfo->IsLikelyInt() && src2ValueInfo->IsLikelyInt()) { // When doing aggressiveIntType, just assume the result is likely going to be int // if both input is int. const bool isLikelyTagged = src1ValueInfo->IsLikelyTaggedInt() && src2ValueInfo->IsLikelyTaggedInt(); if(src1ValueInfo->IsNumber() && src2ValueInfo->IsNumber()) { // If both of them are numbers then we can definitely say that the result is a number. valueType = ValueType::GetNumberAndLikelyInt(isLikelyTagged); } else { // This is only likely going to be int but can be a string as well. 
valueType = ValueType::GetInt(isLikelyTagged).ToLikely(); } } else { // We can only be certain of any thing if both of them are numbers. // Otherwise, the result could be string. if (src1ValueInfo->IsNumber() && src2ValueInfo->IsNumber()) { if (src1ValueInfo->IsFloat() || src2ValueInfo->IsFloat()) { // If one of them is a float, the result probably is a float instead of just int // but should always be a number. valueType = ValueType::Float; } else { // Could be int, could be number valueType = ValueType::Number; } } else if (src1ValueInfo->IsLikelyFloat() || src2ValueInfo->IsLikelyFloat()) { // Result is likely a float (but can be anything) valueType = ValueType::Float.ToLikely(); } else { // Otherwise it is a likely int or float (but can be anything) valueType = ValueType::Number.ToLikely(); } } } else if((src1Val && src1ValueInfo->IsString()) || (src2Val && src2ValueInfo->IsString())) { // String + anything should always result in a string valueType = ValueType::String; } else if((src1Val && src1ValueInfo->IsNotString() && src1ValueInfo->IsPrimitive()) && (src2Val && src2ValueInfo->IsNotString() && src2ValueInfo->IsPrimitive())) { // If src1 and src2 are not strings and primitive, add should yield a number. valueType = ValueType::Number; } else if((src1Val && src1ValueInfo->IsLikelyString()) || (src2Val && src2ValueInfo->IsLikelyString())) { // likelystring + anything should always result in a likelystring valueType = ValueType::String.ToLikely(); } else { // Number or string. Could make the value a merge of Number and String, but Uninitialized is more useful at the moment. Assert(valueType.IsUninitialized()); } return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val); } case Js::OpCode::Div_A: { ValueType divValueType = GetDivValueType(instr, src1Val, src2Val, false); if (divValueType.IsLikelyInt() || divValueType.IsFloat()) { return CreateDstUntransferredValue(divValueType, instr, src1Val, src2Val); } } // fall-through case Js::OpCode::Sub_A: case Js::OpCode::Mul_A: case Js::OpCode::Rem_A: { ValueType valueType; if( src1Val && src1ValueInfo->IsLikelyInt() && src2Val && src2ValueInfo->IsLikelyInt() && instr->m_opcode != Js::OpCode::Div_A) { const bool isLikelyTagged = src1ValueInfo->IsLikelyTaggedInt() && (src2ValueInfo->IsLikelyTaggedInt() || instr->m_opcode == Js::OpCode::Rem_A); if(src1ValueInfo->IsNumber() && src2ValueInfo->IsNumber()) { valueType = ValueType::GetNumberAndLikelyInt(isLikelyTagged); } else { valueType = ValueType::GetInt(isLikelyTagged).ToLikely(); } } else if ((src1Val && src1ValueInfo->IsLikelyFloat()) || (src2Val && src2ValueInfo->IsLikelyFloat())) { // This should ideally be NewNumberAndLikelyFloatValue since we know the result is a number but not sure if it will // be a float value. However, that Number/LikelyFloat value type doesn't exist currently and all the necessary // checks are done for float values (tagged int checks, etc.) so it's sufficient to just create a float value here. 
valueType = ValueType::Float; } else { valueType = ValueType::Number; } return CreateDstUntransferredValue(valueType, instr, src1Val, src2Val); } case Js::OpCode::CallI: Assert(dst->IsRegOpnd()); return NewGenericValue(dst->AsRegOpnd()->GetValueType(), dst); case Js::OpCode::LdElemI_A: { dstVal = ValueNumberLdElemDst(pInstr, src1Val); const ValueType baseValueType(instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType()); if( ( baseValueType.IsLikelyNativeArray() || #ifdef _M_IX86 ( !AutoSystemInfo::Data.SSE2Available() && baseValueType.IsLikelyObject() && ( baseValueType.GetObjectType() == ObjectType::Float32Array || baseValueType.GetObjectType() == ObjectType::Float64Array ) ) #else false #endif ) && instr->GetDst()->IsVar() && instr->HasBailOutInfo()) { // The lowerer is not going to generate a fast path for this case. Remove any bailouts that require the fast // path. Note that the removed bailouts should not be necessary for correctness. IR::BailOutKind bailOutKind = instr->GetBailOutKind(); if(bailOutKind & IR::BailOutOnArrayAccessHelperCall) { bailOutKind -= IR::BailOutOnArrayAccessHelperCall; } if(bailOutKind == IR::BailOutOnImplicitCallsPreOp) { bailOutKind -= IR::BailOutOnImplicitCallsPreOp; } if(bailOutKind) { instr->SetBailOutKind(bailOutKind); } else { instr->ClearBailOutInfo(); } } return dstVal; } case Js::OpCode::LdMethodElem: // Not worth profiling this, just assume it's likely object (should be likely function but ValueType does not track // functions currently, so using ObjectType::Object instead) dstVal = NewGenericValue(ValueType::GetObject(ObjectType::Object).ToLikely(), dst); if(instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyNativeArray() && instr->HasBailOutInfo()) { // The lowerer is not going to generate a fast path for this case. Remove any bailouts that require the fast // path. Note that the removed bailouts should not be necessary for correctness. 
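            // Strip the BailOutOnArrayAccessHelperCall / BailOutOnImplicitCallsPreOp bits; if nothing is left, the
            // bailout info is cleared entirely.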
IR::BailOutKind bailOutKind = instr->GetBailOutKind(); if(bailOutKind & IR::BailOutOnArrayAccessHelperCall) { bailOutKind -= IR::BailOutOnArrayAccessHelperCall; } if(bailOutKind == IR::BailOutOnImplicitCallsPreOp) { bailOutKind -= IR::BailOutOnImplicitCallsPreOp; } if(bailOutKind) { instr->SetBailOutKind(bailOutKind); } else { instr->ClearBailOutInfo(); } } return dstVal; case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: dstVal = this->ValueNumberTransferDst(instr, src1Val); break; case Js::OpCode::LdLen_A: if (instr->IsProfiledInstr()) { const ValueType profiledValueType(instr->AsProfiledInstr()->u.ldElemInfo->GetElementType()); if(!(profiledValueType.IsLikelyInt() && dst->AsRegOpnd()->m_sym->m_isNotInt)) { return this->NewGenericValue(profiledValueType, dst); } } break; case Js::OpCode::BrOnEmpty: case Js::OpCode::BrOnNotEmpty: Assert(dst->IsRegOpnd()); Assert(dst->GetValueType().IsString()); return this->NewGenericValue(ValueType::String, dst); case Js::OpCode::IsInst: case Js::OpCode::LdTrue: case Js::OpCode::LdFalse: return this->NewGenericValue(ValueType::Boolean, dst); case Js::OpCode::LdUndef: return this->NewGenericValue(ValueType::Undefined, dst); case Js::OpCode::LdC_A_Null: return this->NewGenericValue(ValueType::Null, dst); case Js::OpCode::LdThis: if (!PHASE_OFF(Js::OptTagChecksPhase, this->func) && (src1ValueInfo == nullptr || src1ValueInfo->IsUninitialized())) { return this->NewGenericValue(ValueType::GetObject(ObjectType::Object), dst); } break; case Js::OpCode::Typeof: case Js::OpCode::TypeofElem: return this->NewGenericValue(ValueType::String, dst); case Js::OpCode::InitLocalClosure: Assert(instr->GetDst()); Assert(instr->GetDst()->IsRegOpnd()); IR::RegOpnd *regOpnd = instr->GetDst()->AsRegOpnd(); StackSym *opndStackSym = regOpnd->m_sym; Assert(opndStackSym != nullptr); ObjectSymInfo *objectSymInfo = opndStackSym->m_objectInfo; Assert(objectSymInfo != nullptr); for (PropertySym *localVarSlotList = objectSymInfo->m_propertySymList; localVarSlotList; localVarSlotList = localVarSlotList->m_nextInStackSymList) { this->slotSyms->Set(localVarSlotList->m_id); } break; } #ifdef ENABLE_SIMDJS // SIMD_JS if (Js::IsSimd128Opcode(instr->m_opcode) && !func->GetJITFunctionBody()->IsAsmJsMode()) { ThreadContext::SimdFuncSignature simdFuncSignature; instr->m_func->GetScriptContext()->GetThreadContext()->GetSimdFuncSignatureFromOpcode(instr->m_opcode, simdFuncSignature); return this->NewGenericValue(simdFuncSignature.returnType, dst); } #endif if (dstVal == nullptr) { return this->NewGenericValue(dst->GetValueType(), dst); } return CurrentBlockData()->SetValue(dstVal, dst); } Value * GlobOpt::ValueNumberLdElemDst(IR::Instr **pInstr, Value *srcVal) { IR::Instr *&instr = *pInstr; IR::Opnd *dst = instr->GetDst(); Value *dstVal = nullptr; int32 newMin, newMax; ValueInfo *srcValueInfo = (srcVal ? srcVal->GetValueInfo() : nullptr); ValueType profiledElementType; if (instr->IsProfiledInstr()) { profiledElementType = instr->AsProfiledInstr()->u.ldElemInfo->GetElementType(); if(!(profiledElementType.IsLikelyInt() && dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->m_isNotInt) && srcVal && srcValueInfo->IsUninitialized()) { if(IsLoopPrePass()) { dstVal = NewGenericValue(profiledElementType, dst); } else { // Assuming the profile data gives more precise value types based on the path it took at runtime, we // can improve the original value type. 
srcValueInfo->Type() = profiledElementType; instr->GetSrc1()->SetValueType(profiledElementType); } } } IR::IndirOpnd *src = instr->GetSrc1()->AsIndirOpnd(); const ValueType baseValueType(src->GetBaseOpnd()->GetValueType()); if (instr->DoStackArgsOpt(this->func) || !( baseValueType.IsLikelyOptimizedTypedArray() || (baseValueType.IsLikelyNativeArray() && instr->IsProfiledInstr()) // Specialized native array lowering for LdElem requires that it is profiled. ) || (!this->DoTypedArrayTypeSpec() && baseValueType.IsLikelyOptimizedTypedArray()) || // Don't do type spec on native array with a history of accessing gaps, as this is a bailout (!this->DoNativeArrayTypeSpec() && baseValueType.IsLikelyNativeArray()) || !ShouldExpectConventionalArrayIndexValue(src)) { if(DoTypedArrayTypeSpec() && !IsLoopPrePass()) { GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not type specialize, because %s.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr, instr->DoStackArgsOpt(this->func) ? _u("instruction uses the arguments object") : baseValueType.IsLikelyOptimizedTypedArray() ? _u("index is negative or likely not int") : _u("of array type")); Output::Flush(); } } if(!dstVal) { if(srcVal) { dstVal = this->ValueNumberTransferDst(instr, srcVal); } else { dstVal = NewGenericValue(profiledElementType, dst); } } return dstVal; } Assert(instr->GetSrc1()->IsIndirOpnd()); IRType toType = TyVar; IR::BailOutKind bailOutKind = IR::BailOutConventionalTypedArrayAccessOnly; switch(baseValueType.GetObjectType()) { case ObjectType::Int8Array: case ObjectType::Int8VirtualArray: case ObjectType::Int8MixedArray: newMin = Int8ConstMin; newMax = Int8ConstMax; goto IntArrayCommon; case ObjectType::Uint8Array: case ObjectType::Uint8VirtualArray: case ObjectType::Uint8MixedArray: case ObjectType::Uint8ClampedArray: case ObjectType::Uint8ClampedVirtualArray: case ObjectType::Uint8ClampedMixedArray: newMin = Uint8ConstMin; newMax = Uint8ConstMax; goto IntArrayCommon; case ObjectType::Int16Array: case ObjectType::Int16VirtualArray: case ObjectType::Int16MixedArray: newMin = Int16ConstMin; newMax = Int16ConstMax; goto IntArrayCommon; case ObjectType::Uint16Array: case ObjectType::Uint16VirtualArray: case ObjectType::Uint16MixedArray: newMin = Uint16ConstMin; newMax = Uint16ConstMax; goto IntArrayCommon; case ObjectType::Int32Array: case ObjectType::Int32VirtualArray: case ObjectType::Int32MixedArray: case ObjectType::Uint32Array: // int-specialized loads from uint32 arrays will bail out on values that don't fit in an int32 case ObjectType::Uint32VirtualArray: case ObjectType::Uint32MixedArray: Int32Array: newMin = Int32ConstMin; newMax = Int32ConstMax; goto IntArrayCommon; IntArrayCommon: Assert(dst->IsRegOpnd()); // If int type spec is disabled, it is ok to load int values as they can help float type spec, and merging int32 with float64 => float64. // But if float type spec is also disabled, we'll have problems because float64 merged with var => float64... 
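        // Put differently (one reading of the check below): with both int and float specialization off there is no
        // specialized consumer for an int32 element load, so we return a transferred or generic destination value
        // instead of setting up the typed-array fast path.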
if (!this->DoAggressiveIntTypeSpec() && !this->DoFloatTypeSpec()) { if (!dstVal) { if (srcVal) { dstVal = this->ValueNumberTransferDst(instr, srcVal); } else { dstVal = NewGenericValue(profiledElementType, dst); } } return dstVal; } if (!this->IsLoopPrePass()) { if (instr->HasBailOutInfo()) { const IR::BailOutKind oldBailOutKind = instr->GetBailOutKind(); Assert( ( !(oldBailOutKind & ~IR::BailOutKindBits) || (oldBailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp ) && !(oldBailOutKind & IR::BailOutKindBits & ~(IR::BailOutOnArrayAccessHelperCall | IR::BailOutMarkTempObject))); if (bailOutKind == IR::BailOutConventionalTypedArrayAccessOnly) { // BailOutConventionalTypedArrayAccessOnly also bails out if the array access is outside the head // segment bounds, and guarantees no implicit calls. Override the bailout kind so that the instruction // bails out for the right reason. instr->SetBailOutKind( bailOutKind | (oldBailOutKind & (IR::BailOutKindBits - IR::BailOutOnArrayAccessHelperCall))); } else { // BailOutConventionalNativeArrayAccessOnly by itself may generate a helper call, and may cause implicit // calls to occur, so it must be merged in to eliminate generating the helper call Assert(bailOutKind == IR::BailOutConventionalNativeArrayAccessOnly); instr->SetBailOutKind(oldBailOutKind | bailOutKind); } } else { GenerateBailAtOperation(&instr, bailOutKind); } } TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, nullptr, nullptr, bailOutKind, newMin, newMax, &dstVal); toType = TyInt32; break; case ObjectType::Float32Array: case ObjectType::Float32VirtualArray: case ObjectType::Float32MixedArray: case ObjectType::Float64Array: case ObjectType::Float64VirtualArray: case ObjectType::Float64MixedArray: Float64Array: Assert(dst->IsRegOpnd()); // If float type spec is disabled, don't load float64 values if (!this->DoFloatTypeSpec()) { if (!dstVal) { if (srcVal) { dstVal = this->ValueNumberTransferDst(instr, srcVal); } else { dstVal = NewGenericValue(profiledElementType, dst); } } return dstVal; } if (!this->IsLoopPrePass()) { if (instr->HasBailOutInfo()) { const IR::BailOutKind oldBailOutKind = instr->GetBailOutKind(); Assert( ( !(oldBailOutKind & ~IR::BailOutKindBits) || (oldBailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp ) && !(oldBailOutKind & IR::BailOutKindBits & ~(IR::BailOutOnArrayAccessHelperCall | IR::BailOutMarkTempObject))); if (bailOutKind == IR::BailOutConventionalTypedArrayAccessOnly) { // BailOutConventionalTypedArrayAccessOnly also bails out if the array access is outside the head // segment bounds, and guarantees no implicit calls. Override the bailout kind so that the instruction // bails out for the right reason. 
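                // The expression below reads as: keep every auxiliary bit of the old kind except
                // BailOutOnArrayAccessHelperCall (oldKind & (BailOutKindBits - BailOutOnArrayAccessHelperCall)
                // preserves bits such as BailOutMarkTempObject), then OR in the conventional typed array access kind.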
instr->SetBailOutKind( bailOutKind | (oldBailOutKind & (IR::BailOutKindBits - IR::BailOutOnArrayAccessHelperCall))); } else { // BailOutConventionalNativeArrayAccessOnly by itself may generate a helper call, and may cause implicit // calls to occur, so it must be merged in to eliminate generating the helper call Assert(bailOutKind == IR::BailOutConventionalNativeArrayAccessOnly); instr->SetBailOutKind(oldBailOutKind | bailOutKind); } } else { GenerateBailAtOperation(&instr, bailOutKind); } } TypeSpecializeFloatDst(instr, nullptr, nullptr, nullptr, &dstVal); toType = TyFloat64; break; default: Assert(baseValueType.IsLikelyNativeArray()); bailOutKind = IR::BailOutConventionalNativeArrayAccessOnly; if(baseValueType.HasIntElements()) { goto Int32Array; } Assert(baseValueType.HasFloatElements()); goto Float64Array; } if(!dstVal) { dstVal = NewGenericValue(profiledElementType, dst); } Assert(toType != TyVar); GOPT_TRACE_INSTR(instr, _u("Type specialized array access.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); char dstValTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; dstVal->GetValueInfo()->Type().ToString(dstValTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, type specialized to %s producing %S"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr, toType == TyInt32 ? _u("int32") : _u("float64"), dstValTypeStr); #if DBG_DUMP Output::Print(_u(" (")); dstVal->Dump(); Output::Print(_u(").\n")); #else Output::Print(_u(".\n")); #endif Output::Flush(); } return dstVal; } ValueType GlobOpt::GetPrepassValueTypeForDst( const ValueType desiredValueType, IR::Instr *const instr, Value *const src1Value, Value *const src2Value, bool *const isValueInfoPreciseRef) const { // Values with definite types can be created in the loop prepass only when it is guaranteed that the value type will be the // same on any iteration of the loop. The heuristics currently used are: // - If the source sym is not live on the back-edge, then it acquires a new value for each iteration of the loop, so // that value type can be definite // - Consider: A better solution for this is to track values that originate in this loop, which can have definite value // types. That catches more cases, should look into that in the future. // - If the source sym has a constant value that doesn't change for the duration of the function // - The operation always results in a definite value type. For instance, signed bitwise operations always result in an // int32, conv_num and ++ always result in a number, etc. // - For operations that always result in an int32, the resulting int range is precise only if the source syms pass // the above heuristics. Otherwise, the range must be expanded to the full int32 range. Assert(IsLoopPrePass()); Assert(instr); if(isValueInfoPreciseRef) { *isValueInfoPreciseRef = false; } if(!desiredValueType.IsDefinite()) { return desiredValueType; } if((instr->GetSrc1() && !IsPrepassSrcValueInfoPrecise(instr->GetSrc1(), src1Value)) || (instr->GetSrc2() && !IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Value))) { // If the desired value type is not precise, the value type of the destination is derived from the value types of the // sources. 
Since the value type of a source sym is not definite, the destination value type also cannot be definite. if(desiredValueType.IsInt() && OpCodeAttr::IsInt32(instr->m_opcode)) { // The op always produces an int32, but not always a tagged int return ValueType::GetInt(desiredValueType.IsLikelyTaggedInt()); } if(desiredValueType.IsNumber() && OpCodeAttr::ProducesNumber(instr->m_opcode)) { // The op always produces a number, but not always an int return desiredValueType.ToDefiniteAnyNumber(); } return desiredValueType.ToLikely(); } if(isValueInfoPreciseRef) { // The produced value info is derived from the sources, which have precise value infos *isValueInfoPreciseRef = true; } return desiredValueType; } bool GlobOpt::IsPrepassSrcValueInfoPrecise(IR::Opnd *const src, Value *const srcValue) const { Assert(IsLoopPrePass()); Assert(src); if(!src->IsRegOpnd() || !srcValue) { return false; } ValueInfo *const srcValueInfo = srcValue->GetValueInfo(); if(!srcValueInfo->IsDefinite()) { return false; } StackSym *srcSym = src->AsRegOpnd()->m_sym; Assert(!srcSym->IsTypeSpec()); int32 intConstantValue; return srcSym->IsFromByteCodeConstantTable() || ( srcValueInfo->TryGetIntConstantValue(&intConstantValue) && !Js::TaggedInt::IsOverflow(intConstantValue) && GetTaggedIntConstantStackSym(intConstantValue) == srcSym ) || !currentBlock->loop->regAlloc.liveOnBackEdgeSyms->Test(srcSym->m_id); } Value *GlobOpt::CreateDstUntransferredIntValue( const int32 min, const int32 max, IR::Instr *const instr, Value *const src1Value, Value *const src2Value) { Assert(instr); Assert(instr->GetDst()); Assert(OpCodeAttr::ProducesNumber(instr->m_opcode) || (instr->m_opcode == Js::OpCode::Add_A && src1Value->GetValueInfo()->IsNumber() && src2Value->GetValueInfo()->IsNumber())); ValueType valueType(ValueType::GetInt(IntConstantBounds(min, max).IsLikelyTaggable())); Assert(valueType.IsInt()); bool isValueInfoPrecise; if(IsLoopPrePass()) { valueType = GetPrepassValueTypeForDst(valueType, instr, src1Value, src2Value, &isValueInfoPrecise); } else { isValueInfoPrecise = true; } IR::Opnd *const dst = instr->GetDst(); if(isValueInfoPrecise) { Assert(valueType == ValueType::GetInt(IntConstantBounds(min, max).IsLikelyTaggable())); Assert(!(dst->IsRegOpnd() && dst->AsRegOpnd()->m_sym->IsTypeSpec())); return NewIntRangeValue(min, max, false, dst); } return NewGenericValue(valueType, dst); } Value * GlobOpt::CreateDstUntransferredValue( const ValueType desiredValueType, IR::Instr *const instr, Value *const src1Value, Value *const src2Value) { Assert(instr); Assert(instr->GetDst()); Assert(!desiredValueType.IsInt()); // use CreateDstUntransferredIntValue instead ValueType valueType(desiredValueType); if(IsLoopPrePass()) { valueType = GetPrepassValueTypeForDst(valueType, instr, src1Value, src2Value); } return NewGenericValue(valueType, instr->GetDst()); } Value * GlobOpt::ValueNumberTransferDst(IR::Instr *const instr, Value * src1Val) { Value *dstVal = this->IsLoopPrePass() ? this->ValueNumberTransferDstInPrepass(instr, src1Val) : src1Val; // Don't copy-prop a temp over a user symbol. This is likely to extend the temp's lifetime, as the user symbol // is more likely to already have later references. // REVIEW: Enabling this does cause perf issues... 
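    // The disabled block below would, when the destination is a user symbol (not a bytecode temp) and the value's
    // current sym store is a bytecode temp, move the sym store over to the user symbol so later uses copy-prop the
    // user symbol instead of the temp; it stays under #if 0 because of the perf issues noted above.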
#if 0 if (dstVal != src1Val) { return dstVal; } Sym *dstSym = dst->GetStackSym(); if (dstVal && dstSym && dstSym->IsStackSym() && !dstSym->AsStackSym()->m_isBytecodeTmp) { Sym *dstValSym = dstVal->GetValueInfo()->GetSymStore(); if (dstValSym && dstValSym->AsStackSym()->m_isBytecodeTmp /* src->GetIsDead()*/) { dstVal->GetValueInfo()->SetSymStore(dstSym); } } #endif return dstVal; } bool GlobOpt::IsSafeToTransferInPrePass(IR::Opnd *src, Value *srcValue) { if (this->DoFieldHoisting()) { return false; } if (src->IsRegOpnd()) { StackSym *srcSym = src->AsRegOpnd()->m_sym; if (srcSym->IsFromByteCodeConstantTable()) { return true; } ValueInfo *srcValueInfo = srcValue->GetValueInfo(); int32 srcIntConstantValue; if (srcValueInfo->TryGetIntConstantValue(&srcIntConstantValue) && !Js::TaggedInt::IsOverflow(srcIntConstantValue) && GetTaggedIntConstantStackSym(srcIntConstantValue) == srcSym) { return true; } } return false; } Value * GlobOpt::ValueNumberTransferDstInPrepass(IR::Instr *const instr, Value *const src1Val) { Value *dstVal = nullptr; if (!src1Val) { return nullptr; } bool isValueInfoPrecise; ValueInfo *const src1ValueInfo = src1Val->GetValueInfo(); // TODO: This conflicts with new values created by the type specialization code // We should re-enable if we change that code to avoid the new values. #if 0 if (this->IsSafeToTransferInPrePass(instr->GetSrc1(), src1Val)) { return src1Val; } if (this->IsPREInstrCandidateLoad(instr->m_opcode) && instr->GetDst()) { StackSym *dstSym = instr->GetDst()->AsRegOpnd()->m_sym; for (Loop *curLoop = this->currentBlock->loop; curLoop; curLoop = curLoop->parent) { if (curLoop->fieldPRESymStore->Test(dstSym->m_id)) { return src1Val; } } } if (!this->DoFieldHoisting()) { if (instr->GetDst()->IsRegOpnd()) { StackSym *stackSym = instr->GetDst()->AsRegOpnd()->m_sym; if (stackSym->IsSingleDef() || this->IsLive(stackSym, this->prePassLoop->landingPad)) { IntConstantBounds src1IntConstantBounds; if (src1ValueInfo->TryGetIntConstantBounds(&src1IntConstantBounds) && !( src1IntConstantBounds.LowerBound() == INT32_MIN && src1IntConstantBounds.UpperBound() == INT32_MAX )) { const ValueType valueType( GetPrepassValueTypeForDst(src1ValueInfo->Type(), instr, src1Val, nullptr, &isValueInfoPrecise)); if (isValueInfoPrecise) { return src1Val; } } else { return src1Val; } } } } #endif // Src1's value could change later in the loop, so the value wouldn't be the same for each // iteration. Since we don't iterate over loops "while (!changed)", go conservative on the // first pass when transferring a value that is live on the back-edge. // In prepass we are going to copy the value but with a different value number // for aggressive int type spec. 
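    // In effect, the copy made below keeps the (possibly widened) prepass value type but gets a fresh value number,
    // so a sym defined from src1 is not assumed to hold the same value on every iteration of the loop.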
const ValueType valueType(GetPrepassValueTypeForDst(src1ValueInfo->Type(), instr, src1Val, nullptr, &isValueInfoPrecise)); if(isValueInfoPrecise || (valueType == src1ValueInfo->Type() && src1ValueInfo->IsGeneric())) { Assert(valueType == src1ValueInfo->Type()); dstVal = CopyValue(src1Val); TrackCopiedValueForKills(dstVal); } else { dstVal = NewGenericValue(valueType); dstVal->GetValueInfo()->SetSymStore(src1ValueInfo->GetSymStore()); } return dstVal; } void GlobOpt::PropagateIntRangeForNot(int32 minimum, int32 maximum, int32 *pNewMin, int32* pNewMax) { int32 tmp; Int32Math::Not(minimum, pNewMin); *pNewMax = *pNewMin; Int32Math::Not(maximum, &tmp); *pNewMin = min(*pNewMin, tmp); *pNewMax = max(*pNewMax, tmp); } void GlobOpt::PropagateIntRangeBinary(IR::Instr *instr, int32 min1, int32 max1, int32 min2, int32 max2, int32 *pNewMin, int32* pNewMax) { int32 min, max, tmp, tmp2; min = INT32_MIN; max = INT32_MAX; switch (instr->m_opcode) { case Js::OpCode::Xor_A: case Js::OpCode::Or_A: // Find range with highest high order bit tmp = ::max((uint32)min1, (uint32)max1); tmp2 = ::max((uint32)min2, (uint32)max2); if ((uint32)tmp > (uint32)tmp2) { max = tmp; } else { max = tmp2; } if (max < 0) { min = INT32_MIN; // REVIEW: conservative... max = INT32_MAX; } else { // Turn values like 0x1010 into 0x1111 max = 1 << Math::Log2(max); max = (uint32)(max << 1) - 1; min = 0; } break; case Js::OpCode::And_A: if (min1 == INT32_MIN && min2 == INT32_MIN) { // Shortcut break; } // Find range with lowest higher bit tmp = ::max((uint32)min1, (uint32)max1); tmp2 = ::max((uint32)min2, (uint32)max2); if ((uint32)tmp < (uint32)tmp2) { min = min1; max = max1; } else { min = min2; max = max2; } // To compute max, look if min has higher high bit if ((uint32)min > (uint32)max) { max = min; } // If max is negative, max let's assume it could be -1, so result in MAX_INT if (max < 0) { max = INT32_MAX; } // If min is positive, the resulting min is zero if (min >= 0) { min = 0; } else { min = INT32_MIN; } break; case Js::OpCode::Shl_A: { // Shift count if (min2 != max2 && ((uint32)min2 > 0x1F || (uint32)max2 > 0x1F)) { min2 = 0; max2 = 0x1F; } else { min2 &= 0x1F; max2 &= 0x1F; } int32 min1FreeTopBitCount = min1 ? (sizeof(int32) * 8) - (Math::Log2(min1) + 1) : (sizeof(int32) * 8); int32 max1FreeTopBitCount = max1 ? 
(sizeof(int32) * 8) - (Math::Log2(max1) + 1) : (sizeof(int32) * 8); if (min1FreeTopBitCount <= max2 || max1FreeTopBitCount <= max2) { // If the shift is going to touch the sign bit return the max range min = INT32_MIN; max = INT32_MAX; } else { // Compute max // Turn values like 0x1010 into 0x1111 if (min1) { min1 = 1 << Math::Log2(min1); min1 = (min1 << 1) - 1; } if (max1) { max1 = 1 << Math::Log2(max1); max1 = (uint32)(max1 << 1) - 1; } if (max1 > 0) { int32 nrTopBits = (sizeof(int32) * 8) - Math::Log2(max1); if (nrTopBits < ::min(max2, 30)) max = INT32_MAX; else max = ::max((max1 << ::min(max2, 30)) & ~0x80000000, (min1 << min2) & ~0x80000000); } else { max = (max1 << min2) & ~0x80000000; } // Compute min if (min1 < 0) { min = ::min(min1 << max2, max1 << max2); } else { min = ::min(min1 << min2, max1 << max2); } // Turn values like 0x1110 into 0x1000 if (min) { min = 1 << Math::Log2(min); } } } break; case Js::OpCode::Shr_A: // Shift count if (min2 != max2 && ((uint32)min2 > 0x1F || (uint32)max2 > 0x1F)) { min2 = 0; max2 = 0x1F; } else { min2 &= 0x1F; max2 &= 0x1F; } // Compute max if (max1 < 0) { max = max1 >> max2; } else { max = max1 >> min2; } // Compute min if (min1 < 0) { min = min1 >> min2; } else { min = min1 >> max2; } break; case Js::OpCode::ShrU_A: // shift count is constant zero if ((min2 == max2) && (max2 & 0x1f) == 0) { // We can't encode uint32 result, so it has to be used as int32 only or the original value is positive. Assert(instr->ignoreIntOverflow || min1 >= 0); // We can transfer the signed int32 range. min = min1; max = max1; break; } const IntConstantBounds src2NewBounds = IntConstantBounds(min2, max2).And_0x1f(); // Zero is only allowed if result is always a signed int32 or always used as a signed int32 Assert(min1 >= 0 || instr->ignoreIntOverflow || !src2NewBounds.Contains(0)); min2 = src2NewBounds.LowerBound(); max2 = src2NewBounds.UpperBound(); Assert(min2 <= max2); // zero shift count is only allowed if result is used as int32 and/or value is positive Assert(min2 > 0 || instr->ignoreIntOverflow || min1 >= 0); uint32 umin1 = (uint32)min1; uint32 umax1 = (uint32)max1; if (umin1 > umax1) { uint32 temp = umax1; umax1 = umin1; umin1 = temp; } Assert(min2 >= 0 && max2 < 32); // Compute max if (min1 < 0) { umax1 = UINT32_MAX; } max = umax1 >> min2; // Compute min if (min1 <= 0 && max1 >=0) { min = 0; } else { min = umin1 >> max2; } // We should be able to fit uint32 range as int32 Assert(instr->ignoreIntOverflow || (min >= 0 && max >= 0) ); if (min > max) { // can only happen if shift count can be zero Assert(min2 == 0 && (instr->ignoreIntOverflow || min1 >= 0)); min = Int32ConstMin; max = Int32ConstMax; } break; } *pNewMin = min; *pNewMax = max; } IR::Instr * GlobOpt::TypeSpecialization( IR::Instr *instr, Value **pSrc1Val, Value **pSrc2Val, Value **pDstVal, bool *redoTypeSpecRef, bool *const forceInvariantHoistingRef) { Value *&src1Val = *pSrc1Val; Value *&src2Val = *pSrc2Val; *redoTypeSpecRef = false; Assert(!*forceInvariantHoistingRef); this->ignoredIntOverflowForCurrentInstr = false; this->ignoredNegativeZeroForCurrentInstr = false; // - Int32 values that can't be tagged are created as float constant values instead because a JavascriptNumber var is needed // for that value at runtime. For the purposes of type specialization, recover the int32 values so that they will be // treated as ints. 
// - If int overflow does not matter for the instruction, we can additionally treat uint32 values as int32 values because // the value resulting from the operation will eventually be converted to int32 anyway Value *const src1OriginalVal = src1Val; Value *const src2OriginalVal = src2Val; #ifdef ENABLE_SIMDJS // SIMD_JS if (TypeSpecializeSimd128(instr, pSrc1Val, pSrc2Val, pDstVal)) { return instr; } #endif if(!instr->ShouldCheckForIntOverflow()) { if(src1Val && src1Val->GetValueInfo()->IsFloatConstant()) { int32 int32Value; bool isInt32; if(Js::JavascriptNumber::TryGetInt32OrUInt32Value( src1Val->GetValueInfo()->AsFloatConstant()->FloatValue(), &int32Value, &isInt32)) { src1Val = GetIntConstantValue(int32Value, instr); if(!isInt32) { this->ignoredIntOverflowForCurrentInstr = true; } } } if(src2Val && src2Val->GetValueInfo()->IsFloatConstant()) { int32 int32Value; bool isInt32; if(Js::JavascriptNumber::TryGetInt32OrUInt32Value( src2Val->GetValueInfo()->AsFloatConstant()->FloatValue(), &int32Value, &isInt32)) { src2Val = GetIntConstantValue(int32Value, instr); if(!isInt32) { this->ignoredIntOverflowForCurrentInstr = true; } } } } const AutoRestoreVal autoRestoreSrc1Val(src1OriginalVal, &src1Val); const AutoRestoreVal autoRestoreSrc2Val(src2OriginalVal, &src2Val); if (src1Val && instr->GetSrc2() == nullptr) { // Unary // Note make sure that native array StElemI gets to TypeSpecializeStElem. Do this for typed arrays, too? int32 intConstantValue; if (!this->IsLoopPrePass() && !instr->IsBranchInstr() && src1Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue) && !( // Nothing to fold for element stores. Go into type specialization to see if they can at least be specialized. instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict || instr->m_opcode == Js::OpCode::StElemC || instr->m_opcode == Js::OpCode::MultiBr || instr->m_opcode == Js::OpCode::InlineArrayPop )) { if (OptConstFoldUnary(&instr, intConstantValue, src1Val == src1OriginalVal, pDstVal)) { return instr; } } else if (this->TypeSpecializeUnary( &instr, &src1Val, pDstVal, src1OriginalVal, redoTypeSpecRef, forceInvariantHoistingRef)) { return instr; } else if(*redoTypeSpecRef) { return instr; } } else if (instr->GetSrc2() && !instr->IsBranchInstr()) { // Binary if (!this->IsLoopPrePass()) { if (GetIsAsmJSFunc()) { if (CONFIG_FLAG(WasmFold)) { bool success = instr->GetSrc1()->IsInt64() ? 
this->OptConstFoldBinaryWasm<int64>(&instr, src1Val, src2Val, pDstVal) : this->OptConstFoldBinaryWasm<int>(&instr, src1Val, src2Val, pDstVal); if (success) { return instr; } } } else { // OptConstFoldBinary doesn't do type spec, so only deal with things we are sure are int (IntConstant and IntRange) // and not just likely ints TypeSpecializeBinary will deal with type specializing them and fold them again IntConstantBounds src1IntConstantBounds, src2IntConstantBounds; if (src1Val && src1Val->GetValueInfo()->TryGetIntConstantBounds(&src1IntConstantBounds)) { if (src2Val && src2Val->GetValueInfo()->TryGetIntConstantBounds(&src2IntConstantBounds)) { if (this->OptConstFoldBinary(&instr, src1IntConstantBounds, src2IntConstantBounds, pDstVal)) { return instr; } } } } } } if (instr->GetSrc2() && this->TypeSpecializeBinary(&instr, pSrc1Val, pSrc2Val, pDstVal, src1OriginalVal, src2OriginalVal, redoTypeSpecRef)) { if (!this->IsLoopPrePass() && instr->m_opcode != Js::OpCode::Nop && instr->m_opcode != Js::OpCode::Br && // We may have const fold a branch // Cannot const-peep if the result of the operation is required for a bailout check !(instr->HasBailOutInfo() && instr->GetBailOutKind() & IR::BailOutOnResultConditions)) { if (src1Val && src1Val->GetValueInfo()->HasIntConstantValue()) { if (this->OptConstPeep(instr, instr->GetSrc1(), pDstVal, src1Val->GetValueInfo())) { return instr; } } else if (src2Val && src2Val->GetValueInfo()->HasIntConstantValue()) { if (this->OptConstPeep(instr, instr->GetSrc2(), pDstVal, src2Val->GetValueInfo())) { return instr; } } } return instr; } else if(*redoTypeSpecRef) { return instr; } if (instr->IsBranchInstr() && !this->IsLoopPrePass()) { if (this->OptConstFoldBranch(instr, src1Val, src2Val, pDstVal)) { return instr; } } // We didn't type specialize, make sure the srcs are unspecialized IR::Opnd *src1 = instr->GetSrc1(); if (src1) { instr = this->ToVarUses(instr, src1, false, src1Val); IR::Opnd *src2 = instr->GetSrc2(); if (src2) { instr = this->ToVarUses(instr, src2, false, src2Val); } } IR::Opnd *dst = instr->GetDst(); if (dst) { instr = this->ToVarUses(instr, dst, true, nullptr); // Handling for instructions other than built-ins that may require only dst type specialization // should be added here. if(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode) && !GetIsAsmJSFunc()) // don't need to do typespec for asmjs { this->TypeSpecializeInlineBuiltInDst(&instr, pDstVal); return instr; } // Clear the int specialized bit on the dst. if (dst->IsRegOpnd()) { IR::RegOpnd *dstRegOpnd = dst->AsRegOpnd(); if (!dstRegOpnd->m_sym->IsTypeSpec()) { this->ToVarRegOpnd(dstRegOpnd, this->currentBlock); } else if (dstRegOpnd->m_sym->IsInt32()) { this->ToInt32Dst(instr, dstRegOpnd, this->currentBlock); } else if (dstRegOpnd->m_sym->IsUInt32() && GetIsAsmJSFunc()) { this->ToUInt32Dst(instr, dstRegOpnd, this->currentBlock); } else if (dstRegOpnd->m_sym->IsFloat64()) { this->ToFloat64Dst(instr, dstRegOpnd, this->currentBlock); } } else if (dst->IsSymOpnd() && dst->AsSymOpnd()->m_sym->IsStackSym()) { this->ToVarStackSym(dst->AsSymOpnd()->m_sym->AsStackSym(), this->currentBlock); } } return instr; } bool GlobOpt::OptConstPeep(IR::Instr *instr, IR::Opnd *constSrc, Value **pDstVal, ValueInfo *valuInfo) { int32 value; IR::Opnd *src; IR::Opnd *nonConstSrc = (constSrc == instr->GetSrc1() ? 
instr->GetSrc2() : instr->GetSrc1()); // Try to find the value from value info first if (valuInfo->TryGetIntConstantValue(&value)) { } else if (constSrc->IsAddrOpnd()) { IR::AddrOpnd *addrOpnd = constSrc->AsAddrOpnd(); #ifdef _M_X64 Assert(addrOpnd->IsVar() || Math::FitsInDWord((size_t)addrOpnd->m_address)); #else Assert(sizeof(value) == sizeof(addrOpnd->m_address)); #endif if (addrOpnd->IsVar()) { value = Js::TaggedInt::ToInt32(addrOpnd->m_address); } else { // We asserted that the address will fit in a DWORD above value = ::Math::PointerCastToIntegral<int32>(constSrc->AsAddrOpnd()->m_address); } } else if (constSrc->IsIntConstOpnd()) { value = constSrc->AsIntConstOpnd()->AsInt32(); } else { return false; } switch(instr->m_opcode) { // Can't do all Add_A because of string concats. // Sub_A cannot be transformed to a NEG_A because 0 - 0 != -0 case Js::OpCode::Add_A: src = nonConstSrc; if (!src->GetValueType().IsInt()) { // 0 + -0 != -0 // "Foo" + 0 != "Foo return false; } // fall-through case Js::OpCode::Add_I4: if (value != 0) { return false; } if (constSrc == instr->GetSrc1()) { src = instr->GetSrc2(); } else { src = instr->GetSrc1(); } break; case Js::OpCode::Mul_A: case Js::OpCode::Mul_I4: if (value == 0) { // -0 * 0 != 0 return false; } else if (value == 1) { src = nonConstSrc; } else { return false; } break; case Js::OpCode::Div_A: if (value == 1 && constSrc == instr->GetSrc2()) { src = instr->GetSrc1(); } else { return false; } break; case Js::OpCode::Or_I4: if (value == -1) { src = constSrc; } else if (value == 0) { src = nonConstSrc; } else { return false; } break; case Js::OpCode::And_I4: if (value == -1) { src = nonConstSrc; } else if (value == 0) { src = constSrc; } else { return false; } break; case Js::OpCode::Shl_I4: case Js::OpCode::ShrU_I4: case Js::OpCode::Shr_I4: if (value != 0 || constSrc != instr->GetSrc2()) { return false; } src = instr->GetSrc1(); break; default: return false; } this->CaptureByteCodeSymUses(instr); if (src == instr->GetSrc1()) { instr->FreeSrc2(); } else { Assert(src == instr->GetSrc2()); instr->ReplaceSrc1(instr->UnlinkSrc2()); } instr->m_opcode = Js::OpCode::Ld_A; return true; } Js::Var // TODO: michhol OOP JIT, shouldn't play with Vars GlobOpt::GetConstantVar(IR::Opnd *opnd, Value *val) { ValueInfo *valueInfo = val->GetValueInfo(); if (valueInfo->IsVarConstant() && valueInfo->IsPrimitive()) { return valueInfo->AsVarConstant()->VarValue(); } if (opnd->IsAddrOpnd()) { IR::AddrOpnd *addrOpnd = opnd->AsAddrOpnd(); if (addrOpnd->IsVar()) { return addrOpnd->m_address; } } else if (opnd->IsIntConstOpnd()) { if (!Js::TaggedInt::IsOverflow(opnd->AsIntConstOpnd()->AsInt32())) { return Js::TaggedInt::ToVarUnchecked(opnd->AsIntConstOpnd()->AsInt32()); } } else if (opnd->IsRegOpnd() && opnd->AsRegOpnd()->m_sym->IsSingleDef()) { if (valueInfo->IsBoolean()) { IR::Instr * defInstr = opnd->AsRegOpnd()->m_sym->GetInstrDef(); if (defInstr->m_opcode != Js::OpCode::Ld_A || !defInstr->GetSrc1()->IsAddrOpnd()) { return nullptr; } Assert(defInstr->GetSrc1()->AsAddrOpnd()->IsVar()); return defInstr->GetSrc1()->AsAddrOpnd()->m_address; } else if (valueInfo->IsUndefined()) { return (Js::Var)this->func->GetScriptContextInfo()->GetUndefinedAddr(); } else if (valueInfo->IsNull()) { return (Js::Var)this->func->GetScriptContextInfo()->GetNullAddr(); } } return nullptr; } bool BoolAndIntStaticAndTypeMismatch(Value* src1Val, Value* src2Val, Js::Var src1Var, Js::Var src2Var) { ValueInfo *src1ValInfo = src1Val->GetValueInfo(); ValueInfo *src2ValInfo = src2Val->GetValueInfo(); return 
(src1ValInfo->IsNumber() && src1Var && src2ValInfo->IsBoolean() && src1Var != Js::TaggedInt::ToVarUnchecked(0) && src1Var != Js::TaggedInt::ToVarUnchecked(1)) || (src2ValInfo->IsNumber() && src2Var && src1ValInfo->IsBoolean() && src2Var != Js::TaggedInt::ToVarUnchecked(0) && src2Var != Js::TaggedInt::ToVarUnchecked(1)); } bool GlobOpt::OptConstFoldBranch(IR::Instr *instr, Value *src1Val, Value*src2Val, Value **pDstVal) { if (!src1Val) { return false; } int64 left64, right64; Js::Var src1Var = this->GetConstantVar(instr->GetSrc1(), src1Val); Js::Var src2Var = nullptr; if (instr->GetSrc2()) { if (!src2Val) { return false; } src2Var = this->GetConstantVar(instr->GetSrc2(), src2Val); } auto AreSourcesEqual = [&](Value * val1, Value * val2) -> bool { // NaN !== NaN, and objects can have valueOf/toString return val1->IsEqualTo(val2) && val1->GetValueInfo()->IsPrimitive() && val1->GetValueInfo()->IsNotFloat(); }; // Make sure GetConstantVar only returns primitives. // TODO: OOP JIT, enabled these asserts //Assert(!src1Var || !Js::JavascriptOperators::IsObject(src1Var)); //Assert(!src2Var || !Js::JavascriptOperators::IsObject(src2Var)); BOOL result; int32 constVal; switch (instr->m_opcode) { #define BRANCH(OPCODE,CMP,TYPE,UNSIGNEDNESS) \ case Js::OpCode::##OPCODE: \ if (src1Val->GetValueInfo()->TryGetInt64ConstantValue(&left64, UNSIGNEDNESS) && \ src2Val->GetValueInfo()->TryGetInt64ConstantValue(&right64, UNSIGNEDNESS)) \ { \ result = (TYPE)left64 CMP (TYPE)right64; \ } \ else if (AreSourcesEqual(src1Val, src2Val)) \ { \ result = 0 CMP 0; \ } \ else \ { \ return false; \ } \ break; BRANCH(BrEq_I4, == , int64, false) BRANCH(BrGe_I4, >= , int64, false) BRANCH(BrGt_I4, >, int64, false) BRANCH(BrLt_I4, <, int64, false) BRANCH(BrLe_I4, <= , int64, false) BRANCH(BrNeq_I4, != , int64, false) BRANCH(BrUnGe_I4, >= , uint64, true) BRANCH(BrUnGt_I4, >, uint64, true) BRANCH(BrUnLt_I4, <, uint64, true) BRANCH(BrUnLe_I4, <= , uint64, true) case Js::OpCode::BrEq_A: case Js::OpCode::BrNotNeq_A: if (!src1Var || !src2Var) { if (BoolAndIntStaticAndTypeMismatch(src1Val, src2Val, src1Var, src2Var)) { result = false; } else if (AreSourcesEqual(src1Val, src2Val)) { result = true; } else { return false; } } else { if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts)) { // TODO: OOP JIT, const folding return false; } result = Js::JavascriptOperators::Equal(src1Var, src2Var, this->func->GetScriptContext()); } break; case Js::OpCode::BrNeq_A: case Js::OpCode::BrNotEq_A: if (!src1Var || !src2Var) { if (BoolAndIntStaticAndTypeMismatch(src1Val, src2Val, src1Var, src2Var)) { result = true; } else if (AreSourcesEqual(src1Val, src2Val)) { result = false; } else { return false; } } else { if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts)) { // TODO: OOP JIT, const folding return false; } result = Js::JavascriptOperators::NotEqual(src1Var, src2Var, this->func->GetScriptContext()); } break; case Js::OpCode::BrSrEq_A: case Js::OpCode::BrSrNotNeq_A: if (!src1Var || !src2Var) { ValueInfo *src1ValInfo = src1Val->GetValueInfo(); ValueInfo *src2ValInfo = src2Val->GetValueInfo(); if ( (src1ValInfo->IsUndefined() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenUndefined()) || (src1ValInfo->IsNull() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNull()) || (src1ValInfo->IsBoolean() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenBoolean()) || (src1ValInfo->IsNumber() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNumber()) || (src1ValInfo->IsString() && src2ValInfo->IsDefinite() && 
!src2ValInfo->HasBeenString()) || (src2ValInfo->IsUndefined() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenUndefined()) || (src2ValInfo->IsNull() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNull()) || (src2ValInfo->IsBoolean() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenBoolean()) || (src2ValInfo->IsNumber() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNumber()) || (src2ValInfo->IsString() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenString()) ) { result = false; } else if (AreSourcesEqual(src1Val, src2Val)) { result = true; } else { return false; } } else { if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts)) { // TODO: OOP JIT, const folding return false; } result = Js::JavascriptOperators::StrictEqual(src1Var, src2Var, this->func->GetScriptContext()); } break; case Js::OpCode::BrSrNeq_A: case Js::OpCode::BrSrNotEq_A: if (!src1Var || !src2Var) { ValueInfo *src1ValInfo = src1Val->GetValueInfo(); ValueInfo *src2ValInfo = src2Val->GetValueInfo(); if ( (src1ValInfo->IsUndefined() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenUndefined()) || (src1ValInfo->IsNull() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNull()) || (src1ValInfo->IsBoolean() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenBoolean()) || (src1ValInfo->IsNumber() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenNumber()) || (src1ValInfo->IsString() && src2ValInfo->IsDefinite() && !src2ValInfo->HasBeenString()) || (src2ValInfo->IsUndefined() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenUndefined()) || (src2ValInfo->IsNull() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNull()) || (src2ValInfo->IsBoolean() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenBoolean()) || (src2ValInfo->IsNumber() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenNumber()) || (src2ValInfo->IsString() && src1ValInfo->IsDefinite() && !src1ValInfo->HasBeenString()) ) { result = true; } else if (AreSourcesEqual(src1Val, src2Val)) { result = false; } else { return false; } } else { if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts)) { // TODO: OOP JIT, const folding return false; } result = Js::JavascriptOperators::NotStrictEqual(src1Var, src2Var, this->func->GetScriptContext()); } break; case Js::OpCode::BrFalse_A: case Js::OpCode::BrTrue_A: { ValueInfo *const src1ValueInfo = src1Val->GetValueInfo(); if(src1ValueInfo->IsNull() || src1ValueInfo->IsUndefined()) { result = instr->m_opcode == Js::OpCode::BrFalse_A; break; } if(src1ValueInfo->IsObject() && src1ValueInfo->GetObjectType() > ObjectType::Object) { // Specific object types that are tracked are equivalent to 'true' result = instr->m_opcode == Js::OpCode::BrTrue_A; break; } if (func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts)) { // TODO: OOP JIT, const folding return false; } if (!src1Var) { return false; } result = Js::JavascriptConversion::ToBoolean(src1Var, this->func->GetScriptContext()); if(instr->m_opcode == Js::OpCode::BrFalse_A) { result = !result; } break; } case Js::OpCode::BrFalse_I4: // this path would probably work outside of asm.js, but we should verify that if we ever hit this scenario Assert(GetIsAsmJSFunc()); constVal = 0; if (!src1Val->GetValueInfo()->TryGetIntConstantValue(&constVal)) { return false; } result = constVal == 0; break; default: return false; #undef BRANCH } this->OptConstFoldBr(!!result, instr); return true; } bool GlobOpt::OptConstFoldUnary( IR::Instr * *pInstr, const int32 intConstantValue, const bool isUsingOriginalSrc1Value, Value **pDstVal) { IR::Instr * &instr = 
*pInstr; int32 value = 0; IR::Opnd *constOpnd; bool isInt = true; bool doSetDstVal = true; FloatConstType fValue = 0.0; if (!DoConstFold()) { return false; } if (instr->GetDst() && !instr->GetDst()->IsRegOpnd()) { return false; } switch(instr->m_opcode) { case Js::OpCode::Neg_A: if (intConstantValue == 0) { // Could fold to -0.0 return false; } if (Int32Math::Neg(intConstantValue, &value)) { return false; } break; case Js::OpCode::Not_A: Int32Math::Not(intConstantValue, &value); break; case Js::OpCode::Ld_A: if (instr->HasBailOutInfo()) { //The profile data for switch expr can be string and in GlobOpt we realize it is an int. if(instr->GetBailOutKind() == IR::BailOutExpectingString) { throw Js::RejitException(RejitReason::DisableSwitchOptExpectingString); } Assert(instr->GetBailOutKind() == IR::BailOutExpectingInteger); instr->ClearBailOutInfo(); } value = intConstantValue; if(isUsingOriginalSrc1Value) { doSetDstVal = false; // Let OptDst do it by copying src1Val } break; case Js::OpCode::Conv_Num: case Js::OpCode::LdC_A_I4: value = intConstantValue; if(isUsingOriginalSrc1Value) { doSetDstVal = false; // Let OptDst do it by copying src1Val } break; case Js::OpCode::Incr_A: if (Int32Math::Inc(intConstantValue, &value)) { return false; } break; case Js::OpCode::Decr_A: if (Int32Math::Dec(intConstantValue, &value)) { return false; } break; case Js::OpCode::InlineMathAcos: fValue = Js::Math::Acos((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathAsin: fValue = Js::Math::Asin((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathAtan: fValue = Js::Math::Atan((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathCos: fValue = Js::Math::Cos((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathExp: fValue = Js::Math::Exp((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathLog: fValue = Js::Math::Log((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathSin: fValue = Js::Math::Sin((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathSqrt: fValue = ::sqrt((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathTan: fValue = ::tan((double)intConstantValue); isInt = false; break; case Js::OpCode::InlineMathFround: fValue = (double) (float) intConstantValue; isInt = false; break; case Js::OpCode::InlineMathAbs: if (intConstantValue == INT32_MIN) { if (instr->GetDst()->IsInt32()) { // if dst is an int (e.g. 
in asm.js), we should coerce it, not convert to float value = static_cast<int32>(2147483648U); } else { // Rejit with AggressiveIntTypeSpecDisabled for Math.abs(INT32_MIN) because it causes dst // to be float type which could be different with previous type spec result in LoopPrePass throw Js::RejitException(RejitReason::AggressiveIntTypeSpecDisabled); } } else { value = ::abs(intConstantValue); } break; case Js::OpCode::InlineMathClz: DWORD clz; if (_BitScanReverse(&clz, intConstantValue)) { value = 31 - clz; } else { value = 32; } instr->ClearBailOutInfo(); break; case Js::OpCode::Ctz: Assert(func->GetJITFunctionBody()->IsWasmFunction()); Assert(!instr->HasBailOutInfo()); DWORD ctz; if (_BitScanForward(&ctz, intConstantValue)) { value = ctz; } else { value = 32; } break; case Js::OpCode::InlineMathFloor: value = intConstantValue; instr->ClearBailOutInfo(); break; case Js::OpCode::InlineMathCeil: value = intConstantValue; instr->ClearBailOutInfo(); break; case Js::OpCode::InlineMathRound: value = intConstantValue; instr->ClearBailOutInfo(); break; case Js::OpCode::ToVar: if (Js::TaggedInt::IsOverflow(intConstantValue)) { return false; } else { value = intConstantValue; instr->ClearBailOutInfo(); break; } default: return false; } this->CaptureByteCodeSymUses(instr); Assert(!instr->HasBailOutInfo()); // If we are, in fact, successful in constant folding the instruction, there is no point in having the bailoutinfo around anymore. // Make sure that it is cleared if it was initially present. if (!isInt) { value = (int32)fValue; if (fValue == (double)value) { isInt = true; } } if (isInt) { constOpnd = IR::IntConstOpnd::New(value, TyInt32, instr->m_func); GOPT_TRACE(_u("Constant folding to %d\n"), value); } else { constOpnd = IR::FloatConstOpnd::New(fValue, TyFloat64, instr->m_func); GOPT_TRACE(_u("Constant folding to %f\n"), fValue); } instr->ReplaceSrc1(constOpnd); this->OptSrc(constOpnd, &instr); IR::Opnd *dst = instr->GetDst(); Assert(dst->IsRegOpnd()); StackSym *dstSym = dst->AsRegOpnd()->m_sym; if (isInt) { if (dstSym->IsSingleDef()) { dstSym->SetIsIntConst(value); } if (doSetDstVal) { *pDstVal = GetIntConstantValue(value, instr, dst); } if (IsTypeSpecPhaseOff(this->func)) { instr->m_opcode = Js::OpCode::LdC_A_I4; this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock); } else { instr->m_opcode = Js::OpCode::Ld_I4; this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock); StackSym * currDstSym = instr->GetDst()->AsRegOpnd()->m_sym; if (currDstSym->IsSingleDef()) { currDstSym->SetIsIntConst(value); } } } else { *pDstVal = NewFloatConstantValue(fValue, dst); if (IsTypeSpecPhaseOff(this->func)) { instr->m_opcode = Js::OpCode::LdC_A_R8; this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock); } else { instr->m_opcode = Js::OpCode::LdC_F8_R8; this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock); } } // If this is an induction variable, then treat it the way the prepass would have if it had seen // the assignment and the resulting change to the value number, and mark it as indeterminate. 
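    // Illustrative example: if the folded destination is tracked as an induction variable of an enclosing loop,
    // replacing its def with a constant invalidates whatever increment pattern the prepass recorded for it, so the
    // loop's information about that variable is marked indeterminate below rather than left stale.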
for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent) { InductionVariable *iv = nullptr; if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv)) { iv->SetChangeIsIndeterminate(); } } return true; } //------------------------------------------------------------------------------------------------------ // Type specialization //------------------------------------------------------------------------------------------------------ bool GlobOpt::IsWorthSpecializingToInt32DueToSrc(IR::Opnd *const src, Value *const val) { Assert(src); Assert(val); ValueInfo *valueInfo = val->GetValueInfo(); Assert(valueInfo->IsLikelyInt()); // If it is not known that the operand is definitely an int, the operand is not already type-specialized, and it's not live // in the loop landing pad (if we're in a loop), it's probably not worth type-specializing this instruction. The common case // where type-specializing this would be bad is where the operations are entirely on properties or array elements, where the // ratio of FromVars and ToVars to the number of actual operations is high, and the conversions would dominate the time // spent. On the other hand, if we're using a function formal parameter more than once, it would probably be worth // type-specializing it, hence the IsDead check on the operands. return valueInfo->IsInt() || valueInfo->HasIntConstantValue(true) || !src->GetIsDead() || !src->IsRegOpnd() || CurrentBlockData()->IsInt32TypeSpecialized(src->AsRegOpnd()->m_sym) || (this->currentBlock->loop && this->currentBlock->loop->landingPad->globOptData.IsLive(src->AsRegOpnd()->m_sym)); } bool GlobOpt::IsWorthSpecializingToInt32DueToDst(IR::Opnd *const dst) { Assert(dst); const auto sym = dst->AsRegOpnd()->m_sym; return CurrentBlockData()->IsInt32TypeSpecialized(sym) || (this->currentBlock->loop && this->currentBlock->loop->landingPad->globOptData.IsLive(sym)); } bool GlobOpt::IsWorthSpecializingToInt32(IR::Instr *const instr, Value *const src1Val, Value *const src2Val) { Assert(instr); const auto src1 = instr->GetSrc1(); const auto src2 = instr->GetSrc2(); // In addition to checking each operand and the destination, if for any reason we only have to do a maximum of two // conversions instead of the worst-case 3 conversions, it's probably worth specializing. if (IsWorthSpecializingToInt32DueToSrc(src1, src1Val) || (src2Val && IsWorthSpecializingToInt32DueToSrc(src2, src2Val))) { return true; } IR::Opnd *dst = instr->GetDst(); if (!dst || IsWorthSpecializingToInt32DueToDst(dst)) { return true; } if (dst->IsEqual(src1) || (src2Val && (dst->IsEqual(src2) || src1->IsEqual(src2)))) { return true; } IR::Instr *instrNext = instr->GetNextRealInstrOrLabel(); // Skip useless Ld_A's do { switch (instrNext->m_opcode) { case Js::OpCode::Ld_A: if (!dst->IsEqual(instrNext->GetSrc1())) { goto done; } dst = instrNext->GetDst(); break; case Js::OpCode::LdFld: case Js::OpCode::LdRootFld: case Js::OpCode::LdRootFldForTypeOf: case Js::OpCode::LdFldForTypeOf: case Js::OpCode::LdElemI_A: case Js::OpCode::ByteCodeUses: break; default: goto done; } instrNext = instrNext->GetNextRealInstrOrLabel(); } while (true); done: // If the next instr could also be type specialized, then it is probably worth it. 
if ((instrNext->GetSrc1() && dst->IsEqual(instrNext->GetSrc1())) || (instrNext->GetSrc2() && dst->IsEqual(instrNext->GetSrc2()))) { switch (instrNext->m_opcode) { case Js::OpCode::Add_A: case Js::OpCode::Sub_A: case Js::OpCode::Mul_A: case Js::OpCode::Div_A: case Js::OpCode::Rem_A: case Js::OpCode::Xor_A: case Js::OpCode::And_A: case Js::OpCode::Or_A: case Js::OpCode::Shl_A: case Js::OpCode::Shr_A: case Js::OpCode::Incr_A: case Js::OpCode::Decr_A: case Js::OpCode::Neg_A: case Js::OpCode::Not_A: case Js::OpCode::Conv_Num: case Js::OpCode::BrEq_I4: case Js::OpCode::BrTrue_I4: case Js::OpCode::BrFalse_I4: case Js::OpCode::BrGe_I4: case Js::OpCode::BrGt_I4: case Js::OpCode::BrLt_I4: case Js::OpCode::BrLe_I4: case Js::OpCode::BrNeq_I4: return true; } } return false; } bool GlobOpt::TypeSpecializeNumberUnary(IR::Instr *instr, Value *src1Val, Value **pDstVal) { Assert(src1Val->GetValueInfo()->IsNumber()); if (this->IsLoopPrePass()) { return false; } switch (instr->m_opcode) { case Js::OpCode::Conv_Num: // Optimize Conv_Num away since we know this is a number instr->m_opcode = Js::OpCode::Ld_A; return false; } return false; } bool GlobOpt::TypeSpecializeUnary( IR::Instr **pInstr, Value **pSrc1Val, Value **pDstVal, Value *const src1OriginalVal, bool *redoTypeSpecRef, bool *const forceInvariantHoistingRef) { Assert(pSrc1Val); Value *&src1Val = *pSrc1Val; Assert(src1Val); // We don't need to do typespec for asmjs if (IsTypeSpecPhaseOff(this->func) || GetIsAsmJSFunc()) { return false; } IR::Instr *&instr = *pInstr; int32 min, max; // Inline built-ins explicitly specify how srcs/dst must be specialized. if (OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { TypeSpecializeInlineBuiltInUnary(pInstr, &src1Val, pDstVal, src1OriginalVal, redoTypeSpecRef); return true; } // Consider: If type spec wasn't completely done, make sure that we don't type-spec the dst 2nd time. if(instr->m_opcode == Js::OpCode::LdLen_A && TypeSpecializeLdLen(&instr, &src1Val, pDstVal, forceInvariantHoistingRef)) { return true; } if (!src1Val->GetValueInfo()->GetIntValMinMax(&min, &max, this->DoAggressiveIntTypeSpec())) { src1Val = src1OriginalVal; if (src1Val->GetValueInfo()->IsLikelyFloat()) { // Try to type specialize to float return this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal); } else if (src1Val->GetValueInfo()->IsNumber()) { return TypeSpecializeNumberUnary(instr, src1Val, pDstVal); } return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } return this->TypeSpecializeIntUnary(pInstr, &src1Val, pDstVal, min, max, src1OriginalVal, redoTypeSpecRef); } // Returns true if the built-in requested type specialization, and no further action needed, // otherwise returns false. void GlobOpt::TypeSpecializeInlineBuiltInUnary(IR::Instr **pInstr, Value **pSrc1Val, Value **pDstVal, Value *const src1OriginalVal, bool *redoTypeSpecRef) { IR::Instr *&instr = *pInstr; Assert(pSrc1Val); Value *&src1Val = *pSrc1Val; Assert(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)); Js::BuiltinFunction builtInId = Js::JavascriptLibrary::GetBuiltInInlineCandidateId(instr->m_opcode); // From actual instr, not profile based. Assert(builtInId != Js::BuiltinFunction::None); // Consider using different bailout for float/int FromVars, so that when the arg cannot be converted to number we don't disable // type spec for other parts of the big function but rather just don't inline that built-in instr. // E.g. could do that if the value is not likelyInt/likelyFloat. 
Js::BuiltInFlags builtInFlags = Js::JavascriptLibrary::GetFlagsForBuiltIn(builtInId); bool areAllArgsAlwaysFloat = (builtInFlags & Js::BuiltInFlags::BIF_Args) == Js::BuiltInFlags::BIF_TypeSpecUnaryToFloat; if (areAllArgsAlwaysFloat) { // InlineMathAcos, InlineMathAsin, InlineMathAtan, InlineMathCos, InlineMathExp, InlineMathLog, InlineMathSin, InlineMathSqrt, InlineMathTan. Assert(this->DoFloatTypeSpec()); // Type-spec the src. src1Val = src1OriginalVal; bool retVal = this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal, /* skipDst = */ true); AssertMsg(retVal, "For inline built-ins the args have to be type-specialized to float, but something failed during the process."); // Type-spec the dst. this->TypeSpecializeFloatDst(instr, nullptr, src1Val, nullptr, pDstVal); } else if (instr->m_opcode == Js::OpCode::InlineMathAbs) { // Consider the case when the value is unknown - because of bailout in abs we may disable type spec for the whole function which is too much. // First, try int. int minVal, maxVal; bool shouldTypeSpecToInt = src1Val->GetValueInfo()->GetIntValMinMax(&minVal, &maxVal, /* doAggressiveIntTypeSpec = */ true); if (shouldTypeSpecToInt) { Assert(this->DoAggressiveIntTypeSpec()); bool retVal = this->TypeSpecializeIntUnary(pInstr, &src1Val, pDstVal, minVal, maxVal, src1OriginalVal, redoTypeSpecRef, true); AssertMsg(retVal, "For inline built-ins the args have to be type-specialized (int), but something failed during the process."); if (!this->IsLoopPrePass()) { // Create bailout for INT_MIN which does not have corresponding int value on the positive side. // Check int range: if we know the range is out of overflow, we do not need the bail out at all. if (minVal == INT32_MIN) { GenerateBailAtOperation(&instr, IR::BailOnIntMin); } } // Account for ::abs(INT_MIN) == INT_MIN (which is less than 0). maxVal = ::max( ::abs(Int32Math::NearestInRangeTo(minVal, INT_MIN + 1, INT_MAX)), ::abs(Int32Math::NearestInRangeTo(maxVal, INT_MIN + 1, INT_MAX))); minVal = minVal >= 0 ? minVal : 0; this->TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, src1Val, nullptr, IR::BailOutInvalid, minVal, maxVal, pDstVal); } else { // If we couldn't do int, do float. 
Assert(this->DoFloatTypeSpec()); src1Val = src1OriginalVal; bool retVal = this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal, true); AssertMsg(retVal, "For inline built-ins the args have to be type-specialized (float), but something failed during the process."); this->TypeSpecializeFloatDst(instr, nullptr, src1Val, nullptr, pDstVal); } } else if (instr->m_opcode == Js::OpCode::InlineMathFloor || instr->m_opcode == Js::OpCode::InlineMathCeil || instr->m_opcode == Js::OpCode::InlineMathRound) { // Type specialize src to float src1Val = src1OriginalVal; bool retVal = this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal, /* skipDst = */ true); AssertMsg(retVal, "For inline Math.floor and Math.ceil the src has to be type-specialized to float, but something failed during the process."); // Type specialize dst to int this->TypeSpecializeIntDst( instr, instr->m_opcode, nullptr, src1Val, nullptr, IR::BailOutInvalid, INT32_MIN, INT32_MAX, pDstVal); } else if(instr->m_opcode == Js::OpCode::InlineArrayPop) { IR::Opnd *const thisOpnd = instr->GetSrc1(); Assert(thisOpnd); // Ensure src1 (Array) is a var this->ToVarUses(instr, thisOpnd, false, src1Val); if(!this->IsLoopPrePass() && thisOpnd->GetValueType().IsLikelyNativeArray()) { // We bail out, if there is illegal access or a mismatch in the Native array type that is optimized for, during the run time. GenerateBailAtOperation(&instr, IR::BailOutConventionalNativeArrayAccessOnly); } if(!instr->GetDst()) { return; } // Try Type Specializing the element (return item from Pop) based on the array's profile data. if(thisOpnd->GetValueType().IsLikelyNativeIntArray()) { this->TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, nullptr, nullptr, IR::BailOutInvalid, INT32_MIN, INT32_MAX, pDstVal); } else if(thisOpnd->GetValueType().IsLikelyNativeFloatArray()) { this->TypeSpecializeFloatDst(instr, nullptr, nullptr, nullptr, pDstVal); } else { // We reached here so the Element is not yet type specialized. Ensure element is a var if(instr->GetDst()->IsRegOpnd()) { this->ToVarRegOpnd(instr->GetDst()->AsRegOpnd(), currentBlock); } } } else if (instr->m_opcode == Js::OpCode::InlineMathClz) { Assert(this->DoAggressiveIntTypeSpec()); Assert(this->DoLossyIntTypeSpec()); //Type specialize to int bool retVal = this->TypeSpecializeIntUnary(pInstr, &src1Val, pDstVal, INT32_MIN, INT32_MAX, src1OriginalVal, redoTypeSpecRef); AssertMsg(retVal, "For clz32, the arg has to be type-specialized to int."); } else { AssertMsg(FALSE, "Unsupported built-in!"); } } void GlobOpt::TypeSpecializeInlineBuiltInBinary(IR::Instr **pInstr, Value *src1Val, Value* src2Val, Value **pDstVal, Value *const src1OriginalVal, Value *const src2OriginalVal) { IR::Instr *&instr = *pInstr; Assert(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)); switch(instr->m_opcode) { case Js::OpCode::InlineMathAtan2: { Js::BuiltinFunction builtInId = Js::JavascriptLibrary::GetBuiltInInlineCandidateId(instr->m_opcode); // From actual instr, not profile based. Js::BuiltInFlags builtInFlags = Js::JavascriptLibrary::GetFlagsForBuiltIn(builtInId); bool areAllArgsAlwaysFloat = (builtInFlags & Js::BuiltInFlags::BIF_TypeSpecAllToFloat) != 0; Assert(areAllArgsAlwaysFloat); Assert(this->DoFloatTypeSpec()); // Type-spec the src1, src2 and dst. 
src1Val = src1OriginalVal; src2Val = src2OriginalVal; bool retVal = this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); AssertMsg(retVal, "For pow and atnan2 the args have to be type-specialized to float, but something failed during the process."); break; } case Js::OpCode::InlineMathPow: { #ifndef _M_ARM32_OR_ARM64 if (src2Val->GetValueInfo()->IsLikelyInt()) { bool lossy = false; this->ToInt32(instr, instr->GetSrc2(), this->currentBlock, src2Val, nullptr, lossy); IR::Opnd* src1 = instr->GetSrc1(); int32 valueMin, valueMax; if (src1Val->GetValueInfo()->IsLikelyInt() && this->DoPowIntIntTypeSpec() && src2Val->GetValueInfo()->GetIntValMinMax(&valueMin, &valueMax, this->DoAggressiveIntTypeSpec()) && valueMin >= 0) { this->ToInt32(instr, src1, this->currentBlock, src1Val, nullptr, lossy); this->TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, src1Val, src2Val, IR::BailOutInvalid, INT32_MIN, INT32_MAX, pDstVal); if(!this->IsLoopPrePass()) { GenerateBailAtOperation(&instr, IR::BailOutOnPowIntIntOverflow); } } else { this->ToFloat64(instr, src1, this->currentBlock, src1Val, nullptr, IR::BailOutPrimitiveButString); TypeSpecializeFloatDst(instr, nullptr, src1Val, src2Val, pDstVal); } } else { #endif this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); #ifndef _M_ARM32_OR_ARM64 } #endif break; } case Js::OpCode::InlineMathImul: { Assert(this->DoAggressiveIntTypeSpec()); Assert(this->DoLossyIntTypeSpec()); //Type specialize to int bool retVal = this->TypeSpecializeIntBinary(pInstr, src1Val, src2Val, pDstVal, INT32_MIN, INT32_MAX, false /* skipDst */); AssertMsg(retVal, "For imul, the args have to be type-specialized to int but something failed during the process."); break; } case Js::OpCode::InlineMathMin: case Js::OpCode::InlineMathMax: { if(src1Val->GetValueInfo()->IsLikelyInt() && src2Val->GetValueInfo()->IsLikelyInt()) { // Compute resulting range info int32 min1 = INT32_MIN; int32 max1 = INT32_MAX; int32 min2 = INT32_MIN; int32 max2 = INT32_MAX; int32 newMin, newMax; Assert(this->DoAggressiveIntTypeSpec()); src1Val->GetValueInfo()->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec()); src2Val->GetValueInfo()->GetIntValMinMax(&min2, &max2, this->DoAggressiveIntTypeSpec()); if (instr->m_opcode == Js::OpCode::InlineMathMin) { newMin = min(min1, min2); newMax = min(max1, max2); } else { Assert(instr->m_opcode == Js::OpCode::InlineMathMax); newMin = max(min1, min2); newMax = max(max1, max2); } // Type specialize to int bool retVal = this->TypeSpecializeIntBinary(pInstr, src1Val, src2Val, pDstVal, newMin, newMax, false /* skipDst */); AssertMsg(retVal, "For min and max, the args have to be type-specialized to int if any one of the sources is an int, but something failed during the process."); } // Couldn't type specialize to int, type specialize to float else { Assert(this->DoFloatTypeSpec()); src1Val = src1OriginalVal; src2Val = src2OriginalVal; bool retVal = this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); AssertMsg(retVal, "For min and max, the args have to be type-specialized to float if any one of the sources is a float, but something failed during the process."); } break; } case Js::OpCode::InlineArrayPush: { IR::Opnd *const thisOpnd = instr->GetSrc1(); Assert(thisOpnd); if(instr->GetDst() && instr->GetDst()->IsRegOpnd()) { // Set the dst as live here, as the built-ins return early from the TypeSpecialization functions - before the dst is marked as live. 
// Also, we are not specializing the dst separately and we are skipping the dst to be handled when we specialize the instruction above. this->ToVarRegOpnd(instr->GetDst()->AsRegOpnd(), currentBlock); } // Ensure src1 (Array) is a var this->ToVarUses(instr, thisOpnd, false, src1Val); if(!this->IsLoopPrePass()) { if(thisOpnd->GetValueType().IsLikelyNativeArray()) { // We bail out, if there is illegal access or a mismatch in the Native array type that is optimized for, during run time. GenerateBailAtOperation(&instr, IR::BailOutConventionalNativeArrayAccessOnly); } else { GenerateBailAtOperation(&instr, IR::BailOutOnImplicitCallsPreOp); } } // Try Type Specializing the element based on the array's profile data. if(thisOpnd->GetValueType().IsLikelyNativeFloatArray()) { src1Val = src1OriginalVal; src2Val = src2OriginalVal; } if((thisOpnd->GetValueType().IsLikelyNativeIntArray() && this->TypeSpecializeIntBinary(pInstr, src1Val, src2Val, pDstVal, INT32_MIN, INT32_MAX, true)) || (thisOpnd->GetValueType().IsLikelyNativeFloatArray() && this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal))) { break; } // The Element is not yet type specialized. Ensure element is a var this->ToVarUses(instr, instr->GetSrc2(), false, src2Val); break; } } } void GlobOpt::TypeSpecializeInlineBuiltInDst(IR::Instr **pInstr, Value **pDstVal) { IR::Instr *&instr = *pInstr; Assert(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)); if (instr->m_opcode == Js::OpCode::InlineMathRandom) { Assert(this->DoFloatTypeSpec()); // Type specialize dst to float this->TypeSpecializeFloatDst(instr, nullptr, nullptr, nullptr, pDstVal); } } bool GlobOpt::TryTypeSpecializeUnaryToFloatHelper(IR::Instr** pInstr, Value** pSrc1Val, Value* const src1OriginalVal, Value **pDstVal) { // It has been determined that this instruction cannot be int-specialized. We need to determine whether to attempt to // float-specialize the instruction, or leave it unspecialized. #if !INT32VAR Value*& src1Val = *pSrc1Val; if(src1Val->GetValueInfo()->IsLikelyUntaggedInt()) { // An input range is completely outside the range of an int31. Even if the operation may overflow, it is // unlikely to overflow on these operations, so we leave it unspecialized on 64-bit platforms. However, on // 32-bit platforms, the value is untaggable and will be a JavascriptNumber, which is significantly slower to // use in an unspecialized operation compared to a tagged int. So, try to float-specialize the instruction. src1Val = src1OriginalVal; return this->TypeSpecializeFloatUnary(pInstr, src1Val, pDstVal); } #endif return false; } bool GlobOpt::TypeSpecializeIntBinary(IR::Instr **pInstr, Value *src1Val, Value *src2Val, Value **pDstVal, int32 min, int32 max, bool skipDst /* = false */) { // Consider moving the code for int type spec-ing binary functions here. IR::Instr *&instr = *pInstr; bool lossy = false; if(OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { if(instr->m_opcode == Js::OpCode::InlineArrayPush) { int32 intConstantValue; bool isIntConstMissingItem = src2Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue); if(isIntConstMissingItem) { isIntConstMissingItem = Js::SparseArraySegment<int>::IsMissingItem(&intConstantValue); } // Don't specialize if the element is not likelyInt or an IntConst which is a missing item value. if(!(src2Val->GetValueInfo()->IsLikelyInt()) || isIntConstMissingItem) { return false; } // We don't want to specialize both the source operands, though it is a binary instr. 
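// Illustrative sketch of the "missing item" restriction used below for InlineArrayPush:
// native int arrays reserve one int32 bit pattern to mark holes, so pushing exactly that
// constant cannot be int-specialized. kIntArrayMissingItem and its value are hypothetical
// stand-ins for the runtime's real sentinel, shown only to make the shape of the check clear.
//
//     #include <cstdint>
//
//     constexpr int32_t kIntArrayMissingItem = 0x7FFF0001;   // hypothetical value, not the real one
//
//     static bool CanIntSpecializeConstantPush(int32_t element)
//     {
//         // Storing the reserved sentinel would make the slot indistinguishable from a hole,
//         // so a push of that exact constant stays on the generic (var) path instead.
//         return element != kIntArrayMissingItem;
//     }
//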
IR::Opnd * elementOpnd = instr->GetSrc2(); this->ToInt32(instr, elementOpnd, this->currentBlock, src2Val, nullptr, lossy); } else { IR::Opnd *src1 = instr->GetSrc1(); this->ToInt32(instr, src1, this->currentBlock, src1Val, nullptr, lossy); IR::Opnd *src2 = instr->GetSrc2(); this->ToInt32(instr, src2, this->currentBlock, src2Val, nullptr, lossy); } if(!skipDst) { IR::Opnd *dst = instr->GetDst(); if (dst) { TypeSpecializeIntDst(instr, instr->m_opcode, nullptr, src1Val, src2Val, IR::BailOutInvalid, min, max, pDstVal); } } return true; } else { AssertMsg(false, "Yet to move code for other binary functions here"); return false; } } bool GlobOpt::TypeSpecializeIntUnary( IR::Instr **pInstr, Value **pSrc1Val, Value **pDstVal, int32 min, int32 max, Value *const src1OriginalVal, bool *redoTypeSpecRef, bool skipDst /* = false */) { IR::Instr *&instr = *pInstr; Assert(pSrc1Val); Value *&src1Val = *pSrc1Val; bool isTransfer = false; Js::OpCode opcode; int32 newMin, newMax; bool lossy = false; IR::BailOutKind bailOutKind = IR::BailOutInvalid; bool ignoredIntOverflow = this->ignoredIntOverflowForCurrentInstr; bool ignoredNegativeZero = false; bool checkTypeSpecWorth = false; if(instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt) { return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } AddSubConstantInfo addSubConstantInfo; switch(instr->m_opcode) { case Js::OpCode::Ld_A: if (instr->GetSrc1()->IsRegOpnd()) { StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsInt32TypeSpecialized(sym) == false) { // Type specializing an Ld_A isn't worth it, unless the src // is already type specialized. return false; } } newMin = min; newMax = max; opcode = Js::OpCode::Ld_I4; isTransfer = true; break; case Js::OpCode::Conv_Num: newMin = min; newMax = max; opcode = Js::OpCode::Ld_I4; isTransfer = true; break; case Js::OpCode::LdC_A_I4: newMin = newMax = instr->GetSrc1()->AsIntConstOpnd()->AsInt32(); opcode = Js::OpCode::Ld_I4; break; case Js::OpCode::Neg_A: if (min <= 0 && max >= 0) { if(instr->ShouldCheckForNegativeZero()) { // -0 matters since the sym is not a local, or is used in a way in which -0 would differ from +0 if(!DoAggressiveIntTypeSpec()) { // May result in -0 // Consider adding a dynamic check for src1 == 0 return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } if(min == 0 && max == 0) { // Always results in -0 return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } bailOutKind |= IR::BailOutOnNegativeZero; } else { ignoredNegativeZero = true; } } if (Int32Math::Neg(min, &newMax)) { if(instr->ShouldCheckForIntOverflow()) { if(!DoAggressiveIntTypeSpec()) { // May overflow return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } if(min == max) { // Always overflows return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } bailOutKind |= IR::BailOutOnOverflow; newMax = INT32_MAX; } else { ignoredIntOverflow = true; } } if (Int32Math::Neg(max, &newMin)) { if(instr->ShouldCheckForIntOverflow()) { if(!DoAggressiveIntTypeSpec()) { // May overflow return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } bailOutKind |= IR::BailOutOnOverflow; newMin = INT32_MAX; } else { ignoredIntOverflow = true; } } if(!instr->ShouldCheckForIntOverflow() && newMin > newMax) { // When ignoring overflow, the range needs to account for overflow. 
Since MIN_INT is the only int32 value that // overflows on Neg, and the value resulting from overflow is also MIN_INT, if calculating only the new min or new // max overflowed but not both, then the new min will be greater than the new max. In that case we need to consider // the full range of int32s as possible resulting values. newMin = INT32_MIN; newMax = INT32_MAX; } opcode = Js::OpCode::Neg_I4; checkTypeSpecWorth = true; break; case Js::OpCode::Not_A: if(!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeForNot(min, max, &newMin, &newMax); opcode = Js::OpCode::Not_I4; lossy = true; break; case Js::OpCode::Incr_A: do // while(false) { const auto CannotOverflowBasedOnRelativeBounds = [&]() { const ValueInfo *const src1ValueInfo = src1Val->GetValueInfo(); return (src1ValueInfo->IsInt() || DoAggressiveIntTypeSpec()) && src1ValueInfo->IsIntBounded() && src1ValueInfo->AsIntBounded()->Bounds()->AddCannotOverflowBasedOnRelativeBounds(1); }; if (Int32Math::Inc(min, &newMin)) { if(CannotOverflowBasedOnRelativeBounds()) { newMin = INT32_MAX; } else if(instr->ShouldCheckForIntOverflow()) { // Always overflows return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } else { // When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since overflow // causes the value to wrap around, and we don't have a way to specify a lower and upper range of ints, // we use the full range of int32s. ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if (Int32Math::Inc(max, &newMax)) { if(CannotOverflowBasedOnRelativeBounds()) { newMax = INT32_MAX; } else if(instr->ShouldCheckForIntOverflow()) { if(!DoAggressiveIntTypeSpec()) { // May overflow return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } bailOutKind |= IR::BailOutOnOverflow; newMax = INT32_MAX; } else { // See comment about ignoring overflow above ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } } while(false); if(!ignoredIntOverflow && instr->GetSrc1()->IsRegOpnd()) { addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min == max, 1); } opcode = Js::OpCode::Add_I4; if (!this->IsLoopPrePass()) { instr->SetSrc2(IR::IntConstOpnd::New(1, TyInt32, instr->m_func)); } checkTypeSpecWorth = true; break; case Js::OpCode::Decr_A: do // while(false) { const auto CannotOverflowBasedOnRelativeBounds = [&]() { const ValueInfo *const src1ValueInfo = src1Val->GetValueInfo(); return (src1ValueInfo->IsInt() || DoAggressiveIntTypeSpec()) && src1ValueInfo->IsIntBounded() && src1ValueInfo->AsIntBounded()->Bounds()->SubCannotOverflowBasedOnRelativeBounds(1); }; if (Int32Math::Dec(max, &newMax)) { if(CannotOverflowBasedOnRelativeBounds()) { newMax = INT32_MIN; } else if(instr->ShouldCheckForIntOverflow()) { // Always overflows return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } else { // When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since overflow // causes the value to wrap around, and we don't have a way to specify a lower and upper range of ints, we // use the full range of int32s. 
ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if (Int32Math::Dec(min, &newMin)) { if(CannotOverflowBasedOnRelativeBounds()) { newMin = INT32_MIN; } else if(instr->ShouldCheckForIntOverflow()) { if(!DoAggressiveIntTypeSpec()) { // May overflow return TryTypeSpecializeUnaryToFloatHelper(pInstr, &src1Val, src1OriginalVal, pDstVal); } bailOutKind |= IR::BailOutOnOverflow; newMin = INT32_MIN; } else { // See comment about ignoring overflow above ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } } while(false); if(!ignoredIntOverflow && instr->GetSrc1()->IsRegOpnd()) { addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min == max, -1); } opcode = Js::OpCode::Sub_I4; if (!this->IsLoopPrePass()) { instr->SetSrc2(IR::IntConstOpnd::New(1, TyInt32, instr->m_func)); } checkTypeSpecWorth = true; break; case Js::OpCode::BrFalse_A: case Js::OpCode::BrTrue_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrFalse(instr, src1Val, min, max)) { return true; } bool specialize = true; if (!src1Val->GetValueInfo()->HasIntConstantValue() && instr->GetSrc1()->IsRegOpnd()) { StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsInt32TypeSpecialized(sym) == false) { // Type specializing a BrTrue_A/BrFalse_A isn't worth it, unless the src // is already type specialized specialize = false; } } if(instr->m_opcode == Js::OpCode::BrTrue_A) { UpdateIntBoundsForNotEqualBranch(src1Val, nullptr, 0); opcode = Js::OpCode::BrTrue_I4; } else { UpdateIntBoundsForEqualBranch(src1Val, nullptr, 0); opcode = Js::OpCode::BrFalse_I4; } if(!specialize) { return false; } newMin = 2; newMax = 1; // We'll assert if we make a range where min > max break; } case Js::OpCode::MultiBr: newMin = min; newMax = max; opcode = instr->m_opcode; break; case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: case Js::OpCode::StElemC: if(instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyAnyArrayWithNativeFloatValues()) { src1Val = src1OriginalVal; } return TypeSpecializeStElem(pInstr, src1Val, pDstVal); case Js::OpCode::NewScArray: case Js::OpCode::NewScArrayWithMissingValues: case Js::OpCode::InitFld: case Js::OpCode::InitRootFld: case Js::OpCode::StSlot: case Js::OpCode::StSlotChkUndecl: #if !FLOATVAR case Js::OpCode::StSlotBoxTemp: #endif case Js::OpCode::StFld: case Js::OpCode::StRootFld: case Js::OpCode::StFldStrict: case Js::OpCode::StRootFldStrict: case Js::OpCode::ArgOut_A: case Js::OpCode::ArgOut_A_Inline: case Js::OpCode::ArgOut_A_FixupForStackArgs: case Js::OpCode::ArgOut_A_Dynamic: case Js::OpCode::ArgOut_A_FromStackArgs: case Js::OpCode::ArgOut_A_SpreadArg: // For this one we need to implement type specialization //case Js::OpCode::ArgOut_A_InlineBuiltIn: case Js::OpCode::Ret: case Js::OpCode::LdElemUndef: case Js::OpCode::LdElemUndefScoped: return false; default: if (OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { newMin = min; newMax = max; opcode = instr->m_opcode; break; // Note: we must keep checkTypeSpecWorth = false to make sure we never return false from this function. 
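// Sketch of the range test behind the TryOptConstFoldBrFalse call in the BrFalse_A/BrTrue_A
// case above (the production code goes through ValueInfo::IsEqualTo / IsNotEqualTo rather
// than raw bounds; this only shows the arithmetic that makes the fold legal):
//
//     #include <cstdint>
//
//     enum class BranchFold { NotFolded, AlwaysTaken, NeverTaken };
//
//     static BranchFold FoldBrFalse(int32_t min, int32_t max)
//     {
//         if (min == 0 && max == 0) return BranchFold::AlwaysTaken;  // value is provably 0
//         if (min > 0 || max < 0)   return BranchFold::NeverTaken;   // range excludes 0
//         return BranchFold::NotFolded;                              // range straddles 0
//     }
//
//     // BrTrue_A is the mirror image: swap AlwaysTaken and NeverTaken.
//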
} return false; } // If this instruction is in a range of instructions where int overflow does not matter, we will still specialize it (won't // leave it unspecialized based on heuristics), since it is most likely worth specializing, and the dst value needs to be // guaranteed to be an int if(checkTypeSpecWorth && !ignoredIntOverflow && !ignoredNegativeZero && instr->ShouldCheckForIntOverflow() && !IsWorthSpecializingToInt32(instr, src1Val)) { // Even though type specialization is being skipped since it may not be worth it, the proper value should still be // maintained so that the result may be type specialized later. An int value is not created for the dst in any of // the following cases. // - A bailout check is necessary to specialize this instruction. The bailout check is what guarantees the result to be // an int, but since we're not going to specialize this instruction, there won't be a bailout check. // - Aggressive int type specialization is disabled and we're in a loop prepass. We're conservative on dst values in // that case, especially if the dst sym is live on the back-edge. if(bailOutKind == IR::BailOutInvalid && instr->GetDst() && (DoAggressiveIntTypeSpec() || !this->IsLoopPrePass())) { *pDstVal = CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, nullptr); } if(instr->GetSrc2()) { instr->FreeSrc2(); } return false; } this->ignoredIntOverflowForCurrentInstr = ignoredIntOverflow; this->ignoredNegativeZeroForCurrentInstr = ignoredNegativeZero; { // Try CSE again before modifying the IR, in case some attributes are required for successful CSE Value *src1IndirIndexVal = nullptr; Value *src2Val = nullptr; if(CSEOptimize(currentBlock, &instr, &src1Val, &src2Val, &src1IndirIndexVal, true /* intMathExprOnly */)) { *redoTypeSpecRef = true; return false; } } const Js::OpCode originalOpCode = instr->m_opcode; if (!this->IsLoopPrePass()) { // No re-write on prepass instr->m_opcode = opcode; } Value *src1ValueToSpecialize = src1Val; if(lossy) { // Lossy conversions to int32 must be done based on the original source values. For instance, if one of the values is a // float constant with a value that fits in a uint32 but not an int32, and the instruction can ignore int overflow, the // source value for the purposes of int specialization would have been changed to an int constant value by ignoring // overflow. If we were to specialize the sym using the int constant value, it would be treated as a lossless // conversion, but since there may be subsequent uses of the same float constant value that may not ignore overflow, // this must be treated as a lossy conversion by specializing the sym using the original float constant value. src1ValueToSpecialize = src1OriginalVal; } // Make sure the srcs are specialized IR::Opnd *src1 = instr->GetSrc1(); this->ToInt32(instr, src1, this->currentBlock, src1ValueToSpecialize, nullptr, lossy); if(bailOutKind != IR::BailOutInvalid && !this->IsLoopPrePass()) { GenerateBailAtOperation(&instr, bailOutKind); } if (!skipDst) { IR::Opnd *dst = instr->GetDst(); if (dst) { AssertMsg(!(isTransfer && !this->IsLoopPrePass()) || min == newMin && max == newMax, "If this is just a copy, old/new min/max should be the same"); TypeSpecializeIntDst( instr, originalOpCode, isTransfer ? src1Val : nullptr, src1Val, nullptr, bailOutKind, newMin, newMax, pDstVal, addSubConstantInfo.HasInfo() ? 
&addSubConstantInfo : nullptr); } } if(bailOutKind == IR::BailOutInvalid) { GOPT_TRACE(_u("Type specialized to INT\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT: ")); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } else { GOPT_TRACE(_u("Type specialized to INT with bailout on:\n")); if(bailOutKind & IR::BailOutOnOverflow) { GOPT_TRACE(_u(" Overflow\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT with bailout (%S): "), "Overflow"); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } if(bailOutKind & IR::BailOutOnNegativeZero) { GOPT_TRACE(_u(" Zero\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT with bailout (%S): "), "Zero"); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } } return true; } void GlobOpt::TypeSpecializeIntDst(IR::Instr* instr, Js::OpCode originalOpCode, Value* valToTransfer, Value *const src1Value, Value *const src2Value, const IR::BailOutKind bailOutKind, int32 newMin, int32 newMax, Value** pDstVal, const AddSubConstantInfo *const addSubConstantInfo) { this->TypeSpecializeIntDst(instr, originalOpCode, valToTransfer, src1Value, src2Value, bailOutKind, ValueType::GetInt(IntConstantBounds(newMin, newMax).IsLikelyTaggable()), newMin, newMax, pDstVal, addSubConstantInfo); } void GlobOpt::TypeSpecializeIntDst(IR::Instr* instr, Js::OpCode originalOpCode, Value* valToTransfer, Value *const src1Value, Value *const src2Value, const IR::BailOutKind bailOutKind, ValueType valueType, Value** pDstVal, const AddSubConstantInfo *const addSubConstantInfo) { this->TypeSpecializeIntDst(instr, originalOpCode, valToTransfer, src1Value, src2Value, bailOutKind, valueType, 0, 0, pDstVal, addSubConstantInfo); } void GlobOpt::TypeSpecializeIntDst(IR::Instr* instr, Js::OpCode originalOpCode, Value* valToTransfer, Value *const src1Value, Value *const src2Value, const IR::BailOutKind bailOutKind, ValueType valueType, int32 newMin, int32 newMax, Value** pDstVal, const AddSubConstantInfo *const addSubConstantInfo) { Assert(valueType.IsInt() || (valueType.IsNumber() && valueType.IsLikelyInt() && newMin == 0 && newMax == 0)); Assert(!valToTransfer || valToTransfer == src1Value); Assert(!addSubConstantInfo || addSubConstantInfo->HasInfo()); IR::Opnd *dst = instr->GetDst(); Assert(dst); bool isValueInfoPrecise; if(IsLoopPrePass()) { valueType = GetPrepassValueTypeForDst(valueType, instr, src1Value, src2Value, &isValueInfoPrecise); } else { isValueInfoPrecise = true; } // If dst has a circular reference in a loop, it probably won't get specialized. Don't mark the dst as type-specialized on // the pre-pass. With aggressive int spec though, it will take care of bailing out if necessary so there's no need to assume // that the dst will be a var even if it's live on the back-edge. Also if the op always produces an int32, then there's no // ambiguity in the dst's value type even in the prepass. 
if (!DoAggressiveIntTypeSpec() && this->IsLoopPrePass() && !valueType.IsInt()) { if (dst->IsRegOpnd()) { this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock); } return; } const IntBounds *dstBounds = nullptr; if(addSubConstantInfo && !addSubConstantInfo->SrcValueIsLikelyConstant() && DoTrackRelativeIntBounds()) { Assert(!ignoredIntOverflowForCurrentInstr); // Track bounds for add or sub with a constant. For instance, consider (b = a + 2). The value of 'b' should track that // it is equal to (the value of 'a') + 2. Additionally, the value of 'b' should inherit the bounds of 'a', offset by // the constant value. if(!valueType.IsInt() || !isValueInfoPrecise) { newMin = INT32_MIN; newMax = INT32_MAX; } dstBounds = IntBounds::Add( addSubConstantInfo->SrcValue(), addSubConstantInfo->Offset(), isValueInfoPrecise, IntConstantBounds(newMin, newMax), alloc); } // Src1's value could change later in the loop, so the value wouldn't be the same for each // iteration. Since we don't iterate over loops "while (!changed)", go conservative on the // pre-pass. if (valToTransfer) { // If this is just a copy, no need for creating a new value. Assert(!addSubConstantInfo); *pDstVal = this->ValueNumberTransferDst(instr, valToTransfer); CurrentBlockData()->InsertNewValue(*pDstVal, dst); } else if (valueType.IsInt() && isValueInfoPrecise) { bool wasNegativeZeroPreventedByBailout = false; if(newMin <= 0 && newMax >= 0) { switch(originalOpCode) { case Js::OpCode::Add_A: // -0 + -0 == -0 Assert(src1Value); Assert(src2Value); wasNegativeZeroPreventedByBailout = src1Value->GetValueInfo()->WasNegativeZeroPreventedByBailout() && src2Value->GetValueInfo()->WasNegativeZeroPreventedByBailout(); break; case Js::OpCode::Sub_A: // -0 - 0 == -0 Assert(src1Value); wasNegativeZeroPreventedByBailout = src1Value->GetValueInfo()->WasNegativeZeroPreventedByBailout(); break; case Js::OpCode::Neg_A: case Js::OpCode::Mul_A: case Js::OpCode::Div_A: case Js::OpCode::Rem_A: wasNegativeZeroPreventedByBailout = !!(bailOutKind & IR::BailOutOnNegativeZero); break; } } *pDstVal = dstBounds ? NewIntBoundedValue(valueType, dstBounds, wasNegativeZeroPreventedByBailout, nullptr) : NewIntRangeValue(newMin, newMax, wasNegativeZeroPreventedByBailout, nullptr); } else { *pDstVal = dstBounds ? 
NewIntBoundedValue(valueType, dstBounds, false, nullptr) : NewGenericValue(valueType); } if(addSubConstantInfo || updateInductionVariableValueNumber) { TrackIntSpecializedAddSubConstant(instr, addSubConstantInfo, *pDstVal, !!dstBounds); } CurrentBlockData()->SetValue(*pDstVal, dst); AssertMsg(dst->IsRegOpnd(), "What else?"); this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock); } bool GlobOpt::TypeSpecializeBinary(IR::Instr **pInstr, Value **pSrc1Val, Value **pSrc2Val, Value **pDstVal, Value *const src1OriginalVal, Value *const src2OriginalVal, bool *redoTypeSpecRef) { IR::Instr *&instr = *pInstr; int32 min1 = INT32_MIN, max1 = INT32_MAX, min2 = INT32_MIN, max2 = INT32_MAX, newMin, newMax, tmp; Js::OpCode opcode; Value *&src1Val = *pSrc1Val; Value *&src2Val = *pSrc2Val; // We don't need to do typespec for asmjs if (IsTypeSpecPhaseOff(this->func) || GetIsAsmJSFunc()) { return false; } if (OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { this->TypeSpecializeInlineBuiltInBinary(pInstr, src1Val, src2Val, pDstVal, src1OriginalVal, src2OriginalVal); return true; } if (src1Val) { src1Val->GetValueInfo()->GetIntValMinMax(&min1, &max1, this->DoAggressiveIntTypeSpec()); } if (src2Val) { src2Val->GetValueInfo()->GetIntValMinMax(&min2, &max2, this->DoAggressiveIntTypeSpec()); } // Type specialize binary operators to int32 bool src1Lossy = true; bool src2Lossy = true; IR::BailOutKind bailOutKind = IR::BailOutInvalid; bool ignoredIntOverflow = this->ignoredIntOverflowForCurrentInstr; bool ignoredNegativeZero = false; bool skipSrc2 = false; bool skipDst = false; bool needsBoolConv = false; AddSubConstantInfo addSubConstantInfo; switch (instr->m_opcode) { case Js::OpCode::Or_A: if (!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::Or_I4; break; case Js::OpCode::And_A: if (!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::And_I4; break; case Js::OpCode::Xor_A: if (!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::Xor_I4; break; case Js::OpCode::Shl_A: if (!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::Shl_I4; break; case Js::OpCode::Shr_A: if (!DoLossyIntTypeSpec()) { return false; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::Shr_I4; break; case Js::OpCode::ShrU_A: if (!DoLossyIntTypeSpec()) { return false; } if (min1 < 0 && IntConstantBounds(min2, max2).And_0x1f().Contains(0)) { // Src1 may be too large to represent as a signed int32, and src2 may be zero. Unless the resulting value is only // used as a signed int32 (hence allowing us to ignore the result's sign), don't specialize the instruction. if (!instr->ignoreIntOverflow) return false; ignoredIntOverflow = true; } this->PropagateIntRangeBinary(instr, min1, max1, min2, max2, &newMin, &newMax); opcode = Js::OpCode::ShrU_I4; break; case Js::OpCode::BrUnLe_A: // Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely // int already, so require that both sources are likely int for folding. 
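// Regarding the ShrU_A case above: a standalone sketch of why a possibly-negative src1 with a
// shift count that may be zero is only specialized when the consumer ignores the sign
// (ignoreIntOverflow). The unsigned result may simply not fit in a signed int32:
//
//     #include <cstdint>
//
//     static bool UnsignedShiftFitsInt32(int32_t src, uint32_t shiftCount)
//     {
//         const uint32_t result = static_cast<uint32_t>(src) >> (shiftCount & 0x1f);
//         // e.g. src == -1, shiftCount == 0  =>  result == 4294967295u, which has no int32
//         // representation even though every input was representable.
//         return result <= static_cast<uint32_t>(INT32_MAX);
//     }
//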
if (DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrUnsignedGreaterThan(instr, false, src1Val, min1, max1, src2Val, min2, max2)) { return true; } if (min1 >= 0 && min2 >= 0) { // Only handle positive values since this is unsigned... // Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults // (INT32_MIN, INT32_MAX), so we're good. Assert(src1Val); Assert(src1Val->GetValueInfo()->IsLikelyInt()); Assert(src2Val); Assert(src2Val->GetValueInfo()->IsLikelyInt()); UpdateIntBoundsForLessThanOrEqualBranch(src1Val, src2Val); } if (!DoLossyIntTypeSpec()) { return false; } newMin = newMax = 0; opcode = Js::OpCode::BrUnLe_I4; break; case Js::OpCode::BrUnLt_A: // Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely // int already, so require that both sources are likely int for folding. if (DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrUnsignedLessThan(instr, true, src1Val, min1, max1, src2Val, min2, max2)) { return true; } if (min1 >= 0 && min2 >= 0) { // Only handle positive values since this is unsigned... // Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults // (INT32_MIN, INT32_MAX), so we're good. Assert(src1Val); Assert(src1Val->GetValueInfo()->IsLikelyInt()); Assert(src2Val); Assert(src2Val->GetValueInfo()->IsLikelyInt()); UpdateIntBoundsForLessThanBranch(src1Val, src2Val); } if (!DoLossyIntTypeSpec()) { return false; } newMin = newMax = 0; opcode = Js::OpCode::BrUnLt_I4; break; case Js::OpCode::BrUnGe_A: // Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely // int already, so require that both sources are likely int for folding. if (DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrUnsignedLessThan(instr, false, src1Val, min1, max1, src2Val, min2, max2)) { return true; } if (min1 >= 0 && min2 >= 0) { // Only handle positive values since this is unsigned... // Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults // (INT32_MIN, INT32_MAX), so we're good. Assert(src1Val); Assert(src1Val->GetValueInfo()->IsLikelyInt()); Assert(src2Val); Assert(src2Val->GetValueInfo()->IsLikelyInt()); UpdateIntBoundsForGreaterThanOrEqualBranch(src1Val, src2Val); } if (!DoLossyIntTypeSpec()) { return false; } newMin = newMax = 0; opcode = Js::OpCode::BrUnGe_I4; break; case Js::OpCode::BrUnGt_A: // Folding the branch based on bounds will attempt a lossless int32 conversion of the sources if they are not definitely // int already, so require that both sources are likely int for folding. if (DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrUnsignedGreaterThan(instr, true, src1Val, min1, max1, src2Val, min2, max2)) { return true; } if (min1 >= 0 && min2 >= 0) { // Only handle positive values since this is unsigned... // Bounds are tracked only for likely int values. Only likely int values may have bounds that are not the defaults // (INT32_MIN, INT32_MAX), so we're good. 
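// The "only handle positive values" guards above rely on a simple fact: for int32 values known
// to be >= 0, unsigned and signed ordering agree, so the unsigned branches can feed the
// ordinary signed bound tracking. A standalone sketch of that equivalence:
//
//     #include <cstdint>
//
//     static bool UnsignedMatchesSignedCompare(int32_t a, int32_t b)
//     {
//         // Precondition for the equivalence: a >= 0 && b >= 0.
//         return (static_cast<uint32_t>(a) < static_cast<uint32_t>(b)) == (a < b);
//     }
//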
Assert(src1Val); Assert(src1Val->GetValueInfo()->IsLikelyInt()); Assert(src2Val); Assert(src2Val->GetValueInfo()->IsLikelyInt()); UpdateIntBoundsForGreaterThanBranch(src1Val, src2Val); } if (!DoLossyIntTypeSpec()) { return false; } newMin = newMax = 0; opcode = Js::OpCode::BrUnGt_I4; break; case Js::OpCode::CmUnLe_A: if (!DoLossyIntTypeSpec()) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmUnLe_I4; needsBoolConv = true; break; case Js::OpCode::CmUnLt_A: if (!DoLossyIntTypeSpec()) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmUnLt_I4; needsBoolConv = true; break; case Js::OpCode::CmUnGe_A: if (!DoLossyIntTypeSpec()) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmUnGe_I4; needsBoolConv = true; break; case Js::OpCode::CmUnGt_A: if (!DoLossyIntTypeSpec()) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmUnGt_I4; needsBoolConv = true; break; case Js::OpCode::Expo_A: { src1Val = src1OriginalVal; src2Val = src2OriginalVal; return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); } case Js::OpCode::Div_A: { ValueType specializedValueType = GetDivValueType(instr, src1Val, src2Val, true); if (specializedValueType.IsFloat()) { // Either result is float or 1/x or cst1/cst2 where cst1%cst2 != 0 // Note: We should really constant fold cst1%cst2... src1Val = src1OriginalVal; src2Val = src2OriginalVal; return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); } #ifdef _M_ARM if (!AutoSystemInfo::Data.ArmDivAvailable()) { return false; } #endif if (specializedValueType.IsInt()) { if (max2 == 0x80000000 || (min2 == 0 && max2 == 00)) { return false; } if (min1 == 0x80000000 && min2 <= -1 && max2 >= -1) { // Prevent integer overflow, as div by zero or MIN_INT / -1 will throw an exception // Or we know we are dividing by zero (which is weird to have because the profile data // say we got an int) bailOutKind = IR::BailOutOnDivOfMinInt; } src1Lossy = false; // Detect -0 on the sources src2Lossy = false; opcode = Js::OpCode::Div_I4; Assert(!instr->GetSrc1()->IsUnsigned()); bailOutKind |= IR::BailOnDivResultNotInt; if (max2 >= 0 && min2 <= 0) { // Need to check for divide by zero if the denominator range includes 0 bailOutKind |= IR::BailOutOnDivByZero; } if (max1 >= 0 && min1 <= 0) { // Numerator contains 0 so the result contains 0 newMin = 0; newMax = 0; if (min2 < 0) { // Denominator may be negative, so the result could be negative 0 if (instr->ShouldCheckForNegativeZero()) { bailOutKind |= IR::BailOutOnNegativeZero; } else { ignoredNegativeZero = true; } } } else { // Initialize to invalid value, one of the condition below will update it correctly newMin = INT_MAX; newMax = INT_MIN; } // Deal with the positive and negative range separately for both the numerator and the denominator, // and integrate to the overall min and max. 
// If the result is positive (positive/positive or negative/negative):
//      The min should be the   smallest magnitude numerator (positive_Min1 | negative_Max1)
//      divided by              ---------------------------------------------------------------
//                              largest magnitude denominator (positive_Max2 | negative_Min2)
//
//      The max should be the   largest magnitude numerator (positive_Max1 | negative_Max1)
//      divided by              ---------------------------------------------------------------
//                              smallest magnitude denominator (positive_Min2 | negative_Max2)
// If the result is negative (positive/negative or negative/positive):
//      The min should be the   largest magnitude numerator (positive_Max1 | negative_Min1)
//      divided by              ---------------------------------------------------------------
//                              smallest magnitude denominator (negative_Max2 | positive_Min2)
//
//      The max should be the   smallest magnitude numerator (positive_Min1 | negative_Max1)
//      divided by              ---------------------------------------------------------------
//                              largest magnitude denominator (negative_Min2 | positive_Max2)
// Consider: The range can be slightly more precise if we take care of the rounding
if (max1 > 0)
{
    // Take only the positive numerator range
    int32 positive_Min1 = max(1, min1);
    int32 positive_Max1 = max1;
    if (max2 > 0)
    {
        // Take only the positive denominator range
        int32 positive_Min2 = max(1, min2);
        int32 positive_Max2 = max2;

        // Positive / Positive
        int32 quadrant1_Min = positive_Min1 <= positive_Max2? 1 : positive_Min1 / positive_Max2;
        int32 quadrant1_Max = positive_Max1 <= positive_Min2? 1 : positive_Max1 / positive_Min2;

        Assert(1 <= quadrant1_Min && quadrant1_Min <= quadrant1_Max);

        // The result should be positive
        newMin = min(newMin, quadrant1_Min);
        newMax = max(newMax, quadrant1_Max);
    }
    if (min2 < 0)
    {
        // Take only the negative denominator range
        int32 negative_Min2 = min2;
        int32 negative_Max2 = min(-1, max2);

        // Positive / Negative
        int32 quadrant2_Min = -positive_Max1 >= negative_Max2? -1 : positive_Max1 / negative_Max2;
        int32 quadrant2_Max = -positive_Min1 >= negative_Min2? -1 : positive_Min1 / negative_Min2;

        // The result should be negative
        Assert(quadrant2_Min <= quadrant2_Max && quadrant2_Max <= -1);

        newMin = min(newMin, quadrant2_Min);
        newMax = max(newMax, quadrant2_Max);
    }
}
if (min1 < 0)
{
    // Take only the negative numerator range
    int32 negative_Min1 = min1;
    int32 negative_Max1 = min(-1, max1);

    if (max2 > 0)
    {
        // Take only the positive denominator range
        int32 positive_Min2 = max(1, min2);
        int32 positive_Max2 = max2;

        // Negative / Positive
        int32 quadrant4_Min = negative_Min1 >= -positive_Min2? -1 : negative_Min1 / positive_Min2;
        int32 quadrant4_Max = negative_Max1 >= -positive_Max2? -1 : negative_Max1 / positive_Max2;

        // The result should be negative
        Assert(quadrant4_Min <= quadrant4_Max && quadrant4_Max <= -1);

        newMin = min(newMin, quadrant4_Min);
        newMax = max(newMax, quadrant4_Max);
    }
    if (min2 < 0)
    {
        // Take only the negative denominator range
        int32 negative_Min2 = min2;
        int32 negative_Max2 = min(-1, max2);

        int32 quadrant3_Min;
        int32 quadrant3_Max;

        // Negative / Negative
        if (negative_Max1 == 0x80000000 && negative_Min2 == -1)
        {
            quadrant3_Min = negative_Max1 >= negative_Min2? 1 : (negative_Max1+1) / negative_Min2;
        }
        else
        {
            quadrant3_Min = negative_Max1 >= negative_Min2? 1 : negative_Max1 / negative_Min2;
        }
        if (negative_Min1 == 0x80000000 && negative_Max2 == -1)
        {
            quadrant3_Max = negative_Min1 >= negative_Max2? 1 : (negative_Min1+1) / negative_Max2;
        }
        else
        {
            quadrant3_Max = negative_Min1 >= negative_Max2?
1 : negative_Min1 / negative_Max2; } // The result should positive Assert(1 <= quadrant3_Min && quadrant3_Min <= quadrant3_Max); newMin = min(newMin, quadrant3_Min); newMax = max(newMax, quadrant3_Max); } } Assert(newMin <= newMax); // Continue to int type spec break; } } // fall-through default: { const bool involesLargeInt32 = (src1Val && src1Val->GetValueInfo()->IsLikelyUntaggedInt()) || (src2Val && src2Val->GetValueInfo()->IsLikelyUntaggedInt()); const auto trySpecializeToFloat = [&](const bool mayOverflow) -> bool { // It has been determined that this instruction cannot be int-specialized. Need to determine whether to attempt // to float-specialize the instruction, or leave it unspecialized. if((involesLargeInt32 #if INT32VAR && mayOverflow #endif ) || (instr->m_opcode == Js::OpCode::Mul_A && !this->DoAggressiveMulIntTypeSpec()) ) { // An input range is completely outside the range of an int31 and the operation is likely to overflow. // Additionally, on 32-bit platforms, the value is untaggable and will be a JavascriptNumber, which is // significantly slower to use in an unspecialized operation compared to a tagged int. So, try to // float-specialize the instruction. src1Val = src1OriginalVal; src2Val = src2OriginalVal; return TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); } return false; }; if (instr->m_opcode != Js::OpCode::ArgOut_A_InlineBuiltIn) { if ((src1Val && src1Val->GetValueInfo()->IsLikelyFloat()) || (src2Val && src2Val->GetValueInfo()->IsLikelyFloat())) { // Try to type specialize to float src1Val = src1OriginalVal; src2Val = src2OriginalVal; return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); } if (src1Val == nullptr || src2Val == nullptr || !src1Val->GetValueInfo()->IsLikelyInt() || !src2Val->GetValueInfo()->IsLikelyInt() || ( !DoAggressiveIntTypeSpec() && ( !(src1Val->GetValueInfo()->IsInt() || CurrentBlockData()->IsSwitchInt32TypeSpecialized(instr)) || !src2Val->GetValueInfo()->IsInt() ) ) || (instr->GetSrc1()->IsRegOpnd() && instr->GetSrc1()->AsRegOpnd()->m_sym->m_isNotInt) || (instr->GetSrc2()->IsRegOpnd() && instr->GetSrc2()->AsRegOpnd()->m_sym->m_isNotInt)) { return trySpecializeToFloat(true); } } // Try to type specialize to int32 // If one of the values is a float constant with a value that fits in a uint32 but not an int32, // and the instruction can ignore int overflow, the source value for the purposes of int specialization // would have been changed to an int constant value by ignoring overflow. But, the conversion is still lossy. if (!(src1OriginalVal && src1OriginalVal->GetValueInfo()->IsFloatConstant() && src1Val && src1Val->GetValueInfo()->HasIntConstantValue())) { src1Lossy = false; } if (!(src2OriginalVal && src2OriginalVal->GetValueInfo()->IsFloatConstant() && src2Val && src2Val->GetValueInfo()->HasIntConstantValue())) { src2Lossy = false; } switch(instr->m_opcode) { case Js::OpCode::ArgOut_A_InlineBuiltIn: // If the src is already type-specialized, if we don't type-specialize ArgOut_A_InlineBuiltIn instr, we'll get additional ToVar. // So, to avoid that, type-specialize the ArgOut_A_InlineBuiltIn instr. // Else we don't need to type-specialize the instr, we are fine with src being Var. if (instr->GetSrc1()->IsRegOpnd()) { StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsInt32TypeSpecialized(sym)) { opcode = instr->m_opcode; skipDst = true; // We should keep dst as is, otherwise the link opnd for next ArgOut/InlineBuiltInStart would be broken. skipSrc2 = true; // src2 is linkOpnd. 
We don't need to type-specialize it. newMin = min1; newMax = max1; // Values don't matter, these are unused. goto LOutsideSwitch; // Continue to int-type-specialize. } else if (CurrentBlockData()->IsFloat64TypeSpecialized(sym)) { src1Val = src1OriginalVal; src2Val = src2OriginalVal; return this->TypeSpecializeFloatBinary(instr, src1Val, src2Val, pDstVal); } #ifdef ENABLE_SIMDJS else if (CurrentBlockData()->IsSimd128F4TypeSpecialized(sym)) { // SIMD_JS // We should be already using the SIMD type-spec sym. See TypeSpecializeSimd128. Assert(IRType_IsSimd128(instr->GetSrc1()->GetType())); } #endif } return false; case Js::OpCode::Add_A: do // while(false) { const auto CannotOverflowBasedOnRelativeBounds = [&](int32 *const constantValueRef) { Assert(constantValueRef); if(min2 == max2 && src1Val->GetValueInfo()->IsIntBounded() && src1Val->GetValueInfo()->AsIntBounded()->Bounds()->AddCannotOverflowBasedOnRelativeBounds(min2)) { *constantValueRef = min2; return true; } else if( min1 == max1 && src2Val->GetValueInfo()->IsIntBounded() && src2Val->GetValueInfo()->AsIntBounded()->Bounds()->AddCannotOverflowBasedOnRelativeBounds(min1)) { *constantValueRef = min1; return true; } return false; }; if (Int32Math::Add(min1, min2, &newMin)) { int32 constantSrcValue; if(CannotOverflowBasedOnRelativeBounds(&constantSrcValue)) { newMin = constantSrcValue >= 0 ? INT32_MAX : INT32_MIN; } else if(instr->ShouldCheckForIntOverflow()) { if(involesLargeInt32 || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnOverflow; newMin = min1 < 0 ? INT32_MIN : INT32_MAX; } else { // When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since // overflow causes the value to wrap around, and we don't have a way to specify a lower and upper // range of ints, we use the full range of int32s. ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if (Int32Math::Add(max1, max2, &newMax)) { int32 constantSrcValue; if(CannotOverflowBasedOnRelativeBounds(&constantSrcValue)) { newMax = constantSrcValue >= 0 ? INT32_MAX : INT32_MIN; } else if(instr->ShouldCheckForIntOverflow()) { if(involesLargeInt32 || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnOverflow; newMax = max1 < 0 ? INT32_MIN : INT32_MAX; } else { // See comment about ignoring overflow above ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if(bailOutKind & IR::BailOutOnOverflow) { Assert(bailOutKind == IR::BailOutOnOverflow); Assert(instr->ShouldCheckForIntOverflow()); int32 temp; if(Int32Math::Add( Int32Math::NearestInRangeTo(0, min1, max1), Int32Math::NearestInRangeTo(0, min2, max2), &temp)) { // Always overflows return trySpecializeToFloat(true); } } } while(false); if (!this->IsLoopPrePass() && newMin == newMax && bailOutKind == IR::BailOutInvalid) { // Take care of Add with zero here, since we know we're dealing with 2 numbers. 
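// Interval-arithmetic sketch of the Add_A range computation above (illustrative only; the real
// code uses Int32Math::Add and the relative-bounds checks rather than 64-bit widening):
//
//     #include <cstdint>
//
//     struct IntRange { int32_t min, max; };
//
//     // Computes the range of a + b and reports whether int32 overflow is possible, which is
//     // when the optimizer would add a BailOutOnOverflow check (or widen to the full int32
//     // range if overflow is being ignored).
//     static bool AddRangeMayOverflow(IntRange a, IntRange b, IntRange *out)
//     {
//         const int64_t lo = static_cast<int64_t>(a.min) + b.min;
//         const int64_t hi = static_cast<int64_t>(a.max) + b.max;
//         out->min = lo < INT32_MIN ? INT32_MIN : static_cast<int32_t>(lo);
//         out->max = hi > INT32_MAX ? INT32_MAX : static_cast<int32_t>(hi);
//         return lo < INT32_MIN || hi > INT32_MAX;
//     }
//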
this->CaptureByteCodeSymUses(instr); IR::Opnd *src; bool isAddZero = true; int32 intConstantValue; if (src1Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue) && intConstantValue == 0) { src = instr->UnlinkSrc2(); instr->FreeSrc1(); } else if (src2Val->GetValueInfo()->TryGetIntConstantValue(&intConstantValue) && intConstantValue == 0) { src = instr->UnlinkSrc1(); instr->FreeSrc2(); } else { // This should have been handled by const folding, unless: // - A source's value was substituted with a different value here, which is after const folding happened // - A value is not definitely int, but once converted to definite int, it would be zero due to a // condition in the source code such as if(a === 0). Ideally, we would specialize the sources and // remove the add, but doesn't seem too important for now. Assert( !DoConstFold() || src1Val != src1OriginalVal || src2Val != src2OriginalVal || !src1Val->GetValueInfo()->IsInt() || !src2Val->GetValueInfo()->IsInt()); isAddZero = false; src = nullptr; } if (isAddZero) { IR::Instr *newInstr = IR::Instr::New(Js::OpCode::Ld_A, instr->UnlinkDst(), src, instr->m_func); newInstr->SetByteCodeOffset(instr); instr->m_opcode = Js::OpCode::Nop; this->currentBlock->InsertInstrAfter(newInstr, instr); return true; } } if(!ignoredIntOverflow) { if(min2 == max2 && (!IsLoopPrePass() || IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Val)) && instr->GetSrc1()->IsRegOpnd()) { addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min1 == max1, min2); } else if( min1 == max1 && (!IsLoopPrePass() || IsPrepassSrcValueInfoPrecise(instr->GetSrc1(), src1Val)) && instr->GetSrc2()->IsRegOpnd()) { addSubConstantInfo.Set(instr->GetSrc2()->AsRegOpnd()->m_sym, src2Val, min2 == max2, min1); } } opcode = Js::OpCode::Add_I4; break; case Js::OpCode::Sub_A: do // while(false) { const auto CannotOverflowBasedOnRelativeBounds = [&]() { return min2 == max2 && src1Val->GetValueInfo()->IsIntBounded() && src1Val->GetValueInfo()->AsIntBounded()->Bounds()->SubCannotOverflowBasedOnRelativeBounds(min2); }; if (Int32Math::Sub(min1, max2, &newMin)) { if(CannotOverflowBasedOnRelativeBounds()) { Assert(min2 == max2); newMin = min2 >= 0 ? INT32_MIN : INT32_MAX; } else if(instr->ShouldCheckForIntOverflow()) { if(involesLargeInt32 || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnOverflow; newMin = min1 < 0 ? INT32_MIN : INT32_MAX; } else { // When ignoring overflow, the range needs to account for overflow. For any Add or Sub, since overflow // causes the value to wrap around, and we don't have a way to specify a lower and upper range of ints, // we use the full range of int32s. ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if (Int32Math::Sub(max1, min2, &newMax)) { if(CannotOverflowBasedOnRelativeBounds()) { Assert(min2 == max2); newMax = min2 >= 0 ? INT32_MIN: INT32_MAX; } else if(instr->ShouldCheckForIntOverflow()) { if(involesLargeInt32 || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnOverflow; newMax = max1 < 0 ? 
INT32_MIN : INT32_MAX; } else { // See comment about ignoring overflow above ignoredIntOverflow = true; newMin = INT32_MIN; newMax = INT32_MAX; break; } } if(bailOutKind & IR::BailOutOnOverflow) { Assert(bailOutKind == IR::BailOutOnOverflow); Assert(instr->ShouldCheckForIntOverflow()); int32 temp; if(Int32Math::Sub( Int32Math::NearestInRangeTo(-1, min1, max1), Int32Math::NearestInRangeTo(0, min2, max2), &temp)) { // Always overflows return trySpecializeToFloat(true); } } } while(false); if(!ignoredIntOverflow && min2 == max2 && min2 != INT32_MIN && (!IsLoopPrePass() || IsPrepassSrcValueInfoPrecise(instr->GetSrc2(), src2Val)) && instr->GetSrc1()->IsRegOpnd()) { addSubConstantInfo.Set(instr->GetSrc1()->AsRegOpnd()->m_sym, src1Val, min1 == max1, -min2); } opcode = Js::OpCode::Sub_I4; break; case Js::OpCode::Mul_A: { if (Int32Math::Mul(min1, min2, &newMin)) { if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnMulOverflow; newMin = (min1 < 0) ^ (min2 < 0) ? INT32_MIN : INT32_MAX; } newMax = newMin; if (Int32Math::Mul(max1, max2, &tmp)) { if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnMulOverflow; tmp = (max1 < 0) ^ (max2 < 0) ? INT32_MIN : INT32_MAX; } newMin = min(newMin, tmp); newMax = max(newMax, tmp); if (Int32Math::Mul(min1, max2, &tmp)) { if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnMulOverflow; tmp = (min1 < 0) ^ (max2 < 0) ? INT32_MIN : INT32_MAX; } newMin = min(newMin, tmp); newMax = max(newMax, tmp); if (Int32Math::Mul(max1, min2, &tmp)) { if (involesLargeInt32 || !DoAggressiveMulIntTypeSpec() || !DoAggressiveIntTypeSpec()) { // May overflow return trySpecializeToFloat(true); } bailOutKind |= IR::BailOutOnMulOverflow; tmp = (max1 < 0) ^ (min2 < 0) ? INT32_MIN : INT32_MAX; } newMin = min(newMin, tmp); newMax = max(newMax, tmp); if (bailOutKind & IR::BailOutOnMulOverflow) { // CSE only if two MULs have the same overflow check behavior. // Currently this is set to be ignore int32 overflow, but not 53-bit, or int32 overflow matters. 
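// The four corner products evaluated above, as a standalone sketch: with signed ranges the
// extremes of a * b can come from any pairing of the operands' bounds, so all four corners are
// folded into [newMin, newMax] (illustrative; the real code also records overflow per corner
// via Int32Math::Mul):
//
//     #include <algorithm>
//     #include <cstdint>
//
//     static void MulRangeCorners(int32_t min1, int32_t max1, int32_t min2, int32_t max2,
//                                 int64_t *outMin, int64_t *outMax)
//     {
//         const int64_t corners[4] =
//         {
//             static_cast<int64_t>(min1) * min2, static_cast<int64_t>(min1) * max2,
//             static_cast<int64_t>(max1) * min2, static_cast<int64_t>(max1) * max2
//         };
//         *outMin = *std::min_element(corners, corners + 4);
//         *outMax = *std::max_element(corners, corners + 4);
//     }
//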
if (!instr->ShouldCheckFor32BitOverflow() && instr->ShouldCheckForNon32BitOverflow()) { // If we allow int to overflow then there can be anything in the resulting int newMin = IntConstMin; newMax = IntConstMax; ignoredIntOverflow = true; } int32 temp, overflowValue; if (Int32Math::Mul( Int32Math::NearestInRangeTo(0, min1, max1), Int32Math::NearestInRangeTo(0, min2, max2), &temp, &overflowValue)) { Assert(instr->ignoreOverflowBitCount >= 32); int overflowMatters = 64 - instr->ignoreOverflowBitCount; if (!ignoredIntOverflow || // Use shift to check high bits in case its negative ((overflowValue << overflowMatters) >> overflowMatters) != overflowValue ) { // Always overflows return trySpecializeToFloat(true); } } } if (newMin <= 0 && newMax >= 0 && // New range crosses zero (min1 < 0 || min2 < 0) && // An operand's range contains a negative integer !(min1 > 0 || min2 > 0) && // Neither operand's range contains only positive integers !instr->GetSrc1()->IsEqual(instr->GetSrc2())) // The operands don't have the same value { if (instr->ShouldCheckForNegativeZero()) { // -0 matters since the sym is not a local, or is used in a way in which -0 would differ from +0 if (!DoAggressiveIntTypeSpec()) { // May result in -0 return trySpecializeToFloat(false); } if (((min1 == 0 && max1 == 0) || (min2 == 0 && max2 == 0)) && (max1 < 0 || max2 < 0)) { // Always results in -0 return trySpecializeToFloat(false); } bailOutKind |= IR::BailOutOnNegativeZero; } else { ignoredNegativeZero = true; } } opcode = Js::OpCode::Mul_I4; break; } case Js::OpCode::Rem_A: { IR::Opnd* src2 = instr->GetSrc2(); if (!this->IsLoopPrePass() && min2 == max2 && min1 >= 0) { int32 value = min2; if (value == (1 << Math::Log2(value)) && src2->IsAddrOpnd()) { Assert(src2->AsAddrOpnd()->IsVar()); instr->m_opcode = Js::OpCode::And_A; src2->AsAddrOpnd()->SetAddress(Js::TaggedInt::ToVarUnchecked(value - 1), IR::AddrOpndKindConstantVar); *pSrc2Val = GetIntConstantValue(value - 1, instr); src2Val = *pSrc2Val; return this->TypeSpecializeBinary(&instr, pSrc1Val, pSrc2Val, pDstVal, src1OriginalVal, src2Val, redoTypeSpecRef); } } #ifdef _M_ARM if (!AutoSystemInfo::Data.ArmDivAvailable()) { return false; } #endif if (min1 < 0) { // The most negative it can be is min1, unless limited by min2/max2 int32 negMaxAbs2; if (min2 == INT32_MIN) { negMaxAbs2 = INT32_MIN; } else { negMaxAbs2 = -max(abs(min2), abs(max2)) + 1; } newMin = max(min1, negMaxAbs2); } else { newMin = 0; } bool isModByPowerOf2 = (instr->IsProfiledInstr() && instr->m_func->HasProfileInfo() && instr->m_func->GetReadOnlyProfileInfo()->IsModulusOpByPowerOf2(static_cast<Js::ProfileId>(instr->AsProfiledInstr()->u.profileId))); if(isModByPowerOf2) { Assert(bailOutKind == IR::BailOutInvalid); bailOutKind = IR::BailOnModByPowerOf2; newMin = 0; } else { if (min2 <= 0 && max2 >= 0) { // Consider: We could handle the zero case with a check and bailout... 
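// Sketch of the strength reduction applied at the top of the Rem_A case: with a constant
// power-of-two divisor and a non-negative numerator, the remainder is just a mask, which is
// why the instruction is rewritten into And_A against (value - 1):
//
//     #include <cstdint>
//
//     static int32_t RemByPowerOfTwo(int32_t nonNegativeNumerator, int32_t powerOfTwoDivisor)
//     {
//         // Valid only under the same preconditions checked above: numerator >= 0 and the
//         // divisor is an exact power of two.
//         return nonNegativeNumerator & (powerOfTwoDivisor - 1);
//     }
//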
return false; } if (min1 == 0x80000000 && (min2 <= -1 && max2 >= -1)) { // Prevent integer overflow, as div by zero or MIN_INT / -1 will throw an exception return false; } if (min1 < 0) { if(instr->ShouldCheckForNegativeZero()) { if (!DoAggressiveIntTypeSpec()) { return false; } bailOutKind |= IR::BailOutOnNegativeZero; } else { ignoredNegativeZero = true; } } } { int32 absMax2; if (min2 == INT32_MIN) { // abs(INT32_MIN) == INT32_MAX because of overflow absMax2 = INT32_MAX; } else { absMax2 = max(abs(min2), abs(max2)) - 1; } newMax = min(absMax2, max(max1, 0)); newMax = max(newMin, newMax); } opcode = Js::OpCode::Rem_I4; Assert(!instr->GetSrc1()->IsUnsigned()); break; } case Js::OpCode::CmEq_A: case Js::OpCode::CmSrEq_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmEq_I4; needsBoolConv = true; break; case Js::OpCode::CmNeq_A: case Js::OpCode::CmSrNeq_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmNeq_I4; needsBoolConv = true; break; case Js::OpCode::CmLe_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmLe_I4; needsBoolConv = true; break; case Js::OpCode::CmLt_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmLt_I4; needsBoolConv = true; break; case Js::OpCode::CmGe_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmGe_I4; needsBoolConv = true; break; case Js::OpCode::CmGt_A: if (!IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val)) { return false; } newMin = 0; newMax = 1; opcode = Js::OpCode::CmGt_I4; needsBoolConv = true; break; case Js::OpCode::BrSrEq_A: case Js::OpCode::BrEq_A: case Js::OpCode::BrNotNeq_A: case Js::OpCode::BrSrNotNeq_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrEqual(instr, true, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForEqualBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrEq_I4; // We'll get a warning if we don't assign a value to these... // We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } case Js::OpCode::BrSrNeq_A: case Js::OpCode::BrNeq_A: case Js::OpCode::BrSrNotEq_A: case Js::OpCode::BrNotEq_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrEqual(instr, false, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForNotEqualBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrNeq_I4; // We'll get a warning if we don't assign a value to these... // We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } case Js::OpCode::BrGt_A: case Js::OpCode::BrNotLe_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrGreaterThan(instr, true, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForGreaterThanBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrGt_I4; // We'll get a warning if we don't assign a value to these... 
// We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } case Js::OpCode::BrGe_A: case Js::OpCode::BrNotLt_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrGreaterThanOrEqual(instr, true, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForGreaterThanOrEqualBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrGe_I4; // We'll get a warning if we don't assign a value to these... // We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } case Js::OpCode::BrLt_A: case Js::OpCode::BrNotGe_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrGreaterThanOrEqual(instr, false, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForLessThanBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrLt_I4; // We'll get a warning if we don't assign a value to these... // We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } case Js::OpCode::BrLe_A: case Js::OpCode::BrNotGt_A: { if(DoConstFold() && !IsLoopPrePass() && TryOptConstFoldBrGreaterThan(instr, false, src1Val, min1, max1, src2Val, min2, max2)) { return true; } const bool specialize = IsWorthSpecializingToInt32Branch(instr, src1Val, src2Val); UpdateIntBoundsForLessThanOrEqualBranch(src1Val, src2Val); if(!specialize) { return false; } opcode = Js::OpCode::BrLe_I4; // We'll get a warning if we don't assign a value to these... // We'll assert if we use them and make a range where min > max newMin = 2; newMax = 1; break; } default: return false; } // If this instruction is in a range of instructions where int overflow does not matter, we will still specialize it // (won't leave it unspecialized based on heuristics), since it is most likely worth specializing, and the dst value // needs to be guaranteed to be an int if(!ignoredIntOverflow && !ignoredNegativeZero && !needsBoolConv && instr->ShouldCheckForIntOverflow() && !IsWorthSpecializingToInt32(instr, src1Val, src2Val)) { // Even though type specialization is being skipped since it may not be worth it, the proper value should still be // maintained so that the result may be type specialized later. An int value is not created for the dst in any of // the following cases. // - A bailout check is necessary to specialize this instruction. The bailout check is what guarantees the result to // be an int, but since we're not going to specialize this instruction, there won't be a bailout check. // - Aggressive int type specialization is disabled and we're in a loop prepass. We're conservative on dst values in // that case, especially if the dst sym is live on the back-edge. 
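// Range-based fold used by the TryOptConstFoldBrGreaterThan / TryOptConstFoldBrGreaterThanOrEqual
// helpers invoked in the branch cases above (and defined later in this file), in sketch form:
//
//     #include <cstdint>
//
//     enum class Tri { Unknown, AlwaysTrue, AlwaysFalse };
//
//     // Decides "a < b" purely from the operands' int ranges.
//     static Tri FoldLessThan(int32_t min1, int32_t max1, int32_t min2, int32_t max2)
//     {
//         if (max1 < min2)  return Tri::AlwaysTrue;   // every possible a is below every possible b
//         if (min1 >= max2) return Tri::AlwaysFalse;  // every possible a is at or above every possible b
//         return Tri::Unknown;                        // ranges overlap: leave the branch alone
//     }
//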
if(bailOutKind == IR::BailOutInvalid && instr->GetDst() && src1Val->GetValueInfo()->IsInt() && src2Val->GetValueInfo()->IsInt() && (DoAggressiveIntTypeSpec() || !this->IsLoopPrePass())) { *pDstVal = CreateDstUntransferredIntValue(newMin, newMax, instr, src1Val, src2Val); } return false; } } // case default } // switch LOutsideSwitch: this->ignoredIntOverflowForCurrentInstr = ignoredIntOverflow; this->ignoredNegativeZeroForCurrentInstr = ignoredNegativeZero; { // Try CSE again before modifying the IR, in case some attributes are required for successful CSE Value *src1IndirIndexVal = nullptr; if(CSEOptimize(currentBlock, &instr, &src1Val, &src2Val, &src1IndirIndexVal, true /* intMathExprOnly */)) { *redoTypeSpecRef = true; return false; } } const Js::OpCode originalOpCode = instr->m_opcode; if (!this->IsLoopPrePass()) { // No re-write on prepass instr->m_opcode = opcode; } Value *src1ValueToSpecialize = src1Val, *src2ValueToSpecialize = src2Val; // Lossy conversions to int32 must be done based on the original source values. For instance, if one of the values is a // float constant with a value that fits in a uint32 but not an int32, and the instruction can ignore int overflow, the // source value for the purposes of int specialization would have been changed to an int constant value by ignoring // overflow. If we were to specialize the sym using the int constant value, it would be treated as a lossless // conversion, but since there may be subsequent uses of the same float constant value that may not ignore overflow, // this must be treated as a lossy conversion by specializing the sym using the original float constant value. if(src1Lossy) { src1ValueToSpecialize = src1OriginalVal; } if (src2Lossy) { src2ValueToSpecialize = src2OriginalVal; } // Make sure the srcs are specialized IR::Opnd* src1 = instr->GetSrc1(); this->ToInt32(instr, src1, this->currentBlock, src1ValueToSpecialize, nullptr, src1Lossy); if (!skipSrc2) { IR::Opnd* src2 = instr->GetSrc2(); this->ToInt32(instr, src2, this->currentBlock, src2ValueToSpecialize, nullptr, src2Lossy); } if(bailOutKind != IR::BailOutInvalid && !this->IsLoopPrePass()) { GenerateBailAtOperation(&instr, bailOutKind); } if (!skipDst && instr->GetDst()) { if (needsBoolConv) { IR::RegOpnd *varDst; if (this->IsLoopPrePass()) { varDst = instr->GetDst()->AsRegOpnd(); this->ToVarRegOpnd(varDst, this->currentBlock); } else { // Generate: // t1.i = CmCC t2.i, t3.i // t1.v = Conv_bool t1.i // // If the only uses of t1 are ints, the conv_bool will get dead-stored TypeSpecializeIntDst(instr, originalOpCode, nullptr, src1Val, src2Val, bailOutKind, newMin, newMax, pDstVal); IR::RegOpnd *intDst = instr->GetDst()->AsRegOpnd(); intDst->SetIsJITOptimizedReg(true); varDst = IR::RegOpnd::New(intDst->m_sym->GetVarEquivSym(this->func), TyVar, this->func); IR::Instr *convBoolInstr = IR::Instr::New(Js::OpCode::Conv_Bool, varDst, intDst, this->func); // In some cases (e.g. unsigned compare peep code), a comparison will use variables // other than the ones initially intended for it, if we can determine that we would // arrive at the same result. This means that we get a ByteCodeUses operation after // the actual comparison. Since Inserting the Conv_bool just after the compare, and // just before the ByteCodeUses, would cause issues later on with register lifetime // calculation, we want to insert the Conv_bool after the whole compare instruction // block. 
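// Behavioral sketch of the two-instruction sequence described above (an int-specialized CmCC
// followed by Conv_Bool); this shows what the specialized code computes, not IR-building code:
//
//     #include <cstdint>
//
//     static bool CompareThenConvBool(int32_t a, int32_t b)
//     {
//         const int32_t t1_i = (a < b) ? 1 : 0;   // CmLt_I4: int-specialized compare, produces 0 or 1
//         const bool    t1_v = (t1_i != 0);       // Conv_Bool: turns the int result back into a boolean var
//         return t1_v;                            // the Conv_Bool is dead-stored if only the int is consumed
//     }
//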
IR::Instr *putAfter = instr; while (putAfter->m_next && putAfter->m_next->m_opcode == Js::OpCode::ByteCodeUses) { putAfter = putAfter->m_next; } putAfter->InsertAfter(convBoolInstr); convBoolInstr->SetByteCodeOffset(instr); this->ToVarRegOpnd(varDst, this->currentBlock); CurrentBlockData()->liveInt32Syms->Set(varDst->m_sym->m_id); CurrentBlockData()->liveLossyInt32Syms->Set(varDst->m_sym->m_id); } *pDstVal = this->NewGenericValue(ValueType::Boolean, varDst); } else { TypeSpecializeIntDst( instr, originalOpCode, nullptr, src1Val, src2Val, bailOutKind, newMin, newMax, pDstVal, addSubConstantInfo.HasInfo() ? &addSubConstantInfo : nullptr); } } if(bailOutKind == IR::BailOutInvalid) { GOPT_TRACE(_u("Type specialized to INT\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT: ")); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } else { GOPT_TRACE(_u("Type specialized to INT with bailout on:\n")); if(bailOutKind & (IR::BailOutOnOverflow | IR::BailOutOnMulOverflow) ) { GOPT_TRACE(_u(" Overflow\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT with bailout (%S): "), "Overflow"); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } if(bailOutKind & IR::BailOutOnNegativeZero) { GOPT_TRACE(_u(" Zero\n")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::AggressiveIntTypeSpecPhase)) { Output::Print(_u("Type specialized to INT with bailout (%S): "), "Zero"); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif } } return true; } bool GlobOpt::IsWorthSpecializingToInt32Branch(IR::Instr const * instr, Value const * src1Val, Value const * src2Val) const { if (!src1Val->GetValueInfo()->HasIntConstantValue() && instr->GetSrc1()->IsRegOpnd()) { StackSym const *sym1 = instr->GetSrc1()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsInt32TypeSpecialized(sym1) == false) { if (!src2Val->GetValueInfo()->HasIntConstantValue() && instr->GetSrc2()->IsRegOpnd()) { StackSym const *sym2 = instr->GetSrc2()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsInt32TypeSpecialized(sym2) == false) { // Type specializing a Br itself isn't worth it, unless one src // is already type specialized return false; } } } } return true; } bool GlobOpt::TryOptConstFoldBrFalse( IR::Instr *const instr, Value *const srcValue, const int32 min, const int32 max) { Assert(instr); Assert(instr->m_opcode == Js::OpCode::BrFalse_A || instr->m_opcode == Js::OpCode::BrTrue_A); Assert(srcValue); if(!(DoAggressiveIntTypeSpec() ? srcValue->GetValueInfo()->IsLikelyInt() : srcValue->GetValueInfo()->IsInt())) { return false; } if(ValueInfo::IsEqualTo(srcValue, min, max, nullptr, 0, 0)) { OptConstFoldBr(instr->m_opcode == Js::OpCode::BrFalse_A, instr, srcValue); return true; } if(ValueInfo::IsNotEqualTo(srcValue, min, max, nullptr, 0, 0)) { OptConstFoldBr(instr->m_opcode == Js::OpCode::BrTrue_A, instr, srcValue); return true; } return false; } bool GlobOpt::TryOptConstFoldBrEqual( IR::Instr *const instr, const bool branchOnEqual, Value *const src1Value, const int32 min1, const int32 max1, Value *const src2Value, const int32 min2, const int32 max2) { Assert(instr); Assert(src1Value); Assert(DoAggressiveIntTypeSpec() ? 
src1Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt()); Assert(src2Value); Assert(DoAggressiveIntTypeSpec() ? src2Value->GetValueInfo()->IsLikelyInt() : src2Value->GetValueInfo()->IsInt()); if(ValueInfo::IsEqualTo(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(branchOnEqual, instr, src1Value, src2Value); return true; } if(ValueInfo::IsNotEqualTo(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(!branchOnEqual, instr, src1Value, src2Value); return true; } return false; } bool GlobOpt::TryOptConstFoldBrGreaterThan( IR::Instr *const instr, const bool branchOnGreaterThan, Value *const src1Value, const int32 min1, const int32 max1, Value *const src2Value, const int32 min2, const int32 max2) { Assert(instr); Assert(src1Value); Assert(DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt()); Assert(src2Value); Assert(DoAggressiveIntTypeSpec() ? src2Value->GetValueInfo()->IsLikelyInt() : src2Value->GetValueInfo()->IsInt()); if(ValueInfo::IsGreaterThan(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(branchOnGreaterThan, instr, src1Value, src2Value); return true; } if(ValueInfo::IsLessThanOrEqualTo(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(!branchOnGreaterThan, instr, src1Value, src2Value); return true; } return false; } bool GlobOpt::TryOptConstFoldBrGreaterThanOrEqual( IR::Instr *const instr, const bool branchOnGreaterThanOrEqual, Value *const src1Value, const int32 min1, const int32 max1, Value *const src2Value, const int32 min2, const int32 max2) { Assert(instr); Assert(src1Value); Assert(DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt()); Assert(src2Value); Assert(DoAggressiveIntTypeSpec() ? src2Value->GetValueInfo()->IsLikelyInt() : src2Value->GetValueInfo()->IsInt()); if(ValueInfo::IsGreaterThanOrEqualTo(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(branchOnGreaterThanOrEqual, instr, src1Value, src2Value); return true; } if(ValueInfo::IsLessThan(src1Value, min1, max1, src2Value, min2, max2)) { OptConstFoldBr(!branchOnGreaterThanOrEqual, instr, src1Value, src2Value); return true; } return false; } bool GlobOpt::TryOptConstFoldBrUnsignedLessThan( IR::Instr *const instr, const bool branchOnLessThan, Value *const src1Value, const int32 min1, const int32 max1, Value *const src2Value, const int32 min2, const int32 max2) { Assert(DoConstFold()); Assert(!IsLoopPrePass()); if(!src1Value || !src2Value || !( DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() && src2Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt() && src2Value->GetValueInfo()->IsInt() )) { return false; } uint uMin1 = (min1 < 0 ? (max1 < 0 ? min((uint)min1, (uint)max1) : 0) : min1); uint uMax1 = max((uint)min1, (uint)max1); uint uMin2 = (min2 < 0 ? (max2 < 0 ? 
min((uint)min2, (uint)max2) : 0) : min2); uint uMax2 = max((uint)min2, (uint)max2); if (uMax1 < uMin2) { // Range 1 is always lesser than Range 2 OptConstFoldBr(branchOnLessThan, instr, src1Value, src2Value); return true; } if (uMin1 >= uMax2) { // Range 2 is always lesser than Range 1 OptConstFoldBr(!branchOnLessThan, instr, src1Value, src2Value); return true; } return false; } bool GlobOpt::TryOptConstFoldBrUnsignedGreaterThan( IR::Instr *const instr, const bool branchOnGreaterThan, Value *const src1Value, const int32 min1, const int32 max1, Value *const src2Value, const int32 min2, const int32 max2) { Assert(DoConstFold()); Assert(!IsLoopPrePass()); if(!src1Value || !src2Value || !( DoAggressiveIntTypeSpec() ? src1Value->GetValueInfo()->IsLikelyInt() && src2Value->GetValueInfo()->IsLikelyInt() : src1Value->GetValueInfo()->IsInt() && src2Value->GetValueInfo()->IsInt() )) { return false; } uint uMin1 = (min1 < 0 ? (max1 < 0 ? min((uint)min1, (uint)max1) : 0) : min1); uint uMax1 = max((uint)min1, (uint)max1); uint uMin2 = (min2 < 0 ? (max2 < 0 ? min((uint)min2, (uint)max2) : 0) : min2); uint uMax2 = max((uint)min2, (uint)max2); if (uMin1 > uMax2) { // Range 1 is always greater than Range 2 OptConstFoldBr(branchOnGreaterThan, instr, src1Value, src2Value); return true; } if (uMax1 <= uMin2) { // Range 2 is always greater than Range 1 OptConstFoldBr(!branchOnGreaterThan, instr, src1Value, src2Value); return true; } return false; } void GlobOpt::SetPathDependentInfo(const bool conditionToBranch, const PathDependentInfo &info) { Assert(this->currentBlock->GetSuccList()->Count() == 2); IR::Instr * fallthrough = this->currentBlock->GetNext()->GetFirstInstr(); FOREACH_SLISTBASECOUNTED_ENTRY(FlowEdge*, edge, this->currentBlock->GetSuccList()) { if (conditionToBranch == (edge->GetSucc()->GetFirstInstr() != fallthrough)) { edge->SetPathDependentInfo(info, alloc); return; } } NEXT_SLISTBASECOUNTED_ENTRY; Assert(false); } PathDependentInfoToRestore GlobOpt::UpdatePathDependentInfo(PathDependentInfo *const info) { Assert(info); if(!info->HasInfo()) { return PathDependentInfoToRestore(); } decltype(&GlobOpt::UpdateIntBoundsForEqual) UpdateIntBoundsForLeftValue, UpdateIntBoundsForRightValue; switch(info->Relationship()) { case PathDependentRelationship::Equal: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForEqual; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForEqual; break; case PathDependentRelationship::NotEqual: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForNotEqual; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForNotEqual; break; case PathDependentRelationship::GreaterThanOrEqual: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForGreaterThanOrEqual; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForLessThanOrEqual; break; case PathDependentRelationship::GreaterThan: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForGreaterThan; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForLessThan; break; case PathDependentRelationship::LessThanOrEqual: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForLessThanOrEqual; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForGreaterThanOrEqual; break; case PathDependentRelationship::LessThan: UpdateIntBoundsForLeftValue = &GlobOpt::UpdateIntBoundsForLessThan; UpdateIntBoundsForRightValue = &GlobOpt::UpdateIntBoundsForGreaterThan; break; default: Assert(false); __assume(false); } ValueInfo *leftValueInfo = info->LeftValue()->GetValueInfo(); IntConstantBounds 
leftConstantBounds; AssertVerify(leftValueInfo->TryGetIntConstantBounds(&leftConstantBounds, true)); ValueInfo *rightValueInfo; IntConstantBounds rightConstantBounds; if(info->RightValue()) { rightValueInfo = info->RightValue()->GetValueInfo(); AssertVerify(rightValueInfo->TryGetIntConstantBounds(&rightConstantBounds, true)); } else { rightValueInfo = nullptr; rightConstantBounds = IntConstantBounds(info->RightConstantValue(), info->RightConstantValue()); } ValueInfo *const newLeftValueInfo = (this->*UpdateIntBoundsForLeftValue)( info->LeftValue(), leftConstantBounds, info->RightValue(), rightConstantBounds, true); if(newLeftValueInfo) { ChangeValueInfo(nullptr, info->LeftValue(), newLeftValueInfo); AssertVerify(newLeftValueInfo->TryGetIntConstantBounds(&leftConstantBounds, true)); } else { leftValueInfo = nullptr; } ValueInfo *const newRightValueInfo = (this->*UpdateIntBoundsForRightValue)( info->RightValue(), rightConstantBounds, info->LeftValue(), leftConstantBounds, true); if(newRightValueInfo) { ChangeValueInfo(nullptr, info->RightValue(), newRightValueInfo); } else { rightValueInfo = nullptr; } return PathDependentInfoToRestore(leftValueInfo, rightValueInfo); } void GlobOpt::RestorePathDependentInfo(PathDependentInfo *const info, const PathDependentInfoToRestore infoToRestore) { Assert(info); if(infoToRestore.LeftValueInfo()) { Assert(info->LeftValue()); ChangeValueInfo(nullptr, info->LeftValue(), infoToRestore.LeftValueInfo()); } if(infoToRestore.RightValueInfo()) { Assert(info->RightValue()); ChangeValueInfo(nullptr, info->RightValue(), infoToRestore.RightValueInfo()); } } bool GlobOpt::TypeSpecializeFloatUnary(IR::Instr **pInstr, Value *src1Val, Value **pDstVal, bool skipDst /* = false */) { IR::Instr *&instr = *pInstr; IR::Opnd *src1; IR::Opnd *dst; Js::OpCode opcode = instr->m_opcode; Value *valueToTransfer = nullptr; Assert(src1Val && src1Val->GetValueInfo()->IsLikelyNumber() || OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)); if (!this->DoFloatTypeSpec()) { return false; } // For inline built-ins we need to do type specialization. Check upfront to avoid duplicating same case labels. if (!OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { switch (opcode) { case Js::OpCode::ArgOut_A_InlineBuiltIn: skipDst = true; // fall-through case Js::OpCode::Ld_A: case Js::OpCode::BrTrue_A: case Js::OpCode::BrFalse_A: if (instr->GetSrc1()->IsRegOpnd()) { StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym; if (CurrentBlockData()->IsFloat64TypeSpecialized(sym) == false) { // Type specializing an Ld_A isn't worth it, unless the src // is already type specialized return false; } } if (instr->m_opcode == Js::OpCode::Ld_A) { valueToTransfer = src1Val; } break; case Js::OpCode::Neg_A: break; case Js::OpCode::Conv_Num: Assert(src1Val); opcode = Js::OpCode::Ld_A; valueToTransfer = src1Val; if (!src1Val->GetValueInfo()->IsNumber()) { StackSym *sym = instr->GetSrc1()->AsRegOpnd()->m_sym; valueToTransfer = NewGenericValue(ValueType::Float, instr->GetDst()->GetStackSym()); if (CurrentBlockData()->IsFloat64TypeSpecialized(sym) == false) { // Set the dst as a nonDeadStore. We want to keep the Ld_A to prevent the FromVar from // being dead-stored, as it could cause implicit calls. 
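                    // Flag the dst as not dead-storable: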
dst = instr->GetDst(); dst->AsRegOpnd()->m_dontDeadStore = true; } } break; case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: case Js::OpCode::StElemC: return TypeSpecializeStElem(pInstr, src1Val, pDstVal); default: return false; } } // Make sure the srcs are specialized src1 = instr->GetSrc1(); // Use original val when calling toFloat64 as this is what we'll use to try hoisting the fromVar if we're in a loop. this->ToFloat64(instr, src1, this->currentBlock, src1Val, nullptr, IR::BailOutPrimitiveButString); if (!skipDst) { dst = instr->GetDst(); if (dst) { this->TypeSpecializeFloatDst(instr, valueToTransfer, src1Val, nullptr, pDstVal); if (!this->IsLoopPrePass()) { instr->m_opcode = opcode; } } } GOPT_TRACE_INSTR(instr, _u("Type specialized to FLOAT: ")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FloatTypeSpecPhase)) { Output::Print(_u("Type specialized to FLOAT: ")); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif return true; } // Unconditionally type-spec dst to float. void GlobOpt::TypeSpecializeFloatDst(IR::Instr *instr, Value *valToTransfer, Value *const src1Value, Value *const src2Value, Value **pDstVal) { IR::Opnd* dst = instr->GetDst(); Assert(dst); AssertMsg(dst->IsRegOpnd(), "What else?"); this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock); if(valToTransfer) { *pDstVal = this->ValueNumberTransferDst(instr, valToTransfer); CurrentBlockData()->InsertNewValue(*pDstVal, dst); } else { *pDstVal = CreateDstUntransferredValue(ValueType::Float, instr, src1Value, src2Value); } } #ifdef ENABLE_SIMDJS void GlobOpt::TypeSpecializeSimd128Dst(IRType type, IR::Instr *instr, Value *valToTransfer, Value *const src1Value, Value **pDstVal) { IR::Opnd* dst = instr->GetDst(); Assert(dst); AssertMsg(dst->IsRegOpnd(), "What else?"); this->ToSimd128Dst(type, instr, dst->AsRegOpnd(), this->currentBlock); if (valToTransfer) { *pDstVal = this->ValueNumberTransferDst(instr, valToTransfer); CurrentBlockData()->InsertNewValue(*pDstVal, dst); } else { *pDstVal = NewGenericValue(GetValueTypeFromIRType(type), instr->GetDst()); } } #endif bool GlobOpt::TypeSpecializeLdLen( IR::Instr * *const instrRef, Value * *const src1ValueRef, Value * *const dstValueRef, bool *const forceInvariantHoistingRef) { Assert(instrRef); IR::Instr *&instr = *instrRef; Assert(instr); Assert(instr->m_opcode == Js::OpCode::LdLen_A); Assert(src1ValueRef); Value *&src1Value = *src1ValueRef; Assert(dstValueRef); Value *&dstValue = *dstValueRef; Assert(forceInvariantHoistingRef); bool &forceInvariantHoisting = *forceInvariantHoistingRef; if(!DoLdLenIntSpec(instr, instr->GetSrc1()->GetValueType())) { return false; } IR::BailOutKind bailOutKind = IR::BailOutOnIrregularLength; if(!IsLoopPrePass()) { IR::RegOpnd *const baseOpnd = instr->GetSrc1()->AsRegOpnd(); if(baseOpnd->IsArrayRegOpnd()) { StackSym *const lengthSym = baseOpnd->AsArrayRegOpnd()->LengthSym(); if(lengthSym) { CaptureByteCodeSymUses(instr); instr->m_opcode = Js::OpCode::Ld_I4; instr->ReplaceSrc1(IR::RegOpnd::New(lengthSym, lengthSym->GetType(), func)); instr->ClearBailOutInfo(); // Find the hoisted length value Value *const lengthValue = CurrentBlockData()->FindValue(lengthSym); Assert(lengthValue); src1Value = lengthValue; ValueInfo *const lengthValueInfo = lengthValue->GetValueInfo(); Assert(lengthValueInfo->GetSymStore() != lengthSym); IntConstantBounds lengthConstantBounds; AssertVerify(lengthValueInfo->TryGetIntConstantBounds(&lengthConstantBounds)); 
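                // LdLen_A has now been rewritten to a plain Ld_I4 from the hoisted length sym, whose value
                // carries a known non-negative int32 range (asserted below), so the dst can be
                // int-specialized and the length value transferred to it directly.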
Assert(lengthConstantBounds.LowerBound() >= 0); // Int-specialize, and transfer the value to the dst TypeSpecializeIntDst( instr, Js::OpCode::LdLen_A, src1Value, src1Value, nullptr, bailOutKind, lengthConstantBounds.LowerBound(), lengthConstantBounds.UpperBound(), &dstValue); // Try to force hoisting the Ld_I4 so that the length will have an invariant sym store that can be // copy-propped. Invariant hoisting does not automatically hoist Ld_I4. forceInvariantHoisting = true; return true; } } if (instr->HasBailOutInfo()) { Assert(instr->GetBailOutKind() == IR::BailOutMarkTempObject); bailOutKind = IR::BailOutOnIrregularLength | IR::BailOutMarkTempObject; instr->SetBailOutKind(bailOutKind); } else { Assert(bailOutKind == IR::BailOutOnIrregularLength); GenerateBailAtOperation(&instr, bailOutKind); } } TypeSpecializeIntDst( instr, Js::OpCode::LdLen_A, nullptr, nullptr, nullptr, bailOutKind, 0, INT32_MAX, &dstValue); return true; } bool GlobOpt::TypeSpecializeFloatBinary(IR::Instr *instr, Value *src1Val, Value *src2Val, Value **pDstVal) { IR::Opnd *src1; IR::Opnd *src2; IR::Opnd *dst; bool allowUndefinedOrNullSrc1 = true; bool allowUndefinedOrNullSrc2 = true; bool skipSrc1 = false; bool skipSrc2 = false; bool skipDst = false; if (!this->DoFloatTypeSpec()) { return false; } // For inline built-ins we need to do type specialization. Check upfront to avoid duplicating same case labels. if (!OpCodeAttr::IsInlineBuiltIn(instr->m_opcode)) { switch (instr->m_opcode) { case Js::OpCode::Sub_A: case Js::OpCode::Mul_A: case Js::OpCode::Div_A: case Js::OpCode::Expo_A: // Avoid if one source is known not to be a number. if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber()) { return false; } break; case Js::OpCode::BrSrEq_A: case Js::OpCode::BrSrNeq_A: case Js::OpCode::BrEq_A: case Js::OpCode::BrNeq_A: case Js::OpCode::BrSrNotEq_A: case Js::OpCode::BrNotEq_A: case Js::OpCode::BrSrNotNeq_A: case Js::OpCode::BrNotNeq_A: // Avoid if one source is known not to be a number. if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber()) { return false; } // Undef == Undef, but +Undef != +Undef // 0.0 != null, but 0.0 == +null // // So Bailout on anything but numbers for both src1 and src2 allowUndefinedOrNullSrc1 = false; allowUndefinedOrNullSrc2 = false; break; case Js::OpCode::BrGt_A: case Js::OpCode::BrGe_A: case Js::OpCode::BrLt_A: case Js::OpCode::BrLe_A: case Js::OpCode::BrNotGt_A: case Js::OpCode::BrNotGe_A: case Js::OpCode::BrNotLt_A: case Js::OpCode::BrNotLe_A: // Avoid if one source is known not to be a number. 
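            // These relational branches only reject sources known not to be numbers; unlike the equality
            // branches above they keep the default allowUndefinedOrNull* settings, so their sources are
            // converted with the BailOutPrimitiveButString kind.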
if (src1Val->GetValueInfo()->IsNotNumber() || src2Val->GetValueInfo()->IsNotNumber()) { return false; } break; case Js::OpCode::Add_A: // For Add, we need both sources to be Numbers, otherwise it could be a string concat if (!src1Val || !src2Val || !(src1Val->GetValueInfo()->IsLikelyNumber() && src2Val->GetValueInfo()->IsLikelyNumber())) { return false; } break; case Js::OpCode::ArgOut_A_InlineBuiltIn: skipSrc2 = true; skipDst = true; break; default: return false; } } else { switch (instr->m_opcode) { case Js::OpCode::InlineArrayPush: bool isFloatConstMissingItem = src2Val->GetValueInfo()->IsFloatConstant(); if(isFloatConstMissingItem) { FloatConstType floatValue = src2Val->GetValueInfo()->AsFloatConstant()->FloatValue(); isFloatConstMissingItem = Js::SparseArraySegment<double>::IsMissingItem(&floatValue); } // Don't specialize if the element is not likelyNumber - we will surely bailout if(!(src2Val->GetValueInfo()->IsLikelyNumber()) || isFloatConstMissingItem) { return false; } // Only specialize the Second source - element skipSrc1 = true; skipDst = true; allowUndefinedOrNullSrc2 = false; break; } } // Make sure the srcs are specialized if(!skipSrc1) { src1 = instr->GetSrc1(); this->ToFloat64(instr, src1, this->currentBlock, src1Val, nullptr, (allowUndefinedOrNullSrc1 ? IR::BailOutPrimitiveButString : IR::BailOutNumberOnly)); } if (!skipSrc2) { src2 = instr->GetSrc2(); this->ToFloat64(instr, src2, this->currentBlock, src2Val, nullptr, (allowUndefinedOrNullSrc2 ? IR::BailOutPrimitiveButString : IR::BailOutNumberOnly)); } if (!skipDst) { dst = instr->GetDst(); if (dst) { *pDstVal = CreateDstUntransferredValue(ValueType::Float, instr, src1Val, src2Val); AssertMsg(dst->IsRegOpnd(), "What else?"); this->ToFloat64Dst(instr, dst->AsRegOpnd(), this->currentBlock); } } GOPT_TRACE_INSTR(instr, _u("Type specialized to FLOAT: ")); #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::FloatTypeSpecPhase)) { Output::Print(_u("Type specialized to FLOAT: ")); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif return true; } bool GlobOpt::TypeSpecializeStElem(IR::Instr ** pInstr, Value *src1Val, Value **pDstVal) { IR::Instr *&instr = *pInstr; IR::RegOpnd *baseOpnd = instr->GetDst()->AsIndirOpnd()->GetBaseOpnd(); ValueType baseValueType(baseOpnd->GetValueType()); if (instr->DoStackArgsOpt(this->func) || (!this->DoTypedArrayTypeSpec() && baseValueType.IsLikelyOptimizedTypedArray()) || (!this->DoNativeArrayTypeSpec() && baseValueType.IsLikelyNativeArray()) || !(baseValueType.IsLikelyOptimizedTypedArray() || baseValueType.IsLikelyNativeArray())) { GOPT_TRACE_INSTR(instr, _u("Didn't type specialize array access, because typed array type specialization is disabled, or base is not an optimized typed array.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not specialize because %s.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr, instr->DoStackArgsOpt(this->func) ? 
_u("instruction uses the arguments object") : _u("typed array type specialization is disabled, or base is not an optimized typed array")); Output::Flush(); } return false; } Assert(instr->GetSrc1()->IsRegOpnd() || (src1Val && src1Val->GetValueInfo()->HasIntConstantValue())); StackSym *sym = instr->GetSrc1()->IsRegOpnd() ? instr->GetSrc1()->AsRegOpnd()->m_sym : nullptr; // Only type specialize the source of store element if the source symbol is already type specialized to int or float. if (sym) { if (baseValueType.IsLikelyNativeArray()) { // Gently coerce these src's into native if it seems likely to work. // Otherwise we can't use the fast path to store. // But don't try to put a float-specialized number into an int array this way. if (!( CurrentBlockData()->IsInt32TypeSpecialized(sym) || ( src1Val && ( DoAggressiveIntTypeSpec() ? src1Val->GetValueInfo()->IsLikelyInt() : src1Val->GetValueInfo()->IsInt() ) ) )) { if (!( CurrentBlockData()->IsFloat64TypeSpecialized(sym) || (src1Val && src1Val->GetValueInfo()->IsLikelyNumber()) ) || baseValueType.HasIntElements()) { return false; } } } else if (!CurrentBlockData()->IsInt32TypeSpecialized(sym) && !CurrentBlockData()->IsFloat64TypeSpecialized(sym)) { GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access, because src is not type specialized.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not specialize because src is not specialized.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr); Output::Flush(); } return false; } } int32 src1IntConstantValue; if(baseValueType.IsLikelyNativeIntArray() && src1Val && src1Val->GetValueInfo()->TryGetIntConstantValue(&src1IntConstantValue)) { if(Js::SparseArraySegment<int32>::IsMissingItem(&src1IntConstantValue)) { return false; } } // Note: doing ToVarUses to make sure we do get the int32 version of the index before trying to access its value in // ShouldExpectConventionalArrayIndexValue. Not sure why that never gave us a problem before. Assert(instr->GetDst()->IsIndirOpnd()); IR::IndirOpnd *dst = instr->GetDst()->AsIndirOpnd(); // Make sure we use the int32 version of the index operand symbol, if available. Otherwise, ensure the var symbol is live (by // potentially inserting a ToVar). 
this->ToVarUses(instr, dst, /* isDst = */ true, nullptr); if (!ShouldExpectConventionalArrayIndexValue(dst)) { GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access, because index is negative or likely not int.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not specialize because index is negative or likely not int.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr); Output::Flush(); } return false; } IRType toType = TyVar; bool isLossyAllowed = true; IR::BailOutKind arrayBailOutKind = IR::BailOutConventionalTypedArrayAccessOnly; switch(baseValueType.GetObjectType()) { case ObjectType::Int8Array: case ObjectType::Uint8Array: case ObjectType::Int16Array: case ObjectType::Uint16Array: case ObjectType::Int32Array: case ObjectType::Int8VirtualArray: case ObjectType::Uint8VirtualArray: case ObjectType::Int16VirtualArray: case ObjectType::Uint16VirtualArray: case ObjectType::Int32VirtualArray: case ObjectType::Int8MixedArray: case ObjectType::Uint8MixedArray: case ObjectType::Int16MixedArray: case ObjectType::Uint16MixedArray: case ObjectType::Int32MixedArray: Int32Array: if (this->DoAggressiveIntTypeSpec() || this->DoFloatTypeSpec()) { toType = TyInt32; } break; case ObjectType::Uint32Array: case ObjectType::Uint32VirtualArray: case ObjectType::Uint32MixedArray: // Uint32Arrays may store values that overflow int32. If the value being stored comes from a symbol that's // already losslessly type specialized to int32, we'll use it. Otherwise, if we only have a float64 specialized // value, we don't want to force bailout if it doesn't fit in int32. Instead, we'll emit conversion in the // lowerer, and handle overflow, if necessary. if (!sym || CurrentBlockData()->IsInt32TypeSpecialized(sym)) { toType = TyInt32; } else if (CurrentBlockData()->IsFloat64TypeSpecialized(sym)) { toType = TyFloat64; } break; case ObjectType::Float32Array: case ObjectType::Float64Array: case ObjectType::Float32VirtualArray: case ObjectType::Float32MixedArray: case ObjectType::Float64VirtualArray: case ObjectType::Float64MixedArray: Float64Array: if (this->DoFloatTypeSpec()) { toType = TyFloat64; } break; case ObjectType::Uint8ClampedArray: case ObjectType::Uint8ClampedVirtualArray: case ObjectType::Uint8ClampedMixedArray: // Uint8ClampedArray requires rounding (as opposed to truncation) of floating point values. If source symbol is // float type specialized, type specialize this instruction to float as well, and handle rounding in the // lowerer. 
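            // A lossy (truncating) int conversion would not match the required rounding, so the int path
            // below clears isLossyAllowed for this array type.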
if (!sym || CurrentBlockData()->IsInt32TypeSpecialized(sym)) { toType = TyInt32; isLossyAllowed = false; } else if (CurrentBlockData()->IsFloat64TypeSpecialized(sym)) { toType = TyFloat64; } break; default: Assert(baseValueType.IsLikelyNativeArray()); isLossyAllowed = false; arrayBailOutKind = IR::BailOutConventionalNativeArrayAccessOnly; if(baseValueType.HasIntElements()) { goto Int32Array; } Assert(baseValueType.HasFloatElements()); goto Float64Array; } if (toType != TyVar) { GOPT_TRACE_INSTR(instr, _u("Type specialized array access.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, type specialized to %s.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr, toType == TyInt32 ? _u("int32") : _u("float64")); Output::Flush(); } IR::BailOutKind bailOutKind = ((toType == TyInt32) ? IR::BailOutIntOnly : IR::BailOutNumberOnly); this->ToTypeSpecUse(instr, instr->GetSrc1(), this->currentBlock, src1Val, nullptr, toType, bailOutKind, /* lossy = */ isLossyAllowed); if (!this->IsLoopPrePass()) { bool bConvertToBailoutInstr = true; // Definite StElemC doesn't need bailout, because it can't fail or cause conversion. if (instr->m_opcode == Js::OpCode::StElemC && baseValueType.IsObject()) { if (baseValueType.HasIntElements()) { //Native int array requires a missing element check & bailout int32 min = INT32_MIN; int32 max = INT32_MAX; if (src1Val->GetValueInfo()->GetIntValMinMax(&min, &max, false)) { bConvertToBailoutInstr = ((min <= Js::JavascriptNativeIntArray::MissingItem) && (max >= Js::JavascriptNativeIntArray::MissingItem)); } } else { bConvertToBailoutInstr = false; } } if (bConvertToBailoutInstr) { if(instr->HasBailOutInfo()) { const IR::BailOutKind oldBailOutKind = instr->GetBailOutKind(); Assert( ( !(oldBailOutKind & ~IR::BailOutKindBits) || (oldBailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp ) && !(oldBailOutKind & IR::BailOutKindBits & ~(IR::BailOutOnArrayAccessHelperCall | IR::BailOutMarkTempObject))); if(arrayBailOutKind == IR::BailOutConventionalTypedArrayAccessOnly) { // BailOutConventionalTypedArrayAccessOnly also bails out if the array access is outside the head // segment bounds, and guarantees no implicit calls. Override the bailout kind so that the instruction // bails out for the right reason. instr->SetBailOutKind( arrayBailOutKind | (oldBailOutKind & (IR::BailOutKindBits - IR::BailOutOnArrayAccessHelperCall))); } else { // BailOutConventionalNativeArrayAccessOnly by itself may generate a helper call, and may cause implicit // calls to occur, so it must be merged in to eliminate generating the helper call. 
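                        // Unlike the typed-array case above, which overrides the helper-call bit of the old
                        // bailout kind, the native-array kind is OR'ed into the existing kind: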
Assert(arrayBailOutKind == IR::BailOutConventionalNativeArrayAccessOnly); instr->SetBailOutKind(oldBailOutKind | arrayBailOutKind); } } else { GenerateBailAtOperation(&instr, arrayBailOutKind); } } } } else { GOPT_TRACE_INSTR(instr, _u("Didn't specialize array access, because the source was not already specialized.\n")); if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseValueType.ToString(baseValueTypeStr); Output::Print(_u("Typed Array Optimization: function: %s (%s): instr: %s, base value type: %S, did not type specialize, because of array type.\n"), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), baseValueTypeStr); Output::Flush(); } } return toType != TyVar; } IR::Instr * GlobOpt::ToVarUses(IR::Instr *instr, IR::Opnd *opnd, bool isDst, Value *val) { Sym *sym; switch (opnd->GetKind()) { case IR::OpndKindReg: if (!isDst && !CurrentBlockData()->liveVarSyms->Test(opnd->AsRegOpnd()->m_sym->m_id)) { instr = this->ToVar(instr, opnd->AsRegOpnd(), this->currentBlock, val, true); } break; case IR::OpndKindSym: sym = opnd->AsSymOpnd()->m_sym; if (sym->IsPropertySym() && !CurrentBlockData()->liveVarSyms->Test(sym->AsPropertySym()->m_stackSym->m_id) && sym->AsPropertySym()->m_stackSym->IsVar()) { StackSym *propertyBase = sym->AsPropertySym()->m_stackSym; IR::RegOpnd *newOpnd = IR::RegOpnd::New(propertyBase, TyVar, instr->m_func); instr = this->ToVar(instr, newOpnd, this->currentBlock, CurrentBlockData()->FindValue(propertyBase), true); } break; case IR::OpndKindIndir: IR::RegOpnd *baseOpnd = opnd->AsIndirOpnd()->GetBaseOpnd(); if (!CurrentBlockData()->liveVarSyms->Test(baseOpnd->m_sym->m_id)) { instr = this->ToVar(instr, baseOpnd, this->currentBlock, CurrentBlockData()->FindValue(baseOpnd->m_sym), true); } IR::RegOpnd *indexOpnd = opnd->AsIndirOpnd()->GetIndexOpnd(); if (indexOpnd && !indexOpnd->m_sym->IsTypeSpec()) { if((indexOpnd->GetValueType().IsInt() ? 
!IsTypeSpecPhaseOff(func) : indexOpnd->GetValueType().IsLikelyInt() && DoAggressiveIntTypeSpec()) && !GetIsAsmJSFunc()) // typespec is disabled for asmjs { StackSym *const indexVarSym = indexOpnd->m_sym; Value *const indexValue = CurrentBlockData()->FindValue(indexVarSym); Assert(indexValue); Assert(indexValue->GetValueInfo()->IsLikelyInt()); ToInt32(instr, indexOpnd, currentBlock, indexValue, opnd->AsIndirOpnd(), false); Assert(indexValue->GetValueInfo()->IsInt()); if(!IsLoopPrePass()) { indexOpnd = opnd->AsIndirOpnd()->GetIndexOpnd(); if(indexOpnd) { Assert(indexOpnd->m_sym->IsTypeSpec()); IntConstantBounds indexConstantBounds; AssertVerify(indexValue->GetValueInfo()->TryGetIntConstantBounds(&indexConstantBounds)); if(ValueInfo::IsGreaterThanOrEqualTo( indexValue, indexConstantBounds.LowerBound(), indexConstantBounds.UpperBound(), nullptr, 0, 0)) { indexOpnd->SetType(TyUint32); } } } } else if (!CurrentBlockData()->liveVarSyms->Test(indexOpnd->m_sym->m_id)) { instr = this->ToVar(instr, indexOpnd, this->currentBlock, CurrentBlockData()->FindValue(indexOpnd->m_sym), true); } } break; } return instr; } IR::Instr * GlobOpt::ToVar(IR::Instr *instr, IR::RegOpnd *regOpnd, BasicBlock *block, Value *value, bool needsUpdate) { IR::Instr *newInstr; StackSym *varSym = regOpnd->m_sym; if (IsTypeSpecPhaseOff(this->func)) { return instr; } if (this->IsLoopPrePass()) { block->globOptData.liveVarSyms->Set(varSym->m_id); return instr; } if (block->globOptData.liveVarSyms->Test(varSym->m_id)) { // Already live, nothing to do return instr; } if (!varSym->IsVar()) { Assert(!varSym->IsTypeSpec()); // Leave non-vars alone. return instr; } Assert(block->globOptData.IsTypeSpecialized(varSym)); if (!value) { value = block->globOptData.FindValue(varSym); } ValueInfo *valueInfo = value ? value->GetValueInfo() : nullptr; if(valueInfo && valueInfo->IsInt()) { // If two syms have the same value, one is lossy-int-specialized, and then the other is int-specialized, the value // would have been updated to definitely int. Upon using the lossy-int-specialized sym later, it would be flagged as // lossy while the value is definitely int. Since the bit-vectors are based on the sym and not the value, update the // lossy state. block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); } IRType fromType = TyIllegal; StackSym *typeSpecSym = nullptr; if (block->globOptData.liveInt32Syms->Test(varSym->m_id) && !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id)) { fromType = TyInt32; typeSpecSym = varSym->GetInt32EquivSym(this->func); Assert(valueInfo); Assert(valueInfo->IsInt()); } else if (block->globOptData.liveFloat64Syms->Test(varSym->m_id)) { fromType = TyFloat64; typeSpecSym = varSym->GetFloat64EquivSym(this->func); // Ensure that all bailout FromVars that generate a value for this type-specialized sym will bail out on any non-number // value, even ones that have already been generated before. Float-specialized non-number values cannot be converted // back to Var since they will not go back to the original non-number value. The dead-store pass will update the bailout // kind on already-generated FromVars based on this bit. typeSpecSym->m_requiresBailOnNotNumber = true; // A previous float conversion may have used BailOutPrimitiveButString, which does not change the value type to say // definitely float, since it can also be a non-string primitive. 
The convert back to Var though, will cause that // bailout kind to be changed to BailOutNumberOnly in the dead-store phase, so from the point of the initial conversion // to float, that the value is definitely number. Since we don't know where the FromVar is, change the value type here. if(valueInfo) { if(!valueInfo->IsNumber()) { valueInfo = valueInfo->SpecializeToFloat64(alloc); ChangeValueInfo(block, value, valueInfo); regOpnd->SetValueType(valueInfo->Type()); } } else { value = NewGenericValue(ValueType::Float); valueInfo = value->GetValueInfo(); block->globOptData.SetValue(value, varSym); regOpnd->SetValueType(valueInfo->Type()); } } else { #ifdef ENABLE_SIMDJS // SIMD_JS Assert(block->globOptData.IsLiveAsSimd128(varSym)); if (block->globOptData.IsLiveAsSimd128F4(varSym)) { fromType = TySimd128F4; } else { Assert(block->globOptData.IsLiveAsSimd128I4(varSym)); fromType = TySimd128I4; } if (valueInfo) { if (fromType == TySimd128F4 && !valueInfo->Type().IsSimd128Float32x4()) { valueInfo = valueInfo->SpecializeToSimd128F4(alloc); ChangeValueInfo(block, value, valueInfo); regOpnd->SetValueType(valueInfo->Type()); } else if (fromType == TySimd128I4 && !valueInfo->Type().IsSimd128Int32x4()) { if (!valueInfo->Type().IsSimd128Int32x4()) { valueInfo = valueInfo->SpecializeToSimd128I4(alloc); ChangeValueInfo(block, value, valueInfo); regOpnd->SetValueType(valueInfo->Type()); } } } else { ValueType valueType = fromType == TySimd128F4 ? ValueType::GetSimd128(ObjectType::Simd128Float32x4) : ValueType::GetSimd128(ObjectType::Simd128Int32x4); value = NewGenericValue(valueType); valueInfo = value->GetValueInfo(); block->globOptData.SetValue(value, varSym); regOpnd->SetValueType(valueInfo->Type()); } ValueType valueType = valueInfo->Type(); // Should be definite if type-specialized Assert(valueType.IsSimd128()); typeSpecSym = varSym->GetSimd128EquivSym(fromType, this->func); #else Assert(UNREACHED); #endif } AssertOrFailFast(valueInfo); int32 intConstantValue; if (valueInfo->TryGetIntConstantValue(&intConstantValue)) { // Lower will tag or create a number directly newInstr = IR::Instr::New(Js::OpCode::LdC_A_I4, regOpnd, IR::IntConstOpnd::New(intConstantValue, TyInt32, instr->m_func), instr->m_func); } else { IR::RegOpnd * regNew = IR::RegOpnd::New(typeSpecSym, fromType, instr->m_func); Js::OpCode opcode = Js::OpCode::ToVar; regNew->SetIsJITOptimizedReg(true); newInstr = IR::Instr::New(opcode, regOpnd, regNew, instr->m_func); } newInstr->SetByteCodeOffset(instr); newInstr->GetDst()->AsRegOpnd()->SetIsJITOptimizedReg(true); ValueType valueType = valueInfo->Type(); if(fromType == TyInt32) { #if !INT32VAR // All 32-bit ints are taggable on 64-bit architectures IntConstantBounds constantBounds; AssertVerify(valueInfo->TryGetIntConstantBounds(&constantBounds)); if(constantBounds.IsTaggable()) #endif { // The value is within the taggable range, so set the opnd value types to TaggedInt to avoid the overflow check valueType = ValueType::GetTaggedInt(); } } newInstr->GetDst()->SetValueType(valueType); newInstr->GetSrc1()->SetValueType(valueType); IR::Instr *insertAfterInstr = instr->m_prev; if (instr == block->GetLastInstr() && (instr->IsBranchInstr() || instr->m_opcode == Js::OpCode::BailTarget)) { // Don't insert code between the branch and the preceding ByteCodeUses instrs... 
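        // Walk back over the ByteCodeUses run so the ToVar is inserted before it, keeping those
        // instructions adjacent to the branch: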
while(insertAfterInstr->m_opcode == Js::OpCode::ByteCodeUses) { insertAfterInstr = insertAfterInstr->m_prev; } } block->InsertInstrAfter(newInstr, insertAfterInstr); block->globOptData.liveVarSyms->Set(varSym->m_id); GOPT_TRACE_OPND(regOpnd, _u("Converting to var\n")); if (block->loop) { Assert(!this->IsLoopPrePass()); this->TryHoistInvariant(newInstr, block, value, value, nullptr, false); } if (needsUpdate) { // Make sure that the kill effect of the ToVar instruction is tracked and that the kill of a property // type is reflected in the current instruction. this->ProcessKills(newInstr); this->ValueNumberObjectType(newInstr->GetDst(), newInstr); if (instr->GetSrc1() && instr->GetSrc1()->IsSymOpnd() && instr->GetSrc1()->AsSymOpnd()->IsPropertySymOpnd()) { // Reprocess the load source. We need to reset the PropertySymOpnd fields first. IR::PropertySymOpnd *propertySymOpnd = instr->GetSrc1()->AsPropertySymOpnd(); if (propertySymOpnd->IsTypeCheckSeqCandidate()) { propertySymOpnd->SetTypeChecked(false); propertySymOpnd->SetTypeAvailable(false); propertySymOpnd->SetWriteGuardChecked(false); } this->FinishOptPropOp(instr, propertySymOpnd); instr = this->SetTypeCheckBailOut(instr->GetSrc1(), instr, nullptr); } } return instr; } IR::Instr * GlobOpt::ToInt32(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Value *val, IR::IndirOpnd *indir, bool lossy) { return this->ToTypeSpecUse(instr, opnd, block, val, indir, TyInt32, IR::BailOutIntOnly, lossy); } IR::Instr * GlobOpt::ToFloat64(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Value *val, IR::IndirOpnd *indir, IR::BailOutKind bailOutKind) { return this->ToTypeSpecUse(instr, opnd, block, val, indir, TyFloat64, bailOutKind); } IR::Instr * GlobOpt::ToTypeSpecUse(IR::Instr *instr, IR::Opnd *opnd, BasicBlock *block, Value *val, IR::IndirOpnd *indir, IRType toType, IR::BailOutKind bailOutKind, bool lossy, IR::Instr *insertBeforeInstr) { Assert(bailOutKind != IR::BailOutInvalid); IR::Instr *newInstr; if (!val && opnd->IsRegOpnd()) { val = block->globOptData.FindValue(opnd->AsRegOpnd()->m_sym); } ValueInfo *valueInfo = val ? val->GetValueInfo() : nullptr; bool needReplaceSrc = false; bool updateBlockLastInstr = false; if (instr) { needReplaceSrc = true; if (!insertBeforeInstr) { insertBeforeInstr = instr; } } else if (!insertBeforeInstr) { // Insert it at the end of the block insertBeforeInstr = block->GetLastInstr(); if (insertBeforeInstr->IsBranchInstr() || insertBeforeInstr->m_opcode == Js::OpCode::BailTarget) { // Don't insert code between the branch and the preceding ByteCodeUses instrs... while(insertBeforeInstr->m_prev->m_opcode == Js::OpCode::ByteCodeUses) { insertBeforeInstr = insertBeforeInstr->m_prev; } } else { insertBeforeInstr = insertBeforeInstr->m_next; updateBlockLastInstr = true; } } // Int constant values will be propagated into the instruction. For ArgOut_A_InlineBuiltIn, there's no benefit from // const-propping, so those are excluded. 
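    // The reg-opnd path below emits a FromVar (or a Conv_Prim when converting between already
    // type-specialized forms); operands with known constant values skip it and are instead handled by
    // the constant-propagation path further down (ArgOut_A_InlineBuiltIn excepted, as noted above).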
if (opnd->IsRegOpnd() && !( valueInfo && (valueInfo->HasIntConstantValue() || valueInfo->IsFloatConstant()) && (!instr || instr->m_opcode != Js::OpCode::ArgOut_A_InlineBuiltIn) )) { IR::RegOpnd *regSrc = opnd->AsRegOpnd(); StackSym *varSym = regSrc->m_sym; Js::OpCode opcode = Js::OpCode::FromVar; if (varSym->IsTypeSpec() || !block->globOptData.liveVarSyms->Test(varSym->m_id)) { // Conversion between int32 and float64 if (varSym->IsTypeSpec()) { varSym = varSym->GetVarEquivSym(this->func); } opcode = Js::OpCode::Conv_Prim; } Assert(block->globOptData.liveVarSyms->Test(varSym->m_id) || block->globOptData.IsTypeSpecialized(varSym)); StackSym *typeSpecSym = nullptr; BOOL isLive = FALSE; BVSparse<JitArenaAllocator> *livenessBv = nullptr; if(valueInfo && valueInfo->IsInt()) { // If two syms have the same value, one is lossy-int-specialized, and then the other is int-specialized, the value // would have been updated to definitely int. Upon using the lossy-int-specialized sym later, it would be flagged as // lossy while the value is definitely int. Since the bit-vectors are based on the sym and not the value, update the // lossy state. block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); } if (toType == TyInt32) { // Need to determine whether the conversion is actually lossy or lossless. If the value is an int, then it's a // lossless conversion despite the type of conversion requested. The liveness of the converted int32 sym needs to be // set to reflect the actual type of conversion done. Also, a lossless conversion needs the value to determine // whether the conversion may need to bail out. Assert(valueInfo); if(valueInfo->IsInt()) { lossy = false; } else { Assert(IsLoopPrePass() || !block->globOptData.IsInt32TypeSpecialized(varSym)); } livenessBv = block->globOptData.liveInt32Syms; isLive = livenessBv->Test(varSym->m_id) && (lossy || !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id)); if (this->IsLoopPrePass()) { if(!isLive) { livenessBv->Set(varSym->m_id); if(lossy) { block->globOptData.liveLossyInt32Syms->Set(varSym->m_id); } else { block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); } } if(!lossy) { Assert(bailOutKind == IR::BailOutIntOnly || bailOutKind == IR::BailOutExpectingInteger); valueInfo = valueInfo->SpecializeToInt32(alloc); ChangeValueInfo(nullptr, val, valueInfo); if(needReplaceSrc) { opnd->SetValueType(valueInfo->Type()); } } return instr; } typeSpecSym = varSym->GetInt32EquivSym(this->func); if (!isLive) { if (!opnd->IsVar() || !block->globOptData.liveVarSyms->Test(varSym->m_id) || (block->globOptData.liveFloat64Syms->Test(varSym->m_id) && valueInfo && valueInfo->IsLikelyFloat())) { Assert(block->globOptData.liveFloat64Syms->Test(varSym->m_id)); if(!lossy && !valueInfo->IsInt()) { // Shouldn't try to do a lossless conversion from float64 to int32 when the value is not known to be an // int. There are cases where we need more than two passes over loops to flush out all dependencies. // It's possible for the loop prepass to think that a sym s1 remains an int because it acquires the // value of another sym s2 that is an int in the prepass at that time. However, s2 can become a float // later in the loop body, in which case s1 would become a float on the second iteration of the loop. By // that time, we would have already committed to having s1 live as a lossless int on entry into the // loop, and we end up having to compensate by doing a lossless conversion from float to int, which will // need a bailout and will most likely bail out. 
// // If s2 becomes a var instead of a float, then the compensation is legal although not ideal. After // enough bailouts, rejit would be triggered with aggressive int type spec turned off. For the // float-to-int conversion though, there's no point in emitting a bailout because we already know that // the value is a float and has high probability of bailing out (whereas a var has a chance to be a // tagged int), and so currently lossless conversion from float to int with bailout is not supported. // // So, treating this case as a compile-time bailout. The exception will trigger the jit work item to be // restarted with aggressive int type specialization disabled. if(bailOutKind == IR::BailOutExpectingInteger) { Assert(IsSwitchOptEnabledForIntTypeSpec()); throw Js::RejitException(RejitReason::DisableSwitchOptExpectingInteger); } else { Assert(DoAggressiveIntTypeSpec()); if(PHASE_TRACE(Js::BailOutPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Output::Print( _u("BailOut (compile-time): function: %s (%s) varSym: "), this->func->GetJITFunctionBody()->GetDisplayName(), this->func->GetDebugNumberSet(debugStringBuffer), varSym->m_id); #if DBG_DUMP varSym->Dump(); #else Output::Print(_u("s%u"), varSym->m_id); #endif if(varSym->HasByteCodeRegSlot()) { Output::Print(_u(" byteCodeReg: R%u"), varSym->GetByteCodeRegSlot()); } Output::Print(_u(" (lossless conversion from float64 to int32)\n")); Output::Flush(); } if(!DoAggressiveIntTypeSpec()) { // Aggressive int type specialization is already off for some reason. Prevent trying to rejit again // because it won't help and the same thing will happen again. Just abort jitting this function. if(PHASE_TRACE(Js::BailOutPhase, this->func)) { Output::Print(_u(" Aborting JIT because AggressiveIntTypeSpec is already off\n")); Output::Flush(); } throw Js::OperationAbortedException(); } throw Js::RejitException(RejitReason::AggressiveIntTypeSpecDisabled); } } if(opnd->IsVar()) { regSrc->SetType(TyFloat64); regSrc->m_sym = varSym->GetFloat64EquivSym(this->func); opcode = Js::OpCode::Conv_Prim; } else { Assert(regSrc->IsFloat64()); Assert(regSrc->m_sym->IsFloat64()); Assert(opcode == Js::OpCode::Conv_Prim); } } } GOPT_TRACE_OPND(regSrc, _u("Converting to int32\n")); } else if (toType == TyFloat64) { // float64 typeSpecSym = varSym->GetFloat64EquivSym(this->func); if(!IsLoopPrePass() && typeSpecSym->m_requiresBailOnNotNumber && block->globOptData.IsFloat64TypeSpecialized(varSym)) { // This conversion is already protected by a BailOutNumberOnly bailout (or at least it will be after the // dead-store phase). Since 'requiresBailOnNotNumber' is not flow-based, change the value to definitely float. if(valueInfo) { if(!valueInfo->IsNumber()) { valueInfo = valueInfo->SpecializeToFloat64(alloc); ChangeValueInfo(block, val, valueInfo); opnd->SetValueType(valueInfo->Type()); } } else { val = NewGenericValue(ValueType::Float); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); opnd->SetValueType(valueInfo->Type()); } } if(bailOutKind == IR::BailOutNumberOnly) { if(!IsLoopPrePass()) { // Ensure that all bailout FromVars that generate a value for this type-specialized sym will bail out on any // non-number value, even ones that have already been generated before. The dead-store pass will update the // bailout kind on already-generated FromVars based on this bit. 
typeSpecSym->m_requiresBailOnNotNumber = true; } } else if(typeSpecSym->m_requiresBailOnNotNumber) { Assert(bailOutKind == IR::BailOutPrimitiveButString); bailOutKind = IR::BailOutNumberOnly; } livenessBv = block->globOptData.liveFloat64Syms; isLive = livenessBv->Test(varSym->m_id); if (this->IsLoopPrePass()) { if(!isLive) { livenessBv->Set(varSym->m_id); } if (this->OptIsInvariant(opnd, block, this->prePassLoop, val, false, true)) { this->prePassLoop->forceFloat64SymsOnEntry->Set(varSym->m_id); } else { Sym *symStore = (valueInfo ? valueInfo->GetSymStore() : NULL); if (symStore && symStore != varSym && this->OptIsInvariant(symStore, block, this->prePassLoop, block->globOptData.FindValue(symStore), false, true)) { // If symStore is assigned to sym and we want sym to be type-specialized, for symStore to be specialized // outside the loop. this->prePassLoop->forceFloat64SymsOnEntry->Set(symStore->m_id); } } if(bailOutKind == IR::BailOutNumberOnly) { if(valueInfo) { valueInfo = valueInfo->SpecializeToFloat64(alloc); ChangeValueInfo(block, val, valueInfo); } else { val = NewGenericValue(ValueType::Float); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); } if(needReplaceSrc) { opnd->SetValueType(valueInfo->Type()); } } return instr; } if (!isLive && regSrc->IsVar()) { if (!block->globOptData.liveVarSyms->Test(varSym->m_id) || ( block->globOptData.liveInt32Syms->Test(varSym->m_id) && !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id) && valueInfo && valueInfo->IsLikelyInt() )) { Assert(block->globOptData.liveInt32Syms->Test(varSym->m_id)); Assert(!block->globOptData.liveLossyInt32Syms->Test(varSym->m_id)); // Shouldn't try to convert a lossy int32 to anything regSrc->SetType(TyInt32); regSrc->m_sym = varSym->GetInt32EquivSym(this->func); opcode = Js::OpCode::Conv_Prim; } } GOPT_TRACE_OPND(regSrc, _u("Converting to float64\n")); } #ifdef ENABLE_SIMDJS else { // SIMD_JS Assert(IRType_IsSimd128(toType)); // Get or create type-spec sym typeSpecSym = varSym->GetSimd128EquivSym(toType, this->func); if (!IsLoopPrePass() && block->globOptData.IsSimd128TypeSpecialized(toType, varSym)) { // Consider: Is this needed ? Shouldn't this have been done at previous FromVar since the simd128 sym is alive ? if (valueInfo) { if (!valueInfo->IsSimd128(toType)) { valueInfo = valueInfo->SpecializeToSimd128(toType, alloc); ChangeValueInfo(block, val, valueInfo); opnd->SetValueType(valueInfo->Type()); } } else { val = NewGenericValue(GetValueTypeFromIRType(toType)); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); opnd->SetValueType(valueInfo->Type()); } } livenessBv = block->globOptData.GetSimd128LivenessBV(toType); isLive = livenessBv->Test(varSym->m_id); if (this->IsLoopPrePass()) { // FromVar Hoisting BVSparse<Memory::JitArenaAllocator> * forceSimd128SymsOnEntry; forceSimd128SymsOnEntry = \ toType == TySimd128F4 ? this->prePassLoop->forceSimd128F4SymsOnEntry : this->prePassLoop->forceSimd128I4SymsOnEntry; if (!isLive) { livenessBv->Set(varSym->m_id); } // Be aggressive with hoisting only if value is always initialized to SIMD type before entering loop. // This reduces the chance that the FromVar gets executed while the specialized instruction in the loop is not. Leading to unnecessary excessive bailouts. if (val && !val->GetValueInfo()->HasBeenUndefined() && !val->GetValueInfo()->HasBeenNull() && this->OptIsInvariant(opnd, block, this->prePassLoop, val, false, true)) { forceSimd128SymsOnEntry->Set(varSym->m_id); } else { Sym *symStore = (valueInfo ? 
valueInfo->GetSymStore() : NULL); Value * value = symStore ? block->globOptData.FindValue(symStore) : nullptr; if (symStore && symStore != varSym && value && !value->GetValueInfo()->HasBeenUndefined() && !value->GetValueInfo()->HasBeenNull() && this->OptIsInvariant(symStore, block, this->prePassLoop, value, true, true)) { // If symStore is assigned to sym and we want sym to be type-specialized, for symStore to be specialized // outside the loop. forceSimd128SymsOnEntry->Set(symStore->m_id); } } Assert(bailOutKind == IR::BailOutSimd128F4Only || bailOutKind == IR::BailOutSimd128I4Only); // We are in loop prepass, we haven't propagated the value info to the src. Do it now. if (valueInfo) { valueInfo = valueInfo->SpecializeToSimd128(toType, alloc); ChangeValueInfo(block, val, valueInfo); } else { val = NewGenericValue(GetValueTypeFromIRType(toType)); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); } if (needReplaceSrc) { opnd->SetValueType(valueInfo->Type()); } return instr; } GOPT_TRACE_OPND(regSrc, _u("Converting to Simd128\n")); } #endif bool needLoad = false; if (needReplaceSrc) { bool wasDead = regSrc->GetIsDead(); // needReplaceSrc means we are type specializing a use, and need to replace the src on the instr if (!isLive) { needLoad = true; // ReplaceSrc will delete it. regSrc = regSrc->Copy(instr->m_func)->AsRegOpnd(); } IR::RegOpnd * regNew = IR::RegOpnd::New(typeSpecSym, toType, instr->m_func); if(valueInfo) { regNew->SetValueType(valueInfo->Type()); regNew->m_wasNegativeZeroPreventedByBailout = valueInfo->WasNegativeZeroPreventedByBailout(); } regNew->SetIsDead(wasDead); regNew->SetIsJITOptimizedReg(true); this->CaptureByteCodeSymUses(instr); if (indir == nullptr) { instr->ReplaceSrc(opnd, regNew); } else { indir->ReplaceIndexOpnd(regNew); } opnd = regNew; if (!needLoad) { Assert(isLive); return instr; } } else { // We just need to insert a load of a type spec sym if(isLive) { return instr; } // Insert it before the specified instruction instr = insertBeforeInstr; } IR::RegOpnd *regDst = IR::RegOpnd::New(typeSpecSym, toType, instr->m_func); bool isBailout = false; bool isHoisted = false; bool isInLandingPad = (block->next && !block->next->isDeleted && block->next->isLoopHeader); if (isInLandingPad) { Loop *loop = block->next->loop; Assert(loop && loop->landingPad == block); Assert(loop->bailOutInfo); } if (opcode == Js::OpCode::FromVar) { if (toType == TyInt32) { Assert(valueInfo); if (lossy) { if (!valueInfo->IsPrimitive() && !block->globOptData.IsTypeSpecialized(varSym)) { // Lossy conversions to int32 on non-primitive values may have implicit calls to toString or valueOf, which // may be overridden to have a side effect. The side effect needs to happen every time the conversion is // supposed to happen, so the resulting lossy int32 value cannot be reused. Bail out on implicit calls. Assert(DoLossyIntTypeSpec()); bailOutKind = IR::BailOutOnNotPrimitive; isBailout = true; } } else if (!valueInfo->IsInt()) { // The operand is likely an int (hence the request to convert to int), so bail out if it's not an int. Only // bail out if a lossless conversion to int is requested. Lossy conversions to int such as in (a | 0) don't // need to bail out. 
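                // The value is only likely an int, so a lossless FromVar needs a runtime type check; this
                // path is expected only under switch opt (BailOutExpectingInteger) or aggressive int type
                // spec, as the asserts below verify.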
if (bailOutKind == IR::BailOutExpectingInteger) { Assert(IsSwitchOptEnabledForIntTypeSpec()); } else { Assert(DoAggressiveIntTypeSpec()); } isBailout = true; } } else if (toType == TyFloat64 && (!valueInfo || !valueInfo->IsNumber())) { // Bailout if converting vars to float if we can't prove they are floats: // x = str + float; -> need to bailout if str is a string // // x = obj * 0.1; // y = obj * 0.2; -> if obj has valueof, we'll only call valueof once on the FromVar conversion... Assert(bailOutKind != IR::BailOutInvalid); isBailout = true; } #ifdef ENABLE_SIMDJS else if (IRType_IsSimd128(toType) && (!valueInfo || !valueInfo->IsSimd128(toType))) { Assert(toType == TySimd128F4 && bailOutKind == IR::BailOutSimd128F4Only || toType == TySimd128I4 && bailOutKind == IR::BailOutSimd128I4Only); isBailout = true; } #endif } if (isBailout) { if (isInLandingPad) { Loop *loop = block->next->loop; this->EnsureBailTarget(loop); instr = loop->bailOutInfo->bailOutInstr; updateBlockLastInstr = false; newInstr = IR::BailOutInstr::New(opcode, bailOutKind, loop->bailOutInfo, instr->m_func); newInstr->SetDst(regDst); newInstr->SetSrc1(regSrc); } else { newInstr = IR::BailOutInstr::New(opcode, regDst, regSrc, bailOutKind, instr, instr->m_func); } } else { newInstr = IR::Instr::New(opcode, regDst, regSrc, instr->m_func); } newInstr->SetByteCodeOffset(instr); instr->InsertBefore(newInstr); if (updateBlockLastInstr) { block->SetLastInstr(newInstr); } regDst->SetIsJITOptimizedReg(true); newInstr->GetSrc1()->AsRegOpnd()->SetIsJITOptimizedReg(true); ValueInfo *const oldValueInfo = valueInfo; if(valueInfo) { newInstr->GetSrc1()->SetValueType(valueInfo->Type()); } if(isBailout) { Assert(opcode == Js::OpCode::FromVar); if(toType == TyInt32) { Assert(valueInfo); if(!lossy) { Assert(bailOutKind == IR::BailOutIntOnly || bailOutKind == IR::BailOutExpectingInteger); valueInfo = valueInfo->SpecializeToInt32(alloc, isPerformingLoopBackEdgeCompensation); ChangeValueInfo(nullptr, val, valueInfo); int32 intConstantValue; if(indir && needReplaceSrc && valueInfo->TryGetIntConstantValue(&intConstantValue)) { // A likely-int value can have constant bounds due to conditional branches narrowing its range. Now that // the sym has been proven to be an int, the likely-int value, after specialization, will be constant. // Replace the index opnd in the indir with an offset. 
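                        // Sketch: for arr[i] with i now proven to be, say, the constant 5, the index
                        // register is dropped and the indir simply becomes [base + 5].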
Assert(opnd == indir->GetIndexOpnd()); Assert(indir->GetScale() == 0); indir->UnlinkIndexOpnd()->Free(instr->m_func); opnd = nullptr; indir->SetOffset(intConstantValue); } } } else if (toType == TyFloat64) { if(bailOutKind == IR::BailOutNumberOnly) { if(valueInfo) { valueInfo = valueInfo->SpecializeToFloat64(alloc); ChangeValueInfo(block, val, valueInfo); } else { val = NewGenericValue(ValueType::Float); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); } } } else { Assert(IRType_IsSimd128(toType)); if (valueInfo) { valueInfo = valueInfo->SpecializeToSimd128(toType, alloc); ChangeValueInfo(block, val, valueInfo); } else { val = NewGenericValue(GetValueTypeFromIRType(toType)); valueInfo = val->GetValueInfo(); block->globOptData.SetValue(val, varSym); } } } if(valueInfo) { newInstr->GetDst()->SetValueType(valueInfo->Type()); if(needReplaceSrc && opnd) { opnd->SetValueType(valueInfo->Type()); } } if (block->loop) { Assert(!this->IsLoopPrePass()); isHoisted = this->TryHoistInvariant(newInstr, block, val, val, nullptr, false, lossy, false, bailOutKind); } if (isBailout) { if (!isHoisted && !isInLandingPad) { if(valueInfo) { // Since this is a pre-op bailout, the old value info should be used for the purposes of bailout. For // instance, the value info could be LikelyInt but with a constant range. Once specialized to int, the value // info would be an int constant. However, the int constant is only guaranteed if the value is actually an // int, which this conversion is verifying, so bailout cannot assume the constant value. if(oldValueInfo) { val->SetValueInfo(oldValueInfo); } else { block->globOptData.ClearSymValue(varSym); } } // Fill in bail out info if the FromVar is a bailout instr, and it wasn't hoisted as invariant. // If it was hoisted, the invariant code will fill out the bailout info with the loop landing pad bailout info. this->FillBailOutInfo(block, newInstr->GetBailOutInfo()); if(valueInfo) { // Restore the new value info after filling the bailout info if(oldValueInfo) { val->SetValueInfo(valueInfo); } else { block->globOptData.SetValue(val, varSym); } } } } // Now that we've captured the liveness in the bailout info, we can mark this as live. // This type specialized sym isn't live if the FromVar bails out. livenessBv->Set(varSym->m_id); if(toType == TyInt32) { if(lossy) { block->globOptData.liveLossyInt32Syms->Set(varSym->m_id); } else { block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); } } } else { Assert(valueInfo); if(opnd->IsRegOpnd() && valueInfo->IsInt()) { // If two syms have the same value, one is lossy-int-specialized, and then the other is int-specialized, the value // would have been updated to definitely int. Upon using the lossy-int-specialized sym later, it would be flagged as // lossy while the value is definitely int. Since the bit-vectors are based on the sym and not the value, update the // lossy state. block->globOptData.liveLossyInt32Syms->Clear(opnd->AsRegOpnd()->m_sym->m_id); if(toType == TyInt32) { lossy = false; } } if (this->IsLoopPrePass()) { if(opnd->IsRegOpnd()) { StackSym *const sym = opnd->AsRegOpnd()->m_sym; if(toType == TyInt32) { Assert(!sym->IsTypeSpec()); block->globOptData.liveInt32Syms->Set(sym->m_id); if(lossy) { block->globOptData.liveLossyInt32Syms->Set(sym->m_id); } else { block->globOptData.liveLossyInt32Syms->Clear(sym->m_id); } } else { Assert(toType == TyFloat64); AnalysisAssert(instr); StackSym *const varSym = sym->IsTypeSpec() ? 
sym->GetVarEquivSym(instr->m_func) : sym; block->globOptData.liveFloat64Syms->Set(varSym->m_id); } } return instr; } if (!needReplaceSrc) { instr = insertBeforeInstr; } IR::Opnd *constOpnd; int32 intConstantValue; if(valueInfo->TryGetIntConstantValue(&intConstantValue)) { if(toType == TyInt32) { constOpnd = IR::IntConstOpnd::New(intConstantValue, TyInt32, instr->m_func); } else { Assert(toType == TyFloat64); constOpnd = IR::FloatConstOpnd::New(static_cast<FloatConstType>(intConstantValue), TyFloat64, instr->m_func); } } else if(valueInfo->IsFloatConstant()) { const FloatConstType floatValue = valueInfo->AsFloatConstant()->FloatValue(); if(toType == TyInt32) { Assert(lossy); constOpnd = IR::IntConstOpnd::New( Js::JavascriptMath::ToInt32(floatValue), TyInt32, instr->m_func); } else { Assert(toType == TyFloat64); constOpnd = IR::FloatConstOpnd::New(floatValue, TyFloat64, instr->m_func); } } else { Assert(opnd->IsVar()); Assert(opnd->IsAddrOpnd()); AssertMsg(opnd->AsAddrOpnd()->IsVar(), "We only expect to see addr that are var before lower."); // Don't need to capture uses, we are only replacing an addr opnd if(toType == TyInt32) { constOpnd = IR::IntConstOpnd::New(Js::TaggedInt::ToInt32(opnd->AsAddrOpnd()->m_address), TyInt32, instr->m_func); } else { Assert(toType == TyFloat64); constOpnd = IR::FloatConstOpnd::New(Js::TaggedInt::ToDouble(opnd->AsAddrOpnd()->m_address), TyFloat64, instr->m_func); } } if (toType == TyInt32) { if (needReplaceSrc) { CaptureByteCodeSymUses(instr); if(indir) { Assert(opnd == indir->GetIndexOpnd()); Assert(indir->GetScale() == 0); indir->UnlinkIndexOpnd()->Free(instr->m_func); indir->SetOffset(constOpnd->AsIntConstOpnd()->AsInt32()); } else { instr->ReplaceSrc(opnd, constOpnd); } } else { StackSym *varSym = opnd->AsRegOpnd()->m_sym; if(varSym->IsTypeSpec()) { varSym = varSym->GetVarEquivSym(nullptr); Assert(varSym); } if(block->globOptData.liveInt32Syms->TestAndSet(varSym->m_id)) { Assert(!!block->globOptData.liveLossyInt32Syms->Test(varSym->m_id) == lossy); } else { if(lossy) { block->globOptData.liveLossyInt32Syms->Set(varSym->m_id); } StackSym *int32Sym = varSym->GetInt32EquivSym(instr->m_func); IR::RegOpnd *int32Reg = IR::RegOpnd::New(int32Sym, TyInt32, instr->m_func); int32Reg->SetIsJITOptimizedReg(true); newInstr = IR::Instr::New(Js::OpCode::Ld_I4, int32Reg, constOpnd, instr->m_func); newInstr->SetByteCodeOffset(instr); instr->InsertBefore(newInstr); if (updateBlockLastInstr) { block->SetLastInstr(newInstr); } } } } else { StackSym *floatSym; bool newFloatSym = false; StackSym* varSym; if (opnd->IsRegOpnd()) { varSym = opnd->AsRegOpnd()->m_sym; if (varSym->IsTypeSpec()) { varSym = varSym->GetVarEquivSym(nullptr); Assert(varSym); } floatSym = varSym->GetFloat64EquivSym(instr->m_func); } else { varSym = block->globOptData.GetCopyPropSym(nullptr, val); if(!varSym) { // Clear the symstore to ensure it's set below to this new symbol this->SetSymStoreDirect(val->GetValueInfo(), nullptr); varSym = StackSym::New(TyVar, instr->m_func); newFloatSym = true; } floatSym = varSym->GetFloat64EquivSym(instr->m_func); } IR::RegOpnd *floatReg = IR::RegOpnd::New(floatSym, TyFloat64, instr->m_func); floatReg->SetIsJITOptimizedReg(true); // If the value is not live - let's load it. 
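            // (TestAndSet below also marks it live, so the constant load is emitted only the first time the value
            // is needed as a float64 in this block.)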
            if(!block->globOptData.liveFloat64Syms->TestAndSet(varSym->m_id))
            {
                newInstr = IR::Instr::New(Js::OpCode::LdC_F8_R8, floatReg, constOpnd, instr->m_func);
                newInstr->SetByteCodeOffset(instr);
                instr->InsertBefore(newInstr);
                if (updateBlockLastInstr)
                {
                    block->SetLastInstr(newInstr);
                }
                if(newFloatSym)
                {
                    block->globOptData.SetValue(val, varSym);
                }

                // Src is always invariant, but check if the dst is, and then hoist.
                if (block->loop &&
                    (
                        (newFloatSym && block->loop->CanHoistInvariants()) ||
                        this->OptIsInvariant(floatReg, block, block->loop, val, false, false)
                    ))
                {
                    Assert(!this->IsLoopPrePass());
                    this->OptHoistInvariant(newInstr, block, block->loop, val, val, nullptr, false);
                }
            }

            if (needReplaceSrc)
            {
                CaptureByteCodeSymUses(instr);
                instr->ReplaceSrc(opnd, floatReg);
            }
        }

        return instr;
    }

    return newInstr;
}

void
GlobOpt::ToVarRegOpnd(IR::RegOpnd *dst, BasicBlock *block)
{
    ToVarStackSym(dst->m_sym, block);
}

void
GlobOpt::ToVarStackSym(StackSym *varSym, BasicBlock *block)
{
    // In asm.js functions there are mostly no var syms, so also check that this is the primary
    // (non-type-specialized) sym.
    Assert(!varSym->IsTypeSpec());

    block->globOptData.liveVarSyms->Set(varSym->m_id);
    block->globOptData.liveInt32Syms->Clear(varSym->m_id);
    block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id);
    block->globOptData.liveFloat64Syms->Clear(varSym->m_id);

#ifdef ENABLE_SIMDJS
    // SIMD_JS
    block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id);
    block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id);
#endif
}

void
GlobOpt::ToInt32Dst(IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block)
{
    StackSym *varSym = dst->m_sym;
    Assert(!varSym->IsTypeSpec());

    if (!this->IsLoopPrePass() && varSym->IsVar())
    {
        StackSym *int32Sym = varSym->GetInt32EquivSym(instr->m_func);

        // Use UnlinkDst / SetDst to make sure isSingleDef is tracked properly,
        // since we'll just be hammering the symbol.
        dst = instr->UnlinkDst()->AsRegOpnd();
        dst->m_sym = int32Sym;
        dst->SetType(TyInt32);
        instr->SetDst(dst);
    }

    block->globOptData.liveInt32Syms->Set(varSym->m_id);
    block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); // The store makes it lossless
    block->globOptData.liveVarSyms->Clear(varSym->m_id);
    block->globOptData.liveFloat64Syms->Clear(varSym->m_id);

#ifdef ENABLE_SIMDJS
    // SIMD_JS
    block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id);
    block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id);
#endif
}

void
GlobOpt::ToUInt32Dst(IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block)
{
    // This should only be called for asm.js functions.
    Assert(GetIsAsmJSFunc());
    StackSym *varSym = dst->m_sym;
    Assert(!varSym->IsTypeSpec());

    block->globOptData.liveInt32Syms->Set(varSym->m_id);
    block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); // The store makes it lossless
    block->globOptData.liveVarSyms->Clear(varSym->m_id);
    block->globOptData.liveFloat64Syms->Clear(varSym->m_id);

#ifdef ENABLE_SIMDJS
    // SIMD_JS
    block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id);
    block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id);
#endif
}

void
GlobOpt::ToFloat64Dst(IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block)
{
    StackSym *varSym = dst->m_sym;
    Assert(!varSym->IsTypeSpec());

    if (!this->IsLoopPrePass() && varSym->IsVar())
    {
        StackSym *float64Sym = varSym->GetFloat64EquivSym(this->func);

        // Use UnlinkDst / SetDst to make sure isSingleDef is tracked properly,
        // since we'll just be hammering the symbol.
dst = instr->UnlinkDst()->AsRegOpnd(); dst->m_sym = float64Sym; dst->SetType(TyFloat64); instr->SetDst(dst); } block->globOptData.liveFloat64Syms->Set(varSym->m_id); block->globOptData.liveVarSyms->Clear(varSym->m_id); block->globOptData.liveInt32Syms->Clear(varSym->m_id); block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); #ifdef ENABLE_SIMDJS // SIMD_JS block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id); block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id); #endif } #ifdef ENABLE_SIMDJS // SIMD_JS void GlobOpt::ToSimd128Dst(IRType toType, IR::Instr *instr, IR::RegOpnd *dst, BasicBlock *block) { StackSym *varSym = dst->m_sym; Assert(!varSym->IsTypeSpec()); BVSparse<JitArenaAllocator> * livenessBV = block->globOptData.GetSimd128LivenessBV(toType); Assert(livenessBV); if (!this->IsLoopPrePass() && varSym->IsVar()) { StackSym *simd128Sym = varSym->GetSimd128EquivSym(toType, this->func); // Use UnlinkDst / SetDst to make sure isSingleDef is tracked properly, // since we'll just be hammering the symbol. dst = instr->UnlinkDst()->AsRegOpnd(); dst->m_sym = simd128Sym; dst->SetType(toType); instr->SetDst(dst); } block->globOptData.liveFloat64Syms->Clear(varSym->m_id); block->globOptData.liveVarSyms->Clear(varSym->m_id); block->globOptData.liveInt32Syms->Clear(varSym->m_id); block->globOptData.liveLossyInt32Syms->Clear(varSym->m_id); // SIMD_JS block->globOptData.liveSimd128F4Syms->Clear(varSym->m_id); block->globOptData.liveSimd128I4Syms->Clear(varSym->m_id); livenessBV->Set(varSym->m_id); } #endif static void SetIsConstFlag(StackSym* dstSym, int64 value) { Assert(dstSym); dstSym->SetIsInt64Const(); } static void SetIsConstFlag(StackSym* dstSym, int value) { Assert(dstSym); dstSym->SetIsIntConst(value); } static IR::Opnd* CreateIntConstOpnd(IR::Instr* instr, int64 value) { return (IR::Opnd*)IR::Int64ConstOpnd::New(value, instr->GetDst()->GetType(), instr->m_func); } static IR::Opnd* CreateIntConstOpnd(IR::Instr* instr, int value) { IntConstType constVal; if (instr->GetDst()->IsUnsigned()) { // we should zero extend in case of uint constVal = (uint32)value; } else { constVal = value; } return (IR::Opnd*)IR::IntConstOpnd::New(constVal, instr->GetDst()->GetType(), instr->m_func); } template <typename T> IR::Opnd* GlobOpt::ReplaceWConst(IR::Instr **pInstr, T value, Value **pDstVal) { IR::Instr * &instr = *pInstr; IR::Opnd * constOpnd = CreateIntConstOpnd(instr, value); instr->ReplaceSrc1(constOpnd); instr->FreeSrc2(); this->OptSrc(constOpnd, &instr); IR::Opnd *dst = instr->GetDst(); StackSym *dstSym = dst->AsRegOpnd()->m_sym; if (dstSym->IsSingleDef()) { SetIsConstFlag(dstSym, value); } GOPT_TRACE_INSTR(instr, _u("Constant folding to %d: \n"), value); *pDstVal = GetIntConstantValue(value, instr, dst); return dst; } template <typename T> bool GlobOpt::OptConstFoldBinaryWasm( IR::Instr** pInstr, const Value* src1, const Value* src2, Value **pDstVal) { IR::Instr* &instr = *pInstr; if (!DoConstFold()) { return false; } T src1IntConstantValue, src2IntConstantValue; if (!src1 || !src1->GetValueInfo()->TryGetIntConstantValue(&src1IntConstantValue, false) || //a bit sketchy: false for int32 means likelyInt = false !src2 || !src2->GetValueInfo()->TryGetIntConstantValue(&src2IntConstantValue, false) //and unsigned = false for int64 ) { return false; } int64 tmpValueOut; if (!instr->BinaryCalculatorT<T>(src1IntConstantValue, src2IntConstantValue, &tmpValueOut, func->GetJITFunctionBody()->IsWasmFunction())) { return false; } this->CaptureByteCodeSymUses(instr); IR::Opnd *dst = 
(instr->GetDst()->IsInt64()) ? //dst can be int32 for int64 comparison operators ReplaceWConst(pInstr, tmpValueOut, pDstVal) : ReplaceWConst(pInstr, (int)tmpValueOut, pDstVal); instr->m_opcode = Js::OpCode::Ld_I4; this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock); return true; } bool GlobOpt::OptConstFoldBinary( IR::Instr * *pInstr, const IntConstantBounds &src1IntConstantBounds, const IntConstantBounds &src2IntConstantBounds, Value **pDstVal) { IR::Instr * &instr = *pInstr; int32 value; IR::IntConstOpnd *constOpnd; if (!DoConstFold()) { return false; } int32 src1IntConstantValue = -1; int32 src2IntConstantValue = -1; int32 src1MaxIntConstantValue = -1; int32 src2MaxIntConstantValue = -1; int32 src1MinIntConstantValue = -1; int32 src2MinIntConstantValue = -1; if (instr->IsBranchInstr()) { src1MinIntConstantValue = src1IntConstantBounds.LowerBound(); src1MaxIntConstantValue = src1IntConstantBounds.UpperBound(); src2MinIntConstantValue = src2IntConstantBounds.LowerBound(); src2MaxIntConstantValue = src2IntConstantBounds.UpperBound(); } else if (src1IntConstantBounds.IsConstant() && src2IntConstantBounds.IsConstant()) { src1IntConstantValue = src1IntConstantBounds.LowerBound(); src2IntConstantValue = src2IntConstantBounds.LowerBound(); } else { return false; } IntConstType tmpValueOut; if (!instr->BinaryCalculator(src1IntConstantValue, src2IntConstantValue, &tmpValueOut, TyInt32) || !Math::FitsInDWord(tmpValueOut)) { return false; } value = (int32)tmpValueOut; this->CaptureByteCodeSymUses(instr); constOpnd = IR::IntConstOpnd::New(value, TyInt32, instr->m_func); instr->ReplaceSrc1(constOpnd); instr->FreeSrc2(); this->OptSrc(constOpnd, &instr); IR::Opnd *dst = instr->GetDst(); Assert(dst->IsRegOpnd()); StackSym *dstSym = dst->AsRegOpnd()->m_sym; if (dstSym->IsSingleDef()) { dstSym->SetIsIntConst(value); } GOPT_TRACE_INSTR(instr, _u("Constant folding to %d: \n"), value); *pDstVal = GetIntConstantValue(value, instr, dst); if (IsTypeSpecPhaseOff(this->func)) { instr->m_opcode = Js::OpCode::LdC_A_I4; this->ToVarRegOpnd(dst->AsRegOpnd(), this->currentBlock); } else { instr->m_opcode = Js::OpCode::Ld_I4; this->ToInt32Dst(instr, dst->AsRegOpnd(), this->currentBlock); } // If this is an induction variable, then treat it the way the prepass would have if it had seen // the assignment and the resulting change to the value number, and mark it as indeterminate. 
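    // For example, if the dst is an induction variable of an enclosing loop and its update was just folded to a
    // constant, the per-iteration change tracked for it no longer holds.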
for (Loop * loop = this->currentBlock->loop; loop; loop = loop->parent) { InductionVariable *iv = nullptr; if (loop->inductionVariables && loop->inductionVariables->TryGetReference(dstSym->m_id, &iv)) { iv->SetChangeIsIndeterminate(); } } return true; } void GlobOpt::OptConstFoldBr(bool test, IR::Instr *instr, Value * src1Val, Value * src2Val) { GOPT_TRACE_INSTR(instr, _u("Constant folding to branch: ")); BasicBlock *deadBlock; if (src1Val) { this->ToInt32(instr, instr->GetSrc1(), this->currentBlock, src1Val, nullptr, false); } if (src2Val) { this->ToInt32(instr, instr->GetSrc2(), this->currentBlock, src2Val, nullptr, false); } this->CaptureByteCodeSymUses(instr); if (test) { instr->m_opcode = Js::OpCode::Br; instr->FreeSrc1(); if(instr->GetSrc2()) { instr->FreeSrc2(); } deadBlock = instr->m_next->AsLabelInstr()->GetBasicBlock(); } else { AssertMsg(instr->m_next->IsLabelInstr(), "Next instr of branch should be a label..."); if(instr->AsBranchInstr()->IsMultiBranch()) { return; } deadBlock = instr->AsBranchInstr()->GetTarget()->GetBasicBlock(); instr->FreeSrc1(); if(instr->GetSrc2()) { instr->FreeSrc2(); } instr->m_opcode = Js::OpCode::Nop; } // Loop back edge: we would have already decremented data use count for the tail block when we processed the loop header. if (!(this->currentBlock->loop && this->currentBlock->loop->GetHeadBlock() == deadBlock)) { this->currentBlock->DecrementDataUseCount(); } this->currentBlock->RemoveDeadSucc(deadBlock, this->func->m_fg); if (deadBlock->GetPredList()->Count() == 0) { deadBlock->SetDataUseCount(0); } } void GlobOpt::ChangeValueType( BasicBlock *const block, Value *const value, const ValueType newValueType, const bool preserveSubclassInfo, const bool allowIncompatibleType) const { Assert(value); // Why are we trying to change the value type of the type sym value? Asserting here to make sure we don't deep copy the type sym's value info. Assert(!value->GetValueInfo()->IsJsType()); ValueInfo *const valueInfo = value->GetValueInfo(); const ValueType valueType(valueInfo->Type()); if(valueType == newValueType && (preserveSubclassInfo || valueInfo->IsGeneric())) { return; } // ArrayValueInfo has information specific to the array type, so make sure that doesn't change Assert( !preserveSubclassInfo || !valueInfo->IsArrayValueInfo() || newValueType.IsObject() && newValueType.GetObjectType() == valueInfo->GetObjectType()); Assert(!valueInfo->GetSymStore() || !valueInfo->GetSymStore()->IsStackSym() || !valueInfo->GetSymStore()->AsStackSym()->IsFromByteCodeConstantTable()); ValueInfo *const newValueInfo = preserveSubclassInfo ? valueInfo->Copy(alloc) : valueInfo->CopyWithGenericStructureKind(alloc); newValueInfo->Type() = newValueType; ChangeValueInfo(block, value, newValueInfo, allowIncompatibleType); } void GlobOpt::ChangeValueInfo(BasicBlock *const block, Value *const value, ValueInfo *const newValueInfo, const bool allowIncompatibleType, const bool compensated) const { Assert(value); Assert(newValueInfo); // The value type must be changed to something more specific or something more generic. For instance, it would be changed to // something more specific if the current value type is LikelyArray and checks have been done to ensure that it's an array, // and it would be changed to something more generic if a call kills the Array value type and it must be treated as // LikelyArray going forward. // There are cases where we change the type because of different profile information, and because of rejit, these profile information // may conflict. 
Need to allow incompatible type in those cause. However, the old type should be indefinite. Assert((allowIncompatibleType && !value->GetValueInfo()->IsDefinite()) || AreValueInfosCompatible(newValueInfo, value->GetValueInfo())); // ArrayValueInfo has information specific to the array type, so make sure that doesn't change Assert( !value->GetValueInfo()->IsArrayValueInfo() || !newValueInfo->IsArrayValueInfo() || newValueInfo->GetObjectType() == value->GetValueInfo()->GetObjectType()); if(block) { TrackValueInfoChangeForKills(block, value, newValueInfo, compensated); } value->SetValueInfo(newValueInfo); } bool GlobOpt::AreValueInfosCompatible(const ValueInfo *const v0, const ValueInfo *const v1) const { Assert(v0); Assert(v1); if(v0->IsUninitialized() || v1->IsUninitialized()) { return true; } const bool doAggressiveIntTypeSpec = DoAggressiveIntTypeSpec(); if(doAggressiveIntTypeSpec && (v0->IsInt() || v1->IsInt())) { // Int specialization in some uncommon loop cases involving dependencies, needs to allow specializing values of // arbitrary types, even values that are definitely not int, to compensate for aggressive assumptions made by a loop // prepass return true; } if ((v0->Type()).IsMixedTypedArrayPair(v1->Type()) || (v1->Type()).IsMixedTypedArrayPair(v0->Type())) { return true; } const bool doFloatTypeSpec = DoFloatTypeSpec(); if(doFloatTypeSpec && (v0->IsFloat() || v1->IsFloat())) { // Float specialization allows specializing values of arbitrary types, even values that are definitely not float return true; } #ifdef ENABLE_SIMDJS // SIMD_JS if (SIMD128_TYPE_SPEC_FLAG && v0->Type().IsSimd128()) { // We only type-spec Undefined values, Objects (possibly merged SIMD values), or actual SIMD values. if (v1->Type().IsLikelyUndefined() || v1->Type().IsLikelyNull()) { return true; } if (v1->Type().IsLikelyObject() && v1->Type().GetObjectType() == ObjectType::Object) { return true; } if (v1->Type().IsSimd128()) { return v0->Type().GetObjectType() == v1->Type().GetObjectType(); } } #endif const bool doArrayMissingValueCheckHoist = DoArrayMissingValueCheckHoist(); const bool doNativeArrayTypeSpec = DoNativeArrayTypeSpec(); const auto AreValueTypesCompatible = [=](const ValueType t0, const ValueType t1) { return t0.IsSubsetOf(t1, doAggressiveIntTypeSpec, doFloatTypeSpec, doArrayMissingValueCheckHoist, doNativeArrayTypeSpec) || t1.IsSubsetOf(t0, doAggressiveIntTypeSpec, doFloatTypeSpec, doArrayMissingValueCheckHoist, doNativeArrayTypeSpec); }; const ValueType t0(v0->Type().ToDefinite()), t1(v1->Type().ToDefinite()); if(t0.IsLikelyObject() && t1.IsLikelyObject()) { // Check compatibility for the primitive portions and the object portions of the value types separately if(AreValueTypesCompatible(t0.ToDefiniteObject(), t1.ToDefiniteObject()) && ( !t0.HasBeenPrimitive() || !t1.HasBeenPrimitive() || AreValueTypesCompatible(t0.ToDefinitePrimitiveSubset(), t1.ToDefinitePrimitiveSubset()) )) { return true; } } else if(AreValueTypesCompatible(t0, t1)) { return true; } const FloatConstantValueInfo *floatConstantValueInfo; const ValueInfo *likelyIntValueinfo; if(v0->IsFloatConstant() && v1->IsLikelyInt()) { floatConstantValueInfo = v0->AsFloatConstant(); likelyIntValueinfo = v1; } else if(v0->IsLikelyInt() && v1->IsFloatConstant()) { floatConstantValueInfo = v1->AsFloatConstant(); likelyIntValueinfo = v0; } else { return false; } // A float constant value with a value that is actually an int is a subset of a likely-int value. 
// Ideally, we should create an int constant value for this up front, such that IsInt() also returns true. There // were other issues with that, should see if that can be done. int32 int32Value; return Js::JavascriptNumber::TryGetInt32Value(floatConstantValueInfo->FloatValue(), &int32Value) && (!likelyIntValueinfo->IsLikelyTaggedInt() || !Js::TaggedInt::IsOverflow(int32Value)); } #if DBG void GlobOpt::VerifyArrayValueInfoForTracking( const ValueInfo *const valueInfo, const bool isJsArray, const BasicBlock *const block, const bool ignoreKnownImplicitCalls) const { Assert(valueInfo); Assert(valueInfo->IsAnyOptimizedArray()); Assert(isJsArray == valueInfo->IsArrayOrObjectWithArray()); Assert(!isJsArray == valueInfo->IsOptimizedTypedArray()); Assert(block); Loop *implicitCallsLoop; if(block->next && !block->next->isDeleted && block->next->isLoopHeader) { // Since a loop's landing pad does not have user code, determine whether disabling implicit calls is allowed in the // landing pad based on the loop for which this block is the landing pad. implicitCallsLoop = block->next->loop; Assert(implicitCallsLoop); Assert(implicitCallsLoop->landingPad == block); } else { implicitCallsLoop = block->loop; } Assert( !isJsArray || DoArrayCheckHoist(valueInfo->Type(), implicitCallsLoop) || ( ignoreKnownImplicitCalls && !(implicitCallsLoop ? ImplicitCallFlagsAllowOpts(implicitCallsLoop) : ImplicitCallFlagsAllowOpts(func)) )); Assert(!(isJsArray && valueInfo->HasNoMissingValues() && !DoArrayMissingValueCheckHoist())); Assert( !( valueInfo->IsArrayValueInfo() && ( valueInfo->AsArrayValueInfo()->HeadSegmentSym() || valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym() ) && !DoArraySegmentHoist(valueInfo->Type()) )); #if 0 // We can't assert here that there is only a head segment length sym if hoisting is allowed in the current block, // because we may have propagated the sym forward out of a loop, and hoisting may be allowed inside but not // outside the loop. Assert( isJsArray || !valueInfo->IsArrayValueInfo() || !valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym() || DoTypedArraySegmentLengthHoist(implicitCallsLoop) || ignoreKnownImplicitCalls || (implicitCallsLoop ? ImplicitCallFlagsAllowOpts(implicitCallsLoop) : ImplicitCallFlagsAllowOpts(func)) ); #endif Assert( !( isJsArray && valueInfo->IsArrayValueInfo() && valueInfo->AsArrayValueInfo()->LengthSym() && !DoArrayLengthHoist() )); } #endif void GlobOpt::TrackNewValueForKills(Value *const value) { Assert(value); if(!value->GetValueInfo()->IsAnyOptimizedArray()) { return; } DoTrackNewValueForKills(value); } void GlobOpt::DoTrackNewValueForKills(Value *const value) { Assert(value); ValueInfo *const valueInfo = value->GetValueInfo(); Assert(valueInfo->IsAnyOptimizedArray()); Assert(!valueInfo->IsArrayValueInfo()); // The value and value info here are new, so it's okay to modify the value info in-place Assert(!valueInfo->GetSymStore()); const bool isJsArray = valueInfo->IsArrayOrObjectWithArray(); Assert(!isJsArray == valueInfo->IsOptimizedTypedArray()); Loop *implicitCallsLoop; if(currentBlock->next && !currentBlock->next->isDeleted && currentBlock->next->isLoopHeader) { // Since a loop's landing pad does not have user code, determine whether disabling implicit calls is allowed in the // landing pad based on the loop for which this block is the landing pad. 
implicitCallsLoop = currentBlock->next->loop; Assert(implicitCallsLoop); Assert(implicitCallsLoop->landingPad == currentBlock); } else { implicitCallsLoop = currentBlock->loop; } if(isJsArray) { if(!DoArrayCheckHoist(valueInfo->Type(), implicitCallsLoop)) { // Array opts are disabled for this value type, so treat it as an indefinite value type going forward valueInfo->Type() = valueInfo->Type().ToLikely(); return; } if(valueInfo->HasNoMissingValues() && !DoArrayMissingValueCheckHoist()) { valueInfo->Type() = valueInfo->Type().SetHasNoMissingValues(false); } } #if DBG VerifyArrayValueInfoForTracking(valueInfo, isJsArray, currentBlock); #endif if(!isJsArray) { return; } // Can't assume going forward that it will definitely be an array without disabling implicit calls, because the // array may be transformed into an ES5 array. Since array opts are enabled, implicit calls can be disabled, and we can // treat it as a definite value type going forward, but the value needs to be tracked so that something like a call can // revert the value type to a likely version. CurrentBlockData()->valuesToKillOnCalls->Add(value); } void GlobOpt::TrackCopiedValueForKills(Value *const value) { Assert(value); if(!value->GetValueInfo()->IsAnyOptimizedArray()) { return; } DoTrackCopiedValueForKills(value); } void GlobOpt::DoTrackCopiedValueForKills(Value *const value) { Assert(value); ValueInfo *const valueInfo = value->GetValueInfo(); Assert(valueInfo->IsAnyOptimizedArray()); const bool isJsArray = valueInfo->IsArrayOrObjectWithArray(); Assert(!isJsArray == valueInfo->IsOptimizedTypedArray()); #if DBG VerifyArrayValueInfoForTracking(valueInfo, isJsArray, currentBlock); #endif if(!isJsArray && !(valueInfo->IsArrayValueInfo() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym())) { return; } // Can't assume going forward that it will definitely be an array without disabling implicit calls, because the // array may be transformed into an ES5 array. Since array opts are enabled, implicit calls can be disabled, and we can // treat it as a definite value type going forward, but the value needs to be tracked so that something like a call can // revert the value type to a likely version. CurrentBlockData()->valuesToKillOnCalls->Add(value); } void GlobOpt::TrackMergedValueForKills( Value *const value, GlobOptBlockData *const blockData, BVSparse<JitArenaAllocator> *const mergedValueTypesTrackedForKills) const { Assert(value); if(!value->GetValueInfo()->IsAnyOptimizedArray()) { return; } DoTrackMergedValueForKills(value, blockData, mergedValueTypesTrackedForKills); } void GlobOpt::DoTrackMergedValueForKills( Value *const value, GlobOptBlockData *const blockData, BVSparse<JitArenaAllocator> *const mergedValueTypesTrackedForKills) const { Assert(value); Assert(blockData); ValueInfo *valueInfo = value->GetValueInfo(); Assert(valueInfo->IsAnyOptimizedArray()); const bool isJsArray = valueInfo->IsArrayOrObjectWithArray(); Assert(!isJsArray == valueInfo->IsOptimizedTypedArray()); #if DBG VerifyArrayValueInfoForTracking(valueInfo, isJsArray, currentBlock, true); #endif if(!isJsArray && !(valueInfo->IsArrayValueInfo() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym())) { return; } // Can't assume going forward that it will definitely be an array without disabling implicit calls, because the // array may be transformed into an ES5 array. 
Since array opts are enabled, implicit calls can be disabled, and we can // treat it as a definite value type going forward, but the value needs to be tracked so that something like a call can // revert the value type to a likely version. if(!mergedValueTypesTrackedForKills || !mergedValueTypesTrackedForKills->TestAndSet(value->GetValueNumber())) { blockData->valuesToKillOnCalls->Add(value); } } void GlobOpt::TrackValueInfoChangeForKills(BasicBlock *const block, Value *const value, ValueInfo *const newValueInfo, const bool compensated) const { Assert(block); Assert(value); Assert(newValueInfo); ValueInfo *const oldValueInfo = value->GetValueInfo(); #if DBG if(oldValueInfo->IsAnyOptimizedArray()) { VerifyArrayValueInfoForTracking(oldValueInfo, oldValueInfo->IsArrayOrObjectWithArray(), block, compensated); } #endif const bool trackOldValueInfo = oldValueInfo->IsArrayOrObjectWithArray() || ( oldValueInfo->IsOptimizedTypedArray() && oldValueInfo->IsArrayValueInfo() && oldValueInfo->AsArrayValueInfo()->HeadSegmentLengthSym() ); Assert(trackOldValueInfo == block->globOptData.valuesToKillOnCalls->ContainsKey(value)); #if DBG if(newValueInfo->IsAnyOptimizedArray()) { VerifyArrayValueInfoForTracking(newValueInfo, newValueInfo->IsArrayOrObjectWithArray(), block, compensated); } #endif const bool trackNewValueInfo = newValueInfo->IsArrayOrObjectWithArray() || ( newValueInfo->IsOptimizedTypedArray() && newValueInfo->IsArrayValueInfo() && newValueInfo->AsArrayValueInfo()->HeadSegmentLengthSym() ); if(trackOldValueInfo == trackNewValueInfo) { return; } if(trackNewValueInfo) { block->globOptData.valuesToKillOnCalls->Add(value); } else { block->globOptData.valuesToKillOnCalls->Remove(value); } } void GlobOpt::ProcessValueKills(IR::Instr *const instr) { Assert(instr); ValueSet *const valuesToKillOnCalls = CurrentBlockData()->valuesToKillOnCalls; if(!IsLoopPrePass() && valuesToKillOnCalls->Count() == 0) { return; } const JsArrayKills kills = CheckJsArrayKills(instr); Assert(!kills.KillsArrayHeadSegments() || kills.KillsArrayHeadSegmentLengths()); if(IsLoopPrePass()) { rootLoopPrePass->jsArrayKills = rootLoopPrePass->jsArrayKills.Merge(kills); Assert( !rootLoopPrePass->parent || rootLoopPrePass->jsArrayKills.AreSubsetOf(rootLoopPrePass->parent->jsArrayKills)); if(kills.KillsAllArrays()) { rootLoopPrePass->needImplicitCallBailoutChecksForJsArrayCheckHoist = false; } if(valuesToKillOnCalls->Count() == 0) { return; } } if(kills.KillsAllArrays()) { Assert(kills.KillsTypedArrayHeadSegmentLengths()); // - Calls need to kill the value types of values in the following list. For instance, calls can transform a JS array // into an ES5 array, so any definitely-array value types need to be killed. Also, VirtualTypeArrays do not have // bounds checks; this can be problematic if the array is detached, so check to ensure that it is a virtual array. // Update the value types to likley to ensure a bailout that asserts Array type is generated. // - Calls also need to kill typed array head segment lengths. A typed array's array buffer may be transferred to a web // worker, in which case the typed array's length is set to zero. 
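        // For example, after an opaque call "f(arr)", a value tracked as a definite Array must fall back to
        // LikelyArray, since "f" could have turned the array into an ES5 array or detached a typed array's buffer.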
for(auto it = valuesToKillOnCalls->GetIterator(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *const valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); if (valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedVirtualTypedArray()) { ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false); continue; } ChangeValueInfo( nullptr, value, valueInfo->AsArrayValueInfo()->Copy(alloc, true, false /* copyHeadSegmentLength */, true)); } valuesToKillOnCalls->Clear(); return; } if(kills.KillsArraysWithNoMissingValues()) { // Some operations may kill arrays with no missing values in unlikely circumstances. Convert their value types to likely // versions so that the checks have to be redone. for(auto it = valuesToKillOnCalls->GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *const valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); if(!valueInfo->IsArrayOrObjectWithArray() || !valueInfo->HasNoMissingValues()) { continue; } ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false); it.RemoveCurrent(); } } if(kills.KillsNativeArrays()) { // Some operations may kill native arrays in (what should be) unlikely circumstances. Convert their value types to // likely versions so that the checks have to be redone. for(auto it = valuesToKillOnCalls->GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *const valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); if(!valueInfo->IsArrayOrObjectWithArray() || valueInfo->HasVarElements()) { continue; } ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false); it.RemoveCurrent(); } } const bool likelyKillsJsArraysWithNoMissingValues = IsOperationThatLikelyKillsJsArraysWithNoMissingValues(instr); if(!kills.KillsArrayHeadSegmentLengths()) { Assert(!kills.KillsArrayHeadSegments()); if(!likelyKillsJsArraysWithNoMissingValues && !kills.KillsArrayLengths()) { return; } } for(auto it = valuesToKillOnCalls->GetIterator(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); if(!valueInfo->IsArrayOrObjectWithArray()) { continue; } if(likelyKillsJsArraysWithNoMissingValues && valueInfo->HasNoMissingValues()) { ChangeValueType(nullptr, value, valueInfo->Type().SetHasNoMissingValues(false), true); valueInfo = value->GetValueInfo(); } if(!valueInfo->IsArrayValueInfo()) { continue; } ArrayValueInfo *const arrayValueInfo = valueInfo->AsArrayValueInfo(); const bool removeHeadSegment = kills.KillsArrayHeadSegments() && arrayValueInfo->HeadSegmentSym(); const bool removeHeadSegmentLength = kills.KillsArrayHeadSegmentLengths() && arrayValueInfo->HeadSegmentLengthSym(); const bool removeLength = kills.KillsArrayLengths() && arrayValueInfo->LengthSym(); if(removeHeadSegment || removeHeadSegmentLength || removeLength) { ChangeValueInfo( nullptr, value, arrayValueInfo->Copy(alloc, !removeHeadSegment, 
!removeHeadSegmentLength, !removeLength)); valueInfo = value->GetValueInfo(); } } } void GlobOpt::ProcessValueKills(BasicBlock *const block, GlobOptBlockData *const blockData) { Assert(block); Assert(blockData); ValueSet *const valuesToKillOnCalls = blockData->valuesToKillOnCalls; if(!IsLoopPrePass() && valuesToKillOnCalls->Count() == 0) { return; } // If the current block or loop has implicit calls, kill all definitely-array value types, as using that info will cause // implicit calls to be disabled, resulting in unnecessary bailouts const bool killValuesOnImplicitCalls = (block->loop ? !this->ImplicitCallFlagsAllowOpts(block->loop) : !this->ImplicitCallFlagsAllowOpts(func)); if (!killValuesOnImplicitCalls) { return; } if(IsLoopPrePass() && block->loop == rootLoopPrePass) { AnalysisAssert(rootLoopPrePass); for (Loop * loop = rootLoopPrePass; loop != nullptr; loop = loop->parent) { loop->jsArrayKills.SetKillsAllArrays(); } Assert(!rootLoopPrePass->parent || rootLoopPrePass->jsArrayKills.AreSubsetOf(rootLoopPrePass->parent->jsArrayKills)); if(valuesToKillOnCalls->Count() == 0) { return; } } for(auto it = valuesToKillOnCalls->GetIterator(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *const valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); if(valueInfo->IsArrayOrObjectWithArray()) { ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false); continue; } ChangeValueInfo( nullptr, value, valueInfo->AsArrayValueInfo()->Copy(alloc, true, false /* copyHeadSegmentLength */, true)); } valuesToKillOnCalls->Clear(); } void GlobOpt::ProcessValueKillsForLoopHeaderAfterBackEdgeMerge(BasicBlock *const block, GlobOptBlockData *const blockData) { Assert(block); Assert(block->isLoopHeader); Assert(blockData); ValueSet *const valuesToKillOnCalls = blockData->valuesToKillOnCalls; if(valuesToKillOnCalls->Count() == 0) { return; } const JsArrayKills loopKills(block->loop->jsArrayKills); for(auto it = valuesToKillOnCalls->GetIteratorWithRemovalSupport(); it.IsValid(); it.MoveNext()) { Value *const value = it.CurrentValue(); ValueInfo *valueInfo = value->GetValueInfo(); Assert( valueInfo->IsArrayOrObjectWithArray() || valueInfo->IsOptimizedTypedArray() && valueInfo->AsArrayValueInfo()->HeadSegmentLengthSym()); const bool isJsArray = valueInfo->IsArrayOrObjectWithArray(); Assert(!isJsArray == valueInfo->IsOptimizedTypedArray()); if(isJsArray ? loopKills.KillsValueType(valueInfo->Type()) : loopKills.KillsTypedArrayHeadSegmentLengths()) { // Hoisting array checks and other related things for this type is disabled for the loop due to the kill, as // compensation code is currently not added on back-edges. When merging values from a back-edge, the array value // type cannot be definite, as that may require adding compensation code on the back-edge if the optimization pass // chooses to not optimize the array. 
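            // Downgrade accordingly: JS array values fall back to their likely type, and typed array values just
            // lose the head segment length sym (handled below).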
if(isJsArray) { ChangeValueType(nullptr, value, valueInfo->Type().ToLikely(), false); } else { ChangeValueInfo( nullptr, value, valueInfo->AsArrayValueInfo()->Copy(alloc, true, false /* copyHeadSegmentLength */, true)); } it.RemoveCurrent(); continue; } if(!isJsArray || !valueInfo->IsArrayValueInfo()) { continue; } // Similarly, if the loop contains an operation that kills JS array segments, don't make the segment or other related // syms available initially inside the loop ArrayValueInfo *const arrayValueInfo = valueInfo->AsArrayValueInfo(); const bool removeHeadSegment = loopKills.KillsArrayHeadSegments() && arrayValueInfo->HeadSegmentSym(); const bool removeHeadSegmentLength = loopKills.KillsArrayHeadSegmentLengths() && arrayValueInfo->HeadSegmentLengthSym(); const bool removeLength = loopKills.KillsArrayLengths() && arrayValueInfo->LengthSym(); if(removeHeadSegment || removeHeadSegmentLength || removeLength) { ChangeValueInfo( nullptr, value, arrayValueInfo->Copy(alloc, !removeHeadSegment, !removeHeadSegmentLength, !removeLength)); valueInfo = value->GetValueInfo(); } } } bool GlobOpt::NeedBailOnImplicitCallForLiveValues(BasicBlock const * const block, const bool isForwardPass) const { if(isForwardPass) { return block->globOptData.valuesToKillOnCalls->Count() != 0; } if(block->noImplicitCallUses->IsEmpty()) { Assert(block->noImplicitCallNoMissingValuesUses->IsEmpty()); Assert(block->noImplicitCallNativeArrayUses->IsEmpty()); Assert(block->noImplicitCallJsArrayHeadSegmentSymUses->IsEmpty()); Assert(block->noImplicitCallArrayLengthSymUses->IsEmpty()); return false; } return true; } IR::Instr* GlobOpt::CreateBoundsCheckInstr(IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset, Func* func) { IR::Instr* instr = IR::Instr::New(Js::OpCode::BoundCheck, func); return AttachBoundsCheckData(instr, lowerBound, upperBound, offset); } IR::Instr* GlobOpt::CreateBoundsCheckInstr(IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset, IR::BailOutKind bailoutkind, BailOutInfo* bailoutInfo, Func * func) { IR::Instr* instr = IR::BailOutInstr::New(Js::OpCode::BoundCheck, bailoutkind, bailoutInfo, func); return AttachBoundsCheckData(instr, lowerBound, upperBound, offset); } IR::Instr* GlobOpt::AttachBoundsCheckData(IR::Instr* instr, IR::Opnd* lowerBound, IR::Opnd* upperBound, int offset) { instr->SetSrc1(lowerBound); instr->SetSrc2(upperBound); if (offset != 0) { instr->SetDst(IR::IntConstOpnd::New(offset, TyInt32, instr->m_func)); } return instr; } void GlobOpt::OptArraySrc(IR::Instr * *const instrRef) { Assert(instrRef); IR::Instr *&instr = *instrRef; Assert(instr); IR::Instr *baseOwnerInstr; IR::IndirOpnd *baseOwnerIndir; IR::RegOpnd *baseOpnd; bool isProfilableLdElem, isProfilableStElem; bool isLoad, isStore; bool needsHeadSegment, needsHeadSegmentLength, needsLength, needsBoundChecks; switch(instr->m_opcode) { // SIMD_JS case Js::OpCode::Simd128_LdArr_F4: case Js::OpCode::Simd128_LdArr_I4: // no type-spec for Asm.js if (this->GetIsAsmJSFunc()) { return; } // fall through case Js::OpCode::LdElemI_A: case Js::OpCode::LdMethodElem: if(!instr->GetSrc1()->IsIndirOpnd()) { return; } baseOwnerInstr = nullptr; baseOwnerIndir = instr->GetSrc1()->AsIndirOpnd(); baseOpnd = baseOwnerIndir->GetBaseOpnd(); isProfilableLdElem = instr->m_opcode == Js::OpCode::LdElemI_A; // LdMethodElem is currently not profiled isProfilableLdElem |= Js::IsSimd128Load(instr->m_opcode); needsBoundChecks = needsHeadSegmentLength = needsHeadSegment = isLoad = true; needsLength = isStore = isProfilableStElem = false; break; // SIMD_JS 
case Js::OpCode::Simd128_StArr_F4: case Js::OpCode::Simd128_StArr_I4: if (this->GetIsAsmJSFunc()) { return; } // fall through case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: case Js::OpCode::StElemC: if(!instr->GetDst()->IsIndirOpnd()) { return; } baseOwnerInstr = nullptr; baseOwnerIndir = instr->GetDst()->AsIndirOpnd(); baseOpnd = baseOwnerIndir->GetBaseOpnd(); needsBoundChecks = isProfilableStElem = instr->m_opcode != Js::OpCode::StElemC; isProfilableStElem |= Js::IsSimd128Store(instr->m_opcode); needsHeadSegmentLength = needsHeadSegment = isStore = true; needsLength = isLoad = isProfilableLdElem = false; break; case Js::OpCode::InlineArrayPush: case Js::OpCode::InlineArrayPop: { baseOwnerInstr = instr; baseOwnerIndir = nullptr; IR::Opnd * thisOpnd = instr->GetSrc1(); // Return if it not a LikelyArray or Object with Array - No point in doing array check elimination. if(!thisOpnd->IsRegOpnd() || !thisOpnd->GetValueType().IsLikelyArrayOrObjectWithArray()) { return; } baseOpnd = thisOpnd->AsRegOpnd(); isLoad = instr->m_opcode == Js::OpCode::InlineArrayPop; isStore = instr->m_opcode == Js::OpCode::InlineArrayPush; needsLength = needsHeadSegmentLength = needsHeadSegment = true; needsBoundChecks = isProfilableLdElem = isProfilableStElem = false; break; } case Js::OpCode::LdLen_A: if(!instr->GetSrc1()->IsRegOpnd()) { return; } baseOwnerInstr = instr; baseOwnerIndir = nullptr; baseOpnd = instr->GetSrc1()->AsRegOpnd(); if(baseOpnd->GetValueType().IsLikelyObject() && baseOpnd->GetValueType().GetObjectType() == ObjectType::ObjectWithArray) { return; } needsLength = true; needsBoundChecks = needsHeadSegmentLength = needsHeadSegment = isStore = isLoad = isProfilableStElem = isProfilableLdElem = false; break; default: return; } Assert(!(baseOwnerInstr && baseOwnerIndir)); Assert(!needsHeadSegmentLength || needsHeadSegment); if(baseOwnerIndir && !IsLoopPrePass()) { // Since this happens before type specialization, make sure that any necessary conversions are done, and that the index // is int-specialized if possible such that the const flags are correct. ToVarUses(instr, baseOwnerIndir, baseOwnerIndir == instr->GetDst(), nullptr); } if(isProfilableStElem && !IsLoopPrePass()) { // If the dead-store pass decides to add the bailout kind IR::BailOutInvalidatedArrayHeadSegment, and the fast path is // generated, it may bail out before the operation is done, so this would need to be a pre-op bailout. 
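        // A pre-op bailout resumes in the interpreter at this StElem itself rather than after it, so nothing is
        // lost if the fast path bails out before the store completes.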
if(instr->HasBailOutInfo()) { Assert( instr->GetByteCodeOffset() != Js::Constants::NoByteCodeOffset && instr->GetBailOutInfo()->bailOutOffset <= instr->GetByteCodeOffset()); const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); Assert( !(bailOutKind & ~IR::BailOutKindBits) || (bailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp); if(!(bailOutKind & ~IR::BailOutKindBits)) { instr->SetBailOutKind(bailOutKind + IR::BailOutOnImplicitCallsPreOp); } } else { GenerateBailAtOperation(&instr, IR::BailOutOnImplicitCallsPreOp); } } Value *const baseValue = CurrentBlockData()->FindValue(baseOpnd->m_sym); if(!baseValue) { return; } ValueInfo *baseValueInfo = baseValue->GetValueInfo(); ValueType baseValueType(baseValueInfo->Type()); baseOpnd->SetValueType(baseValueType); if(!baseValueType.IsLikelyAnyOptimizedArray() || !DoArrayCheckHoist(baseValueType, currentBlock->loop, instr) || (baseOwnerIndir && !ShouldExpectConventionalArrayIndexValue(baseOwnerIndir))) { return; } const bool isLikelyJsArray = !baseValueType.IsLikelyTypedArray(); Assert(isLikelyJsArray == baseValueType.IsLikelyArrayOrObjectWithArray()); Assert(!isLikelyJsArray == baseValueType.IsLikelyOptimizedTypedArray()); if(!isLikelyJsArray && instr->m_opcode == Js::OpCode::LdMethodElem) { // Fast path is not generated in this case since the subsequent call will throw return; } ValueType newBaseValueType(baseValueType.ToDefiniteObject()); if(isLikelyJsArray && newBaseValueType.HasNoMissingValues() && !DoArrayMissingValueCheckHoist()) { newBaseValueType = newBaseValueType.SetHasNoMissingValues(false); } Assert((newBaseValueType == baseValueType) == baseValueType.IsObject()); ArrayValueInfo *baseArrayValueInfo = nullptr; const auto UpdateValue = [&](StackSym *newHeadSegmentSym, StackSym *newHeadSegmentLengthSym, StackSym *newLengthSym) { Assert(baseValueType.GetObjectType() == newBaseValueType.GetObjectType()); Assert(newBaseValueType.IsObject()); Assert(baseValueType.IsLikelyArray() || !newLengthSym); if(!(newHeadSegmentSym || newHeadSegmentLengthSym || newLengthSym)) { // We're not adding new information to the value other than changing the value type. Preserve any existing // information and just change the value type. 
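            // For example, upgrading a LikelyArray base to a definite Array after the array check, while keeping
            // any segment or length syms already tracked (preserveSubclassInfo is true here).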
ChangeValueType(currentBlock, baseValue, newBaseValueType, true); return; } // Merge the new syms into the value while preserving any existing information, and change the value type if(baseArrayValueInfo) { if(!newHeadSegmentSym) { newHeadSegmentSym = baseArrayValueInfo->HeadSegmentSym(); } if(!newHeadSegmentLengthSym) { newHeadSegmentLengthSym = baseArrayValueInfo->HeadSegmentLengthSym(); } if(!newLengthSym) { newLengthSym = baseArrayValueInfo->LengthSym(); } Assert( !baseArrayValueInfo->HeadSegmentSym() || newHeadSegmentSym == baseArrayValueInfo->HeadSegmentSym()); Assert( !baseArrayValueInfo->HeadSegmentLengthSym() || newHeadSegmentLengthSym == baseArrayValueInfo->HeadSegmentLengthSym()); Assert(!baseArrayValueInfo->LengthSym() || newLengthSym == baseArrayValueInfo->LengthSym()); } ArrayValueInfo *const newBaseArrayValueInfo = ArrayValueInfo::New( alloc, newBaseValueType, newHeadSegmentSym, newHeadSegmentLengthSym, newLengthSym, baseValueInfo->GetSymStore()); ChangeValueInfo(currentBlock, baseValue, newBaseArrayValueInfo); }; if(IsLoopPrePass()) { if(newBaseValueType != baseValueType) { UpdateValue(nullptr, nullptr, nullptr); } // For javascript arrays and objects with javascript arrays: // - Implicit calls need to be disabled and calls cannot be allowed in the loop since the array vtable may be changed // into an ES5 array. // For typed arrays: // - A typed array's array buffer may be transferred to a web worker as part of an implicit call, in which case the // typed array's length is set to zero. Implicit calls need to be disabled if the typed array's head segment length // is going to be loaded and used later. // Since we don't know if the loop has kills after this instruction, the kill information may not be complete. If a kill // is found later, this information will be updated to not require disabling implicit calls. if(!( isLikelyJsArray ? rootLoopPrePass->jsArrayKills.KillsValueType(newBaseValueType) : rootLoopPrePass->jsArrayKills.KillsTypedArrayHeadSegmentLengths() )) { rootLoopPrePass->needImplicitCallBailoutChecksForJsArrayCheckHoist = true; } return; } if(baseValueInfo->IsArrayValueInfo()) { baseArrayValueInfo = baseValueInfo->AsArrayValueInfo(); } const bool doArrayChecks = !baseValueType.IsObject(); const bool doArraySegmentHoist = DoArraySegmentHoist(baseValueType) && instr->m_opcode != Js::OpCode::StElemC; const bool headSegmentIsAvailable = baseArrayValueInfo && baseArrayValueInfo->HeadSegmentSym(); const bool doHeadSegmentLoad = doArraySegmentHoist && needsHeadSegment && !headSegmentIsAvailable; const bool doArraySegmentLengthHoist = doArraySegmentHoist && (isLikelyJsArray || DoTypedArraySegmentLengthHoist(currentBlock->loop)); const bool headSegmentLengthIsAvailable = baseArrayValueInfo && baseArrayValueInfo->HeadSegmentLengthSym(); const bool doHeadSegmentLengthLoad = doArraySegmentLengthHoist && (needsHeadSegmentLength || (!isLikelyJsArray && needsLength)) && !headSegmentLengthIsAvailable; const bool lengthIsAvailable = baseArrayValueInfo && baseArrayValueInfo->LengthSym(); const bool doLengthLoad = DoArrayLengthHoist() && needsLength && !lengthIsAvailable && baseValueType.IsLikelyArray() && DoLdLenIntSpec(instr->m_opcode == Js::OpCode::LdLen_A ? instr : nullptr, baseValueType); StackSym *const newHeadSegmentSym = doHeadSegmentLoad ? StackSym::New(TyMachPtr, instr->m_func) : nullptr; StackSym *const newHeadSegmentLengthSym = doHeadSegmentLengthLoad ? StackSym::New(TyUint32, instr->m_func) : nullptr; StackSym *const newLengthSym = doLengthLoad ? 
StackSym::New(TyUint32, instr->m_func) : nullptr; bool canBailOutOnArrayAccessHelperCall; if (Js::IsSimd128LoadStore(instr->m_opcode)) { // SIMD_JS // simd load/store never call helper canBailOutOnArrayAccessHelperCall = true; } else { canBailOutOnArrayAccessHelperCall = (isProfilableLdElem || isProfilableStElem) && DoEliminateArrayAccessHelperCall() && !( instr->IsProfiledInstr() && ( isProfilableLdElem ? instr->AsProfiledInstr()->u.ldElemInfo->LikelyNeedsHelperCall() : instr->AsProfiledInstr()->u.stElemInfo->LikelyNeedsHelperCall() ) ); } bool doExtractBoundChecks = false, eliminatedLowerBoundCheck = false, eliminatedUpperBoundCheck = false; StackSym *indexVarSym = nullptr; Value *indexValue = nullptr; IntConstantBounds indexConstantBounds; Value *headSegmentLengthValue = nullptr; IntConstantBounds headSegmentLengthConstantBounds; #if ENABLE_FAST_ARRAYBUFFER if (baseValueType.IsLikelyOptimizedVirtualTypedArray() && !Js::IsSimd128LoadStore(instr->m_opcode) /*Always extract bounds for SIMD */) { if (isProfilableStElem || !instr->IsDstNotAlwaysConvertedToInt32() || ( (baseValueType.GetObjectType() == ObjectType::Float32VirtualArray || baseValueType.GetObjectType() == ObjectType::Float64VirtualArray) && !instr->IsDstNotAlwaysConvertedToNumber() ) ) { // Unless we're in asm.js (where it is guaranteed that virtual typed array accesses cannot read/write beyond 4GB), // check the range of the index to make sure we won't access beyond the reserved memory beforing eliminating bounds // checks in jitted code. if (!GetIsAsmJSFunc() && baseOwnerIndir) { IR::RegOpnd * idxOpnd = baseOwnerIndir->GetIndexOpnd(); if (idxOpnd) { StackSym * idxSym = idxOpnd->m_sym->IsTypeSpec() ? idxOpnd->m_sym->GetVarEquivSym(nullptr) : idxOpnd->m_sym; Value * idxValue = CurrentBlockData()->FindValue(idxSym); IntConstantBounds idxConstantBounds; if (idxValue && idxValue->GetValueInfo()->TryGetIntConstantBounds(&idxConstantBounds)) { BYTE indirScale = Lowerer::GetArrayIndirScale(baseValueType); int32 upperBound = idxConstantBounds.UpperBound(); int32 lowerBound = idxConstantBounds.LowerBound(); if (lowerBound >= 0 && ((static_cast<uint64>(upperBound) << indirScale) < MAX_ASMJS_ARRAYBUFFER_LENGTH)) { eliminatedLowerBoundCheck = true; eliminatedUpperBoundCheck = true; canBailOutOnArrayAccessHelperCall = false; } } } } else { if (!baseOwnerIndir) { Assert(instr->m_opcode == Js::OpCode::InlineArrayPush || instr->m_opcode == Js::OpCode::InlineArrayPop || instr->m_opcode == Js::OpCode::LdLen_A); } eliminatedLowerBoundCheck = true; eliminatedUpperBoundCheck = true; canBailOutOnArrayAccessHelperCall = false; } } } #endif if(needsBoundChecks && DoBoundCheckElimination()) { AnalysisAssert(baseOwnerIndir); Assert(needsHeadSegmentLength); // Bound checks can be separated from the instruction only if it can bail out instead of making a helper call when a // bound check fails. And only if it would bail out, can we use a bound check to eliminate redundant bound checks later // on that path. 
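        // For example, for "a[i] = v" this lets the check become a separate BoundCheck instruction (see
        // CreateBoundsCheckInstr above) that bails out on failure, after which later accesses against the same head
        // segment length can have their redundant checks eliminated.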
doExtractBoundChecks = (headSegmentLengthIsAvailable || doHeadSegmentLengthLoad) && canBailOutOnArrayAccessHelperCall; do { // Get the index value IR::RegOpnd *const indexOpnd = baseOwnerIndir->GetIndexOpnd(); if(indexOpnd) { StackSym *const indexSym = indexOpnd->m_sym; if(indexSym->IsTypeSpec()) { Assert(indexSym->IsInt32()); indexVarSym = indexSym->GetVarEquivSym(nullptr); Assert(indexVarSym); indexValue = CurrentBlockData()->FindValue(indexVarSym); Assert(indexValue); AssertVerify(indexValue->GetValueInfo()->TryGetIntConstantBounds(&indexConstantBounds)); Assert(indexOpnd->GetType() == TyInt32 || indexOpnd->GetType() == TyUint32); Assert( (indexOpnd->GetType() == TyUint32) == ValueInfo::IsGreaterThanOrEqualTo( indexValue, indexConstantBounds.LowerBound(), indexConstantBounds.UpperBound(), nullptr, 0, 0)); if(indexOpnd->GetType() == TyUint32) { eliminatedLowerBoundCheck = true; } } else { doExtractBoundChecks = false; // Bound check instruction operates only on int-specialized operands indexValue = CurrentBlockData()->FindValue(indexSym); if(!indexValue || !indexValue->GetValueInfo()->TryGetIntConstantBounds(&indexConstantBounds)) { break; } if(ValueInfo::IsGreaterThanOrEqualTo( indexValue, indexConstantBounds.LowerBound(), indexConstantBounds.UpperBound(), nullptr, 0, 0)) { eliminatedLowerBoundCheck = true; } } if(!eliminatedLowerBoundCheck && ValueInfo::IsLessThan( indexValue, indexConstantBounds.LowerBound(), indexConstantBounds.UpperBound(), nullptr, 0, 0)) { eliminatedUpperBoundCheck = true; doExtractBoundChecks = false; break; } } else { const int32 indexConstantValue = baseOwnerIndir->GetOffset(); if(indexConstantValue < 0) { eliminatedUpperBoundCheck = true; doExtractBoundChecks = false; break; } if(indexConstantValue == INT32_MAX) { eliminatedLowerBoundCheck = true; doExtractBoundChecks = false; break; } indexConstantBounds = IntConstantBounds(indexConstantValue, indexConstantValue); eliminatedLowerBoundCheck = true; } if(!headSegmentLengthIsAvailable) { break; } headSegmentLengthValue = CurrentBlockData()->FindValue(baseArrayValueInfo->HeadSegmentLengthSym()); if(!headSegmentLengthValue) { if(doExtractBoundChecks) { headSegmentLengthConstantBounds = IntConstantBounds(0, Js::SparseArraySegmentBase::MaxLength); } break; } AssertVerify(headSegmentLengthValue->GetValueInfo()->TryGetIntConstantBounds(&headSegmentLengthConstantBounds)); if (ValueInfo::IsLessThanOrEqualTo( indexValue, indexConstantBounds.LowerBound(), indexConstantBounds.UpperBound(), headSegmentLengthValue, headSegmentLengthConstantBounds.LowerBound(), headSegmentLengthConstantBounds.UpperBound(), GetBoundCheckOffsetForSimd(newBaseValueType, instr, -1) )) { eliminatedUpperBoundCheck = true; if(eliminatedLowerBoundCheck) { doExtractBoundChecks = false; } } } while(false); } if(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad || doLengthLoad || doExtractBoundChecks) { // Find the loops out of which array checks and head segment loads need to be hoisted Loop *hoistChecksOutOfLoop = nullptr; Loop *hoistHeadSegmentLoadOutOfLoop = nullptr; Loop *hoistHeadSegmentLengthLoadOutOfLoop = nullptr; Loop *hoistLengthLoadOutOfLoop = nullptr; if(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad || doLengthLoad) { for(Loop *loop = currentBlock->loop; loop; loop = loop->parent) { const JsArrayKills loopKills(loop->jsArrayKills); Value *baseValueInLoopLandingPad = nullptr; if((isLikelyJsArray && loopKills.KillsValueType(newBaseValueType)) || !OptIsInvariant(baseOpnd->m_sym, currentBlock, loop, baseValue, true, 
true, &baseValueInLoopLandingPad) || !(doArrayChecks || baseValueInLoopLandingPad->GetValueInfo()->IsObject())) { break; } // The value types should be the same, except: // - The value type in the landing pad is a type that can merge to a specific object type. Typically, these // cases will use BailOnNoProfile, but that can be disabled due to excessive bailouts. Those value types // merge aggressively to the other side's object type, so the value type may have started off as // Uninitialized, [Likely]Undefined|Null, [Likely]UninitializedObject, etc., and changed in the loop to an // array type during a prepass. // - StElems in the loop can kill the no-missing-values info. // - The native array type may be made more conservative based on profile data by an instruction in the loop. #if DBG if (!baseValueInLoopLandingPad->GetValueInfo()->CanMergeToSpecificObjectType()) { ValueType landingPadValueType = baseValueInLoopLandingPad->GetValueInfo()->Type(); Assert(landingPadValueType.IsSimilar(baseValueType) || ( landingPadValueType.IsLikelyNativeArray() && landingPadValueType.Merge(baseValueType).IsSimilar(baseValueType) ) ); } #endif if(doArrayChecks) { hoistChecksOutOfLoop = loop; } if(isLikelyJsArray && loopKills.KillsArrayHeadSegments()) { Assert(loopKills.KillsArrayHeadSegmentLengths()); if(!(doArrayChecks || doLengthLoad)) { break; } } else { if(doHeadSegmentLoad || headSegmentIsAvailable) { // If the head segment is already available, we may need to rehoist the value including other // information. So, need to track the loop out of which the head segment length can be hoisted even if // the head segment length is not being loaded here. hoistHeadSegmentLoadOutOfLoop = loop; } if(isLikelyJsArray ? loopKills.KillsArrayHeadSegmentLengths() : loopKills.KillsTypedArrayHeadSegmentLengths()) { if(!(doArrayChecks || doHeadSegmentLoad || doLengthLoad)) { break; } } else if(doHeadSegmentLengthLoad || headSegmentLengthIsAvailable) { // If the head segment length is already available, we may need to rehoist the value including other // information. So, need to track the loop out of which the head segment length can be hoisted even if // the head segment length is not being loaded here. hoistHeadSegmentLengthLoadOutOfLoop = loop; } } if(isLikelyJsArray && loopKills.KillsArrayLengths()) { if(!(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad)) { break; } } else if(doLengthLoad || lengthIsAvailable) { // If the length is already available, we may need to rehoist the value including other information. So, // need to track the loop out of which the head segment length can be hoisted even if the length is not // being loaded here. 
hoistLengthLoadOutOfLoop = loop; } } } IR::Instr *insertBeforeInstr = instr->GetInsertBeforeByteCodeUsesInstr(); const auto InsertInstrInLandingPad = [&](IR::Instr *const instr, Loop *const hoistOutOfLoop) { if(hoistOutOfLoop->bailOutInfo->bailOutInstr) { instr->SetByteCodeOffset(hoistOutOfLoop->bailOutInfo->bailOutInstr); hoistOutOfLoop->bailOutInfo->bailOutInstr->InsertBefore(instr); } else { instr->SetByteCodeOffset(hoistOutOfLoop->landingPad->GetLastInstr()); hoistOutOfLoop->landingPad->InsertAfter(instr); } }; BailOutInfo *shareableBailOutInfo = nullptr; IR::Instr *shareableBailOutInfoOriginalOwner = nullptr; const auto ShareBailOut = [&]() { Assert(shareableBailOutInfo); if(shareableBailOutInfo->bailOutInstr != shareableBailOutInfoOriginalOwner) { return; } Assert(shareableBailOutInfoOriginalOwner->GetBailOutInfo() == shareableBailOutInfo); IR::Instr *const sharedBailOut = shareableBailOutInfoOriginalOwner->ShareBailOut(); Assert(sharedBailOut->GetBailOutInfo() == shareableBailOutInfo); shareableBailOutInfoOriginalOwner = nullptr; sharedBailOut->Unlink(); insertBeforeInstr->InsertBefore(sharedBailOut); insertBeforeInstr = sharedBailOut; }; if(doArrayChecks) { TRACE_TESTTRACE_PHASE_INSTR(Js::ArrayCheckHoistPhase, instr, _u("Separating array checks with bailout\n")); IR::Instr *bailOnNotArray = IR::Instr::New(Js::OpCode::BailOnNotArray, instr->m_func); bailOnNotArray->SetSrc1(baseOpnd); bailOnNotArray->GetSrc1()->SetIsJITOptimizedReg(true); const IR::BailOutKind bailOutKind = newBaseValueType.IsLikelyNativeArray() ? IR::BailOutOnNotNativeArray : IR::BailOutOnNotArray; if(hoistChecksOutOfLoop) { Assert(!(isLikelyJsArray && hoistChecksOutOfLoop->jsArrayKills.KillsValueType(newBaseValueType))); TRACE_PHASE_INSTR( Js::ArrayCheckHoistPhase, instr, _u("Hoisting array checks with bailout out of loop %u to landing pad block %u\n"), hoistChecksOutOfLoop->GetLoopNumber(), hoistChecksOutOfLoop->landingPad->GetBlockNum()); TESTTRACE_PHASE_INSTR(Js::ArrayCheckHoistPhase, instr, _u("Hoisting array checks with bailout out of loop\n")); Assert(hoistChecksOutOfLoop->bailOutInfo); EnsureBailTarget(hoistChecksOutOfLoop); InsertInstrInLandingPad(bailOnNotArray, hoistChecksOutOfLoop); bailOnNotArray = bailOnNotArray->ConvertToBailOutInstr(hoistChecksOutOfLoop->bailOutInfo, bailOutKind); } else { bailOnNotArray->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(bailOnNotArray); GenerateBailAtOperation(&bailOnNotArray, bailOutKind); shareableBailOutInfo = bailOnNotArray->GetBailOutInfo(); shareableBailOutInfoOriginalOwner = bailOnNotArray; } baseValueType = newBaseValueType; baseOpnd->SetValueType(newBaseValueType); } if(doLengthLoad) { Assert(baseValueType.IsArray()); Assert(newLengthSym); TRACE_TESTTRACE_PHASE_INSTR(Js::Phase::ArrayLengthHoistPhase, instr, _u("Separating array length load\n")); // Create an initial value for the length CurrentBlockData()->liveVarSyms->Set(newLengthSym->m_id); Value *const lengthValue = NewIntRangeValue(0, INT32_MAX, false); CurrentBlockData()->SetValue(lengthValue, newLengthSym); // SetValue above would have set the sym store to newLengthSym. This sym won't be used for copy-prop though, so // remove it as the sym store. 
this->SetSymStoreDirect(lengthValue->GetValueInfo(), nullptr); // length = [array + offsetOf(length)] IR::Instr *const loadLength = IR::Instr::New( Js::OpCode::LdIndir, IR::RegOpnd::New(newLengthSym, newLengthSym->GetType(), instr->m_func), IR::IndirOpnd::New( baseOpnd, Js::JavascriptArray::GetOffsetOfLength(), newLengthSym->GetType(), instr->m_func), instr->m_func); loadLength->GetDst()->SetIsJITOptimizedReg(true); loadLength->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->SetIsJITOptimizedReg(true); // BailOnNegative length (BailOutOnIrregularLength) IR::Instr *bailOnIrregularLength = IR::Instr::New(Js::OpCode::BailOnNegative, instr->m_func); bailOnIrregularLength->SetSrc1(loadLength->GetDst()); const IR::BailOutKind bailOutKind = IR::BailOutOnIrregularLength; if(hoistLengthLoadOutOfLoop) { Assert(!hoistLengthLoadOutOfLoop->jsArrayKills.KillsArrayLengths()); TRACE_PHASE_INSTR( Js::Phase::ArrayLengthHoistPhase, instr, _u("Hoisting array length load out of loop %u to landing pad block %u\n"), hoistLengthLoadOutOfLoop->GetLoopNumber(), hoistLengthLoadOutOfLoop->landingPad->GetBlockNum()); TESTTRACE_PHASE_INSTR(Js::Phase::ArrayLengthHoistPhase, instr, _u("Hoisting array length load out of loop\n")); Assert(hoistLengthLoadOutOfLoop->bailOutInfo); EnsureBailTarget(hoistLengthLoadOutOfLoop); InsertInstrInLandingPad(loadLength, hoistLengthLoadOutOfLoop); InsertInstrInLandingPad(bailOnIrregularLength, hoistLengthLoadOutOfLoop); bailOnIrregularLength = bailOnIrregularLength->ConvertToBailOutInstr(hoistLengthLoadOutOfLoop->bailOutInfo, bailOutKind); // Hoist the length value for(InvariantBlockBackwardIterator it( this, currentBlock, hoistLengthLoadOutOfLoop->landingPad, baseOpnd->m_sym, baseValue->GetValueNumber()); it.IsValid(); it.MoveNext()) { BasicBlock *const block = it.Block(); block->globOptData.liveVarSyms->Set(newLengthSym->m_id); Assert(!block->globOptData.FindValue(newLengthSym)); Value *const lengthValueCopy = CopyValue(lengthValue, lengthValue->GetValueNumber()); block->globOptData.SetValue(lengthValueCopy, newLengthSym); this->SetSymStoreDirect(lengthValueCopy->GetValueInfo(), nullptr); } } else { loadLength->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(loadLength); bailOnIrregularLength->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(bailOnIrregularLength); if(shareableBailOutInfo) { ShareBailOut(); bailOnIrregularLength = bailOnIrregularLength->ConvertToBailOutInstr(shareableBailOutInfo, bailOutKind); } else { GenerateBailAtOperation(&bailOnIrregularLength, bailOutKind); shareableBailOutInfo = bailOnIrregularLength->GetBailOutInfo(); shareableBailOutInfoOriginalOwner = bailOnIrregularLength; } } } const auto InsertHeadSegmentLoad = [&]() { TRACE_TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Separating array segment load\n")); Assert(newHeadSegmentSym); IR::RegOpnd *const headSegmentOpnd = IR::RegOpnd::New(newHeadSegmentSym, newHeadSegmentSym->GetType(), instr->m_func); headSegmentOpnd->SetIsJITOptimizedReg(true); IR::RegOpnd *const jitOptimizedBaseOpnd = baseOpnd->Copy(instr->m_func)->AsRegOpnd(); jitOptimizedBaseOpnd->SetIsJITOptimizedReg(true); IR::Instr *loadObjectArray; if(baseValueType.GetObjectType() == ObjectType::ObjectWithArray) { loadObjectArray = IR::Instr::New( Js::OpCode::LdIndir, headSegmentOpnd, IR::IndirOpnd::New( jitOptimizedBaseOpnd, Js::DynamicObject::GetOffsetOfObjectArray(), jitOptimizedBaseOpnd->GetType(), instr->m_func), instr->m_func); } else { loadObjectArray = nullptr; } IR::Instr *const loadHeadSegment = IR::Instr::New( 
Js::OpCode::LdIndir, headSegmentOpnd, IR::IndirOpnd::New( loadObjectArray ? headSegmentOpnd : jitOptimizedBaseOpnd, Lowerer::GetArrayOffsetOfHeadSegment(baseValueType), headSegmentOpnd->GetType(), instr->m_func), instr->m_func); if(hoistHeadSegmentLoadOutOfLoop) { Assert(!(isLikelyJsArray && hoistHeadSegmentLoadOutOfLoop->jsArrayKills.KillsArrayHeadSegments())); TRACE_PHASE_INSTR( Js::ArraySegmentHoistPhase, instr, _u("Hoisting array segment load out of loop %u to landing pad block %u\n"), hoistHeadSegmentLoadOutOfLoop->GetLoopNumber(), hoistHeadSegmentLoadOutOfLoop->landingPad->GetBlockNum()); TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Hoisting array segment load out of loop\n")); if(loadObjectArray) { InsertInstrInLandingPad(loadObjectArray, hoistHeadSegmentLoadOutOfLoop); } InsertInstrInLandingPad(loadHeadSegment, hoistHeadSegmentLoadOutOfLoop); } else { if(loadObjectArray) { loadObjectArray->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(loadObjectArray); } loadHeadSegment->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(loadHeadSegment); instr->loadedArrayHeadSegment = true; } }; if(doHeadSegmentLoad && isLikelyJsArray) { // For javascript arrays, the head segment is required to load the head segment length InsertHeadSegmentLoad(); } if(doHeadSegmentLengthLoad) { Assert(!isLikelyJsArray || newHeadSegmentSym || baseArrayValueInfo && baseArrayValueInfo->HeadSegmentSym()); Assert(newHeadSegmentLengthSym); Assert(!headSegmentLengthValue); TRACE_TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Separating array segment length load\n")); // Create an initial value for the head segment length CurrentBlockData()->liveVarSyms->Set(newHeadSegmentLengthSym->m_id); headSegmentLengthValue = NewIntRangeValue(0, Js::SparseArraySegmentBase::MaxLength, false); headSegmentLengthConstantBounds = IntConstantBounds(0, Js::SparseArraySegmentBase::MaxLength); CurrentBlockData()->SetValue(headSegmentLengthValue, newHeadSegmentLengthSym); // SetValue above would have set the sym store to newHeadSegmentLengthSym. This sym won't be used for copy-prop // though, so remove it as the sym store. this->SetSymStoreDirect(headSegmentLengthValue->GetValueInfo(), nullptr); StackSym *const headSegmentSym = isLikelyJsArray ? newHeadSegmentSym ? newHeadSegmentSym : baseArrayValueInfo->HeadSegmentSym() : nullptr; IR::Instr *const loadHeadSegmentLength = IR::Instr::New( Js::OpCode::LdIndir, IR::RegOpnd::New(newHeadSegmentLengthSym, newHeadSegmentLengthSym->GetType(), instr->m_func), IR::IndirOpnd::New( isLikelyJsArray ? IR::RegOpnd::New(headSegmentSym, headSegmentSym->GetType(), instr->m_func) : baseOpnd, isLikelyJsArray ? Js::SparseArraySegmentBase::GetOffsetOfLength() : Lowerer::GetArrayOffsetOfLength(baseValueType), newHeadSegmentLengthSym->GetType(), instr->m_func), instr->m_func); loadHeadSegmentLength->GetDst()->SetIsJITOptimizedReg(true); loadHeadSegmentLength->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->SetIsJITOptimizedReg(true); // We don't check the head segment length for negative (very large uint32) values. For JS arrays, the bound checks // cover that. For typed arrays, we currently don't allocate array buffers with more than 1 GB elements. if(hoistHeadSegmentLengthLoadOutOfLoop) { Assert( !( isLikelyJsArray ? 
hoistHeadSegmentLengthLoadOutOfLoop->jsArrayKills.KillsArrayHeadSegmentLengths() : hoistHeadSegmentLengthLoadOutOfLoop->jsArrayKills.KillsTypedArrayHeadSegmentLengths() )); TRACE_PHASE_INSTR( Js::ArraySegmentHoistPhase, instr, _u("Hoisting array segment length load out of loop %u to landing pad block %u\n"), hoistHeadSegmentLengthLoadOutOfLoop->GetLoopNumber(), hoistHeadSegmentLengthLoadOutOfLoop->landingPad->GetBlockNum()); TESTTRACE_PHASE_INSTR(Js::ArraySegmentHoistPhase, instr, _u("Hoisting array segment length load out of loop\n")); InsertInstrInLandingPad(loadHeadSegmentLength, hoistHeadSegmentLengthLoadOutOfLoop); // Hoist the head segment length value for(InvariantBlockBackwardIterator it( this, currentBlock, hoistHeadSegmentLengthLoadOutOfLoop->landingPad, baseOpnd->m_sym, baseValue->GetValueNumber()); it.IsValid(); it.MoveNext()) { BasicBlock *const block = it.Block(); block->globOptData.liveVarSyms->Set(newHeadSegmentLengthSym->m_id); Assert(!block->globOptData.FindValue(newHeadSegmentLengthSym)); Value *const headSegmentLengthValueCopy = CopyValue(headSegmentLengthValue, headSegmentLengthValue->GetValueNumber()); block->globOptData.SetValue(headSegmentLengthValueCopy, newHeadSegmentLengthSym); this->SetSymStoreDirect(headSegmentLengthValueCopy->GetValueInfo(), nullptr); } } else { loadHeadSegmentLength->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(loadHeadSegmentLength); instr->loadedArrayHeadSegmentLength = true; } } if(doExtractBoundChecks) { Assert(!(eliminatedLowerBoundCheck && eliminatedUpperBoundCheck)); Assert(baseOwnerIndir); Assert(!baseOwnerIndir->GetIndexOpnd() || baseOwnerIndir->GetIndexOpnd()->m_sym->IsTypeSpec()); Assert(doHeadSegmentLengthLoad || headSegmentLengthIsAvailable); Assert(canBailOutOnArrayAccessHelperCall); Assert(!isStore || instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict || Js::IsSimd128LoadStore(instr->m_opcode)); StackSym *const headSegmentLengthSym = headSegmentLengthIsAvailable ? 
baseArrayValueInfo->HeadSegmentLengthSym() : newHeadSegmentLengthSym; Assert(headSegmentLengthSym); Assert(headSegmentLengthValue); ArrayLowerBoundCheckHoistInfo lowerBoundCheckHoistInfo; ArrayUpperBoundCheckHoistInfo upperBoundCheckHoistInfo; bool failedToUpdateCompatibleLowerBoundCheck = false, failedToUpdateCompatibleUpperBoundCheck = false; if(DoBoundCheckHoist()) { if(indexVarSym) { TRACE_PHASE_INSTR_VERBOSE( Js::Phase::BoundCheckHoistPhase, instr, _u("Determining array bound check hoistability for index s%u\n"), indexVarSym->m_id); } else { TRACE_PHASE_INSTR_VERBOSE( Js::Phase::BoundCheckHoistPhase, instr, _u("Determining array bound check hoistability for index %d\n"), indexConstantBounds.LowerBound()); } DetermineArrayBoundCheckHoistability( !eliminatedLowerBoundCheck, !eliminatedUpperBoundCheck, lowerBoundCheckHoistInfo, upperBoundCheckHoistInfo, isLikelyJsArray, indexVarSym, indexValue, indexConstantBounds, headSegmentLengthSym, headSegmentLengthValue, headSegmentLengthConstantBounds, hoistHeadSegmentLengthLoadOutOfLoop, failedToUpdateCompatibleLowerBoundCheck, failedToUpdateCompatibleUpperBoundCheck); #ifdef ENABLE_SIMDJS // SIMD_JS UpdateBoundCheckHoistInfoForSimd(upperBoundCheckHoistInfo, newBaseValueType, instr); #endif } if(!eliminatedLowerBoundCheck) { eliminatedLowerBoundCheck = true; Assert(indexVarSym); Assert(baseOwnerIndir->GetIndexOpnd()); Assert(indexValue); ArrayLowerBoundCheckHoistInfo &hoistInfo = lowerBoundCheckHoistInfo; if(hoistInfo.HasAnyInfo()) { BasicBlock *hoistBlock; if(hoistInfo.CompatibleBoundCheckBlock()) { hoistBlock = hoistInfo.CompatibleBoundCheckBlock(); TRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array lower bound check into existing bound check instruction in block %u\n"), hoistBlock->GetBlockNum()); TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array lower bound check into existing bound check instruction\n")); } else { Assert(hoistInfo.Loop()); BasicBlock *const landingPad = hoistInfo.Loop()->landingPad; hoistBlock = landingPad; StackSym *indexIntSym; if(hoistInfo.IndexSym() && hoistInfo.IndexSym()->IsVar()) { if(!landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym())) { // Int-specialize the index sym, as the BoundCheck instruction requires int operands. Specialize // it in this block if it is invariant, as the conversion will be hoisted along with value // updates. BasicBlock *specializationBlock = hoistInfo.Loop()->landingPad; IR::Instr *specializeBeforeInstr = nullptr; if(!CurrentBlockData()->IsInt32TypeSpecialized(hoistInfo.IndexSym()) && OptIsInvariant( hoistInfo.IndexSym(), currentBlock, hoistInfo.Loop(), CurrentBlockData()->FindValue(hoistInfo.IndexSym()), false, true)) { specializationBlock = currentBlock; specializeBeforeInstr = insertBeforeInstr; } Assert(tempBv->IsEmpty()); tempBv->Set(hoistInfo.IndexSym()->m_id); ToInt32(tempBv, specializationBlock, false, specializeBeforeInstr); tempBv->ClearAll(); Assert(landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym())); } indexIntSym = hoistInfo.IndexSym()->GetInt32EquivSym(nullptr); Assert(indexIntSym); } else { indexIntSym = hoistInfo.IndexSym(); Assert(!indexIntSym || indexIntSym->GetType() == TyInt32 || indexIntSym->GetType() == TyUint32); } // The info in the landing pad may be better than the info in the current block due to changes made to // the index sym inside the loop. Check if the bound check we intend to hoist is unnecessary in the // landing pad. 
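                        // The lower bound check being hoisted has the form (0 <= indexSym + offset); the nullptr left-hand
                        // value with constant bounds (0, 0) passed below stands in for the constant zero. If that relation
                        // already holds for the index value in the landing pad, emitting the hoisted check would be redundant.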
if(!ValueInfo::IsLessThanOrEqualTo( nullptr, 0, 0, hoistInfo.IndexValue(), hoistInfo.IndexConstantBounds().LowerBound(), hoistInfo.IndexConstantBounds().UpperBound(), hoistInfo.Offset())) { Assert(hoistInfo.IndexSym()); Assert(hoistInfo.Loop()->bailOutInfo); EnsureBailTarget(hoistInfo.Loop()); if(hoistInfo.LoopCount()) { // Generate the loop count and loop count based bound that will be used for the bound check if(!hoistInfo.LoopCount()->HasBeenGenerated()) { GenerateLoopCount(hoistInfo.Loop(), hoistInfo.LoopCount()); } GenerateSecondaryInductionVariableBound( hoistInfo.Loop(), indexVarSym->GetInt32EquivSym(nullptr), hoistInfo.LoopCount(), hoistInfo.MaxMagnitudeChange(), hoistInfo.IndexSym()); } IR::Opnd* lowerBound = IR::IntConstOpnd::New(0, TyInt32, instr->m_func, true); IR::Opnd* upperBound = IR::RegOpnd::New(indexIntSym, TyInt32, instr->m_func); upperBound->SetIsJITOptimizedReg(true); // 0 <= indexSym + offset (src1 <= src2 + dst) IR::Instr *const boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, hoistInfo.Offset(), hoistInfo.IsLoopCountBasedBound() ? IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck : IR::BailOutOnFailedHoistedBoundCheck, hoistInfo.Loop()->bailOutInfo, hoistInfo.Loop()->bailOutInfo->bailOutFunc); InsertInstrInLandingPad(boundCheck, hoistInfo.Loop()); TRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array lower bound check out of loop %u to landing pad block %u, as (0 <= s%u + %d)\n"), hoistInfo.Loop()->GetLoopNumber(), landingPad->GetBlockNum(), hoistInfo.IndexSym()->m_id, hoistInfo.Offset()); TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array lower bound check out of loop\n")); // Record the bound check instruction as available const IntBoundCheck boundCheckInfo( ZeroValueNumber, hoistInfo.IndexValueNumber(), boundCheck, landingPad); { const bool added = CurrentBlockData()->availableIntBoundChecks->AddNew(boundCheckInfo) >= 0; Assert(added || failedToUpdateCompatibleLowerBoundCheck); } for(InvariantBlockBackwardIterator it(this, currentBlock, landingPad, nullptr); it.IsValid(); it.MoveNext()) { const bool added = it.Block()->globOptData.availableIntBoundChecks->AddNew(boundCheckInfo) >= 0; Assert(added || failedToUpdateCompatibleLowerBoundCheck); } } } // Update values of the syms involved in the bound check to reflect the bound check if(hoistBlock != currentBlock && hoistInfo.IndexSym() && hoistInfo.Offset() != INT32_MIN) { for(InvariantBlockBackwardIterator it( this, currentBlock->next, hoistBlock, hoistInfo.IndexSym(), hoistInfo.IndexValueNumber()); it.IsValid(); it.MoveNext()) { Value *const value = it.InvariantSymValue(); IntConstantBounds constantBounds; AssertVerify(value->GetValueInfo()->TryGetIntConstantBounds(&constantBounds, true)); ValueInfo *const newValueInfo = UpdateIntBoundsForGreaterThanOrEqual( value, constantBounds, nullptr, IntConstantBounds(-hoistInfo.Offset(), -hoistInfo.Offset()), false); if(newValueInfo) { ChangeValueInfo(nullptr, value, newValueInfo); if(it.Block() == currentBlock && value == indexValue) { AssertVerify(newValueInfo->TryGetIntConstantBounds(&indexConstantBounds)); } } } } } else { IR::Opnd* lowerBound = IR::IntConstOpnd::New(0, TyInt32, instr->m_func, true); IR::Opnd* upperBound = baseOwnerIndir->GetIndexOpnd(); upperBound->SetIsJITOptimizedReg(true); const int offset = 0; IR::Instr *boundCheck; if(shareableBailOutInfo) { ShareBailOut(); boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, offset, IR::BailOutOnArrayAccessHelperCall, shareableBailOutInfo, 
shareableBailOutInfo->bailOutFunc); } else { boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, offset, instr->m_func); } boundCheck->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(boundCheck); if(!shareableBailOutInfo) { GenerateBailAtOperation(&boundCheck, IR::BailOutOnArrayAccessHelperCall); shareableBailOutInfo = boundCheck->GetBailOutInfo(); shareableBailOutInfoOriginalOwner = boundCheck; } TRACE_PHASE_INSTR( Js::Phase::BoundCheckEliminationPhase, instr, _u("Separating array lower bound check, as (0 <= s%u)\n"), indexVarSym->m_id); TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckEliminationPhase, instr, _u("Separating array lower bound check\n")); if(DoBoundCheckHoist()) { // Record the bound check instruction as available const bool added = CurrentBlockData()->availableIntBoundChecks->AddNew( IntBoundCheck(ZeroValueNumber, indexValue->GetValueNumber(), boundCheck, currentBlock)) >= 0; Assert(added || failedToUpdateCompatibleLowerBoundCheck); } } // Update the index value to reflect the bound check ValueInfo *const newValueInfo = UpdateIntBoundsForGreaterThanOrEqual( indexValue, indexConstantBounds, nullptr, IntConstantBounds(0, 0), false); if(newValueInfo) { ChangeValueInfo(nullptr, indexValue, newValueInfo); AssertVerify(newValueInfo->TryGetIntConstantBounds(&indexConstantBounds)); } } if(!eliminatedUpperBoundCheck) { eliminatedUpperBoundCheck = true; ArrayUpperBoundCheckHoistInfo &hoistInfo = upperBoundCheckHoistInfo; if(hoistInfo.HasAnyInfo()) { BasicBlock *hoistBlock; if(hoistInfo.CompatibleBoundCheckBlock()) { hoistBlock = hoistInfo.CompatibleBoundCheckBlock(); TRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array upper bound check into existing bound check instruction in block %u\n"), hoistBlock->GetBlockNum()); TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array upper bound check into existing bound check instruction\n")); } else { Assert(hoistInfo.Loop()); BasicBlock *const landingPad = hoistInfo.Loop()->landingPad; hoistBlock = landingPad; StackSym *indexIntSym; if(hoistInfo.IndexSym() && hoistInfo.IndexSym()->IsVar()) { if(!landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym())) { // Int-specialize the index sym, as the BoundCheck instruction requires int operands. Specialize it // in this block if it is invariant, as the conversion will be hoisted along with value updates. BasicBlock *specializationBlock = hoistInfo.Loop()->landingPad; IR::Instr *specializeBeforeInstr = nullptr; if(!CurrentBlockData()->IsInt32TypeSpecialized(hoistInfo.IndexSym()) && OptIsInvariant( hoistInfo.IndexSym(), currentBlock, hoistInfo.Loop(), CurrentBlockData()->FindValue(hoistInfo.IndexSym()), false, true)) { specializationBlock = currentBlock; specializeBeforeInstr = insertBeforeInstr; } Assert(tempBv->IsEmpty()); tempBv->Set(hoistInfo.IndexSym()->m_id); ToInt32(tempBv, specializationBlock, false, specializeBeforeInstr); tempBv->ClearAll(); Assert(landingPad->globOptData.IsInt32TypeSpecialized(hoistInfo.IndexSym())); } indexIntSym = hoistInfo.IndexSym()->GetInt32EquivSym(nullptr); Assert(indexIntSym); } else { indexIntSym = hoistInfo.IndexSym(); Assert(!indexIntSym || indexIntSym->GetType() == TyInt32 || indexIntSym->GetType() == TyUint32); } // The info in the landing pad may be better than the info in the current block due to changes made to the // index sym inside the loop. Check if the bound check we intend to hoist is unnecessary in the landing pad. 
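                        // The upper bound check being hoisted has the form (indexSym <= headSegmentLength + offset); if that
                        // relation is already provable from the values in the landing pad, emitting the hoisted check would be
                        // redundant.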
if(!ValueInfo::IsLessThanOrEqualTo( hoistInfo.IndexValue(), hoistInfo.IndexConstantBounds().LowerBound(), hoistInfo.IndexConstantBounds().UpperBound(), hoistInfo.HeadSegmentLengthValue(), hoistInfo.HeadSegmentLengthConstantBounds().LowerBound(), hoistInfo.HeadSegmentLengthConstantBounds().UpperBound(), hoistInfo.Offset())) { Assert(hoistInfo.Loop()->bailOutInfo); EnsureBailTarget(hoistInfo.Loop()); if(hoistInfo.LoopCount()) { // Generate the loop count and loop count based bound that will be used for the bound check if(!hoistInfo.LoopCount()->HasBeenGenerated()) { GenerateLoopCount(hoistInfo.Loop(), hoistInfo.LoopCount()); } GenerateSecondaryInductionVariableBound( hoistInfo.Loop(), indexVarSym->GetInt32EquivSym(nullptr), hoistInfo.LoopCount(), hoistInfo.MaxMagnitudeChange(), hoistInfo.IndexSym()); } IR::Opnd* lowerBound = indexIntSym ? static_cast<IR::Opnd *>(IR::RegOpnd::New(indexIntSym, TyInt32, instr->m_func)) : IR::IntConstOpnd::New( hoistInfo.IndexConstantBounds().LowerBound(), TyInt32, instr->m_func); lowerBound->SetIsJITOptimizedReg(true); IR::Opnd* upperBound = IR::RegOpnd::New(headSegmentLengthSym, headSegmentLengthSym->GetType(), instr->m_func); upperBound->SetIsJITOptimizedReg(true); // indexSym <= headSegmentLength + offset (src1 <= src2 + dst) IR::Instr *const boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, hoistInfo.Offset(), hoistInfo.IsLoopCountBasedBound() ? IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck : IR::BailOutOnFailedHoistedBoundCheck, hoistInfo.Loop()->bailOutInfo, hoistInfo.Loop()->bailOutInfo->bailOutFunc); InsertInstrInLandingPad(boundCheck, hoistInfo.Loop()); if(indexIntSym) { TRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array upper bound check out of loop %u to landing pad block %u, as (s%u <= s%u + %d)\n"), hoistInfo.Loop()->GetLoopNumber(), landingPad->GetBlockNum(), hoistInfo.IndexSym()->m_id, headSegmentLengthSym->m_id, hoistInfo.Offset()); } else { TRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array upper bound check out of loop %u to landing pad block %u, as (%d <= s%u + %d)\n"), hoistInfo.Loop()->GetLoopNumber(), landingPad->GetBlockNum(), hoistInfo.IndexConstantBounds().LowerBound(), headSegmentLengthSym->m_id, hoistInfo.Offset()); } TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckHoistPhase, instr, _u("Hoisting array upper bound check out of loop\n")); // Record the bound check instruction as available const IntBoundCheck boundCheckInfo( hoistInfo.IndexValue() ? 
hoistInfo.IndexValueNumber() : ZeroValueNumber, hoistInfo.HeadSegmentLengthValue()->GetValueNumber(), boundCheck, landingPad); { const bool added = CurrentBlockData()->availableIntBoundChecks->AddNew(boundCheckInfo) >= 0; Assert(added || failedToUpdateCompatibleUpperBoundCheck); } for(InvariantBlockBackwardIterator it(this, currentBlock, landingPad, nullptr); it.IsValid(); it.MoveNext()) { const bool added = it.Block()->globOptData.availableIntBoundChecks->AddNew(boundCheckInfo) >= 0; Assert(added || failedToUpdateCompatibleUpperBoundCheck); } } } // Update values of the syms involved in the bound check to reflect the bound check Assert(!hoistInfo.Loop() || hoistBlock != currentBlock); if(hoistBlock != currentBlock) { for(InvariantBlockBackwardIterator it(this, currentBlock->next, hoistBlock, nullptr); it.IsValid(); it.MoveNext()) { BasicBlock *const block = it.Block(); Value *leftValue; IntConstantBounds leftConstantBounds; if(hoistInfo.IndexSym()) { leftValue = block->globOptData.FindValue(hoistInfo.IndexSym()); if(!leftValue || leftValue->GetValueNumber() != hoistInfo.IndexValueNumber()) { continue; } AssertVerify(leftValue->GetValueInfo()->TryGetIntConstantBounds(&leftConstantBounds, true)); } else { leftValue = nullptr; leftConstantBounds = hoistInfo.IndexConstantBounds(); } Value *const rightValue = block->globOptData.FindValue(headSegmentLengthSym); if(!rightValue) { continue; } Assert(rightValue->GetValueNumber() == headSegmentLengthValue->GetValueNumber()); IntConstantBounds rightConstantBounds; AssertVerify(rightValue->GetValueInfo()->TryGetIntConstantBounds(&rightConstantBounds)); ValueInfo *const newValueInfoForLessThanOrEqual = UpdateIntBoundsForLessThanOrEqual( leftValue, leftConstantBounds, rightValue, rightConstantBounds, hoistInfo.Offset(), false); if (newValueInfoForLessThanOrEqual) { ChangeValueInfo(nullptr, leftValue, newValueInfoForLessThanOrEqual); AssertVerify(newValueInfoForLessThanOrEqual->TryGetIntConstantBounds(&leftConstantBounds, true)); if(block == currentBlock && leftValue == indexValue) { Assert(newValueInfoForLessThanOrEqual->IsInt()); indexConstantBounds = leftConstantBounds; } } if(hoistInfo.Offset() != INT32_MIN) { ValueInfo *const newValueInfoForGreaterThanOrEqual = UpdateIntBoundsForGreaterThanOrEqual( rightValue, rightConstantBounds, leftValue, leftConstantBounds, -hoistInfo.Offset(), false); if (newValueInfoForGreaterThanOrEqual) { ChangeValueInfo(nullptr, rightValue, newValueInfoForGreaterThanOrEqual); if(block == currentBlock) { Assert(rightValue == headSegmentLengthValue); AssertVerify(newValueInfoForGreaterThanOrEqual->TryGetIntConstantBounds(&headSegmentLengthConstantBounds)); } } } } } } else { IR::Opnd* lowerBound = baseOwnerIndir->GetIndexOpnd() ? 
static_cast<IR::Opnd *>(baseOwnerIndir->GetIndexOpnd()) : IR::IntConstOpnd::New(baseOwnerIndir->GetOffset(), TyInt32, instr->m_func); lowerBound->SetIsJITOptimizedReg(true); IR::Opnd* upperBound = IR::RegOpnd::New(headSegmentLengthSym, headSegmentLengthSym->GetType(), instr->m_func); upperBound->SetIsJITOptimizedReg(true); const int offset = GetBoundCheckOffsetForSimd(newBaseValueType, instr, -1); IR::Instr *boundCheck; // index <= headSegmentLength - 1 (src1 <= src2 + dst) if (shareableBailOutInfo) { ShareBailOut(); boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, offset, IR::BailOutOnArrayAccessHelperCall, shareableBailOutInfo, shareableBailOutInfo->bailOutFunc); } else { boundCheck = CreateBoundsCheckInstr( lowerBound, upperBound, offset, instr->m_func); } boundCheck->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(boundCheck); if(!shareableBailOutInfo) { GenerateBailAtOperation(&boundCheck, IR::BailOutOnArrayAccessHelperCall); shareableBailOutInfo = boundCheck->GetBailOutInfo(); shareableBailOutInfoOriginalOwner = boundCheck; } instr->extractedUpperBoundCheckWithoutHoisting = true; if(baseOwnerIndir->GetIndexOpnd()) { TRACE_PHASE_INSTR( Js::Phase::BoundCheckEliminationPhase, instr, _u("Separating array upper bound check, as (s%u < s%u)\n"), indexVarSym->m_id, headSegmentLengthSym->m_id); } else { TRACE_PHASE_INSTR( Js::Phase::BoundCheckEliminationPhase, instr, _u("Separating array upper bound check, as (%d < s%u)\n"), baseOwnerIndir->GetOffset(), headSegmentLengthSym->m_id); } TESTTRACE_PHASE_INSTR( Js::Phase::BoundCheckEliminationPhase, instr, _u("Separating array upper bound check\n")); if(DoBoundCheckHoist()) { // Record the bound check instruction as available const bool added = CurrentBlockData()->availableIntBoundChecks->AddNew( IntBoundCheck( indexValue ? indexValue->GetValueNumber() : ZeroValueNumber, headSegmentLengthValue->GetValueNumber(), boundCheck, currentBlock)) >= 0; Assert(added || failedToUpdateCompatibleUpperBoundCheck); } } // Update the index and head segment length values to reflect the bound check ValueInfo *newValueInfo = UpdateIntBoundsForLessThan( indexValue, indexConstantBounds, headSegmentLengthValue, headSegmentLengthConstantBounds, false); if(newValueInfo) { ChangeValueInfo(nullptr, indexValue, newValueInfo); AssertVerify(newValueInfo->TryGetIntConstantBounds(&indexConstantBounds)); } newValueInfo = UpdateIntBoundsForGreaterThan( headSegmentLengthValue, headSegmentLengthConstantBounds, indexValue, indexConstantBounds, false); if(newValueInfo) { ChangeValueInfo(nullptr, headSegmentLengthValue, newValueInfo); } } } if(doHeadSegmentLoad && !isLikelyJsArray) { // For typed arrays, load the length first, followed by the bound checks, and then load the head segment. This // allows the length sym to become dead by the time of the head segment load, freeing up the register for use by the // head segment sym. InsertHeadSegmentLoad(); } if(doArrayChecks || doHeadSegmentLoad || doHeadSegmentLengthLoad || doLengthLoad) { UpdateValue(newHeadSegmentSym, newHeadSegmentLengthSym, newLengthSym); baseValueInfo = baseValue->GetValueInfo(); baseArrayValueInfo = baseValueInfo->IsArrayValueInfo() ? 
baseValueInfo->AsArrayValueInfo() : nullptr; // Iterate up to the root loop's landing pad until all necessary value info is updated uint hoistItemCount = static_cast<uint>(!!hoistChecksOutOfLoop) + !!hoistHeadSegmentLoadOutOfLoop + !!hoistHeadSegmentLengthLoadOutOfLoop + !!hoistLengthLoadOutOfLoop; if(hoistItemCount != 0) { Loop *rootLoop = nullptr; for(Loop *loop = currentBlock->loop; loop; loop = loop->parent) { rootLoop = loop; } Assert(rootLoop); ValueInfo *valueInfoToHoist = baseValueInfo; bool removeHeadSegment, removeHeadSegmentLength, removeLength; if(baseArrayValueInfo) { removeHeadSegment = baseArrayValueInfo->HeadSegmentSym() && !hoistHeadSegmentLoadOutOfLoop; removeHeadSegmentLength = baseArrayValueInfo->HeadSegmentLengthSym() && !hoistHeadSegmentLengthLoadOutOfLoop; removeLength = baseArrayValueInfo->LengthSym() && !hoistLengthLoadOutOfLoop; } else { removeLength = removeHeadSegmentLength = removeHeadSegment = false; } for(InvariantBlockBackwardIterator it( this, currentBlock, rootLoop->landingPad, baseOpnd->m_sym, baseValue->GetValueNumber()); it.IsValid(); it.MoveNext()) { if(removeHeadSegment || removeHeadSegmentLength || removeLength) { // Remove information that shouldn't be there anymore, from the value info valueInfoToHoist = valueInfoToHoist->AsArrayValueInfo()->Copy( alloc, !removeHeadSegment, !removeHeadSegmentLength, !removeLength); removeLength = removeHeadSegmentLength = removeHeadSegment = false; } BasicBlock *const block = it.Block(); Value *const blockBaseValue = it.InvariantSymValue(); HoistInvariantValueInfo(valueInfoToHoist, blockBaseValue, block); // See if we have completed hoisting value info for one of the items if(hoistChecksOutOfLoop && block == hoistChecksOutOfLoop->landingPad) { // All other items depend on array checks, so we can just stop here hoistChecksOutOfLoop = nullptr; break; } if(hoistHeadSegmentLoadOutOfLoop && block == hoistHeadSegmentLoadOutOfLoop->landingPad) { hoistHeadSegmentLoadOutOfLoop = nullptr; if(--hoistItemCount == 0) break; if(valueInfoToHoist->IsArrayValueInfo() && valueInfoToHoist->AsArrayValueInfo()->HeadSegmentSym()) removeHeadSegment = true; } if(hoistHeadSegmentLengthLoadOutOfLoop && block == hoistHeadSegmentLengthLoadOutOfLoop->landingPad) { hoistHeadSegmentLengthLoadOutOfLoop = nullptr; if(--hoistItemCount == 0) break; if(valueInfoToHoist->IsArrayValueInfo() && valueInfoToHoist->AsArrayValueInfo()->HeadSegmentLengthSym()) removeHeadSegmentLength = true; } if(hoistLengthLoadOutOfLoop && block == hoistLengthLoadOutOfLoop->landingPad) { hoistLengthLoadOutOfLoop = nullptr; if(--hoistItemCount == 0) break; if(valueInfoToHoist->IsArrayValueInfo() && valueInfoToHoist->AsArrayValueInfo()->LengthSym()) removeLength = true; } } } } } IR::ArrayRegOpnd *baseArrayOpnd; if(baseArrayValueInfo) { // Update the opnd to include the associated syms baseArrayOpnd = baseArrayValueInfo->CreateOpnd( baseOpnd, needsHeadSegment, needsHeadSegmentLength || (!isLikelyJsArray && needsLength), needsLength, eliminatedLowerBoundCheck, eliminatedUpperBoundCheck, instr->m_func); if(baseOwnerInstr) { Assert(baseOwnerInstr->GetSrc1() == baseOpnd); baseOwnerInstr->ReplaceSrc1(baseArrayOpnd); } else { Assert(baseOwnerIndir); Assert(baseOwnerIndir->GetBaseOpnd() == baseOpnd); baseOwnerIndir->ReplaceBaseOpnd(baseArrayOpnd); } baseOpnd = baseArrayOpnd; } else { baseArrayOpnd = nullptr; } if(isLikelyJsArray) { // Insert an instruction to indicate to the dead-store pass that implicit calls need to be kept disabled until this // instruction. 
Operations other than LdElem and StElem don't benefit much from arrays having no missing values, so // no need to ensure that the array still has no missing values. For a particular array, if none of the accesses // benefit much from the no-missing-values information, it may be beneficial to avoid checking for no missing // values, especially in the case for a single array access, where the cost of the check could be relatively // significant. An StElem has to do additional checks in the common path if the array may have missing values, and // a StElem that operates on an array that has no missing values is more likely to keep the no-missing-values info // on the array more precise, so it still benefits a little from the no-missing-values info. CaptureNoImplicitCallUses(baseOpnd, isLoad || isStore); } else if(baseArrayOpnd && baseArrayOpnd->HeadSegmentLengthSym()) { // A typed array's array buffer may be transferred to a web worker as part of an implicit call, in which case the typed // array's length is set to zero. Insert an instruction to indicate to the dead-store pass that implicit calls need to // be disabled until this instruction. IR::RegOpnd *const headSegmentLengthOpnd = IR::RegOpnd::New( baseArrayOpnd->HeadSegmentLengthSym(), baseArrayOpnd->HeadSegmentLengthSym()->GetType(), instr->m_func); const IR::AutoReuseOpnd autoReuseHeadSegmentLengthOpnd(headSegmentLengthOpnd, instr->m_func); CaptureNoImplicitCallUses(headSegmentLengthOpnd, false); } const auto OnEliminated = [&](const Js::Phase phase, const char *const eliminatedLoad) { TRACE_TESTTRACE_PHASE_INSTR(phase, instr, _u("Eliminating array %S\n"), eliminatedLoad); }; OnEliminated(Js::Phase::ArrayCheckHoistPhase, "checks"); if(baseArrayOpnd) { if(baseArrayOpnd->HeadSegmentSym()) { OnEliminated(Js::Phase::ArraySegmentHoistPhase, "head segment load"); } if(baseArrayOpnd->HeadSegmentLengthSym()) { OnEliminated(Js::Phase::ArraySegmentHoistPhase, "head segment length load"); } if(baseArrayOpnd->LengthSym()) { OnEliminated(Js::Phase::ArrayLengthHoistPhase, "length load"); } if(baseArrayOpnd->EliminatedLowerBoundCheck()) { OnEliminated(Js::Phase::BoundCheckEliminationPhase, "lower bound check"); } if(baseArrayOpnd->EliminatedUpperBoundCheck()) { OnEliminated(Js::Phase::BoundCheckEliminationPhase, "upper bound check"); } } if(!canBailOutOnArrayAccessHelperCall) { return; } // Bail out instead of generating a helper call. This helps to remove the array reference when the head segment and head // segment length are available, reduces code size, and allows bound checks to be separated. 
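    // If the instruction already carries bailout info (per the assert below, the only main bailout kind possible here is
    // BailOutOnImplicitCallsPreOp), fold BailOutOnArrayAccessHelperCall into the existing bailout kind; otherwise attach a
    // fresh bailout at this operation.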
if(instr->HasBailOutInfo()) { const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); Assert( !(bailOutKind & ~IR::BailOutKindBits) || (bailOutKind & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCallsPreOp); instr->SetBailOutKind(bailOutKind & IR::BailOutKindBits | IR::BailOutOnArrayAccessHelperCall); } else { GenerateBailAtOperation(&instr, IR::BailOutOnArrayAccessHelperCall); } } void GlobOpt::CaptureNoImplicitCallUses( IR::Opnd *opnd, const bool usesNoMissingValuesInfo, IR::Instr *const includeCurrentInstr) { Assert(!IsLoopPrePass()); Assert(noImplicitCallUsesToInsert); Assert(opnd); // The opnd may be deleted later, so make a copy to ensure it is alive for inserting NoImplicitCallUses later opnd = opnd->Copy(func); if(!usesNoMissingValuesInfo) { const ValueType valueType(opnd->GetValueType()); if(valueType.IsArrayOrObjectWithArray() && valueType.HasNoMissingValues()) { // Inserting NoImplicitCallUses for an opnd with a definitely-array-with-no-missing-values value type means that the // instruction following it uses the information that the array has no missing values in some way, for instance, it // may omit missing value checks. Based on that, the dead-store phase in turn ensures that the necessary bailouts // are inserted to ensure that the array still has no missing values until the following instruction. Since // 'usesNoMissingValuesInfo' is false, change the value type to indicate to the dead-store phase that the following // instruction does not use the no-missing-values information. opnd->SetValueType(valueType.SetHasNoMissingValues(false)); } } if(includeCurrentInstr) { IR::Instr *const noImplicitCallUses = IR::PragmaInstr::New(Js::OpCode::NoImplicitCallUses, 0, includeCurrentInstr->m_func); noImplicitCallUses->SetSrc1(opnd); noImplicitCallUses->GetSrc1()->SetIsJITOptimizedReg(true); includeCurrentInstr->InsertAfter(noImplicitCallUses); return; } noImplicitCallUsesToInsert->Add(opnd); } void GlobOpt::InsertNoImplicitCallUses(IR::Instr *const instr) { Assert(noImplicitCallUsesToInsert); const int n = noImplicitCallUsesToInsert->Count(); if(n == 0) { return; } IR::Instr *const insertBeforeInstr = instr->GetInsertBeforeByteCodeUsesInstr(); for(int i = 0; i < n;) { IR::Instr *const noImplicitCallUses = IR::PragmaInstr::New(Js::OpCode::NoImplicitCallUses, 0, instr->m_func); noImplicitCallUses->SetSrc1(noImplicitCallUsesToInsert->Item(i)); noImplicitCallUses->GetSrc1()->SetIsJITOptimizedReg(true); ++i; if(i < n) { noImplicitCallUses->SetSrc2(noImplicitCallUsesToInsert->Item(i)); noImplicitCallUses->GetSrc2()->SetIsJITOptimizedReg(true); ++i; } noImplicitCallUses->SetByteCodeOffset(instr); insertBeforeInstr->InsertBefore(noImplicitCallUses); } noImplicitCallUsesToInsert->Clear(); } void GlobOpt::PrepareLoopArrayCheckHoist() { if(IsLoopPrePass() || !currentBlock->loop || !currentBlock->isLoopHeader || !currentBlock->loop->parent) { return; } if(currentBlock->loop->parent->needImplicitCallBailoutChecksForJsArrayCheckHoist) { // If the parent loop is an array check elimination candidate, so is the current loop. Even though the current loop may // not have array accesses, if the parent loop hoists array checks, the current loop also needs implicit call checks. 
currentBlock->loop->needImplicitCallBailoutChecksForJsArrayCheckHoist = true; } } JsArrayKills GlobOpt::CheckJsArrayKills(IR::Instr *const instr) { Assert(instr); JsArrayKills kills; if(instr->UsesAllFields()) { // Calls can (but are unlikely to) change a javascript array into an ES5 array, which may have different behavior for // index properties. kills.SetKillsAllArrays(); return kills; } const bool doArrayMissingValueCheckHoist = DoArrayMissingValueCheckHoist(); const bool doNativeArrayTypeSpec = DoNativeArrayTypeSpec(); const bool doArraySegmentHoist = DoArraySegmentHoist(ValueType::GetObject(ObjectType::Array)); Assert(doArraySegmentHoist == DoArraySegmentHoist(ValueType::GetObject(ObjectType::ObjectWithArray))); const bool doArrayLengthHoist = DoArrayLengthHoist(); if(!doArrayMissingValueCheckHoist && !doNativeArrayTypeSpec && !doArraySegmentHoist && !doArrayLengthHoist) { return kills; } // The following operations may create missing values in an array in an unlikely circumstance. Even though they don't kill // the fact that the 'this' parameter is an array (when implicit calls are disabled), we don't have a way to say the value // type is definitely array but it likely has no missing values. So, these will kill the definite value type as well, making // it likely array, such that the array checks will have to be redone. const bool useValueTypes = !IsLoopPrePass(); // Source value types are not guaranteed to be correct in a loop prepass switch(instr->m_opcode) { case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: { Assert(instr->GetDst()); if(!instr->GetDst()->IsIndirOpnd()) { break; } const ValueType baseValueType = useValueTypes ? instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType() : ValueType::Uninitialized; if(useValueTypes && baseValueType.IsNotArrayOrObjectWithArray()) { break; } if(instr->IsProfiledInstr()) { const Js::StElemInfo *const stElemInfo = instr->AsProfiledInstr()->u.stElemInfo; if(doArraySegmentHoist && stElemInfo->LikelyStoresOutsideHeadSegmentBounds()) { kills.SetKillsArrayHeadSegments(); kills.SetKillsArrayHeadSegmentLengths(); } if(doArrayLengthHoist && !(useValueTypes && baseValueType.IsNotArray()) && stElemInfo->LikelyStoresOutsideArrayBounds()) { kills.SetKillsArrayLengths(); } } break; } case Js::OpCode::DeleteElemI_A: case Js::OpCode::DeleteElemIStrict_A: Assert(instr->GetSrc1()); if(!instr->GetSrc1()->IsIndirOpnd() || (useValueTypes && instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsNotArrayOrObjectWithArray())) { break; } if(doArrayMissingValueCheckHoist) { kills.SetKillsArraysWithNoMissingValues(); } if(doArraySegmentHoist) { kills.SetKillsArrayHeadSegmentLengths(); } break; case Js::OpCode::StFld: case Js::OpCode::StFldStrict: { Assert(instr->GetDst()); if(!doArraySegmentHoist && !doArrayLengthHoist) { break; } IR::SymOpnd *const symDst = instr->GetDst()->AsSymOpnd(); if(!symDst->IsPropertySymOpnd()) { break; } IR::PropertySymOpnd *const dst = symDst->AsPropertySymOpnd(); if(dst->m_sym->AsPropertySym()->m_propertyId != Js::PropertyIds::length) { break; } if(useValueTypes && dst->GetPropertyOwnerValueType().IsNotArray()) { // Setting the 'length' property of an object that is not an array, even if it has an internal array, does // not kill the head segment or head segment length of any arrays. 
break; } if(doArraySegmentHoist) { kills.SetKillsArrayHeadSegmentLengths(); } if(doArrayLengthHoist) { kills.SetKillsArrayLengths(); } break; } case Js::OpCode::InlineArrayPush: { Assert(instr->GetSrc2()); IR::Opnd *const arrayOpnd = instr->GetSrc1(); Assert(arrayOpnd); const ValueType arrayValueType(arrayOpnd->GetValueType()); if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray())) { break; } if(doArrayMissingValueCheckHoist) { kills.SetKillsArraysWithNoMissingValues(); } if(doArraySegmentHoist) { kills.SetKillsArrayHeadSegments(); kills.SetKillsArrayHeadSegmentLengths(); } if(doArrayLengthHoist && !(useValueTypes && arrayValueType.IsNotArray())) { kills.SetKillsArrayLengths(); } // Don't kill NativeArray, if there is no mismatch between array's type and element's type. if(doNativeArrayTypeSpec && !(useValueTypes && arrayValueType.IsNativeArray() && ((arrayValueType.IsLikelyNativeIntArray() && instr->GetSrc2()->IsInt32()) || (arrayValueType.IsLikelyNativeFloatArray() && instr->GetSrc2()->IsFloat())) ) && !(useValueTypes && arrayValueType.IsNotNativeArray())) { kills.SetKillsNativeArrays(); } break; } case Js::OpCode::InlineArrayPop: { IR::Opnd *const arrayOpnd = instr->GetSrc1(); Assert(arrayOpnd); const ValueType arrayValueType(arrayOpnd->GetValueType()); if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray())) { break; } if(doArraySegmentHoist) { kills.SetKillsArrayHeadSegmentLengths(); } if(doArrayLengthHoist && !(useValueTypes && arrayValueType.IsNotArray())) { kills.SetKillsArrayLengths(); } break; } case Js::OpCode::CallDirect: { Assert(instr->GetSrc1()); // Find the 'this' parameter and check if it's possible for it to be an array IR::Opnd *const arrayOpnd = instr->FindCallArgumentOpnd(1); Assert(arrayOpnd); const ValueType arrayValueType(arrayOpnd->GetValueType()); if(!arrayOpnd->IsRegOpnd() || (useValueTypes && arrayValueType.IsNotArrayOrObjectWithArray())) { break; } const IR::JnHelperMethod helperMethod = instr->GetSrc1()->AsHelperCallOpnd()->m_fnHelper; if(doArrayMissingValueCheckHoist) { switch(helperMethod) { case IR::HelperArray_Reverse: case IR::HelperArray_Shift: case IR::HelperArray_Splice: case IR::HelperArray_Unshift: kills.SetKillsArraysWithNoMissingValues(); break; } } if(doArraySegmentHoist) { switch(helperMethod) { case IR::HelperArray_Reverse: case IR::HelperArray_Shift: case IR::HelperArray_Splice: case IR::HelperArray_Unshift: kills.SetKillsArrayHeadSegments(); kills.SetKillsArrayHeadSegmentLengths(); break; } } if(doArrayLengthHoist && !(useValueTypes && arrayValueType.IsNotArray())) { switch(helperMethod) { case IR::HelperArray_Shift: case IR::HelperArray_Splice: case IR::HelperArray_Unshift: kills.SetKillsArrayLengths(); break; } } if(doNativeArrayTypeSpec && !(useValueTypes && arrayValueType.IsNotNativeArray())) { switch(helperMethod) { case IR::HelperArray_Reverse: case IR::HelperArray_Shift: case IR::HelperArray_Slice: // Currently not inlined. 
//case IR::HelperArray_Sort: case IR::HelperArray_Splice: case IR::HelperArray_Unshift: kills.SetKillsNativeArrays(); break; } } break; } } return kills; } GlobOptBlockData const * GlobOpt::CurrentBlockData() const { return &this->currentBlock->globOptData; } GlobOptBlockData * GlobOpt::CurrentBlockData() { return &this->currentBlock->globOptData; } void GlobOpt::CommitCapturedValuesCandidate() { GlobOptBlockData * globOptData = CurrentBlockData(); globOptData->changedSyms->ClearAll(); if (!this->changedSymsAfterIncBailoutCandidate->IsEmpty()) { // // some symbols are changed after the values for current bailout have been // captured (GlobOpt::CapturedValues), need to restore such symbols as changed // for following incremental bailout construction, or we will miss capturing // values for later bailout // // swap changedSyms and changedSymsAfterIncBailoutCandidate // because both are from this->alloc BVSparse<JitArenaAllocator> * tempBvSwap = globOptData->changedSyms; globOptData->changedSyms = this->changedSymsAfterIncBailoutCandidate; this->changedSymsAfterIncBailoutCandidate = tempBvSwap; } if (globOptData->capturedValues) { globOptData->capturedValues->DecrementRefCount(); } globOptData->capturedValues = globOptData->capturedValuesCandidate; // null out capturedValuesCandidate to stop tracking symbols change for it globOptData->capturedValuesCandidate = nullptr; } bool GlobOpt::IsOperationThatLikelyKillsJsArraysWithNoMissingValues(IR::Instr *const instr) { // StElem is profiled with information indicating whether it will likely create a missing value in the array. In that case, // we prefer to kill the no-missing-values information in the value so that we don't bail out in a likely circumstance. return (instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) && DoArrayMissingValueCheckHoist() && instr->IsProfiledInstr() && instr->AsProfiledInstr()->u.stElemInfo->LikelyCreatesMissingValue(); } bool GlobOpt::NeedBailOnImplicitCallForArrayCheckHoist(BasicBlock const * const block, const bool isForwardPass) const { Assert(block); return isForwardPass && block->loop && block->loop->needImplicitCallBailoutChecksForJsArrayCheckHoist; } bool GlobOpt::PrepareForIgnoringIntOverflow(IR::Instr *const instr) { Assert(instr); const bool isBoundary = instr->m_opcode == Js::OpCode::NoIntOverflowBoundary; // Update the instruction's "int overflow matters" flag based on whether we are currently allowing ignoring int overflows. // Some operations convert their srcs to int32s, those can still ignore int overflow. if(instr->ignoreIntOverflowInRange) { instr->ignoreIntOverflowInRange = !intOverflowCurrentlyMattersInRange || OpCodeAttr::IsInt32(instr->m_opcode); } if(!intOverflowDoesNotMatterRange) { Assert(intOverflowCurrentlyMattersInRange); // There are no more ranges of instructions where int overflow does not matter, in this block. return isBoundary; } if(instr == intOverflowDoesNotMatterRange->LastInstr()) { Assert(isBoundary); // Reached the last instruction in the range intOverflowCurrentlyMattersInRange = true; intOverflowDoesNotMatterRange = intOverflowDoesNotMatterRange->Next(); return isBoundary; } if(!intOverflowCurrentlyMattersInRange) { return isBoundary; } if(instr != intOverflowDoesNotMatterRange->FirstInstr()) { // Have not reached the next range return isBoundary; } Assert(isBoundary); // This is the first instruction in a range of instructions where int overflow does not matter. 
There can be many inputs to // instructions in the range, some of which are inputs to the range itself (that is, the values are not defined in the // range). Ignoring int overflow is only valid for int operations, so we need to ensure that all inputs to the range are // int (not "likely int") before ignoring any overflows in the range. Ensuring that a sym with a "likely int" value is an // int requires a bail-out. These bail-out check need to happen before any overflows are ignored, otherwise it's too late. // The backward pass tracked all inputs into the range. Iterate over them and verify the values, and insert lossless // conversions to int as necessary, before the first instruction in the range. If for any reason all values cannot be // guaranteed to be ints, the optimization will be disabled for this range. intOverflowCurrentlyMattersInRange = false; { BVSparse<JitArenaAllocator> tempBv1(tempAlloc); BVSparse<JitArenaAllocator> tempBv2(tempAlloc); { // Just renaming the temp BVs for this section to indicate how they're used so that it makes sense BVSparse<JitArenaAllocator> &symsToExclude = tempBv1; BVSparse<JitArenaAllocator> &symsToInclude = tempBv2; #if DBG_DUMP SymID couldNotConvertSymId = 0; #endif FOREACH_BITSET_IN_SPARSEBV(id, intOverflowDoesNotMatterRange->SymsRequiredToBeInt()) { Sym *const sym = func->m_symTable->Find(id); Assert(sym); // Some instructions with property syms are also tracked by the backward pass, and may be included in the range // (LdSlot for instance). These property syms don't get their values until either copy-prop resolves a value for // them, or a new value is created once the use of the property sym is reached. In either case, we're not that // far yet, so we need to find the future value of the property sym by evaluating copy-prop in reverse. Value *const value = sym->IsStackSym() ? CurrentBlockData()->FindValue(sym) : CurrentBlockData()->FindFuturePropertyValue(sym->AsPropertySym()); if(!value) { #if DBG_DUMP couldNotConvertSymId = id; #endif intOverflowCurrentlyMattersInRange = true; BREAK_BITSET_IN_SPARSEBV; } const bool isInt32OrUInt32Float = value->GetValueInfo()->IsFloatConstant() && Js::JavascriptNumber::IsInt32OrUInt32(value->GetValueInfo()->AsFloatConstant()->FloatValue()); if(value->GetValueInfo()->IsInt() || isInt32OrUInt32Float) { if(!IsLoopPrePass()) { // Input values that are already int can be excluded from int-specialization. We can treat unsigned // int32 values as int32 values (ignoring the overflow), since the values will only be used inside the // range where overflow does not matter. symsToExclude.Set(sym->m_id); } continue; } if(!DoAggressiveIntTypeSpec() || !value->GetValueInfo()->IsLikelyInt()) { // When aggressive int specialization is off, syms with "likely int" values cannot be forced to int since // int bail-out checks are not allowed in that mode. Similarly, with aggressive int specialization on, it // wouldn't make sense to force non-"likely int" values to int since it would almost guarantee a bail-out at // runtime. In both cases, just disable ignoring overflow for this range. #if DBG_DUMP couldNotConvertSymId = id; #endif intOverflowCurrentlyMattersInRange = true; BREAK_BITSET_IN_SPARSEBV; } if(IsLoopPrePass()) { // The loop prepass does not modify bit-vectors. Since it doesn't add bail-out checks, it also does not need // to specialize anything up-front. It only needs to be consistent in how it determines whether to allow // ignoring overflow for a range, based on the values of inputs into the range. 
continue; } // Since input syms are tracked in the backward pass, where there is no value tracking, it will not be aware of // copy-prop. If a copy-prop sym is available, it will be used instead, so exclude the original sym and include // the copy-prop sym for specialization. StackSym *const copyPropSym = CurrentBlockData()->GetCopyPropSym(sym, value); if(copyPropSym) { symsToExclude.Set(sym->m_id); Assert(!symsToExclude.Test(copyPropSym->m_id)); const bool needsToBeLossless = !intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Test(sym->m_id); if(intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Test(copyPropSym->m_id) || symsToInclude.TestAndSet(copyPropSym->m_id)) { // The copy-prop sym is already included if(needsToBeLossless) { // The original sym needs to be lossless, so make the copy-prop sym lossless as well. intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Clear(copyPropSym->m_id); } } else if(!needsToBeLossless) { // The copy-prop sym was not included before, and the original sym can be lossy, so make it lossy. intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Set(copyPropSym->m_id); } } else if(!sym->IsStackSym()) { // Only stack syms can be converted to int, and copy-prop syms are stack syms. If a copy-prop sym was not // found for the property sym, we can't ignore overflows in this range. #if DBG_DUMP couldNotConvertSymId = id; #endif intOverflowCurrentlyMattersInRange = true; BREAK_BITSET_IN_SPARSEBV; } } NEXT_BITSET_IN_SPARSEBV; if(intOverflowCurrentlyMattersInRange) { #if DBG_DUMP if(PHASE_TRACE(Js::TrackCompoundedIntOverflowPhase, func) && !IsLoopPrePass()) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Output::Print( _u("TrackCompoundedIntOverflow - Top function: %s (%s), Phase: %s, Block: %u, Disabled ignoring overflows\n"), func->GetJITFunctionBody()->GetDisplayName(), func->GetDebugNumberSet(debugStringBuffer), Js::PhaseNames[Js::ForwardPhase], currentBlock->GetBlockNum()); Output::Print(_u(" Input sym could not be turned into an int: %u\n"), couldNotConvertSymId); Output::Print(_u(" First instr: ")); instr->m_next->Dump(); Output::Flush(); } #endif intOverflowDoesNotMatterRange = intOverflowDoesNotMatterRange->Next(); return isBoundary; } if(IsLoopPrePass()) { return isBoundary; } // Update the syms to specialize after enumeration intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Minus(&symsToExclude); intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Minus(&symsToExclude); intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Or(&symsToInclude); } { // Exclude syms that are already live as lossless int32, and exclude lossy conversions of syms that are already live // as lossy int32. 
// symsToExclude = liveInt32Syms - liveLossyInt32Syms // syms live as lossless int // lossySymsToExclude = symsRequiredToBeLossyInt & liveLossyInt32Syms; // syms we want as lossy int that are already live as lossy int // symsToExclude |= lossySymsToExclude // symsRequiredToBeInt -= symsToExclude // symsRequiredToBeLossyInt -= symsToExclude BVSparse<JitArenaAllocator> &symsToExclude = tempBv1; BVSparse<JitArenaAllocator> &lossySymsToExclude = tempBv2; symsToExclude.Minus(CurrentBlockData()->liveInt32Syms, CurrentBlockData()->liveLossyInt32Syms); lossySymsToExclude.And( intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt(), CurrentBlockData()->liveLossyInt32Syms); symsToExclude.Or(&lossySymsToExclude); intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Minus(&symsToExclude); intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Minus(&symsToExclude); } #if DBG { // Verify that the syms to be converted are live // liveSyms = liveInt32Syms | liveFloat64Syms | liveVarSyms // deadSymsRequiredToBeInt = symsRequiredToBeInt - liveSyms BVSparse<JitArenaAllocator> &liveSyms = tempBv1; BVSparse<JitArenaAllocator> &deadSymsRequiredToBeInt = tempBv2; liveSyms.Or(CurrentBlockData()->liveInt32Syms, CurrentBlockData()->liveFloat64Syms); liveSyms.Or(CurrentBlockData()->liveVarSyms); deadSymsRequiredToBeInt.Minus(intOverflowDoesNotMatterRange->SymsRequiredToBeInt(), &liveSyms); Assert(deadSymsRequiredToBeInt.IsEmpty()); } #endif } // Int-specialize the syms before the first instruction of the range (the current instruction) intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Minus(intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()); #if DBG_DUMP if(PHASE_TRACE(Js::TrackCompoundedIntOverflowPhase, func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Output::Print( _u("TrackCompoundedIntOverflow - Top function: %s (%s), Phase: %s, Block: %u\n"), func->GetJITFunctionBody()->GetDisplayName(), func->GetDebugNumberSet(debugStringBuffer), Js::PhaseNames[Js::ForwardPhase], currentBlock->GetBlockNum()); Output::Print(_u(" Input syms to be int-specialized (lossless): ")); intOverflowDoesNotMatterRange->SymsRequiredToBeInt()->Dump(); Output::Print(_u(" Input syms to be converted to int (lossy): ")); intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt()->Dump(); Output::Print(_u(" First instr: ")); instr->m_next->Dump(); Output::Flush(); } #endif ToInt32(intOverflowDoesNotMatterRange->SymsRequiredToBeInt(), currentBlock, false /* lossy */, instr); ToInt32(intOverflowDoesNotMatterRange->SymsRequiredToBeLossyInt(), currentBlock, true /* lossy */, instr); return isBoundary; } void GlobOpt::VerifyIntSpecForIgnoringIntOverflow(IR::Instr *const instr) { if(intOverflowCurrentlyMattersInRange || IsLoopPrePass()) { return; } Assert(instr->m_opcode != Js::OpCode::Mul_I4 || (instr->m_opcode == Js::OpCode::Mul_I4 && !instr->ShouldCheckFor32BitOverflow() && instr->ShouldCheckForNon32BitOverflow() )); // Instructions that are marked as "overflow doesn't matter" in the range must guarantee that they operate on int values and // result in int values, for ignoring overflow to be valid. So, int-specialization is required for such instructions in the // range. Ld_A is an exception because it only specializes if the src sym is available as a required specialized sym, and it // doesn't generate bailouts or cause ignoring int overflow to be invalid. // MULs are allowed to start a region and have BailOutInfo since they will bailout on non-32 bit overflow. 
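// For instance (illustrative, not exhaustive): an Add_I4 whose dst and both srcs are already int32 and which carries no
// bailout info satisfies the check below, while an instruction that still needs a bailout on var operands falls through
// to the compile-time bailout handling further down.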
if(instr->m_opcode == Js::OpCode::Ld_A || ((!instr->HasBailOutInfo() || instr->m_opcode == Js::OpCode::Mul_I4) && (!instr->GetDst() || instr->GetDst()->IsInt32()) && (!instr->GetSrc1() || instr->GetSrc1()->IsInt32()) && (!instr->GetSrc2() || instr->GetSrc2()->IsInt32()))) { return; } if (!instr->HasBailOutInfo() && !instr->HasAnySideEffects()) { return; } // This can happen for Neg_A if it needs to bail out on negative zero, and perhaps other cases as well. It's too late to fix // the problem (overflows may already be ignored), so handle it by bailing out at compile-time and disabling tracking int // overflow. Assert(!func->IsTrackCompoundedIntOverflowDisabled()); if(PHASE_TRACE(Js::BailOutPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Output::Print( _u("BailOut (compile-time): function: %s (%s) instr: "), func->GetJITFunctionBody()->GetDisplayName(), func->GetDebugNumberSet(debugStringBuffer)); #if DBG_DUMP instr->Dump(); #else Output::Print(_u("%s "), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); #endif Output::Print(_u("(overflow does not matter but could not int-spec or needed bailout)\n")); Output::Flush(); } if(func->IsTrackCompoundedIntOverflowDisabled()) { // Tracking int overflows is already off for some reason. Prevent trying to rejit again because it won't help and the // same thing will happen again and cause an infinite loop. Just abort jitting this function. if(PHASE_TRACE(Js::BailOutPhase, this->func)) { Output::Print(_u(" Aborting JIT because TrackIntOverflow is already off\n")); Output::Flush(); } throw Js::OperationAbortedException(); } throw Js::RejitException(RejitReason::TrackIntOverflowDisabled); } // It makes lowering easier if it can assume that the first src is never a constant, // at least for commutative operators. For non-commutative, just hoist the constant. void GlobOpt::PreLowerCanonicalize(IR::Instr *instr, Value **pSrc1Val, Value **pSrc2Val) { IR::Opnd *dst = instr->GetDst(); IR::Opnd *src1 = instr->GetSrc1(); IR::Opnd *src2 = instr->GetSrc2(); if (src1->IsImmediateOpnd()) { // Swap for dst, src } else if (src2 && dst && src2->IsRegOpnd()) { if (src2->GetIsDead() && !src1->GetIsDead() && !src1->IsEqual(dst)) { // Swap if src2 is dead, as the reg can be reuse for the dst for opEqs like on x86 (ADD r1, r2) } else if (src2->IsEqual(dst)) { // Helps lowering of opEqs } else { return; } // Make sure we don't swap 2 srcs with valueOf calls. 
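// (Swapping is unsafe when a source may trigger an implicit valueOf/toString call, since reordering the operands would
// also reorder those observable side effects; hence the primitive value-type checks below.)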
if (OpCodeAttr::OpndHasImplicitCall(instr->m_opcode)) { if (instr->IsBranchInstr()) { if (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive()) { return; } } else if (!src1->GetValueType().IsPrimitive() && !src2->GetValueType().IsPrimitive()) { return; } } } else { return; } Js::OpCode opcode = instr->m_opcode; switch (opcode) { case Js::OpCode::And_A: case Js::OpCode::Mul_A: case Js::OpCode::Or_A: case Js::OpCode::Xor_A: case Js::OpCode::And_I4: case Js::OpCode::Mul_I4: case Js::OpCode::Or_I4: case Js::OpCode::Xor_I4: case Js::OpCode::Add_I4: swap_srcs: if (!instr->GetSrc2()->IsImmediateOpnd()) { instr->m_opcode = opcode; instr->SwapOpnds(); Value *tempVal = *pSrc1Val; *pSrc1Val = *pSrc2Val; *pSrc2Val = tempVal; return; } break; case Js::OpCode::BrSrEq_A: case Js::OpCode::BrSrNotNeq_A: case Js::OpCode::BrEq_I4: goto swap_srcs; case Js::OpCode::BrSrNeq_A: case Js::OpCode::BrNeq_A: case Js::OpCode::BrSrNotEq_A: case Js::OpCode::BrNotEq_A: case Js::OpCode::BrNeq_I4: goto swap_srcs; case Js::OpCode::BrGe_A: opcode = Js::OpCode::BrLe_A; goto swap_srcs; case Js::OpCode::BrNotGe_A: opcode = Js::OpCode::BrNotLe_A; goto swap_srcs; case Js::OpCode::BrGe_I4: opcode = Js::OpCode::BrLe_I4; goto swap_srcs; case Js::OpCode::BrGt_A: opcode = Js::OpCode::BrLt_A; goto swap_srcs; case Js::OpCode::BrNotGt_A: opcode = Js::OpCode::BrNotLt_A; goto swap_srcs; case Js::OpCode::BrGt_I4: opcode = Js::OpCode::BrLt_I4; goto swap_srcs; case Js::OpCode::BrLe_A: opcode = Js::OpCode::BrGe_A; goto swap_srcs; case Js::OpCode::BrNotLe_A: opcode = Js::OpCode::BrNotGe_A; goto swap_srcs; case Js::OpCode::BrLe_I4: opcode = Js::OpCode::BrGe_I4; goto swap_srcs; case Js::OpCode::BrLt_A: opcode = Js::OpCode::BrGt_A; goto swap_srcs; case Js::OpCode::BrNotLt_A: opcode = Js::OpCode::BrNotGt_A; goto swap_srcs; case Js::OpCode::BrLt_I4: opcode = Js::OpCode::BrGt_I4; goto swap_srcs; case Js::OpCode::BrEq_A: case Js::OpCode::BrNotNeq_A: case Js::OpCode::CmEq_A: case Js::OpCode::CmNeq_A: // this == "" not the same as "" == this... if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())) { return; } goto swap_srcs; case Js::OpCode::CmGe_A: if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())) { return; } opcode = Js::OpCode::CmLe_A; goto swap_srcs; case Js::OpCode::CmGt_A: if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())) { return; } opcode = Js::OpCode::CmLt_A; goto swap_srcs; case Js::OpCode::CmLe_A: if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())) { return; } opcode = Js::OpCode::CmGe_A; goto swap_srcs; case Js::OpCode::CmLt_A: if (!src1->IsImmediateOpnd() && (!src1->GetValueType().IsPrimitive() || !src2->GetValueType().IsPrimitive())) { return; } opcode = Js::OpCode::CmGt_A; goto swap_srcs; case Js::OpCode::CallI: case Js::OpCode::CallIFixed: case Js::OpCode::NewScObject: case Js::OpCode::NewScObjectSpread: case Js::OpCode::NewScObjArray: case Js::OpCode::NewScObjArraySpread: case Js::OpCode::NewScObjectNoCtor: // Don't insert load to register if the function operand is a fixed function. if (instr->HasFixedFunctionAddressTarget()) { return; } break; // Can't do add because <32 + "Hello"> isn't equal to <"Hello" + 32> // Lower can do the swap. Other op-codes listed below don't need immediate source hoisting, as the fast paths handle it, // or the lowering handles the hoisting. 
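// For example: in JavaScript, 32 + "Hello" produces "32Hello" while "Hello" + 32 produces "Hello32", so the Add_A srcs
// below are only swapped when src1 is known to be a float (purely numeric addition commutes).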
case Js::OpCode::Add_A: if (src1->IsFloat()) { goto swap_srcs; } return; case Js::OpCode::Sub_I4: case Js::OpCode::Neg_I4: case Js::OpCode::Not_I4: case Js::OpCode::NewScFunc: case Js::OpCode::NewScGenFunc: case Js::OpCode::NewScArray: case Js::OpCode::NewScIntArray: case Js::OpCode::NewScFltArray: case Js::OpCode::NewScArrayWithMissingValues: case Js::OpCode::NewRegEx: case Js::OpCode::Ld_A: case Js::OpCode::Ld_I4: case Js::OpCode::ThrowRuntimeError: case Js::OpCode::TrapIfMinIntOverNegOne: case Js::OpCode::TrapIfTruncOverflow: case Js::OpCode::TrapIfZero: case Js::OpCode::FromVar: case Js::OpCode::Conv_Prim: case Js::OpCode::LdC_A_I4: case Js::OpCode::LdStr: case Js::OpCode::InitFld: case Js::OpCode::InitRootFld: case Js::OpCode::StartCall: case Js::OpCode::ArgOut_A: case Js::OpCode::ArgOut_A_Inline: case Js::OpCode::ArgOut_A_Dynamic: case Js::OpCode::ArgOut_A_FromStackArgs: case Js::OpCode::ArgOut_A_InlineBuiltIn: case Js::OpCode::ArgOut_A_InlineSpecialized: case Js::OpCode::ArgOut_A_SpreadArg: case Js::OpCode::InlineeEnd: case Js::OpCode::EndCallForPolymorphicInlinee: case Js::OpCode::InlineeMetaArg: case Js::OpCode::InlineBuiltInEnd: case Js::OpCode::InlineNonTrackingBuiltInEnd: case Js::OpCode::CallHelper: case Js::OpCode::LdElemUndef: case Js::OpCode::LdElemUndefScoped: case Js::OpCode::RuntimeTypeError: case Js::OpCode::RuntimeReferenceError: case Js::OpCode::Ret: case Js::OpCode::NewScObjectSimple: case Js::OpCode::NewScObjectLiteral: case Js::OpCode::StFld: case Js::OpCode::StRootFld: case Js::OpCode::StSlot: case Js::OpCode::StSlotChkUndecl: case Js::OpCode::StElemC: case Js::OpCode::StArrSegElemC: case Js::OpCode::StElemI_A: case Js::OpCode::StElemI_A_Strict: case Js::OpCode::CallDirect: case Js::OpCode::BrNotHasSideEffects: case Js::OpCode::NewConcatStrMulti: case Js::OpCode::NewConcatStrMultiBE: case Js::OpCode::ExtendArg_A: #ifdef ENABLE_DOM_FAST_PATH case Js::OpCode::DOMFastPathGetter: case Js::OpCode::DOMFastPathSetter: #endif case Js::OpCode::NewScopeSlots: case Js::OpCode::NewScopeSlotsWithoutPropIds: case Js::OpCode::NewStackScopeSlots: case Js::OpCode::IsInst: case Js::OpCode::BailOnEqual: case Js::OpCode::BailOnNotEqual: case Js::OpCode::StArrViewElem: return; } if (!src1->IsImmediateOpnd()) { return; } // The fast paths or lowering of the remaining instructions may not support handling immediate opnds for the first src. The // immediate src1 is hoisted here into a separate instruction. if (src1->IsIntConstOpnd()) { IR::Instr *newInstr = instr->HoistSrc1(Js::OpCode::Ld_I4); ToInt32Dst(newInstr, newInstr->GetDst()->AsRegOpnd(), this->currentBlock); } else if (src1->IsInt64ConstOpnd()) { instr->HoistSrc1(Js::OpCode::Ld_I4); } else { instr->HoistSrc1(Js::OpCode::Ld_A); } src1 = instr->GetSrc1(); src1->AsRegOpnd()->m_sym->SetIsConst(); } // Clear the ValueMap pf the values invalidated by this instr. 
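// (Note: three independent categories of kills are handled by the helpers below -- field/property values, general
// values, and array value info; an instruction that may make implicit calls can invalidate values in all three.)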
void GlobOpt::ProcessKills(IR::Instr *instr) { this->ProcessFieldKills(instr); this->ProcessValueKills(instr); this->ProcessArrayValueKills(instr); } bool GlobOpt::OptIsInvariant(IR::Opnd *src, BasicBlock *block, Loop *loop, Value *srcVal, bool isNotTypeSpecConv, bool allowNonPrimitives) { if(!loop->CanHoistInvariants()) { return false; } Sym *sym; switch(src->GetKind()) { case IR::OpndKindAddr: case IR::OpndKindFloatConst: case IR::OpndKindIntConst: return true; case IR::OpndKindReg: sym = src->AsRegOpnd()->m_sym; break; case IR::OpndKindSym: sym = src->AsSymOpnd()->m_sym; if (src->AsSymOpnd()->IsPropertySymOpnd()) { if (src->AsSymOpnd()->AsPropertySymOpnd()->IsTypeChecked()) { // We do not handle hoisting these yet. We might be hoisting this across the instr with the type check protecting this one. // And somehow, the dead-store pass now removes the type check on that instr later on... // For CheckFixedFld, there is no benefit hoisting these if they don't have a type check as they won't generate code. return false; } } break; case IR::OpndKindHelperCall: // Helper calls, like the private slot getter, can be invariant. // Consider moving more math builtin to invariant? return HelperMethodAttributes::IsInVariant(src->AsHelperCallOpnd()->m_fnHelper); default: return false; } return OptIsInvariant(sym, block, loop, srcVal, isNotTypeSpecConv, allowNonPrimitives); } bool GlobOpt::OptIsInvariant(Sym *sym, BasicBlock *block, Loop *loop, Value *srcVal, bool isNotTypeSpecConv, bool allowNonPrimitives, Value **loopHeadValRef) { Value *localLoopHeadVal; if(!loopHeadValRef) { loopHeadValRef = &localLoopHeadVal; } Value *&loopHeadVal = *loopHeadValRef; loopHeadVal = nullptr; if(!loop->CanHoistInvariants()) { return false; } if (sym->IsStackSym()) { if (sym->AsStackSym()->IsTypeSpec()) { StackSym *varSym = sym->AsStackSym()->GetVarEquivSym(this->func); // Make sure the int32/float64 version of this is available. // Note: We could handle this by converting the src, but usually the // conversion is hoistable if this is hoistable anyway. // In some weird cases it may not be however, so we'll bail out. if (sym->AsStackSym()->IsInt32()) { Assert(block->globOptData.liveInt32Syms->Test(varSym->m_id)); if (!loop->landingPad->globOptData.liveInt32Syms->Test(varSym->m_id) || (loop->landingPad->globOptData.liveLossyInt32Syms->Test(varSym->m_id) && !block->globOptData.liveLossyInt32Syms->Test(varSym->m_id))) { // Either the int32 sym is not live in the landing pad, or it's lossy in the landing pad and the // instruction's block is using the lossless version. In either case, the instruction cannot be hoisted // without doing a conversion of this operand. return false; } } else if (sym->AsStackSym()->IsFloat64()) { if (!loop->landingPad->globOptData.liveFloat64Syms->Test(varSym->m_id)) { return false; } } #ifdef ENABLE_SIMDJS else { Assert(sym->AsStackSym()->IsSimd128()); if (!loop->landingPad->globOptData.liveSimd128F4Syms->Test(varSym->m_id) && !loop->landingPad->globOptData.liveSimd128I4Syms->Test(varSym->m_id)) { return false; } } #endif sym = sym->AsStackSym()->GetVarEquivSym(this->func); } else { // Make sure the var version of this is available. // Note: We could handle this by converting the src, but usually the // conversion is hoistable if this is hoistable anyway. // In some weird cases it may not be however, so we'll bail out. 
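// (The landing pad is the block the hoisted instruction would be moved into, so the operand must already be live there
// in the required representation; otherwise hoisting would first require inserting an additional conversion.)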
if (!loop->landingPad->globOptData.liveVarSyms->Test(sym->m_id)) { return false; } } } else if (sym->IsPropertySym()) { if (!loop->landingPad->globOptData.liveFields->Test(sym->m_id)) { return false; } } else { return false; } // We rely on having a value. if (srcVal == NULL) { return false; } // A symbol is invariant if its current value is the same as it was upon entering the loop. loopHeadVal = loop->landingPad->globOptData.FindValue(sym); if (loopHeadVal == NULL || loopHeadVal->GetValueNumber() != srcVal->GetValueNumber()) { return false; } // Can't hoist non-primitives, unless we have safeguards against valueof/tostring. Additionally, we need to consider // the value annotations on the source *before* the loop: if we hoist this instruction outside the loop, we can't // necessarily rely on type annotations added (and enforced) earlier in the loop's body. // // It might look as though !loopHeadVal->GetValueInfo()->IsPrimitive() implies // !loop->landingPad->globOptData.IsTypeSpecialized(sym), but it turns out that this is not always the case. We // encountered a test case in which we had previously hoisted a FromVar (to float 64) instruction, but its bailout code was // BailoutPrimitiveButString, rather than BailoutNumberOnly, which would have allowed us to conclude that the dest was // definitely a float64. Instead, it was only *likely* a float64, causing IsPrimitive to return false. if (!allowNonPrimitives && !loopHeadVal->GetValueInfo()->IsPrimitive() && !loop->landingPad->globOptData.IsTypeSpecialized(sym)) { return false; } if(!isNotTypeSpecConv && loop->symsDefInLoop->Test(sym->m_id)) { // Typically, a sym is considered invariant if it has the same value in the current block and in the loop landing pad. // The sym may have had a different value earlier in the loop or on the back-edge, but as long as it's reassigned to its // value outside the loop, it would be considered invariant in this block. Consider that case: // s1 = s2[invariant] // <loop start> // s1 = s2[invariant] // // s1 now has the same value as in the landing pad, and is considered invariant // s1 += s3 // // s1 is not invariant here, or on the back-edge // ++s3 // s3 is not invariant, so the add above cannot be hoisted // <loop end> // // A problem occurs at the point of (s1 += s3) when: // - At (s1 = s2) inside the loop, s1 was made to be the sym store of that value. This by itself is legal, because // after that transfer, s1 and s2 have the same value. // - (s1 += s3) is type-specialized but s1 is not specialized in the loop header. This happens when s1 is not // specialized entering the loop, and since s1 is not used before it's defined in the loop, it's not specialized // on back-edges. // // With that, at (s1 += s3), the conversion of s1 to the type-specialized version would be hoisted because s1 is // invariant just before that instruction. Since this add is specialized, the specialized version of the sym is modified // in the loop without a reassignment at (s1 = s2) inside the loop, and (s1 += s3) would then use an incorrect value of // s1 (it would use the value of s1 from the previous loop iteration, instead of using the value of s2). // // The problem here, is that we cannot hoist the conversion of s1 into its specialized version across the assignment // (s1 = s2) inside the loop. So for the purposes of type specialization, don't consider a sym invariant if it has a def // inside the loop. 
return false; } // For values with an int range, require additionally that the range is the same as in the landing pad, as the range may // have been changed on this path based on branches, and int specialization and invariant hoisting may rely on the range // being the same. For type spec conversions, only require that if the value is an int constant in the current block, that // it is also an int constant with the same value in the landing pad. Other range differences don't matter for type spec. IntConstantBounds srcIntConstantBounds, loopHeadIntConstantBounds; if(srcVal->GetValueInfo()->TryGetIntConstantBounds(&srcIntConstantBounds) && (isNotTypeSpecConv || srcIntConstantBounds.IsConstant()) && ( !loopHeadVal->GetValueInfo()->TryGetIntConstantBounds(&loopHeadIntConstantBounds) || loopHeadIntConstantBounds.LowerBound() != srcIntConstantBounds.LowerBound() || loopHeadIntConstantBounds.UpperBound() != srcIntConstantBounds.UpperBound() )) { return false; } // If the loopHeadVal is primitive, the current value should be as well. This really should be // srcVal->GetValueInfo()->IsPrimitive() instead of IsLikelyPrimitive, but this stronger assertion // doesn't hold in some cases when this method is called out of the array code. Assert((!loopHeadVal->GetValueInfo()->IsPrimitive()) || srcVal->GetValueInfo()->IsLikelyPrimitive()); return true; } bool GlobOpt::OptIsInvariant( IR::Instr *instr, BasicBlock *block, Loop *loop, Value *src1Val, Value *src2Val, bool isNotTypeSpecConv, const bool forceInvariantHoisting) { if (!loop->CanHoistInvariants()) { return false; } if (!OpCodeAttr::CanCSE(instr->m_opcode)) { return false; } bool allowNonPrimitives = !OpCodeAttr::OpndHasImplicitCall(instr->m_opcode); switch(instr->m_opcode) { // Can't legally hoist these case Js::OpCode::LdLen_A: return false; //Can't Hoist BailOnNotStackArgs, as it is necessary as InlineArgsOptimization relies on this opcode //to decide whether to throw rejit exception or not. case Js::OpCode::BailOnNotStackArgs: return false; // Usually not worth hoisting these case Js::OpCode::LdStr: case Js::OpCode::Ld_A: case Js::OpCode::Ld_I4: case Js::OpCode::LdC_A_I4: if(!forceInvariantHoisting) { return false; } break; // Can't hoist these outside the function it's for. The LdArgumentsFromFrame for an inlinee depends on the inlinee meta arg // that holds the arguments object, which is only initialized at the start of the inlinee. So, can't hoist this outside the // inlinee. case Js::OpCode::LdArgumentsFromFrame: if(instr->m_func != loop->GetFunc()) { return false; } break; case Js::OpCode::FromVar: if (instr->HasBailOutInfo()) { allowNonPrimitives = true; } break; case Js::OpCode::CheckObjType: // Bug 11712101: If the operand is a field, ensure that its containing object type is invariant // before hoisting -- that is, don't hoist a CheckObjType over a DeleteFld on that object. // (CheckObjType only checks the operand and its immediate parent, so we don't need to go // any farther up the object graph.) 
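// Illustrative scenario: if the loop body performs `delete o.p` and later a CheckObjType on a property of `o`, hoisting
// the check above the delete would validate a type that the delete invalidates, so the object-type sym itself must be
// invariant over the loop.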
Assert(instr->GetSrc1()); PropertySym *propertySym = instr->GetSrc1()->AsPropertySymOpnd()->GetPropertySym(); if (propertySym->HasObjectTypeSym()) { StackSym *objectTypeSym = propertySym->GetObjectTypeSym(); if (!this->OptIsInvariant(objectTypeSym, block, loop, this->CurrentBlockData()->FindValue(objectTypeSym), true, true)) { return false; } } break; } IR::Opnd *dst = instr->GetDst(); if (dst && !dst->IsRegOpnd()) { return false; } IR::Opnd *src1 = instr->GetSrc1(); if (src1) { if (!this->OptIsInvariant(src1, block, loop, src1Val, isNotTypeSpecConv, allowNonPrimitives)) { return false; } IR::Opnd *src2 = instr->GetSrc2(); if (src2) { if (!this->OptIsInvariant(src2, block, loop, src2Val, isNotTypeSpecConv, allowNonPrimitives)) { return false; } } } return true; } bool GlobOpt::OptDstIsInvariant(IR::RegOpnd *dst) { StackSym *dstSym = dst->m_sym; if (dstSym->IsTypeSpec()) { // The type-specialized sym may be single def, but not the original... dstSym = dstSym->GetVarEquivSym(this->func); } return (dstSym->m_isSingleDef); } void GlobOpt::OptHoistUpdateValueType( Loop* loop, IR::Instr* instr, IR::Opnd* srcOpnd, Value* opndVal) { if (opndVal == nullptr || instr->m_opcode == Js::OpCode::FromVar) { return; } Sym* opndSym = srcOpnd->GetSym();; if (opndSym) { BasicBlock* landingPad = loop->landingPad; Value* opndValueInLandingPad = landingPad->globOptData.FindValue(opndSym); Assert(opndVal->GetValueNumber() == opndValueInLandingPad->GetValueNumber()); ValueType opndValueTypeInLandingPad = opndValueInLandingPad->GetValueInfo()->Type(); if (srcOpnd->GetValueType() != opndValueTypeInLandingPad) { if (instr->m_opcode == Js::OpCode::SetConcatStrMultiItemBE) { Assert(!opndValueTypeInLandingPad.IsString()); Assert(instr->GetDst()); IR::RegOpnd* strOpnd = IR::RegOpnd::New(TyVar, instr->m_func); strOpnd->SetValueType(ValueType::String); strOpnd->SetValueTypeFixed(); IR::Instr* convPrimStrInstr = IR::Instr::New(Js::OpCode::Conv_PrimStr, strOpnd, srcOpnd->Use(instr->m_func), instr->m_func); instr->ReplaceSrc(srcOpnd, strOpnd); if (loop->bailOutInfo->bailOutInstr) { loop->bailOutInfo->bailOutInstr->InsertBefore(convPrimStrInstr); } else { landingPad->InsertAfter(convPrimStrInstr); } } srcOpnd->SetValueType(opndValueTypeInLandingPad); } if (opndSym->IsPropertySym()) { // Also fix valueInfo on objPtr StackSym* opndObjPtrSym = opndSym->AsPropertySym()->m_stackSym; Value* opndObjPtrSymValInLandingPad = landingPad->globOptData.FindValue(opndObjPtrSym); ValueInfo* opndObjPtrSymValueInfoInLandingPad = opndObjPtrSymValInLandingPad->GetValueInfo(); srcOpnd->AsSymOpnd()->SetPropertyOwnerValueType(opndObjPtrSymValueInfoInLandingPad->Type()); } } } void GlobOpt::OptHoistInvariant( IR::Instr *instr, BasicBlock *block, Loop *loop, Value *dstVal, Value *const src1Val, Value *const src2Val, bool isNotTypeSpecConv, bool lossy, IR::BailOutKind bailoutKind) { BasicBlock *landingPad = loop->landingPad; IR::Opnd* src1 = instr->GetSrc1(); if (src1) { // We are hoisting this instruction possibly past other uses, which might invalidate the last use info. Clear it. OptHoistUpdateValueType(loop, instr, src1, src1Val); if (src1->IsRegOpnd()) { src1->AsRegOpnd()->m_isTempLastUse = false; } IR::Opnd* src2 = instr->GetSrc2(); if (src2) { OptHoistUpdateValueType(loop, instr, src2, src2Val); if (src2->IsRegOpnd()) { src2->AsRegOpnd()->m_isTempLastUse = false; } } } IR::RegOpnd *dst = instr->GetDst() ? 
instr->GetDst()->AsRegOpnd() : nullptr; if(dst) { switch (instr->m_opcode) { case Js::OpCode::CmEq_I4: case Js::OpCode::CmNeq_I4: case Js::OpCode::CmLt_I4: case Js::OpCode::CmLe_I4: case Js::OpCode::CmGt_I4: case Js::OpCode::CmGe_I4: case Js::OpCode::CmUnLt_I4: case Js::OpCode::CmUnLe_I4: case Js::OpCode::CmUnGt_I4: case Js::OpCode::CmUnGe_I4: // These operations are a special case. They generate a lossy int value, and the var sym is initialized using // Conv_Bool. A sym cannot be live only as a lossy int sym, the var needs to be live as well since the lossy int // sym cannot be used to convert to var. We don't know however, whether the Conv_Bool will be hoisted. The idea // currently is that the sym is only used on the path in which it is initialized inside the loop. So, don't // hoist any liveness info for the dst. if (!this->GetIsAsmJSFunc()) { lossy = true; } break; case Js::OpCode::FromVar: { StackSym* src1StackSym = IR::RegOpnd::TryGetStackSym(instr->GetSrc1()); if (instr->HasBailOutInfo()) { IR::BailOutKind instrBailoutKind = instr->GetBailOutKind(); #ifdef ENABLE_SIMDJS Assert(instrBailoutKind == IR::BailOutIntOnly || instrBailoutKind == IR::BailOutExpectingInteger || instrBailoutKind == IR::BailOutOnNotPrimitive || instrBailoutKind == IR::BailOutNumberOnly || instrBailoutKind == IR::BailOutPrimitiveButString || instrBailoutKind == IR::BailOutSimd128F4Only || instrBailoutKind == IR::BailOutSimd128I4Only); #else Assert(instrBailoutKind == IR::BailOutIntOnly || instrBailoutKind == IR::BailOutExpectingInteger || instrBailoutKind == IR::BailOutOnNotPrimitive || instrBailoutKind == IR::BailOutNumberOnly || instrBailoutKind == IR::BailOutPrimitiveButString); #endif } else if (src1StackSym && bailoutKind != IR::BailOutInvalid) { // We may be hoisting FromVar from a region where it didn't need a bailout (src1 had a definite value type) to a region // where it would. In such cases, the FromVar needs a bailout based on the value type of src1 in its new position. Assert(!src1StackSym->IsTypeSpec()); Value* landingPadSrc1val = landingPad->globOptData.FindValue(src1StackSym); Assert(src1Val->GetValueNumber() == landingPadSrc1val->GetValueNumber()); ValueInfo *src1ValueInfo = src1Val->GetValueInfo(); ValueInfo *landingPadSrc1ValueInfo = landingPadSrc1val->GetValueInfo(); IRType dstType = dst->GetType(); const auto AddBailOutToFromVar = [&]() { instr->GetSrc1()->SetValueType(landingPadSrc1val->GetValueInfo()->Type()); EnsureBailTarget(loop); if (block->IsLandingPad()) { instr = instr->ConvertToBailOutInstr(instr, bailoutKind, loop->bailOutInfo->bailOutOffset); } else { instr = instr->ConvertToBailOutInstr(instr, bailoutKind); } }; // A definite type in the source position and not a definite type in the destination (landing pad) // and no bailout on the instruction; we should put a bailout on the hoisted instruction. 
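// Illustrative case: a FromVar whose src1 was a definite int inside the loop (so it needed no bailout there) but is
// only "likely int" in the landing pad must re-check that assumption once hoisted, e.g. via BailOutOnNotPrimitive for
// the lossy-int case handled below.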
if (dstType == TyInt32) { if (lossy) { if ((src1ValueInfo->IsPrimitive() || block->globOptData.IsTypeSpecialized(src1StackSym)) && // didn't need a lossy type spec bailout in the source block (!landingPadSrc1ValueInfo->IsPrimitive() && !landingPad->globOptData.IsTypeSpecialized(src1StackSym))) // needs a lossy type spec bailout in the landing pad { bailoutKind = IR::BailOutOnNotPrimitive; AddBailOutToFromVar(); } } else if (src1ValueInfo->IsInt() && !landingPadSrc1ValueInfo->IsInt()) { AddBailOutToFromVar(); } } else if ((dstType == TyFloat64 && src1ValueInfo->IsNumber() && !landingPadSrc1ValueInfo->IsNumber()) || (IRType_IsSimd128(dstType) && src1ValueInfo->IsSimd128() && !landingPadSrc1ValueInfo->IsSimd128())) { AddBailOutToFromVar(); } } break; } } if (dstVal == NULL) { dstVal = this->NewGenericValue(ValueType::Uninitialized, dst); } // ToVar/FromVar don't need a new dst because it has to be invariant if their src is invariant. bool dstDoesntNeedLoad = (!isNotTypeSpecConv && instr->m_opcode != Js::OpCode::LdC_A_I4); StackSym *varSym = dst->m_sym; if (varSym->IsTypeSpec()) { varSym = varSym->GetVarEquivSym(this->func); } Value *const landingPadDstVal = loop->landingPad->globOptData.FindValue(varSym); if(landingPadDstVal ? dstVal->GetValueNumber() != landingPadDstVal->GetValueNumber() : loop->symsDefInLoop->Test(varSym->m_id)) { // We need a temp for FromVar/ToVar if dst changes in the loop. dstDoesntNeedLoad = false; } if (!dstDoesntNeedLoad && this->OptDstIsInvariant(dst) == false) { // Keep dst in place, hoist instr using a new dst. instr->UnlinkDst(); // Set type specialization info correctly for this new sym StackSym *copyVarSym; IR::RegOpnd *copyReg; if (dst->m_sym->IsTypeSpec()) { copyVarSym = StackSym::New(TyVar, instr->m_func); StackSym *copySym = copyVarSym; if (dst->m_sym->IsInt32()) { if(lossy) { // The new sym would only be live as a lossy int since we're only hoisting the store to the int version // of the sym, and cannot be converted to var. It is not legal to have a sym only live as a lossy int, // so don't update liveness info for this sym. 
} else { block->globOptData.liveInt32Syms->Set(copyVarSym->m_id); } copySym = copySym->GetInt32EquivSym(instr->m_func); } else if (dst->m_sym->IsFloat64()) { block->globOptData.liveFloat64Syms->Set(copyVarSym->m_id); copySym = copySym->GetFloat64EquivSym(instr->m_func); } #ifdef ENABLE_SIMDJS else if (dst->IsSimd128()) { // SIMD_JS if (dst->IsSimd128F4()) { block->globOptData.liveSimd128F4Syms->Set(copyVarSym->m_id); copySym = copySym->GetSimd128F4EquivSym(instr->m_func); } else { Assert(dst->IsSimd128I4()); block->globOptData.liveSimd128I4Syms->Set(copyVarSym->m_id); copySym = copySym->GetSimd128I4EquivSym(instr->m_func); } } #endif copyReg = IR::RegOpnd::New(copySym, copySym->GetType(), instr->m_func); } else { copyReg = IR::RegOpnd::New(dst->GetType(), instr->m_func); copyVarSym = copyReg->m_sym; block->globOptData.liveVarSyms->Set(copyVarSym->m_id); } copyReg->SetValueType(dst->GetValueType()); IR::Instr *copyInstr = IR::Instr::New(Js::OpCode::Ld_A, dst, copyReg, instr->m_func); copyInstr->SetByteCodeOffset(instr); instr->SetDst(copyReg); instr->InsertBefore(copyInstr); dst->m_sym->m_mayNotBeTempLastUse = true; if (instr->GetSrc1() && instr->GetSrc1()->IsImmediateOpnd()) { // Propagate IsIntConst if appropriate switch(instr->m_opcode) { case Js::OpCode::Ld_A: case Js::OpCode::Ld_I4: case Js::OpCode::LdC_A_I4: copyReg->m_sym->SetIsConst(); break; } } ValueInfo *dstValueInfo = dstVal->GetValueInfo(); if((!dstValueInfo->GetSymStore() || dstValueInfo->GetSymStore() == varSym) && !lossy) { // The destination's value may have been transferred from one of the invariant sources, in which case we should // keep the sym store intact, as that sym will likely have a better lifetime than this new copy sym. For // instance, if we're inside a conditioned block, because we don't make the copy sym live and set its value in // all preceding blocks, this sym would not be live after exiting this block, causing this value to not // participate in copy-prop after this block. this->SetSymStoreDirect(dstValueInfo, copyVarSym); } block->globOptData.InsertNewValue(dstVal, copyReg); dst = copyReg; } } // Move to landing pad block->UnlinkInstr(instr); if (loop->bailOutInfo->bailOutInstr) { loop->bailOutInfo->bailOutInstr->InsertBefore(instr); } else { landingPad->InsertAfter(instr); } GlobOpt::MarkNonByteCodeUsed(instr); if (instr->HasBailOutInfo() || instr->HasAuxBailOut()) { Assert(loop->bailOutInfo); EnsureBailTarget(loop); // Copy bailout info of loop top. instr->ReplaceBailOutInfo(loop->bailOutInfo); } if(!dst) { return; } // The bailout info's liveness for the dst sym is not updated in loop landing pads because bailout instructions previously // hoisted into the loop's landing pad may bail out before the current type of the dst sym became live (perhaps due to this // instruction). Since the landing pad will have a shared bailout point, the bailout info cannot assume that the current // type of the dst sym was live during every bailout hoisted into the landing pad. StackSym *const dstSym = dst->m_sym; StackSym *const dstVarSym = dstSym->IsTypeSpec() ? dstSym->GetVarEquivSym(nullptr) : dstSym; Assert(dstVarSym); if(isNotTypeSpecConv || !loop->landingPad->globOptData.IsLive(dstVarSym)) { // A new dst is being hoisted, or the same single-def dst that would not be live before this block. So, make it live and // update the value info with the same value info in this block. if(lossy) { // This is a lossy conversion to int. 
The instruction was given a new dst specifically for hoisting, so this new dst // will not be live as a var before this block. A sym cannot be live only as a lossy int sym, the var needs to be // live as well since the lossy int sym cannot be used to convert to var. Since the var version of the sym is not // going to be initialized, don't hoist any liveness info for the dst. The sym is only going to be used on the path // in which it is initialized inside the loop. Assert(dstSym->IsTypeSpec()); Assert(dstSym->IsInt32()); return; } // Check if the dst value was transferred from the src. If so, the value transfer needs to be replicated. bool isTransfer = dstVal == src1Val; StackSym *transferValueOfSym = nullptr; if(isTransfer) { Assert(instr->GetSrc1()); if(instr->GetSrc1()->IsRegOpnd()) { StackSym *src1Sym = instr->GetSrc1()->AsRegOpnd()->m_sym; if(src1Sym->IsTypeSpec()) { src1Sym = src1Sym->GetVarEquivSym(nullptr); Assert(src1Sym); } if(dstVal == block->globOptData.FindValue(src1Sym)) { transferValueOfSym = src1Sym; } } } // SIMD_JS if (instr->m_opcode == Js::OpCode::ExtendArg_A) { // Check if we should have CSE'ed this EA Assert(instr->GetSrc1()); // If the dstVal symstore is not the dst itself, then we copied the Value from another expression. if (dstVal->GetValueInfo()->GetSymStore() != instr->GetDst()->GetStackSym()) { isTransfer = true; transferValueOfSym = dstVal->GetValueInfo()->GetSymStore()->AsStackSym(); } } const ValueNumber dstValueNumber = dstVal->GetValueNumber(); ValueNumber dstNewValueNumber = InvalidValueNumber; for(InvariantBlockBackwardIterator it(this, block, loop->landingPad, nullptr); it.IsValid(); it.MoveNext()) { BasicBlock *const hoistBlock = it.Block(); GlobOptBlockData &hoistBlockData = hoistBlock->globOptData; Assert(!hoistBlockData.IsLive(dstVarSym)); hoistBlockData.MakeLive(dstSym, lossy); Value *newDstValue; do { if(isTransfer) { if(transferValueOfSym) { newDstValue = hoistBlockData.FindValue(transferValueOfSym); if(newDstValue && newDstValue->GetValueNumber() == dstValueNumber) { break; } } // It's a transfer, but we don't have a sym whose value number matches in the target block. Use a new value // number since we don't know if there is already a value with the current number for the target block. if(dstNewValueNumber == InvalidValueNumber) { dstNewValueNumber = NewValueNumber(); } newDstValue = CopyValue(dstVal, dstNewValueNumber); break; } newDstValue = CopyValue(dstVal, dstValueNumber); } while(false); hoistBlockData.SetValue(newDstValue, dstVarSym); } return; } #if DBG if(instr->GetSrc1()->IsRegOpnd()) // Type spec conversion may load a constant into a dst sym { StackSym *const srcSym = instr->GetSrc1()->AsRegOpnd()->m_sym; Assert(srcSym != dstSym); // Type spec conversion must be changing the type, so the syms must be different StackSym *const srcVarSym = srcSym->IsTypeSpec() ? 
srcSym->GetVarEquivSym(nullptr) : srcSym; Assert(srcVarSym == dstVarSym); // Type spec conversion must be between variants of the same var sym } #endif bool changeValueType = false, changeValueTypeToInt = false; if(dstSym->IsTypeSpec()) { if(dst->IsInt32()) { if(!lossy) { Assert( !instr->HasBailOutInfo() || instr->GetBailOutKind() == IR::BailOutIntOnly || instr->GetBailOutKind() == IR::BailOutExpectingInteger); changeValueType = changeValueTypeToInt = true; } } else if (dst->IsFloat64()) { if(instr->HasBailOutInfo() && instr->GetBailOutKind() == IR::BailOutNumberOnly) { changeValueType = true; } } #ifdef ENABLE_SIMDJS else { // SIMD_JS Assert(dst->IsSimd128()); if (instr->HasBailOutInfo() && (instr->GetBailOutKind() == IR::BailOutSimd128F4Only || instr->GetBailOutKind() == IR::BailOutSimd128I4Only)) { changeValueType = true; } } #endif } ValueInfo *previousValueInfoBeforeUpdate = nullptr, *previousValueInfoAfterUpdate = nullptr; for(InvariantBlockBackwardIterator it( this, block, loop->landingPad, dstVarSym, dstVal->GetValueNumber()); it.IsValid(); it.MoveNext()) { BasicBlock *const hoistBlock = it.Block(); GlobOptBlockData &hoistBlockData = hoistBlock->globOptData; #if DBG // TODO: There are some odd cases with field hoisting where the sym is invariant in only part of the loop and the info // does not flow through all blocks. Un-comment the verification below after PRE replaces field hoisting. //// Verify that the src sym is live as the required type, and that the conversion is valid //Assert(IsLive(dstVarSym, &hoistBlockData)); //if(instr->GetSrc1()->IsRegOpnd()) //{ // IR::RegOpnd *const src = instr->GetSrc1()->AsRegOpnd(); // StackSym *const srcSym = instr->GetSrc1()->AsRegOpnd()->m_sym; // if(srcSym->IsTypeSpec()) // { // if(src->IsInt32()) // { // Assert(hoistBlockData.liveInt32Syms->Test(dstVarSym->m_id)); // Assert(!hoistBlockData.liveLossyInt32Syms->Test(dstVarSym->m_id)); // shouldn't try to convert a lossy int32 to anything // } // else // { // Assert(src->IsFloat64()); // Assert(hoistBlockData.liveFloat64Syms->Test(dstVarSym->m_id)); // if(dstSym->IsTypeSpec() && dst->IsInt32()) // { // Assert(lossy); // shouldn't try to do a lossless conversion from float64 to int32 // } // } // } // else // { // Assert(hoistBlockData.liveVarSyms->Test(dstVarSym->m_id)); // } //} //if(dstSym->IsTypeSpec() && dst->IsInt32()) //{ // // If the sym is already specialized as required in the block to which we are attempting to hoist the conversion, // // that info should have flowed into this block // if(lossy) // { // Assert(!hoistBlockData.liveInt32Syms->Test(dstVarSym->m_id)); // } // else // { // Assert(!IsInt32TypeSpecialized(dstVarSym, hoistBlock)); // } //} #endif hoistBlockData.MakeLive(dstSym, lossy); if(!changeValueType) { continue; } Value *const hoistBlockValue = it.InvariantSymValue(); ValueInfo *const hoistBlockValueInfo = hoistBlockValue->GetValueInfo(); if(hoistBlockValueInfo == previousValueInfoBeforeUpdate) { if(hoistBlockValueInfo != previousValueInfoAfterUpdate) { HoistInvariantValueInfo(previousValueInfoAfterUpdate, hoistBlockValue, hoistBlock); } } else { previousValueInfoBeforeUpdate = hoistBlockValueInfo; ValueInfo *const newValueInfo = changeValueTypeToInt ? hoistBlockValueInfo->SpecializeToInt32(alloc) : hoistBlockValueInfo->SpecializeToFloat64(alloc); previousValueInfoAfterUpdate = newValueInfo; ChangeValueInfo(changeValueTypeToInt ? 
nullptr : hoistBlock, hoistBlockValue, newValueInfo); } } } bool GlobOpt::TryHoistInvariant( IR::Instr *instr, BasicBlock *block, Value *dstVal, Value *src1Val, Value *src2Val, bool isNotTypeSpecConv, const bool lossy, const bool forceInvariantHoisting, IR::BailOutKind bailoutKind) { Assert(!this->IsLoopPrePass()); if (OptIsInvariant(instr, block, block->loop, src1Val, src2Val, isNotTypeSpecConv, forceInvariantHoisting)) { #if DBG if (Js::Configuration::Global.flags.Trace.IsEnabled(Js::InvariantsPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId())) { Output::Print(_u(" **** INVARIANT *** ")); instr->Dump(); } #endif #if ENABLE_DEBUG_CONFIG_OPTIONS if (Js::Configuration::Global.flags.TestTrace.IsEnabled(Js::InvariantsPhase)) { Output::Print(_u(" **** INVARIANT *** ")); Output::Print(_u("%s \n"), Js::OpCodeUtil::GetOpCodeName(instr->m_opcode)); } #endif Loop *loop = block->loop; // Try hoisting from to outer most loop while (loop->parent && OptIsInvariant(instr, block, loop->parent, src1Val, src2Val, isNotTypeSpecConv, forceInvariantHoisting)) { loop = loop->parent; } // Record the byte code use here since we are going to move this instruction up if (isNotTypeSpecConv) { InsertNoImplicitCallUses(instr); this->CaptureByteCodeSymUses(instr); this->InsertByteCodeUses(instr, true); } #if DBG else { PropertySym *propertySymUse = NULL; NoRecoverMemoryJitArenaAllocator tempAllocator(_u("BE-GlobOpt-Temp"), this->alloc->GetPageAllocator(), Js::Throw::OutOfMemory); BVSparse<JitArenaAllocator> * tempByteCodeUse = JitAnew(&tempAllocator, BVSparse<JitArenaAllocator>, &tempAllocator); GlobOpt::TrackByteCodeSymUsed(instr, tempByteCodeUse, &propertySymUse); Assert(tempByteCodeUse->Count() == 0 && propertySymUse == NULL); } #endif OptHoistInvariant(instr, block, loop, dstVal, src1Val, src2Val, isNotTypeSpecConv, lossy, bailoutKind); return true; } return false; } InvariantBlockBackwardIterator::InvariantBlockBackwardIterator( GlobOpt *const globOpt, BasicBlock *const exclusiveBeginBlock, BasicBlock *const inclusiveEndBlock, StackSym *const invariantSym, const ValueNumber invariantSymValueNumber) : globOpt(globOpt), exclusiveEndBlock(inclusiveEndBlock->prev), invariantSym(invariantSym), invariantSymValueNumber(invariantSymValueNumber), block(exclusiveBeginBlock) #if DBG , inclusiveEndBlock(inclusiveEndBlock) #endif { Assert(exclusiveBeginBlock); Assert(inclusiveEndBlock); Assert(!inclusiveEndBlock->isDeleted); Assert(exclusiveBeginBlock != inclusiveEndBlock); Assert(!invariantSym == (invariantSymValueNumber == InvalidValueNumber)); MoveNext(); } bool InvariantBlockBackwardIterator::IsValid() const { return block != exclusiveEndBlock; } void InvariantBlockBackwardIterator::MoveNext() { Assert(IsValid()); while(true) { #if DBG BasicBlock *const previouslyIteratedBlock = block; #endif block = block->prev; if(!IsValid()) { Assert(previouslyIteratedBlock == inclusiveEndBlock); break; } if(block->isDeleted) { continue; } if(!block->globOptData.HasData()) { // This block's info has already been merged with all of its successors continue; } if(!invariantSym) { break; } invariantSymValue = block->globOptData.FindValue(invariantSym); if(!invariantSymValue || invariantSymValue->GetValueNumber() != invariantSymValueNumber) { // BailOnNoProfile and throw blocks are not moved outside loops. A sym table cleanup on these paths may delete the // values. Field hoisting also has some odd cases where the hoisted stack sym is invariant in only part of the loop. 
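// (Such blocks are simply skipped by the iterator rather than treated as an error.)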
continue; } break; } } BasicBlock * InvariantBlockBackwardIterator::Block() const { Assert(IsValid()); return block; } Value * InvariantBlockBackwardIterator::InvariantSymValue() const { Assert(IsValid()); Assert(invariantSym); return invariantSymValue; } void GlobOpt::HoistInvariantValueInfo( ValueInfo *const invariantValueInfoToHoist, Value *const valueToUpdate, BasicBlock *const targetBlock) { Assert(invariantValueInfoToHoist); Assert(valueToUpdate); Assert(targetBlock); // Why are we trying to change the value type of the type sym value? Asserting here to make sure we don't deep copy the type sym's value info. Assert(!invariantValueInfoToHoist->IsJsType()); Sym *const symStore = valueToUpdate->GetValueInfo()->GetSymStore(); ValueInfo *newValueInfo; if(invariantValueInfoToHoist->GetSymStore() == symStore) { newValueInfo = invariantValueInfoToHoist; } else { newValueInfo = invariantValueInfoToHoist->Copy(alloc); this->SetSymStoreDirect(newValueInfo, symStore); } ChangeValueInfo(targetBlock, valueToUpdate, newValueInfo, true); } // static bool GlobOpt::DoInlineArgsOpt(Func const * func) { Func const * topFunc = func->GetTopFunc(); Assert(topFunc != func); bool doInlineArgsOpt = !PHASE_OFF(Js::InlineArgsOptPhase, topFunc) && !func->GetHasCalls() && !func->GetHasUnoptimizedArgumentsAccess() && func->m_canDoInlineArgsOpt; return doInlineArgsOpt; } bool GlobOpt::IsSwitchOptEnabled(Func const * func) { Assert(func->IsTopFunc()); return !PHASE_OFF(Js::SwitchOptPhase, func) && !func->IsSwitchOptDisabled() && func->DoGlobOpt(); } bool GlobOpt::IsSwitchOptEnabledForIntTypeSpec(Func const * func) { return IsSwitchOptEnabled(func) && !IsTypeSpecPhaseOff(func) && DoAggressiveIntTypeSpec(func); } bool GlobOpt::DoConstFold() const { return !PHASE_OFF(Js::ConstFoldPhase, func); } bool GlobOpt::IsTypeSpecPhaseOff(Func const *func) { return PHASE_OFF(Js::TypeSpecPhase, func) || func->IsJitInDebugMode() || !func->DoGlobOptsForGeneratorFunc(); } bool GlobOpt::DoTypeSpec() const { return doTypeSpec; } bool GlobOpt::DoAggressiveIntTypeSpec(Func const * func) { return !PHASE_OFF(Js::AggressiveIntTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func) && !func->IsAggressiveIntTypeSpecDisabled(); } bool GlobOpt::DoAggressiveIntTypeSpec() const { return doAggressiveIntTypeSpec; } bool GlobOpt::DoAggressiveMulIntTypeSpec() const { return doAggressiveMulIntTypeSpec; } bool GlobOpt::DoDivIntTypeSpec() const { return doDivIntTypeSpec; } // static bool GlobOpt::DoLossyIntTypeSpec(Func const * func) { return !PHASE_OFF(Js::LossyIntTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsLossyIntTypeSpecDisabled()); } bool GlobOpt::DoLossyIntTypeSpec() const { return doLossyIntTypeSpec; } // static bool GlobOpt::DoFloatTypeSpec(Func const * func) { return !PHASE_OFF(Js::FloatTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsFloatTypeSpecDisabled()) && AutoSystemInfo::Data.SSE2Available(); } bool GlobOpt::DoFloatTypeSpec() const { return doFloatTypeSpec; } bool GlobOpt::DoStringTypeSpec(Func const * func) { return !PHASE_OFF(Js::StringTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func); } // static bool GlobOpt::DoTypedArrayTypeSpec(Func const * func) { return !PHASE_OFF(Js::TypedArrayTypeSpecPhase, func) && !IsTypeSpecPhaseOff(func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsTypedArrayTypeSpecDisabled(func->IsLoopBody())) #if defined(_M_IX86) && AutoSystemInfo::Data.SSE2Available() 
#endif ; } // static bool GlobOpt::DoNativeArrayTypeSpec(Func const * func) { return !PHASE_OFF(Js::NativeArrayPhase, func) && !IsTypeSpecPhaseOff(func) #if defined(_M_IX86) && AutoSystemInfo::Data.SSE2Available() #endif ; } bool GlobOpt::DoArrayCheckHoist(Func const * const func) { Assert(func->IsTopFunc()); return !PHASE_OFF(Js::ArrayCheckHoistPhase, func) && !func->IsArrayCheckHoistDisabled() && !func->IsJitInDebugMode() && // StElemI fast path is not allowed when in debug mode, so it cannot have bailout func->DoGlobOptsForGeneratorFunc(); } bool GlobOpt::DoArrayCheckHoist() const { return doArrayCheckHoist; } bool GlobOpt::DoArrayCheckHoist(const ValueType baseValueType, Loop* loop, IR::Instr const * const instr) const { if(!DoArrayCheckHoist() || (instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func))) { return false; } if(!baseValueType.IsLikelyArrayOrObjectWithArray() || (loop ? ImplicitCallFlagsAllowOpts(loop) : ImplicitCallFlagsAllowOpts(func))) { return true; } // The function or loop does not allow disabling implicit calls, which is required to eliminate redundant JS array checks #if DBG_DUMP if((((loop ? loop->GetImplicitCallFlags() : func->m_fg->implicitCallFlags) & ~Js::ImplicitCall_External) == 0) && Js::Configuration::Global.flags.Trace.IsEnabled(Js::HostOptPhase)) { Output::Print(_u("DoArrayCheckHoist disabled for JS arrays because of external: ")); func->DumpFullFunctionName(); Output::Print(_u("\n")); Output::Flush(); } #endif return false; } bool GlobOpt::DoArrayMissingValueCheckHoist(Func const * const func) { return DoArrayCheckHoist(func) && !PHASE_OFF(Js::ArrayMissingValueCheckHoistPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsArrayMissingValueCheckHoistDisabled(func->IsLoopBody())); } bool GlobOpt::DoArrayMissingValueCheckHoist() const { return doArrayMissingValueCheckHoist; } bool GlobOpt::DoArraySegmentHoist(const ValueType baseValueType, Func const * const func) { Assert(baseValueType.IsLikelyAnyOptimizedArray()); if(!DoArrayCheckHoist(func) || PHASE_OFF(Js::ArraySegmentHoistPhase, func)) { return false; } if(!baseValueType.IsLikelyArrayOrObjectWithArray()) { return true; } return !PHASE_OFF(Js::JsArraySegmentHoistPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsJsArraySegmentHoistDisabled(func->IsLoopBody())); } bool GlobOpt::DoArraySegmentHoist(const ValueType baseValueType) const { Assert(baseValueType.IsLikelyAnyOptimizedArray()); return baseValueType.IsLikelyArrayOrObjectWithArray() ? doJsArraySegmentHoist : doArraySegmentHoist; } bool GlobOpt::DoTypedArraySegmentLengthHoist(Loop *const loop) const { if(!DoArraySegmentHoist(ValueType::GetObject(ObjectType::Int32Array))) { return false; } if(loop ? ImplicitCallFlagsAllowOpts(loop) : ImplicitCallFlagsAllowOpts(func)) { return true; } // The function or loop does not allow disabling implicit calls, which is required to eliminate redundant typed array // segment length loads. #if DBG_DUMP if((((loop ? 
loop->GetImplicitCallFlags() : func->m_fg->implicitCallFlags) & ~Js::ImplicitCall_External) == 0) && Js::Configuration::Global.flags.Trace.IsEnabled(Js::HostOptPhase)) { Output::Print(_u("DoArraySegmentLengthHoist disabled for typed arrays because of external: ")); func->DumpFullFunctionName(); Output::Print(_u("\n")); Output::Flush(); } #endif return false; } bool GlobOpt::DoArrayLengthHoist(Func const * const func) { return DoArrayCheckHoist(func) && !PHASE_OFF(Js::Phase::ArrayLengthHoistPhase, func) && (!func->HasProfileInfo() || !func->GetReadOnlyProfileInfo()->IsArrayLengthHoistDisabled(func->IsLoopBody())); } bool GlobOpt::DoArrayLengthHoist() const { return doArrayLengthHoist; } bool GlobOpt::DoEliminateArrayAccessHelperCall(Func *const func) { return DoArrayCheckHoist(func); } bool GlobOpt::DoEliminateArrayAccessHelperCall() const { return doEliminateArrayAccessHelperCall; } bool GlobOpt::DoLdLenIntSpec(IR::Instr * const instr, const ValueType baseValueType) { Assert(!instr || instr->m_opcode == Js::OpCode::LdLen_A); Assert(!instr || instr->GetDst()); Assert(!instr || instr->GetSrc1()); if(PHASE_OFF(Js::LdLenIntSpecPhase, func) || IsTypeSpecPhaseOff(func) || (func->HasProfileInfo() && func->GetReadOnlyProfileInfo()->IsLdLenIntSpecDisabled()) || (instr && !IsLoopPrePass() && instr->DoStackArgsOpt(func))) { return false; } if(instr && instr->IsProfiledInstr() && ( !instr->AsProfiledInstr()->u.ldElemInfo->GetElementType().IsLikelyInt() || instr->GetDst()->AsRegOpnd()->m_sym->m_isNotInt )) { return false; } Assert(!instr || baseValueType == instr->GetSrc1()->GetValueType()); return baseValueType.HasBeenString() || (baseValueType.IsLikelyAnyOptimizedArray() && baseValueType.GetObjectType() != ObjectType::ObjectWithArray); } bool GlobOpt::DoPathDependentValues() const { return !PHASE_OFF(Js::Phase::PathDependentValuesPhase, func); } bool GlobOpt::DoTrackRelativeIntBounds() const { return doTrackRelativeIntBounds; } bool GlobOpt::DoBoundCheckElimination() const { return doBoundCheckElimination; } bool GlobOpt::DoBoundCheckHoist() const { return doBoundCheckHoist; } bool GlobOpt::DoLoopCountBasedBoundCheckHoist() const { return doLoopCountBasedBoundCheckHoist; } bool GlobOpt::DoPowIntIntTypeSpec() const { return doPowIntIntTypeSpec; } bool GlobOpt::DoTagChecks() const { return doTagChecks; } bool GlobOpt::TrackArgumentsObject() { if (PHASE_OFF(Js::StackArgOptPhase, this->func)) { this->CannotAllocateArgumentsObjectOnStack(); return false; } return func->GetHasStackArgs(); } void GlobOpt::CannotAllocateArgumentsObjectOnStack() { func->SetHasStackArgs(false); #ifdef ENABLE_DEBUG_CONFIG_OPTIONS if (PHASE_TESTTRACE(Js::StackArgOptPhase, this->func)) { char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; Output::Print(_u("Stack args disabled for function %s(%s)\n"), func->GetJITFunctionBody()->GetDisplayName(), func->GetDebugNumberSet(debugStringBuffer)); Output::Flush(); } #endif } IR::Instr * GlobOpt::PreOptPeep(IR::Instr *instr) { if (OpCodeAttr::HasDeadFallThrough(instr->m_opcode)) { switch (instr->m_opcode) { case Js::OpCode::BailOnNoProfile: { // Handle BailOnNoProfile if (instr->HasBailOutInfo()) { if (!this->prePassLoop) { FillBailOutInfo(this->currentBlock, instr->GetBailOutInfo()); } // Already processed. 
return instr; } // Convert to bailout instr IR::Instr *nextBytecodeOffsetInstr = instr->GetNextRealInstrOrLabel(); while(nextBytecodeOffsetInstr->GetByteCodeOffset() == Js::Constants::NoByteCodeOffset) { nextBytecodeOffsetInstr = nextBytecodeOffsetInstr->GetNextRealInstrOrLabel(); Assert(!nextBytecodeOffsetInstr->IsLabelInstr()); } instr = instr->ConvertToBailOutInstr(nextBytecodeOffsetInstr, IR::BailOutOnNoProfile); instr->ClearByteCodeOffset(); instr->SetByteCodeOffset(nextBytecodeOffsetInstr); if (!this->currentBlock->loop) { FillBailOutInfo(this->currentBlock, instr->GetBailOutInfo()); } else { Assert(this->prePassLoop); } break; } case Js::OpCode::BailOnException: { Assert( ( this->func->HasTry() && this->func->DoOptimizeTry() && instr->m_prev->m_opcode == Js::OpCode::Catch && instr->m_prev->m_prev->IsLabelInstr() && instr->m_prev->m_prev->AsLabelInstr()->GetRegion()->GetType() == RegionType::RegionTypeCatch ) || ( this->func->HasFinally() && this->func->DoOptimizeTry() && instr->m_prev->AsLabelInstr() && instr->m_prev->AsLabelInstr()->GetRegion()->GetType() == RegionType::RegionTypeFinally ) ); break; } case Js::OpCode::BailOnEarlyExit: { Assert(this->func->HasFinally() && this->func->DoOptimizeTry()); break; } default: { if(this->currentBlock->loop && !this->IsLoopPrePass()) { return instr; } break; } } RemoveCodeAfterNoFallthroughInstr(instr); } return instr; } void GlobOpt::RemoveCodeAfterNoFallthroughInstr(IR::Instr *instr) { if (instr != this->currentBlock->GetLastInstr()) { // Remove dead code after bailout IR::Instr *instrDead = instr->m_next; IR::Instr *instrNext; for (; instrDead != this->currentBlock->GetLastInstr(); instrDead = instrNext) { instrNext = instrDead->m_next; if (instrNext->m_opcode == Js::OpCode::FunctionExit) { break; } this->func->m_fg->RemoveInstr(instrDead, this); } IR::Instr *instrNextBlock = instrDead->m_next; this->func->m_fg->RemoveInstr(instrDead, this); this->currentBlock->SetLastInstr(instrNextBlock->m_prev); } // Cleanup dead successors FOREACH_SUCCESSOR_BLOCK_EDITING(deadBlock, this->currentBlock, iter) { this->currentBlock->RemoveDeadSucc(deadBlock, this->func->m_fg); if (this->currentBlock->GetDataUseCount() > 0) { this->currentBlock->DecrementDataUseCount(); } } NEXT_SUCCESSOR_BLOCK_EDITING; } void GlobOpt::ProcessTryHandler(IR::Instr* instr) { Assert(instr->m_next->IsLabelInstr() && instr->m_next->AsLabelInstr()->GetRegion()->GetType() == RegionType::RegionTypeTry); Region* tryRegion = instr->m_next->AsLabelInstr()->GetRegion(); BVSparse<JitArenaAllocator> * writeThroughSymbolsSet = tryRegion->writeThroughSymbolsSet; ToVar(writeThroughSymbolsSet, this->currentBlock); } bool GlobOpt::ProcessExceptionHandlingEdges(IR::Instr* instr) { Assert(instr->m_opcode == Js::OpCode::BrOnException || instr->m_opcode == Js::OpCode::BrOnNoException); if (instr->m_opcode == Js::OpCode::BrOnException) { if (instr->AsBranchInstr()->GetTarget()->GetRegion()->GetType() == RegionType::RegionTypeCatch) { // BrOnException was added to model flow from try region to the catch region to assist // the backward pass in propagating bytecode upward exposed info from the catch block // to the try, and to handle break blocks. Removing it here as it has served its purpose // and keeping it around might also have unintended effects while merging block data for // the catch block's predecessors. // Note that the Deadstore pass will still be able to propagate bytecode upward exposed info // because it doesn't skip dead blocks for that. 
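// (Both the flow edge and the branch instruction itself are removed below, keeping the catch block's predecessor list
// consistent with the surviving instruction stream.)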
this->RemoveFlowEdgeToCatchBlock(instr); this->currentBlock->RemoveInstr(instr); return true; } else { // We add BrOnException from a finally region to early exit, remove that since it has served its purpose return this->RemoveFlowEdgeToFinallyOnExceptionBlock(instr); } } else if (instr->m_opcode == Js::OpCode::BrOnNoException) { if (instr->AsBranchInstr()->GetTarget()->GetRegion()->GetType() == RegionType::RegionTypeCatch) { this->RemoveFlowEdgeToCatchBlock(instr); } else { this->RemoveFlowEdgeToFinallyOnExceptionBlock(instr); } } return false; } void GlobOpt::InsertToVarAtDefInTryRegion(IR::Instr * instr, IR::Opnd * dstOpnd) { if ((this->currentRegion->GetType() == RegionTypeTry || this->currentRegion->GetType() == RegionTypeFinally) && dstOpnd->IsRegOpnd() && dstOpnd->AsRegOpnd()->m_sym->HasByteCodeRegSlot()) { StackSym * sym = dstOpnd->AsRegOpnd()->m_sym; if (sym->IsVar()) { return; } StackSym * varSym = sym->GetVarEquivSym(nullptr); if ((this->currentRegion->GetType() == RegionTypeTry && this->currentRegion->writeThroughSymbolsSet->Test(varSym->m_id)) || ((this->currentRegion->GetType() == RegionTypeFinally && this->currentRegion->GetMatchingTryRegion()->writeThroughSymbolsSet->Test(varSym->m_id)))) { IR::RegOpnd * regOpnd = IR::RegOpnd::New(varSym, IRType::TyVar, instr->m_func); this->ToVar(instr->m_next, regOpnd, this->currentBlock, NULL, false); } } } void GlobOpt::RemoveFlowEdgeToCatchBlock(IR::Instr * instr) { Assert(instr->IsBranchInstr()); BasicBlock * catchBlock = nullptr; BasicBlock * predBlock = nullptr; if (instr->m_opcode == Js::OpCode::BrOnException) { catchBlock = instr->AsBranchInstr()->GetTarget()->GetBasicBlock(); predBlock = this->currentBlock; } else { Assert(instr->m_opcode == Js::OpCode::BrOnNoException); IR::Instr * nextInstr = instr->GetNextRealInstrOrLabel(); Assert(nextInstr->IsLabelInstr()); IR::LabelInstr * nextLabel = nextInstr->AsLabelInstr(); if (nextLabel->GetRegion() && nextLabel->GetRegion()->GetType() == RegionTypeCatch) { catchBlock = nextLabel->GetBasicBlock(); predBlock = this->currentBlock; } else { Assert(nextLabel->m_next->IsBranchInstr() && nextLabel->m_next->AsBranchInstr()->IsUnconditional()); BasicBlock * nextBlock = nextLabel->GetBasicBlock(); IR::BranchInstr * branchToCatchBlock = nextLabel->m_next->AsBranchInstr(); IR::LabelInstr * catchBlockLabel = branchToCatchBlock->GetTarget(); Assert(catchBlockLabel->GetRegion()->GetType() == RegionTypeCatch); catchBlock = catchBlockLabel->GetBasicBlock(); predBlock = nextBlock; } } Assert(catchBlock); Assert(predBlock); if (this->func->m_fg->FindEdge(predBlock, catchBlock)) { predBlock->RemoveDeadSucc(catchBlock, this->func->m_fg); if (predBlock == this->currentBlock) { predBlock->DecrementDataUseCount(); } } } bool GlobOpt::RemoveFlowEdgeToFinallyOnExceptionBlock(IR::Instr * instr) { Assert(instr->IsBranchInstr()); if (instr->m_opcode == Js::OpCode::BrOnNoException && instr->AsBranchInstr()->m_brFinallyToEarlyExit) { // We add edge from finally to early exit block // We should not remove this edge // If a loop has continue, and we add edge in finally to continue // Break block removal can move all continues inside the loop to branch to the continue added within finally // If we get rid of this edge, then loop may loose all backedges // Ideally, doing tail duplication before globopt would enable us to remove these edges, but since we do it after globopt, keep it this way for now // See test1() in core/test/tryfinallytests.js return false; } BasicBlock * finallyBlock = nullptr; BasicBlock * predBlock 
= nullptr; if (instr->m_opcode == Js::OpCode::BrOnException) { finallyBlock = instr->AsBranchInstr()->GetTarget()->GetBasicBlock(); predBlock = this->currentBlock; } else { Assert(instr->m_opcode == Js::OpCode::BrOnNoException); IR::Instr * nextInstr = instr->GetNextRealInstrOrLabel(); Assert(nextInstr->IsLabelInstr()); IR::LabelInstr * nextLabel = nextInstr->AsLabelInstr(); if (nextLabel->GetRegion() && nextLabel->GetRegion()->GetType() == RegionTypeFinally) { finallyBlock = nextLabel->GetBasicBlock(); predBlock = this->currentBlock; } else { if (!(nextLabel->m_next->IsBranchInstr() && nextLabel->m_next->AsBranchInstr()->IsUnconditional())) { return false; } BasicBlock * nextBlock = nextLabel->GetBasicBlock(); IR::BranchInstr * branchTofinallyBlockOrEarlyExit = nextLabel->m_next->AsBranchInstr(); IR::LabelInstr * finallyBlockLabelOrEarlyExitLabel = branchTofinallyBlockOrEarlyExit->GetTarget(); finallyBlock = finallyBlockLabelOrEarlyExitLabel->GetBasicBlock(); predBlock = nextBlock; } } Assert(finallyBlock && predBlock); if (this->func->m_fg->FindEdge(predBlock, finallyBlock)) { predBlock->RemoveDeadSucc(finallyBlock, this->func->m_fg); if (instr->m_opcode == Js::OpCode::BrOnException) { this->currentBlock->RemoveInstr(instr); } if (finallyBlock->GetFirstInstr()->AsLabelInstr()->IsUnreferenced()) { // Traverse predBlocks of finallyBlock, if any of the preds have a different region, set m_hasNonBranchRef to true // If not, this label can get eliminated and an incorrect region from the predecessor can get propagated in lowered code // See test3() in tryfinallytests.js Region * finallyRegion = finallyBlock->GetFirstInstr()->AsLabelInstr()->GetRegion(); FOREACH_PREDECESSOR_BLOCK(pred, finallyBlock) { Region * predRegion = pred->GetFirstInstr()->AsLabelInstr()->GetRegion(); if (predRegion != finallyRegion) { finallyBlock->GetFirstInstr()->AsLabelInstr()->m_hasNonBranchRef = true; } } NEXT_PREDECESSOR_BLOCK; } if (predBlock == this->currentBlock) { predBlock->DecrementDataUseCount(); } } return true; } IR::Instr * GlobOpt::OptPeep(IR::Instr *instr, Value *src1Val, Value *src2Val) { IR::Opnd *dst, *src1, *src2; if (this->IsLoopPrePass()) { return instr; } switch (instr->m_opcode) { case Js::OpCode::DeadBrEqual: case Js::OpCode::DeadBrRelational: case Js::OpCode::DeadBrSrEqual: src1 = instr->GetSrc1(); src2 = instr->GetSrc2(); // These branches were turned into dead branches because they were unnecessary (branch to next, ...). // The DeadBr are necessary in case the evaluation of the sources have side-effects. 
// If we know for sure the srcs are primitive or have been type specialized, we don't need these instructions if (((src1Val && src1Val->GetValueInfo()->IsPrimitive()) || (src1->IsRegOpnd() && CurrentBlockData()->IsTypeSpecialized(src1->AsRegOpnd()->m_sym))) && ((src2Val && src2Val->GetValueInfo()->IsPrimitive()) || (src2->IsRegOpnd() && CurrentBlockData()->IsTypeSpecialized(src2->AsRegOpnd()->m_sym)))) { this->CaptureByteCodeSymUses(instr); instr->m_opcode = Js::OpCode::Nop; } break; case Js::OpCode::DeadBrOnHasProperty: src1 = instr->GetSrc1(); if (((src1Val && src1Val->GetValueInfo()->IsPrimitive()) || (src1->IsRegOpnd() && CurrentBlockData()->IsTypeSpecialized(src1->AsRegOpnd()->m_sym)))) { this->CaptureByteCodeSymUses(instr); instr->m_opcode = Js::OpCode::Nop; } break; case Js::OpCode::Ld_A: case Js::OpCode::Ld_I4: src1 = instr->GetSrc1(); dst = instr->GetDst(); if (dst->IsRegOpnd() && dst->IsEqual(src1)) { dst = instr->UnlinkDst(); if (!dst->GetIsJITOptimizedReg()) { IR::ByteCodeUsesInstr *bytecodeUse = IR::ByteCodeUsesInstr::New(instr); bytecodeUse->SetDst(dst); instr->InsertAfter(bytecodeUse); } instr->FreeSrc1(); instr->m_opcode = Js::OpCode::Nop; } break; } return instr; } void GlobOpt::OptimizeIndirUses(IR::IndirOpnd *indirOpnd, IR::Instr * *pInstr, Value **indirIndexValRef) { IR::Instr * &instr = *pInstr; Assert(!indirIndexValRef || !*indirIndexValRef); // Update value types and copy-prop the base OptSrc(indirOpnd->GetBaseOpnd(), &instr, nullptr, indirOpnd); IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd(); if (!indexOpnd) { return; } // Update value types and copy-prop the index Value *indexVal = OptSrc(indexOpnd, &instr, nullptr, indirOpnd); if(indirIndexValRef) { *indirIndexValRef = indexVal; } } bool GlobOpt::IsPREInstrCandidateLoad(Js::OpCode opcode) { switch (opcode) { case Js::OpCode::LdFld: case Js::OpCode::LdFldForTypeOf: case Js::OpCode::LdRootFld: case Js::OpCode::LdRootFldForTypeOf: case Js::OpCode::LdMethodFld: case Js::OpCode::LdRootMethodFld: case Js::OpCode::LdSlot: case Js::OpCode::LdSlotArr: return true; } return false; } bool GlobOpt::IsPREInstrCandidateStore(Js::OpCode opcode) { switch (opcode) { case Js::OpCode::StFld: case Js::OpCode::StRootFld: case Js::OpCode::StSlot: return true; } return false; } bool GlobOpt::ImplicitCallFlagsAllowOpts(Loop *loop) { return loop->GetImplicitCallFlags() != Js::ImplicitCall_HasNoInfo && (((loop->GetImplicitCallFlags() & ~Js::ImplicitCall_Accessor) | Js::ImplicitCall_None) == Js::ImplicitCall_None); } bool GlobOpt::ImplicitCallFlagsAllowOpts(Func const *func) { return func->m_fg->implicitCallFlags != Js::ImplicitCall_HasNoInfo && (((func->m_fg->implicitCallFlags & ~Js::ImplicitCall_Accessor) | Js::ImplicitCall_None) == Js::ImplicitCall_None); } #if DBG_DUMP void GlobOpt::Dump() const { this->DumpSymToValueMap(); } void GlobOpt::DumpSymToValueMap(BasicBlock const * block) const { Output::Print(_u("\n*** SymToValueMap ***\n")); block->globOptData.DumpSymToValueMap(); } void GlobOpt::DumpSymToValueMap() const { DumpSymToValueMap(this->currentBlock); } void GlobOpt::DumpSymVal(int index) { SymID id = index; extern Func *CurrentFunc; Sym *sym = this->func->m_symTable->Find(id); AssertMsg(sym, "Sym not found!!!"); Output::Print(_u("Sym: ")); sym->Dump(); Output::Print(_u("\t\tValueNumber: ")); Value * pValue = CurrentBlockData()->FindValueFromMapDirect(sym->m_id); pValue->Dump(); Output::Print(_u("\n")); } void GlobOpt::Trace(BasicBlock * block, bool before) const { bool globOptTrace = 
Js::Configuration::Global.flags.Trace.IsEnabled(Js::GlobOptPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool typeSpecTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::TypeSpecPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool floatTypeSpecTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::FloatTypeSpecPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool fieldHoistTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldHoistPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool fieldCopyPropTrace = fieldHoistTrace || Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldCopyPropPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool objTypeSpecTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::ObjTypeSpecPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool valueTableTrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::ValueTablePhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool fieldPRETrace = Js::Configuration::Global.flags.Trace.IsEnabled(Js::FieldPREPhase, this->func->GetSourceContextId(), this->func->GetLocalFunctionId()); bool anyTrace = globOptTrace || typeSpecTrace || floatTypeSpecTrace || fieldCopyPropTrace || fieldHoistTrace || objTypeSpecTrace || valueTableTrace || fieldPRETrace; if (!anyTrace) { return; } if (fieldPRETrace && this->IsLoopPrePass()) { if (block->isLoopHeader && before) { Output::Print(_u("==== Loop Prepass block header #%-3d, Visiting Loop block head #%-3d\n"), this->prePassLoop->GetHeadBlock()->GetBlockNum(), block->GetBlockNum()); } } if (!typeSpecTrace && !floatTypeSpecTrace && !valueTableTrace && !Js::Configuration::Global.flags.Verbose) { return; } if (before) { Output::Print(_u("========================================================================\n")); Output::Print(_u("Begin OptBlock: Block #%-3d"), block->GetBlockNum()); if (block->loop) { Output::Print(_u(" Loop block header:%-3d currentLoop block head:%-3d %s"), block->loop->GetHeadBlock()->GetBlockNum(), this->prePassLoop ? this->prePassLoop->GetHeadBlock()->GetBlockNum() : 0, this->IsLoopPrePass() ? 
_u("PrePass") : _u("")); } Output::Print(_u("\n")); } else { Output::Print(_u("-----------------------------------------------------------------------\n")); Output::Print(_u("After OptBlock: Block #%-3d\n"), block->GetBlockNum()); } if ((typeSpecTrace || floatTypeSpecTrace) && !block->globOptData.liveVarSyms->IsEmpty()) { Output::Print(_u(" Live var syms: ")); block->globOptData.liveVarSyms->Dump(); } if (typeSpecTrace && !block->globOptData.liveInt32Syms->IsEmpty()) { Assert(this->tempBv->IsEmpty()); this->tempBv->Minus(block->globOptData.liveInt32Syms, block->globOptData.liveLossyInt32Syms); if(!this->tempBv->IsEmpty()) { Output::Print(_u(" Int32 type specialized (lossless) syms: ")); this->tempBv->Dump(); } this->tempBv->ClearAll(); if(!block->globOptData.liveLossyInt32Syms->IsEmpty()) { Output::Print(_u(" Int32 converted (lossy) syms: ")); block->globOptData.liveLossyInt32Syms->Dump(); } } if (floatTypeSpecTrace && !block->globOptData.liveFloat64Syms->IsEmpty()) { Output::Print(_u(" Float64 type specialized syms: ")); block->globOptData.liveFloat64Syms->Dump(); } if ((fieldCopyPropTrace || objTypeSpecTrace) && this->DoFieldCopyProp(block->loop) && !block->globOptData.liveFields->IsEmpty()) { Output::Print(_u(" Live field syms: ")); block->globOptData.liveFields->Dump(); } if ((fieldHoistTrace || objTypeSpecTrace) && this->DoFieldHoisting(block->loop) && HasHoistableFields(block)) { Output::Print(_u(" Hoistable field sym: ")); block->globOptData.hoistableFields->Dump(); } if (objTypeSpecTrace || valueTableTrace) { Output::Print(_u(" Value table:\n")); block->globOptData.DumpSymToValueMap(); } if (before) { Output::Print(_u("-----------------------------------------------------------------------\n")); \ } Output::Flush(); } void GlobOpt::TraceSettings() const { Output::Print(_u("GlobOpt Settings:\r\n")); Output::Print(_u(" FloatTypeSpec: %s\r\n"), this->DoFloatTypeSpec() ? _u("enabled") : _u("disabled")); Output::Print(_u(" AggressiveIntTypeSpec: %s\r\n"), this->DoAggressiveIntTypeSpec() ? _u("enabled") : _u("disabled")); Output::Print(_u(" LossyIntTypeSpec: %s\r\n"), this->DoLossyIntTypeSpec() ? _u("enabled") : _u("disabled")); Output::Print(_u(" ArrayCheckHoist: %s\r\n"), this->func->IsArrayCheckHoistDisabled() ? 
_u("disabled") : _u("enabled")); Output::Print(_u(" ImplicitCallFlags: %s\r\n"), Js::DynamicProfileInfo::GetImplicitCallFlagsString(this->func->m_fg->implicitCallFlags)); for (Loop * loop = this->func->m_fg->loopList; loop != NULL; loop = loop->next) { Output::Print(_u(" loop: %d, ImplicitCallFlags: %s\r\n"), loop->GetLoopNumber(), Js::DynamicProfileInfo::GetImplicitCallFlagsString(loop->GetImplicitCallFlags())); } Output::Flush(); } #endif // DBG_DUMP IR::Instr * GlobOpt::TrackMarkTempObject(IR::Instr * instrStart, IR::Instr * instrLast) { if (!this->func->GetHasMarkTempObjects()) { return instrLast; } IR::Instr * instr = instrStart; IR::Instr * instrEnd = instrLast->m_next; IR::Instr * lastInstr = nullptr; GlobOptBlockData& globOptData = *CurrentBlockData(); do { bool mayNeedBailOnImplicitCallsPreOp = !this->IsLoopPrePass() && instr->HasAnyImplicitCalls() && globOptData.maybeTempObjectSyms != nullptr; if (mayNeedBailOnImplicitCallsPreOp) { IR::Opnd * src1 = instr->GetSrc1(); if (src1) { instr = GenerateBailOutMarkTempObjectIfNeeded(instr, src1, false); IR::Opnd * src2 = instr->GetSrc2(); if (src2) { instr = GenerateBailOutMarkTempObjectIfNeeded(instr, src2, false); } } } IR::Opnd *dst = instr->GetDst(); if (dst) { if (dst->IsRegOpnd()) { TrackTempObjectSyms(instr, dst->AsRegOpnd()); } else if (mayNeedBailOnImplicitCallsPreOp) { instr = GenerateBailOutMarkTempObjectIfNeeded(instr, dst, true); } } lastInstr = instr; instr = instr->m_next; } while (instr != instrEnd); return lastInstr; } void GlobOpt::TrackTempObjectSyms(IR::Instr * instr, IR::RegOpnd * opnd) { // If it is marked as dstIsTempObject, we should have mark temped it, or type specialized it to Ld_I4. Assert(!instr->dstIsTempObject || ObjectTempVerify::CanMarkTemp(instr, nullptr)); GlobOptBlockData& globOptData = *CurrentBlockData(); bool canStoreTemp = false; bool maybeTemp = false; if (OpCodeAttr::TempObjectProducing(instr->m_opcode)) { maybeTemp = instr->dstIsTempObject; // We have to make sure that lower will always generate code to do stack allocation // before we can store any other stack instance onto it. Otherwise, we would not // walk object to box the stack property. canStoreTemp = instr->dstIsTempObject && ObjectTemp::CanStoreTemp(instr); } else if (OpCodeAttr::TempObjectTransfer(instr->m_opcode)) { // Need to check both sources, GetNewScObject has two srcs for transfer. // No need to get var equiv sym here as transfer of type spec value does not transfer a mark temp object. maybeTemp = globOptData.maybeTempObjectSyms && ( (instr->GetSrc1()->IsRegOpnd() && globOptData.maybeTempObjectSyms->Test(instr->GetSrc1()->AsRegOpnd()->m_sym->m_id)) || (instr->GetSrc2() && instr->GetSrc2()->IsRegOpnd() && globOptData.maybeTempObjectSyms->Test(instr->GetSrc2()->AsRegOpnd()->m_sym->m_id))); canStoreTemp = globOptData.canStoreTempObjectSyms && ( (instr->GetSrc1()->IsRegOpnd() && globOptData.canStoreTempObjectSyms->Test(instr->GetSrc1()->AsRegOpnd()->m_sym->m_id)) && (!instr->GetSrc2() || (instr->GetSrc2()->IsRegOpnd() && globOptData.canStoreTempObjectSyms->Test(instr->GetSrc2()->AsRegOpnd()->m_sym->m_id)))); AssertOrFailFast(!canStoreTemp || instr->dstIsTempObject); AssertOrFailFast(!maybeTemp || instr->dstIsTempObject); } // Need to get the var equiv sym as assignment of type specialized sym kill the var sym value anyway. 
StackSym * sym = opnd->m_sym; if (!sym->IsVar()) { sym = sym->GetVarEquivSym(nullptr); if (sym == nullptr) { return; } } SymID symId = sym->m_id; if (maybeTemp) { // Only var sym should be temp objects Assert(opnd->m_sym == sym); if (globOptData.maybeTempObjectSyms == nullptr) { globOptData.maybeTempObjectSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); } globOptData.maybeTempObjectSyms->Set(symId); if (canStoreTemp) { if (instr->m_opcode == Js::OpCode::NewScObjectLiteral && !this->IsLoopPrePass()) { // For object literal, we install the final type up front. // If there are bailout before we finish initializing all the fields, we need to // zero out the rest if we stack allocate the literal, so that the boxing would not // try to box trash pointer in the properties. // Although object Literal initialization can be done lexically, BailOnNoProfile may cause some path // to disappear. Do it is flow base make it easier to stop propagate those entries. IR::IntConstOpnd * propertyArrayIdOpnd = instr->GetSrc1()->AsIntConstOpnd(); const Js::PropertyIdArray * propIds = instr->m_func->GetJITFunctionBody()->ReadPropertyIdArrayFromAuxData(propertyArrayIdOpnd->AsUint32()); // Duplicates are removed by parser Assert(!propIds->hadDuplicates); if (globOptData.stackLiteralInitFldDataMap == nullptr) { globOptData.stackLiteralInitFldDataMap = JitAnew(alloc, StackLiteralInitFldDataMap, alloc); } else { Assert(!globOptData.stackLiteralInitFldDataMap->ContainsKey(sym)); } StackLiteralInitFldData data = { propIds, 0}; globOptData.stackLiteralInitFldDataMap->AddNew(sym, data); } if (globOptData.canStoreTempObjectSyms == nullptr) { globOptData.canStoreTempObjectSyms = JitAnew(this->alloc, BVSparse<JitArenaAllocator>, this->alloc); } globOptData.canStoreTempObjectSyms->Set(symId); } else if (globOptData.canStoreTempObjectSyms) { globOptData.canStoreTempObjectSyms->Clear(symId); } } else { Assert(!canStoreTemp); if (globOptData.maybeTempObjectSyms) { if (globOptData.canStoreTempObjectSyms) { globOptData.canStoreTempObjectSyms->Clear(symId); } globOptData.maybeTempObjectSyms->Clear(symId); } else { Assert(!globOptData.canStoreTempObjectSyms); } // The symbol is being assigned to, the sym shouldn't still be in the stackLiteralInitFldDataMap Assert(this->IsLoopPrePass() || globOptData.stackLiteralInitFldDataMap == nullptr || globOptData.stackLiteralInitFldDataMap->Count() == 0 || !globOptData.stackLiteralInitFldDataMap->ContainsKey(sym)); } } IR::Instr * GlobOpt::GenerateBailOutMarkTempObjectIfNeeded(IR::Instr * instr, IR::Opnd * opnd, bool isDst) { Assert(opnd); Assert(isDst == (opnd == instr->GetDst())); Assert(opnd != instr->GetDst() || !opnd->IsRegOpnd()); Assert(!this->IsLoopPrePass()); Assert(instr->HasAnyImplicitCalls()); // Only dst reg opnd opcode or ArgOut_A should have dstIsTempObject marked Assert(!isDst || !instr->dstIsTempObject || instr->m_opcode == Js::OpCode::ArgOut_A); // Post-op implicit call shouldn't have installed yet Assert(!instr->HasBailOutInfo() || (instr->GetBailOutKind() & IR::BailOutKindBits) != IR::BailOutOnImplicitCalls); GlobOptBlockData& globOptData = *CurrentBlockData(); Assert(globOptData.maybeTempObjectSyms != nullptr); IR::PropertySymOpnd * propertySymOpnd = nullptr; StackSym * stackSym = ObjectTemp::GetStackSym(opnd, &propertySymOpnd); // It is okay to not get the var equiv sym here, as use of a type specialized sym is not use of the temp object // so no need to add mark temp bailout. 
// TempObjectSysm doesn't contain any type spec sym, so we will get false here for all type spec sym. if (stackSym && globOptData.maybeTempObjectSyms->Test(stackSym->m_id)) { if (instr->HasBailOutInfo()) { instr->SetBailOutKind(instr->GetBailOutKind() | IR::BailOutMarkTempObject); } else { // On insert the pre op bailout if it is not Direct field access do nothing, don't check the dst yet. // SetTypeCheckBailout will clear this out if it is direct field access. if (isDst || (instr->m_opcode == Js::OpCode::FromVar && !opnd->GetValueType().IsPrimitive()) || propertySymOpnd == nullptr || !propertySymOpnd->IsTypeCheckProtected()) { this->GenerateBailAtOperation(&instr, IR::BailOutMarkTempObject); } } if (!opnd->IsRegOpnd() && (!isDst || (globOptData.canStoreTempObjectSyms && globOptData.canStoreTempObjectSyms->Test(stackSym->m_id)))) { // If this opnd is a dst, that means that the object pointer is a stack object, // and we can store temp object/number on it. // If the opnd is a src, that means that the object pointer may be a stack object // so the load may be a temp object/number and we need to track its use. // Don't mark start of indir as can store temp, because we don't actually know // what it is assigning to. if (!isDst || !opnd->IsIndirOpnd()) { opnd->SetCanStoreTemp(); } if (propertySymOpnd) { // Track initfld of stack literals if (isDst && instr->m_opcode == Js::OpCode::InitFld) { const Js::PropertyId propertyId = propertySymOpnd->m_sym->AsPropertySym()->m_propertyId; // We don't need to track numeric properties init if (!this->func->GetThreadContextInfo()->IsNumericProperty(propertyId)) { DebugOnly(bool found = false); globOptData.stackLiteralInitFldDataMap->RemoveIf(stackSym, [&](StackSym * key, StackLiteralInitFldData & data) { DebugOnly(found = true); Assert(key == stackSym); Assert(data.currentInitFldCount < data.propIds->count); if (data.propIds->elements[data.currentInitFldCount] != propertyId) { #if DBG bool duplicate = false; for (uint i = 0; i < data.currentInitFldCount; i++) { if (data.propIds->elements[i] == propertyId) { duplicate = true; break; } } Assert(duplicate); #endif // duplicate initialization return false; } bool finished = (++data.currentInitFldCount == data.propIds->count); #if DBG if (finished) { // We can still track the finished stack literal InitFld lexically. 
this->finishedStackLiteralInitFld->Set(stackSym->m_id); } #endif return finished; }); // We might still see InitFld even we have finished with all the property Id because // of duplicate entries at the end Assert(found || finishedStackLiteralInitFld->Test(stackSym->m_id)); } } } } } return instr; } LoopCount * GlobOpt::GetOrGenerateLoopCountForMemOp(Loop *loop) { LoopCount *loopCount = loop->loopCount; if (loopCount && !loopCount->HasGeneratedLoopCountSym()) { Assert(loop->bailOutInfo); EnsureBailTarget(loop); GenerateLoopCountPlusOne(loop, loopCount); } return loopCount; } IR::Opnd * GlobOpt::GenerateInductionVariableChangeForMemOp(Loop *loop, byte unroll, IR::Instr *insertBeforeInstr) { LoopCount *loopCount = loop->loopCount; IR::Opnd *sizeOpnd = nullptr; Assert(loopCount); Assert(loop->memOpInfo->inductionVariableOpndPerUnrollMap); if (loop->memOpInfo->inductionVariableOpndPerUnrollMap->TryGetValue(unroll, &sizeOpnd)) { return sizeOpnd; } Func *localFunc = loop->GetFunc(); const auto InsertInstr = [&](IR::Instr *instr) { if (insertBeforeInstr == nullptr) { loop->landingPad->InsertAfter(instr); } else { insertBeforeInstr->InsertBefore(instr); } }; if (loopCount->LoopCountMinusOneSym()) { IRType type = loopCount->LoopCountSym()->GetType(); // Loop count is off by one, so add one IR::RegOpnd *loopCountOpnd = IR::RegOpnd::New(loopCount->LoopCountSym(), type, localFunc); sizeOpnd = loopCountOpnd; if (unroll != 1) { sizeOpnd = IR::RegOpnd::New(TyUint32, this->func); IR::Opnd *unrollOpnd = IR::IntConstOpnd::New(unroll, type, localFunc); InsertInstr(IR::Instr::New(Js::OpCode::Mul_I4, sizeOpnd, loopCountOpnd, unrollOpnd, localFunc)); } } else { uint size = (loopCount->LoopCountMinusOneConstantValue() + 1) * unroll; sizeOpnd = IR::IntConstOpnd::New(size, IRType::TyUint32, localFunc); } loop->memOpInfo->inductionVariableOpndPerUnrollMap->Add(unroll, sizeOpnd); return sizeOpnd; } IR::RegOpnd* GlobOpt::GenerateStartIndexOpndForMemop(Loop *loop, IR::Opnd *indexOpnd, IR::Opnd *sizeOpnd, bool isInductionVariableChangeIncremental, bool bIndexAlreadyChanged, IR::Instr *insertBeforeInstr) { IR::RegOpnd *startIndexOpnd = nullptr; Func *localFunc = loop->GetFunc(); IRType type = indexOpnd->GetType(); const int cacheIndex = ((int)isInductionVariableChangeIncremental << 1) | (int)bIndexAlreadyChanged; if (loop->memOpInfo->startIndexOpndCache[cacheIndex]) { return loop->memOpInfo->startIndexOpndCache[cacheIndex]; } const auto InsertInstr = [&](IR::Instr *instr) { if (insertBeforeInstr == nullptr) { loop->landingPad->InsertAfter(instr); } else { insertBeforeInstr->InsertBefore(instr); } }; startIndexOpnd = IR::RegOpnd::New(type, localFunc); // If the 2 are different we can simply use indexOpnd if (isInductionVariableChangeIncremental != bIndexAlreadyChanged) { InsertInstr(IR::Instr::New(Js::OpCode::Ld_A, startIndexOpnd, indexOpnd, localFunc)); } else { // Otherwise add 1 to it InsertInstr(IR::Instr::New(Js::OpCode::Add_I4, startIndexOpnd, indexOpnd, IR::IntConstOpnd::New(1, type, localFunc, true), localFunc)); } if (!isInductionVariableChangeIncremental) { InsertInstr(IR::Instr::New(Js::OpCode::Sub_I4, startIndexOpnd, startIndexOpnd, sizeOpnd, localFunc)); } loop->memOpInfo->startIndexOpndCache[cacheIndex] = startIndexOpnd; return startIndexOpnd; } IR::Instr* GlobOpt::FindUpperBoundsCheckInstr(IR::Instr* fromInstr) { IR::Instr *upperBoundCheck = fromInstr; do { upperBoundCheck = upperBoundCheck->m_prev; Assert(upperBoundCheck); Assert(!upperBoundCheck->IsLabelInstr()); } while (upperBoundCheck->m_opcode != 
Js::OpCode::BoundCheck); return upperBoundCheck; } IR::Instr* GlobOpt::FindArraySegmentLoadInstr(IR::Instr* fromInstr) { IR::Instr *headSegmentLengthLoad = fromInstr; do { headSegmentLengthLoad = headSegmentLengthLoad->m_prev; Assert(headSegmentLengthLoad); Assert(!headSegmentLengthLoad->IsLabelInstr()); } while (headSegmentLengthLoad->m_opcode != Js::OpCode::LdIndir); return headSegmentLengthLoad; } void GlobOpt::RemoveMemOpSrcInstr(IR::Instr* memopInstr, IR::Instr* srcInstr, BasicBlock* block) { Assert(srcInstr && (srcInstr->m_opcode == Js::OpCode::LdElemI_A || srcInstr->m_opcode == Js::OpCode::StElemI_A || srcInstr->m_opcode == Js::OpCode::StElemI_A_Strict)); Assert(memopInstr && (memopInstr->m_opcode == Js::OpCode::Memcopy || memopInstr->m_opcode == Js::OpCode::Memset)); Assert(block); const bool isDst = srcInstr->m_opcode == Js::OpCode::StElemI_A || srcInstr->m_opcode == Js::OpCode::StElemI_A_Strict; IR::RegOpnd* opnd = (isDst ? memopInstr->GetDst() : memopInstr->GetSrc1())->AsIndirOpnd()->GetBaseOpnd(); IR::ArrayRegOpnd* arrayOpnd = opnd->IsArrayRegOpnd() ? opnd->AsArrayRegOpnd() : nullptr; IR::Instr* topInstr = srcInstr; if (srcInstr->extractedUpperBoundCheckWithoutHoisting) { IR::Instr *upperBoundCheck = FindUpperBoundsCheckInstr(srcInstr); Assert(upperBoundCheck && upperBoundCheck != srcInstr); topInstr = upperBoundCheck; } if (srcInstr->loadedArrayHeadSegmentLength && arrayOpnd && arrayOpnd->HeadSegmentLengthSym()) { IR::Instr *arrayLoadSegmentHeadLength = FindArraySegmentLoadInstr(topInstr); Assert(arrayLoadSegmentHeadLength); topInstr = arrayLoadSegmentHeadLength; arrayOpnd->RemoveHeadSegmentLengthSym(); } if (srcInstr->loadedArrayHeadSegment && arrayOpnd && arrayOpnd->HeadSegmentSym()) { IR::Instr *arrayLoadSegmentHead = FindArraySegmentLoadInstr(topInstr); Assert(arrayLoadSegmentHead); topInstr = arrayLoadSegmentHead; arrayOpnd->RemoveHeadSegmentSym(); } // If no bounds check are present, simply look up for instruction added for instrumentation if(topInstr == srcInstr) { bool checkPrev = true; while (checkPrev) { switch (topInstr->m_prev->m_opcode) { case Js::OpCode::BailOnNotArray: case Js::OpCode::NoImplicitCallUses: case Js::OpCode::ByteCodeUses: topInstr = topInstr->m_prev; checkPrev = !!topInstr->m_prev; break; default: checkPrev = false; break; } } } while (topInstr != srcInstr) { IR::Instr* removeInstr = topInstr; topInstr = topInstr->m_next; Assert( removeInstr->m_opcode == Js::OpCode::BailOnNotArray || removeInstr->m_opcode == Js::OpCode::NoImplicitCallUses || removeInstr->m_opcode == Js::OpCode::ByteCodeUses || removeInstr->m_opcode == Js::OpCode::LdIndir || removeInstr->m_opcode == Js::OpCode::BoundCheck ); if (removeInstr->m_opcode != Js::OpCode::ByteCodeUses) { block->RemoveInstr(removeInstr); } } this->ConvertToByteCodeUses(srcInstr); } void GlobOpt::GetMemOpSrcInfo(Loop* loop, IR::Instr* instr, IR::RegOpnd*& base, IR::RegOpnd*& index, IRType& arrayType) { Assert(instr && (instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict)); IR::Opnd* arrayOpnd = instr->m_opcode == Js::OpCode::LdElemI_A ? 
instr->GetSrc1() : instr->GetDst(); Assert(arrayOpnd->IsIndirOpnd()); IR::IndirOpnd* indirArrayOpnd = arrayOpnd->AsIndirOpnd(); IR::RegOpnd* baseOpnd = (IR::RegOpnd*)indirArrayOpnd->GetBaseOpnd(); IR::RegOpnd* indexOpnd = (IR::RegOpnd*)indirArrayOpnd->GetIndexOpnd(); Assert(baseOpnd); Assert(indexOpnd); // Process Out Params base = baseOpnd; index = indexOpnd; arrayType = indirArrayOpnd->GetType(); } void GlobOpt::EmitMemop(Loop * loop, LoopCount *loopCount, const MemOpEmitData* emitData) { Assert(emitData); Assert(emitData->candidate); Assert(emitData->stElemInstr); Assert(emitData->stElemInstr->m_opcode == Js::OpCode::StElemI_A || emitData->stElemInstr->m_opcode == Js::OpCode::StElemI_A_Strict); IR::BailOutKind bailOutKind = emitData->bailOutKind; const byte unroll = emitData->inductionVar.unroll; Assert(unroll == 1); const bool isInductionVariableChangeIncremental = emitData->inductionVar.isIncremental; const bool bIndexAlreadyChanged = emitData->candidate->bIndexAlreadyChanged; IR::RegOpnd *baseOpnd = nullptr; IR::RegOpnd *indexOpnd = nullptr; IRType dstType; GetMemOpSrcInfo(loop, emitData->stElemInstr, baseOpnd, indexOpnd, dstType); Func *localFunc = loop->GetFunc(); // Handle bailout info EnsureBailTarget(loop); Assert(bailOutKind != IR::BailOutInvalid); // Keep only Array bits bailOuts. Consider handling these bailouts instead of simply ignoring them bailOutKind &= IR::BailOutForArrayBits; // Add our custom bailout to handle Op_MemCopy return value. bailOutKind |= IR::BailOutOnMemOpError; BailOutInfo *const bailOutInfo = loop->bailOutInfo; Assert(bailOutInfo); IR::Instr *insertBeforeInstr = bailOutInfo->bailOutInstr; Assert(insertBeforeInstr); IR::Opnd *sizeOpnd = GenerateInductionVariableChangeForMemOp(loop, unroll, insertBeforeInstr); IR::RegOpnd *startIndexOpnd = GenerateStartIndexOpndForMemop(loop, indexOpnd, sizeOpnd, isInductionVariableChangeIncremental, bIndexAlreadyChanged, insertBeforeInstr); IR::IndirOpnd* dstOpnd = IR::IndirOpnd::New(baseOpnd, startIndexOpnd, dstType, localFunc); IR::Opnd *src1; const bool isMemset = emitData->candidate->IsMemSet(); // Get the source according to the memop type if (isMemset) { MemSetEmitData* data = (MemSetEmitData*)emitData; const Loop::MemSetCandidate* candidate = data->candidate->AsMemSet(); if (candidate->srcSym) { IR::RegOpnd* regSrc = IR::RegOpnd::New(candidate->srcSym, candidate->srcSym->GetType(), func); regSrc->SetIsJITOptimizedReg(true); src1 = regSrc; } else { src1 = IR::AddrOpnd::New(candidate->constant.ToVar(localFunc), IR::AddrOpndKindConstantAddress, localFunc); } } else { Assert(emitData->candidate->IsMemCopy()); MemCopyEmitData* data = (MemCopyEmitData*)emitData; Assert(data->ldElemInstr); Assert(data->ldElemInstr->m_opcode == Js::OpCode::LdElemI_A); IR::RegOpnd *srcBaseOpnd = nullptr; IR::RegOpnd *srcIndexOpnd = nullptr; IRType srcType; GetMemOpSrcInfo(loop, data->ldElemInstr, srcBaseOpnd, srcIndexOpnd, srcType); Assert(GetVarSymID(srcIndexOpnd->GetStackSym()) == GetVarSymID(indexOpnd->GetStackSym())); src1 = IR::IndirOpnd::New(srcBaseOpnd, startIndexOpnd, srcType, localFunc); } // Generate memcopy IR::Instr* memopInstr = IR::BailOutInstr::New(isMemset ? 
Js::OpCode::Memset : Js::OpCode::Memcopy, bailOutKind, bailOutInfo, localFunc); memopInstr->SetDst(dstOpnd); memopInstr->SetSrc1(src1); memopInstr->SetSrc2(sizeOpnd); insertBeforeInstr->InsertBefore(memopInstr); #if DBG_DUMP if (DO_MEMOP_TRACE()) { char valueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; baseOpnd->GetValueType().ToString(valueTypeStr); const int loopCountBufSize = 16; char16 loopCountBuf[loopCountBufSize]; if (loopCount->LoopCountMinusOneSym()) { swprintf_s(loopCountBuf, _u("s%u"), loopCount->LoopCountMinusOneSym()->m_id); } else { swprintf_s(loopCountBuf, _u("%u"), loopCount->LoopCountMinusOneConstantValue() + 1); } if (isMemset) { const Loop::MemSetCandidate* candidate = emitData->candidate->AsMemSet(); const int constBufSize = 32; char16 constBuf[constBufSize]; if (candidate->srcSym) { swprintf_s(constBuf, _u("s%u"), candidate->srcSym->m_id); } else { switch (candidate->constant.type) { case TyInt8: case TyInt16: case TyInt32: case TyInt64: swprintf_s(constBuf, sizeof(IntConstType) == 8 ? _u("%lld") : _u("%d"), candidate->constant.u.intConst.value); break; case TyFloat32: case TyFloat64: swprintf_s(constBuf, _u("%.4f"), candidate->constant.u.floatConst.value); break; case TyVar: swprintf_s(constBuf, sizeof(Js::Var) == 8 ? _u("0x%.16llX") : _u("0x%.8X"), candidate->constant.u.varConst.value); break; default: AssertMsg(false, "Unsupported constant type"); swprintf_s(constBuf, _u("Unknown")); break; } } TRACE_MEMOP_PHASE(MemSet, loop, emitData->stElemInstr, _u("ValueType: %S, Base: s%u, Index: s%u, Constant: %s, LoopCount: %s, IsIndexChangedBeforeUse: %d"), valueTypeStr, candidate->base, candidate->index, constBuf, loopCountBuf, bIndexAlreadyChanged); } else { const Loop::MemCopyCandidate* candidate = emitData->candidate->AsMemCopy(); TRACE_MEMOP_PHASE(MemCopy, loop, emitData->stElemInstr, _u("ValueType: %S, StBase: s%u, Index: s%u, LdBase: s%u, LoopCount: %s, IsIndexChangedBeforeUse: %d"), valueTypeStr, candidate->base, candidate->index, candidate->ldBase, loopCountBuf, bIndexAlreadyChanged); } } #endif RemoveMemOpSrcInstr(memopInstr, emitData->stElemInstr, emitData->block); if (!isMemset) { RemoveMemOpSrcInstr(memopInstr, ((MemCopyEmitData*)emitData)->ldElemInstr, emitData->block); } } bool GlobOpt::InspectInstrForMemSetCandidate(Loop* loop, IR::Instr* instr, MemSetEmitData* emitData, bool& errorInInstr) { Assert(emitData && emitData->candidate && emitData->candidate->IsMemSet()); Loop::MemSetCandidate* candidate = (Loop::MemSetCandidate*)emitData->candidate; if (instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) { if (instr->GetDst()->IsIndirOpnd() && (GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetStackSym()) == candidate->base) && (GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetIndexOpnd()->GetStackSym()) == candidate->index) ) { Assert(instr->IsProfiledInstr()); emitData->stElemInstr = instr; emitData->bailOutKind = instr->GetBailOutKind(); return true; } TRACE_MEMOP_PHASE_VERBOSE(MemSet, loop, instr, _u("Orphan StElemI_A detected")); errorInInstr = true; } else if (instr->m_opcode == Js::OpCode::LdElemI_A) { TRACE_MEMOP_PHASE_VERBOSE(MemSet, loop, instr, _u("Orphan LdElemI_A detected")); errorInInstr = true; } return false; } bool GlobOpt::InspectInstrForMemCopyCandidate(Loop* loop, IR::Instr* instr, MemCopyEmitData* emitData, bool& errorInInstr) { Assert(emitData && emitData->candidate && emitData->candidate->IsMemCopy()); Loop::MemCopyCandidate* candidate = (Loop::MemCopyCandidate*)emitData->candidate; if 
(instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) { if ( instr->GetDst()->IsIndirOpnd() && (GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetStackSym()) == candidate->base) && (GetVarSymID(instr->GetDst()->AsIndirOpnd()->GetIndexOpnd()->GetStackSym()) == candidate->index) ) { Assert(instr->IsProfiledInstr()); emitData->stElemInstr = instr; emitData->bailOutKind = instr->GetBailOutKind(); // Still need to find the LdElem return false; } TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Orphan StElemI_A detected")); errorInInstr = true; } else if (instr->m_opcode == Js::OpCode::LdElemI_A) { if ( emitData->stElemInstr && instr->GetSrc1()->IsIndirOpnd() && (GetVarSymID(instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetStackSym()) == candidate->ldBase) && (GetVarSymID(instr->GetSrc1()->AsIndirOpnd()->GetIndexOpnd()->GetStackSym()) == candidate->index) ) { Assert(instr->IsProfiledInstr()); emitData->ldElemInstr = instr; ValueType stValueType = emitData->stElemInstr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType(); ValueType ldValueType = emitData->ldElemInstr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType(); if (stValueType != ldValueType) { #if DBG_DUMP char16 stValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; stValueType.ToString(stValueTypeStr); char16 ldValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; ldValueType.ToString(ldValueTypeStr); TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("for mismatch in Load(%s) and Store(%s) value type"), ldValueTypeStr, stValueTypeStr); #endif errorInInstr = true; return false; } // We found both instruction for this candidate return true; } TRACE_MEMOP_PHASE_VERBOSE(MemCopy, loop, instr, _u("Orphan LdElemI_A detected")); errorInInstr = true; } return false; } // The caller is responsible to free the memory allocated between inOrderEmitData[iEmitData -> end] bool GlobOpt::ValidateMemOpCandidates(Loop * loop, _Out_writes_(iEmitData) MemOpEmitData** inOrderEmitData, int& iEmitData) { AnalysisAssert(iEmitData == (int)loop->memOpInfo->candidates->Count()); // We iterate over the second block of the loop only. 
MemOp Works only if the loop has exactly 2 blocks Assert(loop->blockList.HasTwo()); Loop::MemOpList::Iterator iter(loop->memOpInfo->candidates); BasicBlock* bblock = loop->blockList.Head()->next; Loop::MemOpCandidate* candidate = nullptr; MemOpEmitData* emitData = nullptr; // Iterate backward because the list of candidate is reversed FOREACH_INSTR_BACKWARD_IN_BLOCK(instr, bblock) { if (!candidate) { // Time to check next candidate if (!iter.Next()) { // We have been through the whole list of candidates, finish break; } candidate = iter.Data(); if (!candidate) { continue; } // Common check for memset and memcopy Loop::InductionVariableChangeInfo inductionVariableChangeInfo = { 0, 0 }; // Get the inductionVariable changeInfo if (!loop->memOpInfo->inductionVariableChangeInfoMap->TryGetValue(candidate->index, &inductionVariableChangeInfo)) { TRACE_MEMOP_VERBOSE(loop, nullptr, _u("MemOp skipped (s%d): no induction variable"), candidate->base); return false; } if (inductionVariableChangeInfo.unroll != candidate->count) { TRACE_MEMOP_VERBOSE(loop, nullptr, _u("MemOp skipped (s%d): not matching unroll count"), candidate->base); return false; } if (candidate->IsMemSet()) { Assert(!PHASE_OFF(Js::MemSetPhase, this->func)); emitData = JitAnew(this->alloc, MemSetEmitData); } else { Assert(!PHASE_OFF(Js::MemCopyPhase, this->func)); // Specific check for memcopy Assert(candidate->IsMemCopy()); Loop::MemCopyCandidate* memcopyCandidate = candidate->AsMemCopy(); if (memcopyCandidate->base == Js::Constants::InvalidSymID || memcopyCandidate->ldBase == Js::Constants::InvalidSymID || (memcopyCandidate->ldCount != memcopyCandidate->count)) { TRACE_MEMOP_PHASE(MemCopy, loop, nullptr, _u("(s%d): not matching ldElem and stElem"), candidate->base); return false; } emitData = JitAnew(this->alloc, MemCopyEmitData); } Assert(emitData); emitData->block = bblock; emitData->inductionVar = inductionVariableChangeInfo; emitData->candidate = candidate; } bool errorInInstr = false; bool candidateFound = candidate->IsMemSet() ? InspectInstrForMemSetCandidate(loop, instr, (MemSetEmitData*)emitData, errorInInstr) : InspectInstrForMemCopyCandidate(loop, instr, (MemCopyEmitData*)emitData, errorInInstr); if (errorInInstr) { JitAdelete(this->alloc, emitData); return false; } if (candidateFound) { AnalysisAssert(iEmitData > 0); if (iEmitData == 0) { // Explicit for OACR break; } inOrderEmitData[--iEmitData] = emitData; candidate = nullptr; emitData = nullptr; } } NEXT_INSTR_BACKWARD_IN_BLOCK; if (iter.IsValid()) { TRACE_MEMOP(loop, nullptr, _u("Candidates not found in loop while validating")); return false; } return true; } void GlobOpt::ProcessMemOp() { FOREACH_LOOP_IN_FUNC_EDITING(loop, this->func) { if (HasMemOp(loop)) { const int candidateCount = loop->memOpInfo->candidates->Count(); Assert(candidateCount > 0); LoopCount * loopCount = GetOrGenerateLoopCountForMemOp(loop); // If loopCount is not available we can not continue with memop if (!loopCount || !(loopCount->LoopCountMinusOneSym() || loopCount->LoopCountMinusOneConstantValue())) { TRACE_MEMOP(loop, nullptr, _u("MemOp skipped for no loop count")); loop->doMemOp = false; loop->memOpInfo->candidates->Clear(); continue; } // The list is reversed, check them and place them in order in the following array MemOpEmitData** inOrderCandidates = JitAnewArray(this->alloc, MemOpEmitData*, candidateCount); int i = candidateCount; if (ValidateMemOpCandidates(loop, inOrderCandidates, i)) { Assert(i == 0); // Process the valid MemOp candidate in order. 
for (; i < candidateCount; ++i) { // Emit EmitMemop(loop, loopCount, inOrderCandidates[i]); JitAdelete(this->alloc, inOrderCandidates[i]); } } else { Assert(i != 0); for (; i < candidateCount; ++i) { JitAdelete(this->alloc, inOrderCandidates[i]); } // One of the memop candidates did not validate. Do not emit for this loop. loop->doMemOp = false; loop->memOpInfo->candidates->Clear(); } // Free memory JitAdeleteArray(this->alloc, candidateCount, inOrderCandidates); } } NEXT_LOOP_EDITING; }
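// --------------------------------------------------------------------------
// Hedged illustration (not ChakraCore code): a stand-alone sketch of the
// transformation GlobOpt::EmitMemop encodes above. A counted element-store
// loop that the optimizer recognizes as a memset candidate is replaced by a
// single bulk operation over the same index range; GenerateStartIndexOpndForMemop
// plays the role of rebasing the start index so both forms touch the same
// elements. All names below (fillLoop, fillBulk) are illustrative only.
// --------------------------------------------------------------------------
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Loop form: the per-element pattern a memset candidate starts out as.
static void fillLoop(std::vector<int>& a, std::size_t from, std::size_t count, int v)
{
    for (std::size_t i = from; i < from + count; ++i)
        a[i] = v;
}

// Bulk form: the single Memset-style operation the loop is rewritten into.
static void fillBulk(std::vector<int>& a, std::size_t from, std::size_t count, int v)
{
    std::fill_n(a.begin() + static_cast<std::ptrdiff_t>(from), count, v);
}

int main()
{
    std::vector<int> x(16, 0), y(16, 0);
    fillLoop(x, 4, 8, 7);
    fillBulk(y, 4, 8, 7);
    assert(x == y); // both forms leave the array in the same state
    return 0;
}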
// wp_convertible_test.cpp
//
// Copyright (c) 2008 Peter Dimov
//
// SPDX-License-Identifier: BSL-1.0
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt

#include <pika/local/config.hpp>
#include <pika/modules/memory.hpp>
#include <pika/modules/testing.hpp>

//

struct W
{
};

void intrusive_ptr_add_ref(W*) {}

void intrusive_ptr_release(W*) {}

struct X : public virtual W
{
};

struct Y : public virtual W
{
};

struct Z : public X
{
};

int f(pika::intrusive_ptr<X>)
{
    return 1;
}

int f(pika::intrusive_ptr<Y>)
{
    return 2;
}

int main()
{
    PIKA_TEST_EQ(1, f(pika::intrusive_ptr<Z>()));

    return pika::util::report_errors();
}
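// --------------------------------------------------------------------------
// Hedged sketch (not pika's implementation): the test above deliberately
// stubs intrusive_ptr_add_ref/intrusive_ptr_release as no-ops because it only
// exercises overload resolution. A real intrusively counted base typically
// looks like the following; RefCounted and the atomic counter are
// illustrative assumptions, not part of the test.
// --------------------------------------------------------------------------
#include <atomic>
#include <cassert>

struct RefCounted
{
    std::atomic<int> refs{0};
    virtual ~RefCounted() = default;
};

// The two customization points an intrusive pointer finds via ADL.
inline void intrusive_ptr_add_ref(RefCounted* p)
{
    p->refs.fetch_add(1, std::memory_order_relaxed);
}

inline void intrusive_ptr_release(RefCounted* p)
{
    // Last reference released: destroy the object.
    if (p->refs.fetch_sub(1, std::memory_order_acq_rel) == 1)
        delete p;
}

int main()
{
    RefCounted* p = new RefCounted();
    intrusive_ptr_add_ref(p);   // roughly what a smart pointer's constructor does
    assert(p->refs.load() == 1);
    intrusive_ptr_release(p);   // roughly what its destructor does; frees p here
    return 0;
}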
// Tags: DP
// Split the string s into exactly k pieces, each of length between a and b,
// printing one valid split (or "No solution" if none exists).
#include <bits/stdc++.h>
#define ll long long
#define IO ios_base::sync_with_stdio(0);cin.tie(0);cout.tie(0)
using namespace std;

const int N = 205;
int k, a, b, dp[N][N];
string s;

// dp[i][l]: can the suffix starting at i be split into exactly k - l more pieces?
int solve(int i, int l)
{
    if(i == (int)s.size()) return l == k;
    if(l == k) return 0;
    if(dp[i][l] != -1) return dp[i][l];
    dp[i][l] = 0;
    for(int j = a; j <= b; j++)
        if(i + j <= (int)s.size())
            dp[i][l] |= solve(i + j, l + 1);
    return dp[i][l];
}

// Reconstruct one valid split, always taking the shortest feasible piece next.
void build(int i, int l)
{
    if(i == (int)s.size()) return;
    if(l == k) return;
    for(int j = a; j <= b; j++)
    {
        if(i + j > (int)s.size()) break;
        if(!solve(i + j, l + 1)) continue;
        cout << s.substr(i, j) << '\n';
        build(i + j, l + 1);
        return;
    }
}

int main()
{
    IO;
    memset(dp, -1, sizeof dp);
    cin >> k >> a >> b >> s;
    if(solve(0, 0)) build(0, 0);
    else cout << "No solution";
}
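// Worked example (hand-traced against the code above, hedged):
//   input : k=4 a=2 b=3, s="abcdefghij"  (length 10)
//   output: ab
//           cd
//           efg
//           hij
// build() tries the shortest feasible piece first, so it emits two
// 2-character pieces; at "efghij" a 2-character piece would leave 4 characters
// for the single remaining part (too long for b=3), so it switches to "efg"
// and the final piece "hij" completes the split.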
/*=============================================================================
    Copyright (c) 2009 Hartmut Kaiser

    Distributed under the Boost Software License, Version 1.0. (See accompanying
    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/

#if !defined(BOOST_FUSION_NVIEW_DISTANCE_IMPL_SEP_23_2009_0328PM)
#define BOOST_FUSION_NVIEW_DISTANCE_IMPL_SEP_23_2009_0328PM

#include <boost/fusion/iterator/distance.hpp>

namespace boost { namespace fusion
{
    struct nview_iterator_tag;

    namespace extension
    {
        template<typename Tag>
        struct distance_impl;

        template<>
        struct distance_impl<nview_iterator_tag>
        {
            template<typename First, typename Last>
            struct apply
              : result_of::distance<typename First::first_type, typename Last::first_type>
            {
                typedef typename result_of::distance<
                    typename First::first_type, typename Last::first_type
                >::type type;

                static type call(First const& /*first*/, Last const& /*last*/)
                {
                    return type();
                }
            };
        };
    }
}}

#endif
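// --------------------------------------------------------------------------
// Hedged, self-contained analogue (not Boost code) of the extension hook
// above: the distance between two position-tagged "iterators" is computed
// purely at compile time by a tag-selected metafunction, and call() only has
// to materialise the resulting integral constant. demo_iterator,
// demo_iterator_tag and demo_distance_impl are made-up names for illustration.
// --------------------------------------------------------------------------
#include <type_traits>

struct demo_iterator_tag;

template <int N>
struct demo_iterator
{
    using tag = demo_iterator_tag;
    static constexpr int position = N;
};

template <typename Tag>
struct demo_distance_impl;          // primary template, specialised per tag

template <>
struct demo_distance_impl<demo_iterator_tag>
{
    template <typename First, typename Last>
    struct apply
    {
        using type = std::integral_constant<int, Last::position - First::position>;

        // As in the fusion version, the runtime call carries no state.
        static type call(First const&, Last const&) { return type(); }
    };
};

static_assert(
    demo_distance_impl<demo_iterator_tag>::apply<demo_iterator<1>, demo_iterator<4>>::type::value == 3,
    "distance is resolved entirely at compile time");

int main() { return 0; }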
#include "fipch.h" #include "Buffer.h" #include "Renderer.h" #include "Platform/OpenGL/OpenGLBuffer.h" namespace Flick { ///////////////////Index Buffer/////////////////// VertexBuffer* VertexBuffer::Create(float* verticies, uint32_t size) { switch (Renderer::GetAPI()) { case RendererAPI::API::None: FI_CORE_ASSERT(false, "RendererAPi::None is not yet supported by Flick!"); return nullptr; case RendererAPI::API::OpenGL: return new OpenGLVertexBuffer(verticies, size); } FI_CORE_ASSERT(false, "Unknown RendererAPI!"); return nullptr; } ///////////////////Index Buffer/////////////////// IndexBuffer* IndexBuffer::Create(uint32_t* indicies, uint32_t count) { switch (Renderer::GetAPI()) { case RendererAPI::API::None: FI_CORE_ASSERT(false, "RendererAPi::None is not yet supported by Flick!"); return nullptr; case RendererAPI::API::OpenGL: return new OpenGLIndexBuffer(indicies, count); } FI_CORE_ASSERT(false, "Unknown RendererAPI!"); return nullptr; } }
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/password_manager/core/browser/login_database.h" #include <stddef.h> #include <stdint.h> #include <memory> #include "base/files/file_util.h" #include "base/files/scoped_temp_dir.h" #include "base/memory/scoped_vector.h" #include "base/path_service.h" #include "base/strings/string_number_conversions.h" #include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" #include "base/test/histogram_tester.h" #include "base/time/time.h" #include "build/build_config.h" #include "components/autofill/core/common/password_form.h" #include "components/password_manager/core/browser/psl_matching_helper.h" #include "sql/connection.h" #include "sql/statement.h" #include "sql/test/test_helpers.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" #include "url/origin.h" #if defined(OS_MACOSX) #include "components/os_crypt/os_crypt.h" #endif using autofill::PasswordForm; using base::ASCIIToUTF16; using ::testing::Eq; namespace password_manager { namespace { PasswordStoreChangeList AddChangeForForm(const PasswordForm& form) { return PasswordStoreChangeList( 1, PasswordStoreChange(PasswordStoreChange::ADD, form)); } PasswordStoreChangeList UpdateChangeForForm(const PasswordForm& form) { return PasswordStoreChangeList( 1, PasswordStoreChange(PasswordStoreChange::UPDATE, form)); } void GenerateExamplePasswordForm(PasswordForm* form) { form->origin = GURL("http://accounts.google.com/LoginAuth"); form->action = GURL("http://accounts.google.com/Login"); form->username_element = ASCIIToUTF16("Email"); form->username_value = ASCIIToUTF16("test@gmail.com"); form->password_element = ASCIIToUTF16("Passwd"); form->password_value = ASCIIToUTF16("test"); form->submit_element = ASCIIToUTF16("signIn"); form->signon_realm = "http://www.google.com/"; form->ssl_valid = false; form->preferred = false; form->scheme = PasswordForm::SCHEME_HTML; form->times_used = 1; form->form_data.name = ASCIIToUTF16("form_name"); form->date_synced = base::Time::Now(); form->display_name = ASCIIToUTF16("Mr. Smith"); form->icon_url = GURL("https://accounts.google.com/Icon"); form->federation_origin = url::Origin(GURL("https://accounts.google.com/")); form->skip_zero_click = true; } // Helper functions to read the value of the first column of an executed // statement if we know its type. You must implement a specialization for // every column type you use. template<class T> struct must_be_specialized { static const bool is_specialized = false; }; template<class T> T GetFirstColumn(const sql::Statement& s) { static_assert(must_be_specialized<T>::is_specialized, "Implement a specialization."); } template<> int64_t GetFirstColumn(const sql::Statement& s) { return s.ColumnInt64(0); }; template<> std::string GetFirstColumn(const sql::Statement& s) { return s.ColumnString(0); }; bool AddZeroClickableLogin(LoginDatabase* db, const std::string& unique_string) { // Example password form. 
PasswordForm form; form.origin = GURL("https://example.com/"); form.username_element = ASCIIToUTF16(unique_string); form.username_value = ASCIIToUTF16(unique_string); form.password_element = ASCIIToUTF16(unique_string); form.submit_element = ASCIIToUTF16("signIn"); form.signon_realm = form.origin.spec(); form.display_name = ASCIIToUTF16(unique_string); form.icon_url = GURL("https://example.com/"); form.federation_origin = url::Origin(GURL("https://example.com/")); form.date_created = base::Time::Now(); form.skip_zero_click = false; return db->AddLogin(form) == AddChangeForForm(form); } } // namespace // Serialization routines for vectors implemented in login_database.cc. base::Pickle SerializeVector(const std::vector<base::string16>& vec); std::vector<base::string16> DeserializeVector(const base::Pickle& pickle); class LoginDatabaseTest : public testing::Test { protected: void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); file_ = temp_dir_.path().AppendASCII("TestMetadataStoreMacDatabase"); #if defined(OS_MACOSX) OSCrypt::UseMockKeychain(true); #endif // defined(OS_MACOSX) db_.reset(new LoginDatabase(file_)); ASSERT_TRUE(db_->Init()); } LoginDatabase& db() { return *db_; } void TestNonHTMLFormPSLMatching(const PasswordForm::Scheme& scheme) { ScopedVector<autofill::PasswordForm> result; base::Time now = base::Time::Now(); // Simple non-html auth form. PasswordForm non_html_auth; non_html_auth.origin = GURL("http://example.com"); non_html_auth.username_value = ASCIIToUTF16("test@gmail.com"); non_html_auth.password_value = ASCIIToUTF16("test"); non_html_auth.signon_realm = "http://example.com/Realm"; non_html_auth.scheme = scheme; non_html_auth.date_created = now; // Simple password form. PasswordForm html_form(non_html_auth); html_form.action = GURL("http://example.com/login"); html_form.username_element = ASCIIToUTF16("username"); html_form.username_value = ASCIIToUTF16("test2@gmail.com"); html_form.password_element = ASCIIToUTF16("password"); html_form.submit_element = ASCIIToUTF16(""); html_form.signon_realm = "http://example.com/"; html_form.scheme = PasswordForm::SCHEME_HTML; html_form.date_created = now; // Add them and make sure they are there. EXPECT_EQ(AddChangeForForm(non_html_auth), db().AddLogin(non_html_auth)); EXPECT_EQ(AddChangeForForm(html_form), db().AddLogin(html_form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); result.clear(); PasswordForm second_non_html_auth(non_html_auth); second_non_html_auth.origin = GURL("http://second.example.com"); second_non_html_auth.signon_realm = "http://second.example.com/Realm"; // This shouldn't match anything. EXPECT_TRUE(db().GetLogins(second_non_html_auth, &result)); EXPECT_EQ(0U, result.size()); // non-html auth still matches against itself. EXPECT_TRUE(db().GetLogins(non_html_auth, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(result[0]->signon_realm, "http://example.com/Realm"); // Clear state. db().RemoveLoginsCreatedBetween(now, base::Time()); } // Checks that a form of a given |scheme|, once stored, can be successfully // retrieved from the database. 
void TestRetrievingIPAddress(const PasswordForm::Scheme& scheme) { SCOPED_TRACE(testing::Message() << "scheme = " << scheme); ScopedVector<autofill::PasswordForm> result; base::Time now = base::Time::Now(); std::string origin("http://56.7.8.90"); PasswordForm ip_form; ip_form.origin = GURL(origin); ip_form.username_value = ASCIIToUTF16("test@gmail.com"); ip_form.password_value = ASCIIToUTF16("test"); ip_form.signon_realm = origin; ip_form.scheme = scheme; ip_form.date_created = now; EXPECT_EQ(AddChangeForForm(ip_form), db().AddLogin(ip_form)); EXPECT_TRUE(db().GetLogins(ip_form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(result[0]->signon_realm, origin); // Clear state. db().RemoveLoginsCreatedBetween(now, base::Time()); } base::ScopedTempDir temp_dir_; base::FilePath file_; std::unique_ptr<LoginDatabase> db_; }; TEST_F(LoginDatabaseTest, Logins) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); // Example password form. PasswordForm form; GenerateExamplePasswordForm(&form); // Add it and make sure it is there and that all the fields were retrieved // correctly. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); result.clear(); // Match against an exact copy. EXPECT_TRUE(db().GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); result.clear(); // The example site changes... PasswordForm form2(form); form2.origin = GURL("http://www.google.com/new/accounts/LoginAuth"); form2.submit_element = ASCIIToUTF16("reallySignIn"); // Match against an inexact copy EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // Uh oh, the site changed origin & action URLs all at once! PasswordForm form3(form2); form3.action = GURL("http://www.google.com/new/accounts/Login"); // signon_realm is the same, should match. EXPECT_TRUE(db().GetLogins(form3, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // Imagine the site moves to a secure server for login. PasswordForm form4(form3); form4.signon_realm = "https://www.google.com/"; form4.ssl_valid = true; // We have only an http record, so no match for this. EXPECT_TRUE(db().GetLogins(form4, &result)); EXPECT_EQ(0U, result.size()); // Let's imagine the user logs into the secure site. EXPECT_EQ(AddChangeForForm(form4), db().AddLogin(form4)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); result.clear(); // Now the match works EXPECT_TRUE(db().GetLogins(form4, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // The user chose to forget the original but not the new. EXPECT_TRUE(db().RemoveLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); result.clear(); // The old form wont match the new site (http vs https). EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(0U, result.size()); // The user's request for the HTTPS site is intercepted // by an attacker who presents an invalid SSL cert. PasswordForm form5(form4); form5.ssl_valid = 0; // It will match in this case. EXPECT_TRUE(db().GetLogins(form5, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // User changes his password. PasswordForm form6(form5); form6.password_value = ASCIIToUTF16("test6"); form6.preferred = true; // We update, and check to make sure it matches the // old form, and there is only one record. 
EXPECT_EQ(UpdateChangeForForm(form6), db().UpdateLogin(form6)); // matches EXPECT_TRUE(db().GetLogins(form5, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // Only one record. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); // Password element was updated. EXPECT_EQ(form6.password_value, result[0]->password_value); // Preferred login. EXPECT_TRUE(form6.preferred); result.clear(); // Make sure everything can disappear. EXPECT_TRUE(db().RemoveLogin(form4)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); } TEST_F(LoginDatabaseTest, TestPublicSuffixDomainMatching) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); // Example password form. PasswordForm form; form.origin = GURL("https://foo.com/"); form.action = GURL("https://foo.com/login"); form.username_element = ASCIIToUTF16("username"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_element = ASCIIToUTF16("password"); form.password_value = ASCIIToUTF16("test"); form.submit_element = ASCIIToUTF16(""); form.signon_realm = "https://foo.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); result.clear(); // Match against an exact copy. EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // We go to the mobile site. PasswordForm form2(form); form2.origin = GURL("https://mobile.foo.com/"); form2.action = GURL("https://mobile.foo.com/login"); form2.signon_realm = "https://mobile.foo.com/"; // Match against the mobile site. EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); EXPECT_EQ("https://foo.com/", result[0]->signon_realm); EXPECT_TRUE(result[0]->is_public_suffix_match); // Try to remove PSL matched form EXPECT_FALSE(db().RemoveLogin(*result[0])); result.clear(); // Ensure that the original form is still there EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(1U, result.size()); result.clear(); } TEST_F(LoginDatabaseTest, TestFederatedMatching) { ScopedVector<autofill::PasswordForm> result; // Example password form. PasswordForm form; form.origin = GURL("https://foo.com/"); form.action = GURL("https://foo.com/login"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_value = ASCIIToUTF16("test"); form.signon_realm = "https://foo.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // We go to the mobile site. PasswordForm form2(form); form2.origin = GURL("https://mobile.foo.com/"); form2.action = GURL("https://mobile.foo.com/login"); form2.signon_realm = "federation://mobile.foo.com/accounts.google.com"; form2.username_value = ASCIIToUTF16("test1@gmail.com"); form2.type = autofill::PasswordForm::TYPE_API; form2.federation_origin = url::Origin(GURL("https://accounts.google.com/")); // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_EQ(AddChangeForForm(form2), db().AddLogin(form2)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); // Match against desktop. 
PasswordForm form_request; form_request.origin = GURL("https://foo.com/"); form_request.signon_realm = "https://foo.com/"; form_request.scheme = PasswordForm::SCHEME_HTML; EXPECT_TRUE(db().GetLogins(form_request, &result)); EXPECT_THAT(result, testing::ElementsAre(testing::Pointee(form))); // Match against the mobile site. form_request.origin = GURL("https://mobile.foo.com/"); form_request.signon_realm = "https://mobile.foo.com/"; EXPECT_TRUE(db().GetLogins(form_request, &result)); form.is_public_suffix_match = true; EXPECT_THAT(result, testing::UnorderedElementsAre(testing::Pointee(form), testing::Pointee(form2))); } TEST_F(LoginDatabaseTest, TestPublicSuffixDisabledForNonHTMLForms) { TestNonHTMLFormPSLMatching(PasswordForm::SCHEME_BASIC); TestNonHTMLFormPSLMatching(PasswordForm::SCHEME_DIGEST); TestNonHTMLFormPSLMatching(PasswordForm::SCHEME_OTHER); } TEST_F(LoginDatabaseTest, TestIPAddressMatches_HTML) { TestRetrievingIPAddress(PasswordForm::SCHEME_HTML); } TEST_F(LoginDatabaseTest, TestIPAddressMatches_basic) { TestRetrievingIPAddress(PasswordForm::SCHEME_BASIC); } TEST_F(LoginDatabaseTest, TestIPAddressMatches_digest) { TestRetrievingIPAddress(PasswordForm::SCHEME_DIGEST); } TEST_F(LoginDatabaseTest, TestIPAddressMatches_other) { TestRetrievingIPAddress(PasswordForm::SCHEME_OTHER); } TEST_F(LoginDatabaseTest, TestPublicSuffixDomainMatchingShouldMatchingApply) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); // Example password form. PasswordForm form; form.origin = GURL("https://accounts.google.com/"); form.action = GURL("https://accounts.google.com/login"); form.username_element = ASCIIToUTF16("username"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_element = ASCIIToUTF16("password"); form.password_value = ASCIIToUTF16("test"); form.submit_element = ASCIIToUTF16(""); form.signon_realm = "https://accounts.google.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); result.clear(); // Match against an exact copy. EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // We go to a different site on the same domain where feature is not needed. PasswordForm form2(form); form2.origin = GURL("https://some.other.google.com/"); form2.action = GURL("https://some.other.google.com/login"); form2.signon_realm = "https://some.other.google.com/"; // Match against the other site. Should not match since feature should not be // enabled for this domain. ASSERT_FALSE(ShouldPSLDomainMatchingApply( GetRegistryControlledDomain(GURL(form2.signon_realm)))); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); } TEST_F(LoginDatabaseTest, TestFederatedMatchingWithoutPSLMatching) { ScopedVector<autofill::PasswordForm> result; // Example password form. PasswordForm form; form.origin = GURL("https://accounts.google.com/"); form.action = GURL("https://accounts.google.com/login"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_value = ASCIIToUTF16("test"); form.signon_realm = "https://accounts.google.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // We go to a different site on the same domain where PSL is disabled. 
PasswordForm form2(form); form2.origin = GURL("https://some.other.google.com/"); form2.action = GURL("https://some.other.google.com/login"); form2.signon_realm = "federation://some.other.google.com/accounts.google.com"; form2.username_value = ASCIIToUTF16("test1@gmail.com"); form2.type = autofill::PasswordForm::TYPE_API; form2.federation_origin = url::Origin(GURL("https://accounts.google.com/")); // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_EQ(AddChangeForForm(form2), db().AddLogin(form2)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); // Match against the first one. PasswordForm form_request; form_request.origin = form.origin; form_request.signon_realm = form.signon_realm; form_request.scheme = PasswordForm::SCHEME_HTML; EXPECT_TRUE(db().GetLogins(form_request, &result)); EXPECT_THAT(result, testing::ElementsAre(testing::Pointee(form))); // Match against the second one. ASSERT_FALSE(ShouldPSLDomainMatchingApply( GetRegistryControlledDomain(GURL(form2.signon_realm)))); form_request.origin = form2.origin; form_request.signon_realm = form2.signon_realm; EXPECT_TRUE(db().GetLogins(form_request, &result)); form.is_public_suffix_match = true; EXPECT_THAT(result, testing::ElementsAre(testing::Pointee(form2))); } // This test fails if the implementation of GetLogins uses GetCachedStatement // instead of GetUniqueStatement, since REGEXP is in use. See // http://crbug.com/248608. TEST_F(LoginDatabaseTest, TestPublicSuffixDomainMatchingDifferentSites) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); // Example password form. PasswordForm form; form.origin = GURL("https://foo.com/"); form.action = GURL("https://foo.com/login"); form.username_element = ASCIIToUTF16("username"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_element = ASCIIToUTF16("password"); form.password_value = ASCIIToUTF16("test"); form.submit_element = ASCIIToUTF16(""); form.signon_realm = "https://foo.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); result.clear(); // Match against an exact copy. EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // We go to the mobile site. PasswordForm form2(form); form2.origin = GURL("https://mobile.foo.com/"); form2.action = GURL("https://mobile.foo.com/login"); form2.signon_realm = "https://mobile.foo.com/"; // Match against the mobile site. EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); EXPECT_EQ("https://foo.com/", result[0]->signon_realm); EXPECT_TRUE(result[0]->is_public_suffix_match); result.clear(); // Add baz.com desktop site. form.origin = GURL("https://baz.com/login/"); form.action = GURL("https://baz.com/login/"); form.username_element = ASCIIToUTF16("email"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_element = ASCIIToUTF16("password"); form.password_value = ASCIIToUTF16("test"); form.submit_element = ASCIIToUTF16(""); form.signon_realm = "https://baz.com/"; form.ssl_valid = true; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // Add it and make sure it is there. 
EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); result.clear(); // We go to the mobile site of baz.com. PasswordForm form3(form); form3.origin = GURL("https://m.baz.com/login/"); form3.action = GURL("https://m.baz.com/login/"); form3.signon_realm = "https://m.baz.com/"; // Match against the mobile site of baz.com. EXPECT_TRUE(db().GetLogins(form3, &result)); EXPECT_EQ(1U, result.size()); EXPECT_EQ("https://baz.com/", result[0]->signon_realm); EXPECT_TRUE(result[0]->is_public_suffix_match); result.clear(); } PasswordForm GetFormWithNewSignonRealm(PasswordForm form, std::string signon_realm) { PasswordForm form2(form); form2.origin = GURL(signon_realm); form2.action = GURL(signon_realm); form2.signon_realm = signon_realm; return form2; } TEST_F(LoginDatabaseTest, TestPublicSuffixDomainMatchingRegexp) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); // Example password form. PasswordForm form; form.origin = GURL("http://foo.com/"); form.action = GURL("http://foo.com/login"); form.username_element = ASCIIToUTF16("username"); form.username_value = ASCIIToUTF16("test@gmail.com"); form.password_element = ASCIIToUTF16("password"); form.password_value = ASCIIToUTF16("test"); form.submit_element = ASCIIToUTF16(""); form.signon_realm = "http://foo.com/"; form.ssl_valid = false; form.preferred = false; form.scheme = PasswordForm::SCHEME_HTML; // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(1U, result.size()); result.clear(); // Example password form that has - in the domain name. PasswordForm form_dash = GetFormWithNewSignonRealm(form, "http://www.foo-bar.com/"); // Add it and make sure it is there. EXPECT_EQ(AddChangeForForm(form_dash), db().AddLogin(form_dash)); EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); result.clear(); // Match against an exact copy. EXPECT_TRUE(db().GetLogins(form, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // www.foo.com should match. PasswordForm form2 = GetFormWithNewSignonRealm(form, "http://www.foo.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // a.b.foo.com should match. form2 = GetFormWithNewSignonRealm(form, "http://a.b.foo.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // a-b.foo.com should match. form2 = GetFormWithNewSignonRealm(form, "http://a-b.foo.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // foo-bar.com should match. form2 = GetFormWithNewSignonRealm(form, "http://foo-bar.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // www.foo-bar.com should match. form2 = GetFormWithNewSignonRealm(form, "http://www.foo-bar.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // a.b.foo-bar.com should match. form2 = GetFormWithNewSignonRealm(form, "http://a.b.foo-bar.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // a-b.foo-bar.com should match. 
form2 = GetFormWithNewSignonRealm(form, "http://a-b.foo-bar.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(1U, result.size()); result.clear(); // foo.com with port 1337 should not match. form2 = GetFormWithNewSignonRealm(form, "http://foo.com:1337/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); // http://foo.com should not match since the scheme is wrong. form2 = GetFormWithNewSignonRealm(form, "https://foo.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); // notfoo.com should not match. form2 = GetFormWithNewSignonRealm(form, "http://notfoo.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); // baz.com should not match. form2 = GetFormWithNewSignonRealm(form, "http://baz.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); // foo-baz.com should not match. form2 = GetFormWithNewSignonRealm(form, "http://foo-baz.com/"); EXPECT_TRUE(db().GetLogins(form2, &result)); EXPECT_EQ(0U, result.size()); } static bool AddTimestampedLogin(LoginDatabase* db, std::string url, const std::string& unique_string, const base::Time& time, bool date_is_creation) { // Example password form. PasswordForm form; form.origin = GURL(url + std::string("/LoginAuth")); form.username_element = ASCIIToUTF16(unique_string); form.username_value = ASCIIToUTF16(unique_string); form.password_element = ASCIIToUTF16(unique_string); form.submit_element = ASCIIToUTF16("signIn"); form.signon_realm = url; form.display_name = ASCIIToUTF16(unique_string); form.icon_url = GURL("https://accounts.google.com/Icon"); form.federation_origin = url::Origin(GURL("https://accounts.google.com/")); form.skip_zero_click = true; if (date_is_creation) form.date_created = time; else form.date_synced = time; return db->AddLogin(form) == AddChangeForForm(form); } TEST_F(LoginDatabaseTest, ClearPrivateData_SavedPasswords) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); base::Time now = base::Time::Now(); base::TimeDelta one_day = base::TimeDelta::FromDays(1); // Create one with a 0 time. EXPECT_TRUE( AddTimestampedLogin(&db(), "http://1.com", "foo1", base::Time(), true)); // Create one for now and +/- 1 day. EXPECT_TRUE( AddTimestampedLogin(&db(), "http://2.com", "foo2", now - one_day, true)); EXPECT_TRUE(AddTimestampedLogin(&db(), "http://3.com", "foo3", now, true)); EXPECT_TRUE( AddTimestampedLogin(&db(), "http://4.com", "foo4", now + one_day, true)); // Verify inserts worked. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(4U, result.size()); result.clear(); // Get everything from today's date and on. EXPECT_TRUE(db().GetLoginsCreatedBetween(now, base::Time(), &result)); EXPECT_EQ(2U, result.size()); result.clear(); // Delete everything from today's date and on. db().RemoveLoginsCreatedBetween(now, base::Time()); // Should have deleted half of what we inserted. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(2U, result.size()); result.clear(); // Delete with 0 date (should delete all). db().RemoveLoginsCreatedBetween(base::Time(), base::Time()); // Verify nothing is left. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); } TEST_F(LoginDatabaseTest, RemoveLoginsSyncedBetween) { ScopedVector<autofill::PasswordForm> result; base::Time now = base::Time::Now(); base::TimeDelta one_day = base::TimeDelta::FromDays(1); // Create one with a 0 time. 
EXPECT_TRUE( AddTimestampedLogin(&db(), "http://1.com", "foo1", base::Time(), false)); // Create one for now and +/- 1 day. EXPECT_TRUE( AddTimestampedLogin(&db(), "http://2.com", "foo2", now - one_day, false)); EXPECT_TRUE(AddTimestampedLogin(&db(), "http://3.com", "foo3", now, false)); EXPECT_TRUE( AddTimestampedLogin(&db(), "http://4.com", "foo4", now + one_day, false)); // Verify inserts worked. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(4U, result.size()); result.clear(); // Get everything from today's date and on. EXPECT_TRUE(db().GetLoginsSyncedBetween(now, base::Time(), &result)); ASSERT_EQ(2U, result.size()); EXPECT_EQ("http://3.com", result[0]->signon_realm); EXPECT_EQ("http://4.com", result[1]->signon_realm); result.clear(); // Delete everything from today's date and on. db().RemoveLoginsSyncedBetween(now, base::Time()); // Should have deleted half of what we inserted. EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(2U, result.size()); EXPECT_EQ("http://1.com", result[0]->signon_realm); EXPECT_EQ("http://2.com", result[1]->signon_realm); result.clear(); // Delete with 0 date (should delete all). db().RemoveLoginsSyncedBetween(base::Time(), now); // Verify nothing is left. EXPECT_TRUE(db().GetAutofillableLogins(&result)); EXPECT_EQ(0U, result.size()); } TEST_F(LoginDatabaseTest, GetAutoSignInLogins) { ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo1")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo2")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo3")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo4")); EXPECT_TRUE(db().GetAutoSignInLogins(&result)); EXPECT_EQ(4U, result.size()); for (const auto& form : result) EXPECT_FALSE(form->skip_zero_click); EXPECT_TRUE(db().DisableAutoSignInForAllLogins()); EXPECT_TRUE(db().GetAutoSignInLogins(&result)); EXPECT_EQ(0U, result.size()); } TEST_F(LoginDatabaseTest, DisableAutoSignInForAllLogins) { ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo1")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo2")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo3")); EXPECT_TRUE(AddZeroClickableLogin(&db(), "foo4")); EXPECT_TRUE(db().GetAutofillableLogins(&result)); for (const auto& form : result) EXPECT_FALSE(form->skip_zero_click); EXPECT_TRUE(db().DisableAutoSignInForAllLogins()); EXPECT_TRUE(db().GetAutofillableLogins(&result)); for (const auto& form : result) EXPECT_TRUE(form->skip_zero_click); } TEST_F(LoginDatabaseTest, BlacklistedLogins) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetBlacklistLogins(&result)); ASSERT_EQ(0U, result.size()); // Save a form as blacklisted. PasswordForm form; form.origin = GURL("http://accounts.google.com/LoginAuth"); form.action = GURL("http://accounts.google.com/Login"); form.username_element = ASCIIToUTF16("Email"); form.password_element = ASCIIToUTF16("Passwd"); form.submit_element = ASCIIToUTF16("signIn"); form.signon_realm = "http://www.google.com/"; form.ssl_valid = false; form.preferred = true; form.blacklisted_by_user = true; form.scheme = PasswordForm::SCHEME_HTML; form.date_synced = base::Time::Now(); form.display_name = ASCIIToUTF16("Mr. Smith"); form.icon_url = GURL("https://accounts.google.com/Icon"); form.federation_origin = url::Origin(GURL("https://accounts.google.com/")); form.skip_zero_click = true; EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); // Get all non-blacklisted logins (should be none). 
EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(0U, result.size()); // GetLogins should give the blacklisted result. EXPECT_TRUE(db().GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); result.clear(); // So should GetAllBlacklistedLogins. EXPECT_TRUE(db().GetBlacklistLogins(&result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); result.clear(); } TEST_F(LoginDatabaseTest, VectorSerialization) { // Empty vector. std::vector<base::string16> vec; base::Pickle temp = SerializeVector(vec); std::vector<base::string16> output = DeserializeVector(temp); EXPECT_THAT(output, Eq(vec)); // Normal data. vec.push_back(ASCIIToUTF16("first")); vec.push_back(ASCIIToUTF16("second")); vec.push_back(ASCIIToUTF16("third")); temp = SerializeVector(vec); output = DeserializeVector(temp); EXPECT_THAT(output, Eq(vec)); } TEST_F(LoginDatabaseTest, UpdateIncompleteCredentials) { ScopedVector<autofill::PasswordForm> result; // Verify the database is empty. EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(0U, result.size()); // Save an incomplete form. Note that it only has a few fields set, ex. it's // missing 'action', 'username_element' and 'password_element'. Such forms // are sometimes inserted during import from other browsers (which may not // store this info). PasswordForm incomplete_form; incomplete_form.origin = GURL("http://accounts.google.com/LoginAuth"); incomplete_form.signon_realm = "http://accounts.google.com/"; incomplete_form.username_value = ASCIIToUTF16("my_username"); incomplete_form.password_value = ASCIIToUTF16("my_password"); incomplete_form.ssl_valid = false; incomplete_form.preferred = true; incomplete_form.blacklisted_by_user = false; incomplete_form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(AddChangeForForm(incomplete_form), db().AddLogin(incomplete_form)); // A form on some website. It should trigger a match with the stored one. PasswordForm encountered_form; encountered_form.origin = GURL("http://accounts.google.com/LoginAuth"); encountered_form.signon_realm = "http://accounts.google.com/"; encountered_form.action = GURL("http://accounts.google.com/Login"); encountered_form.username_element = ASCIIToUTF16("Email"); encountered_form.password_element = ASCIIToUTF16("Passwd"); encountered_form.submit_element = ASCIIToUTF16("signIn"); // Get matches for encountered_form. EXPECT_TRUE(db().GetLogins(encountered_form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(incomplete_form.origin, result[0]->origin); EXPECT_EQ(incomplete_form.signon_realm, result[0]->signon_realm); EXPECT_EQ(incomplete_form.username_value, result[0]->username_value); EXPECT_EQ(incomplete_form.password_value, result[0]->password_value); EXPECT_TRUE(result[0]->preferred); EXPECT_FALSE(result[0]->ssl_valid); // We should return empty 'action', 'username_element', 'password_element' // and 'submit_element' as we can't be sure if the credentials were entered // in this particular form on the page. EXPECT_EQ(GURL(), result[0]->action); EXPECT_TRUE(result[0]->username_element.empty()); EXPECT_TRUE(result[0]->password_element.empty()); EXPECT_TRUE(result[0]->submit_element.empty()); result.clear(); // Let's say this login form worked. Now update the stored credentials with // 'action', 'username_element', 'password_element' and 'submit_element' from // the encountered form. 
PasswordForm completed_form(incomplete_form); completed_form.action = encountered_form.action; completed_form.username_element = encountered_form.username_element; completed_form.password_element = encountered_form.password_element; completed_form.submit_element = encountered_form.submit_element; EXPECT_EQ(AddChangeForForm(completed_form), db().AddLogin(completed_form)); EXPECT_TRUE(db().RemoveLogin(incomplete_form)); // Get matches for encountered_form again. EXPECT_TRUE(db().GetLogins(encountered_form, &result)); ASSERT_EQ(1U, result.size()); // This time we should have all the info available. PasswordForm expected_form(completed_form); EXPECT_EQ(expected_form, *result[0]); result.clear(); } TEST_F(LoginDatabaseTest, UpdateOverlappingCredentials) { // Save an incomplete form. Note that it only has a few fields set, ex. it's // missing 'action', 'username_element' and 'password_element'. Such forms // are sometimes inserted during import from other browsers (which may not // store this info). PasswordForm incomplete_form; incomplete_form.origin = GURL("http://accounts.google.com/LoginAuth"); incomplete_form.signon_realm = "http://accounts.google.com/"; incomplete_form.username_value = ASCIIToUTF16("my_username"); incomplete_form.password_value = ASCIIToUTF16("my_password"); incomplete_form.ssl_valid = false; incomplete_form.preferred = true; incomplete_form.blacklisted_by_user = false; incomplete_form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(AddChangeForForm(incomplete_form), db().AddLogin(incomplete_form)); // Save a complete version of the previous form. Both forms could exist if // the user created the complete version before importing the incomplete // version from a different browser. PasswordForm complete_form = incomplete_form; complete_form.action = GURL("http://accounts.google.com/Login"); complete_form.username_element = ASCIIToUTF16("username_element"); complete_form.password_element = ASCIIToUTF16("password_element"); complete_form.submit_element = ASCIIToUTF16("submit"); // An update fails because the primary key for |complete_form| is different. EXPECT_EQ(PasswordStoreChangeList(), db().UpdateLogin(complete_form)); EXPECT_EQ(AddChangeForForm(complete_form), db().AddLogin(complete_form)); // Make sure both passwords exist. ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(2U, result.size()); result.clear(); // Simulate the user changing their password. complete_form.password_value = ASCIIToUTF16("new_password"); complete_form.date_synced = base::Time::Now(); EXPECT_EQ(UpdateChangeForForm(complete_form), db().UpdateLogin(complete_form)); // Both still exist now. EXPECT_TRUE(db().GetAutofillableLogins(&result)); ASSERT_EQ(2U, result.size()); if (result[0]->username_element.empty()) std::swap(result[0], result[1]); EXPECT_EQ(complete_form, *result[0]); EXPECT_EQ(incomplete_form, *result[1]); } TEST_F(LoginDatabaseTest, DoubleAdd) { PasswordForm form; form.origin = GURL("http://accounts.google.com/LoginAuth"); form.signon_realm = "http://accounts.google.com/"; form.username_value = ASCIIToUTF16("my_username"); form.password_value = ASCIIToUTF16("my_password"); form.ssl_valid = false; form.preferred = true; form.blacklisted_by_user = false; form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); // Add almost the same form again. 
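// Only |times_used| changes below, so the unique key is unchanged and, per the
// expectations that follow, AddLogin replaces the existing row and reports the
// replacement as a REMOVE change followed by an ADD change.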
form.times_used++; PasswordStoreChangeList list; list.push_back(PasswordStoreChange(PasswordStoreChange::REMOVE, form)); list.push_back(PasswordStoreChange(PasswordStoreChange::ADD, form)); EXPECT_EQ(list, db().AddLogin(form)); } TEST_F(LoginDatabaseTest, AddWrongForm) { PasswordForm form; // |origin| shouldn't be empty. form.origin = GURL(); form.signon_realm = "http://accounts.google.com/"; form.username_value = ASCIIToUTF16("my_username"); form.password_value = ASCIIToUTF16("my_password"); form.ssl_valid = false; form.preferred = true; form.blacklisted_by_user = false; form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(PasswordStoreChangeList(), db().AddLogin(form)); // |signon_realm| shouldn't be empty. form.origin = GURL("http://accounts.google.com/LoginAuth"); form.signon_realm.clear(); EXPECT_EQ(PasswordStoreChangeList(), db().AddLogin(form)); } TEST_F(LoginDatabaseTest, UpdateLogin) { PasswordForm form; form.origin = GURL("http://accounts.google.com/LoginAuth"); form.signon_realm = "http://accounts.google.com/"; form.username_value = ASCIIToUTF16("my_username"); form.password_value = ASCIIToUTF16("my_password"); form.ssl_valid = false; form.preferred = true; form.blacklisted_by_user = false; form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); form.action = GURL("http://accounts.google.com/login"); form.password_value = ASCIIToUTF16("my_new_password"); form.ssl_valid = true; form.preferred = false; form.other_possible_usernames.push_back(ASCIIToUTF16("my_new_username")); form.times_used = 20; form.submit_element = ASCIIToUTF16("submit_element"); form.date_synced = base::Time::Now(); form.date_created = base::Time::Now() - base::TimeDelta::FromDays(1); form.blacklisted_by_user = true; form.scheme = PasswordForm::SCHEME_BASIC; form.type = PasswordForm::TYPE_GENERATED; form.display_name = ASCIIToUTF16("Mr. Smith"); form.icon_url = GURL("https://accounts.google.com/Icon"); form.federation_origin = url::Origin(GURL("https://accounts.google.com/")); form.skip_zero_click = true; EXPECT_EQ(UpdateChangeForForm(form), db().UpdateLogin(form)); ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(db().GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(form, *result[0]); } TEST_F(LoginDatabaseTest, RemoveWrongForm) { PasswordForm form; // |origin| shouldn't be empty. form.origin = GURL("http://accounts.google.com/LoginAuth"); form.signon_realm = "http://accounts.google.com/"; form.username_value = ASCIIToUTF16("my_username"); form.password_value = ASCIIToUTF16("my_password"); form.ssl_valid = false; form.preferred = true; form.blacklisted_by_user = false; form.scheme = PasswordForm::SCHEME_HTML; // The form isn't in the database. 
EXPECT_FALSE(db().RemoveLogin(form)); EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); EXPECT_TRUE(db().RemoveLogin(form)); EXPECT_FALSE(db().RemoveLogin(form)); } TEST_F(LoginDatabaseTest, ReportMetricsTest) { PasswordForm password_form; password_form.origin = GURL("http://example.com"); password_form.username_value = ASCIIToUTF16("test1@gmail.com"); password_form.password_value = ASCIIToUTF16("test"); password_form.signon_realm = "http://example.com/"; password_form.times_used = 0; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.username_value = ASCIIToUTF16("test2@gmail.com"); password_form.times_used = 1; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("http://second.example.com"); password_form.signon_realm = "http://second.example.com"; password_form.times_used = 3; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.username_value = ASCIIToUTF16("test3@gmail.com"); password_form.type = PasswordForm::TYPE_GENERATED; password_form.times_used = 2; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("ftp://third.example.com/"); password_form.signon_realm = "ftp://third.example.com/"; password_form.times_used = 4; password_form.scheme = PasswordForm::SCHEME_OTHER; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("http://fourth.example.com/"); password_form.signon_realm = "http://fourth.example.com/"; password_form.type = PasswordForm::TYPE_MANUAL; password_form.username_value = ASCIIToUTF16(""); password_form.times_used = 10; password_form.scheme = PasswordForm::SCHEME_HTML; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("https://fifth.example.com/"); password_form.signon_realm = "https://fifth.example.com/"; password_form.password_value = ASCIIToUTF16(""); password_form.blacklisted_by_user = true; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("https://sixth.example.com/"); password_form.signon_realm = "https://sixth.example.com/"; password_form.username_value = ASCIIToUTF16(""); password_form.password_value = ASCIIToUTF16("my_password"); password_form.blacklisted_by_user = false; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.username_element = ASCIIToUTF16("some_other_input"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.username_value = ASCIIToUTF16("my_username"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL(); password_form.signon_realm = "android://hash@com.example.android/"; password_form.username_value = ASCIIToUTF16("JohnDoe"); password_form.password_value = ASCIIToUTF16("my_password"); password_form.blacklisted_by_user = false; EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.username_value = ASCIIToUTF16("JaneDoe"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); base::HistogramTester histogram_tester; db().ReportMetrics("", false); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccounts.UserCreated.WithoutCustomPassphrase", 9, 1); histogram_tester.ExpectBucketCount( "PasswordManager.AccountsPerSite.UserCreated.WithoutCustomPassphrase", 1, 2); histogram_tester.ExpectBucketCount( 
"PasswordManager.AccountsPerSite.UserCreated.WithoutCustomPassphrase", 2, 3); histogram_tester.ExpectBucketCount( "PasswordManager.TimesPasswordUsed.UserCreated.WithoutCustomPassphrase", 0, 1); histogram_tester.ExpectBucketCount( "PasswordManager.TimesPasswordUsed.UserCreated.WithoutCustomPassphrase", 1, 1); histogram_tester.ExpectBucketCount( "PasswordManager.TimesPasswordUsed.UserCreated.WithoutCustomPassphrase", 3, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccounts.AutoGenerated.WithoutCustomPassphrase", 2, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccountsHiRes.WithScheme.Android", 2, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccountsHiRes.WithScheme.Ftp", 1, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccountsHiRes.WithScheme.Http", 5, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccountsHiRes.WithScheme.Https", 3, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.TotalAccountsHiRes.WithScheme.Other", 0, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.AccountsPerSite.AutoGenerated.WithoutCustomPassphrase", 1, 2); histogram_tester.ExpectBucketCount( "PasswordManager.TimesPasswordUsed.AutoGenerated.WithoutCustomPassphrase", 2, 1); histogram_tester.ExpectBucketCount( "PasswordManager.TimesPasswordUsed.AutoGenerated.WithoutCustomPassphrase", 4, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.EmptyUsernames.CountInDatabase", 3, 1); histogram_tester.ExpectUniqueSample( "PasswordManager.EmptyUsernames.WithoutCorrespondingNonempty", 1, 1); } TEST_F(LoginDatabaseTest, PasswordReuseMetrics) { // -- Group of accounts that are reusing password #1. // // Destination account // +-----------------+-------+-------+-------+-------+-------+-------+-------+ // | | 1 | 2 | 3 | 4 | 5 | 6 | 7 | // +-----------------+-------+-------+-------+-------+-------+-------+-------+ // | Scheme? | HTTP | HTTP | HTTP | HTTP | HTTPS | HTTPS | HTTPS | // +-----------------+-------+-------+-------+-------+-------+-------+-------+ // | | 1 | - | Same | PSL | Diff. | Same | Diff. | Diff. | // | | 2 | Same | - | PSL | Diff. | Same | Diff. | Diff. | // | Relation | 3 | PSL | PSL | - | Diff. | Diff. | Same | Diff. | // | to host | 4 | Diff. | Diff. | Diff. | - | Diff. | Diff. | Same | // | of source | 5 | Same | Same | Diff. | Diff. | - | PSL | Diff. | // | account: | 6 | Diff. | Diff. | Same | Diff. | PSL | - | Diff. | // | | 7 | Diff. | Diff. | Diff. | Same | Diff. | Diff. | - | // +-----------------+-------+-------+-------+-------+-------+-------+-------+ PasswordForm password_form; password_form.signon_realm = "http://example.com/"; password_form.origin = GURL("http://example.com/"); password_form.username_value = ASCIIToUTF16("username_1"); password_form.password_value = ASCIIToUTF16("password_1"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.origin = GURL("http://example.com/"); password_form.username_value = ASCIIToUTF16("username_2"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); // Note: This PSL matches http://example.com, but not https://example.com. 
password_form.signon_realm = "http://www.example.com/"; password_form.origin = GURL("http://www.example.com/"); password_form.username_value = ASCIIToUTF16("username_3"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.signon_realm = "http://not-example.com/"; password_form.origin = GURL("http://not-example.com/"); password_form.username_value = ASCIIToUTF16("username_4"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.signon_realm = "https://example.com/"; password_form.origin = GURL("https://example.com/"); password_form.username_value = ASCIIToUTF16("username_5"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); // Note: This PSL matches https://example.com, but not http://example.com. password_form.signon_realm = "https://www.example.com/"; password_form.origin = GURL("https://www.example.com/"); password_form.username_value = ASCIIToUTF16("username_6"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.signon_realm = "https://not-example.com/"; password_form.origin = GURL("https://not-example.com/"); password_form.username_value = ASCIIToUTF16("username_7"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); // -- Group of accounts that are reusing password #2. // Both HTTP, different host. password_form.signon_realm = "http://example.com/"; password_form.origin = GURL("http://example.com/"); password_form.username_value = ASCIIToUTF16("username_8"); password_form.password_value = ASCIIToUTF16("password_2"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.signon_realm = "http://not-example.com/"; password_form.origin = GURL("http://not-example.com/"); password_form.username_value = ASCIIToUTF16("username_9"); password_form.password_value = ASCIIToUTF16("password_2"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); // -- Group of accounts that are reusing password #3. // HTTP sites identified by different IP addresses, so they should not be // considered a public suffix match. password_form.signon_realm = "http://1.2.3.4/"; password_form.origin = GURL("http://1.2.3.4/"); password_form.username_value = ASCIIToUTF16("username_10"); password_form.password_value = ASCIIToUTF16("password_3"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); password_form.signon_realm = "http://2.2.3.4/"; password_form.origin = GURL("http://2.2.3.4/"); password_form.username_value = ASCIIToUTF16("username_11"); password_form.password_value = ASCIIToUTF16("password_3"); EXPECT_EQ(AddChangeForForm(password_form), db().AddLogin(password_form)); // -- Not HTML form based logins or blacklisted logins. Should be ignored. 
PasswordForm ignored_form; ignored_form.scheme = PasswordForm::SCHEME_HTML; ignored_form.signon_realm = "http://example.org/"; ignored_form.origin = GURL("http://example.org/blacklist"); ignored_form.blacklisted_by_user = true; ignored_form.username_value = ASCIIToUTF16("username_x"); ignored_form.password_value = ASCIIToUTF16("password_y"); EXPECT_EQ(AddChangeForForm(ignored_form), db().AddLogin(ignored_form)); ignored_form.scheme = PasswordForm::SCHEME_BASIC; ignored_form.signon_realm = "http://example.org/HTTP Auth Realm"; ignored_form.origin = GURL("http://example.org/"); ignored_form.blacklisted_by_user = false; EXPECT_EQ(AddChangeForForm(ignored_form), db().AddLogin(ignored_form)); ignored_form.scheme = PasswordForm::SCHEME_HTML; ignored_form.signon_realm = "android://hash@com.example/"; ignored_form.origin = GURL(); ignored_form.blacklisted_by_user = false; EXPECT_EQ(AddChangeForForm(ignored_form), db().AddLogin(ignored_form)); ignored_form.scheme = PasswordForm::SCHEME_HTML; ignored_form.signon_realm = "federation://example.com/federation.com"; ignored_form.origin = GURL("https://example.com/"); ignored_form.blacklisted_by_user = false; EXPECT_EQ(AddChangeForForm(ignored_form), db().AddLogin(ignored_form)); base::HistogramTester histogram_tester; db().ReportMetrics("", false); const std::string kPrefix("PasswordManager.AccountsReusingPassword."); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnHttpRealmWithSameHost"), testing::ElementsAre(base::Bucket(0, 6), base::Bucket(1, 2))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnHttpsRealmWithSameHost"), testing::ElementsAre(base::Bucket(0, 4), base::Bucket(1, 4))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnPSLMatchingRealm"), testing::ElementsAre(base::Bucket(0, 5), base::Bucket(1, 2), base::Bucket(2, 1))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnHttpsRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(0, 4), base::Bucket(2, 4))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnHttpRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(1, 7), base::Bucket(3, 1))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpRealm.OnAnyRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(1, 4), base::Bucket(3, 3), base::Bucket(5, 1))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnHttpRealmWithSameHost"), testing::ElementsAre(base::Bucket(1, 2), base::Bucket(2, 1))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnHttpsRealmWithSameHost"), testing::ElementsAre(base::Bucket(0, 3))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnPSLMatchingRealm"), testing::ElementsAre(base::Bucket(0, 1), base::Bucket(1, 2))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnHttpRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(2, 1), base::Bucket(3, 2))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnHttpsRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(1, 2), base::Bucket(2, 1))); EXPECT_THAT(histogram_tester.GetAllSamples( kPrefix + "FromHttpsRealm.OnAnyRealmWithDifferentHost"), testing::ElementsAre(base::Bucket(3, 1), base::Bucket(4, 1), base::Bucket(5, 1))); } TEST_F(LoginDatabaseTest, ClearPasswordValues) { db().set_clear_password_values(true); // Add a PasswordForm, the password should be cleared. 
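// set_clear_password_values(true) above makes the database drop password
// values on write, so the stored copy should come back with an empty
// password_value.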
base::HistogramTester histogram_tester; PasswordForm form; form.origin = GURL("http://accounts.google.com/LoginAuth"); form.signon_realm = "http://accounts.google.com/"; form.username_value = ASCIIToUTF16("my_username"); form.password_value = ASCIIToUTF16("12345"); EXPECT_EQ(AddChangeForForm(form), db().AddLogin(form)); ScopedVector<autofill::PasswordForm> result; EXPECT_TRUE(db().GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); PasswordForm expected_form = form; expected_form.password_value.clear(); EXPECT_EQ(expected_form, *result[0]); // Update the password, it should stay empty. form.password_value = ASCIIToUTF16("password"); EXPECT_EQ(UpdateChangeForForm(form), db().UpdateLogin(form)); EXPECT_TRUE(db().GetLogins(form, &result)); ASSERT_EQ(1U, result.size()); EXPECT_EQ(expected_form, *result[0]); // Encrypting/decrypting shouldn't happen. Thus there should be no keychain // access on Mac. histogram_tester.ExpectTotalCount("OSX.Keychain.Access", 0); } #if defined(OS_POSIX) // Only the current user has permission to read the database. // // Only POSIX because GetPosixFilePermissions() only exists on POSIX. // This tests that sql::Connection::set_restrict_to_user() was called, // and that function is a noop on non-POSIX platforms in any case. TEST_F(LoginDatabaseTest, FilePermissions) { int mode = base::FILE_PERMISSION_MASK; EXPECT_TRUE(base::GetPosixFilePermissions(file_, &mode)); EXPECT_EQ((mode & base::FILE_PERMISSION_USER_MASK), mode); } #endif // defined(OS_POSIX) // Test the migration from GetParam() version to kCurrentVersionNumber. class LoginDatabaseMigrationTest : public testing::TestWithParam<int> { protected: void SetUp() override { ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); database_dump_location_ = database_dump_location_.AppendASCII("components") .AppendASCII("test") .AppendASCII("data") .AppendASCII("password_manager"); database_path_ = temp_dir_.path().AppendASCII("test.db"); #if defined(OS_MACOSX) OSCrypt::UseMockKeychain(true); #endif // defined(OS_MACOSX) } // Creates the databse from |sql_file|. void CreateDatabase(base::StringPiece sql_file) { base::FilePath database_dump; ASSERT_TRUE(PathService::Get(base::DIR_SOURCE_ROOT, &database_dump)); database_dump = database_dump.Append(database_dump_location_).AppendASCII(sql_file); ASSERT_TRUE( sql::test::CreateDatabaseFromSQL(database_path_, database_dump)); } void DestroyDatabase() { if (!database_path_.empty()) sql::Connection::Delete(database_path_); } // Returns an empty vector on failure. Otherwise returns values in the column // |column_name| of the logins table. The order of the // returned rows is well-defined. template <class T> std::vector<T> GetValues(const std::string& column_name) { sql::Connection db; std::vector<T> results; if (!db.Open(database_path_)) return results; std::string statement = base::StringPrintf( "SELECT %s FROM logins ORDER BY username_value, %s DESC", column_name.c_str(), column_name.c_str()); sql::Statement s(db.GetCachedStatement(SQL_FROM_HERE, statement.c_str())); if (!s.is_valid()) { db.Close(); return results; } while (s.Step()) results.push_back(GetFirstColumn<T>(s)); s.Clear(); db.Close(); return results; } // Returns the database version for the test. int version() const { return GetParam(); } // Actual test body. 
  void MigrationToVCurrent(base::StringPiece sql_file);

  base::FilePath database_path_;

 private:
  base::FilePath database_dump_location_;
  base::ScopedTempDir temp_dir_;
};

void LoginDatabaseMigrationTest::MigrationToVCurrent(
    base::StringPiece sql_file) {
  SCOPED_TRACE(testing::Message("Version file = ") << sql_file);
  CreateDatabase(sql_file);
  // Original date, in seconds since UTC epoch.
  std::vector<int64_t> date_created(GetValues<int64_t>("date_created"));
  ASSERT_EQ(2U, date_created.size());
  // Migration to version 8 changes dates to the new format.
  // So for versions less than or equal to 8 the creation date should be in the
  // old format before migration and in the new format after.
  if (version() <= 8) {
    ASSERT_EQ(1402955745, date_created[0]);
    ASSERT_EQ(1402950000, date_created[1]);
  } else {
    ASSERT_EQ(13047429345000000, date_created[0]);
    ASSERT_EQ(13047423600000000, date_created[1]);
  }

  {
    // Assert that the database was successfully opened and updated
    // to current version.
    LoginDatabase db(database_path_);
    ASSERT_TRUE(db.Init());
    // Verifies that the final version can save all the appropriate fields.
    PasswordForm form;
    GenerateExamplePasswordForm(&form);
    // Add the same form twice to test the constraints in the database.
    EXPECT_EQ(AddChangeForForm(form), db.AddLogin(form));
    PasswordStoreChangeList list;
    list.push_back(PasswordStoreChange(PasswordStoreChange::REMOVE, form));
    list.push_back(PasswordStoreChange(PasswordStoreChange::ADD, form));
    EXPECT_EQ(list, db.AddLogin(form));

    ScopedVector<autofill::PasswordForm> result;
    EXPECT_TRUE(db.GetLogins(form, &result));
    ASSERT_EQ(1U, result.size());
    EXPECT_EQ(form, *result[0]);
    EXPECT_TRUE(db.RemoveLogin(form));
  }
  // New date, in microseconds since platform independent epoch.
  std::vector<int64_t> new_date_created(GetValues<int64_t>("date_created"));
  if (version() <= 8) {
    ASSERT_EQ(2U, new_date_created.size());
    // Check that the two dates match up.
    for (size_t i = 0; i < date_created.size(); ++i) {
      EXPECT_EQ(base::Time::FromInternalValue(new_date_created[i]),
                base::Time::FromTimeT(date_created[i]));
    }
  } else if (version() == 10) {
    // The test data is set up on this version to cause a unique key collision.
    EXPECT_EQ(1U, new_date_created.size());
  } else {
    ASSERT_EQ(2U, new_date_created.size());
    ASSERT_EQ(13047429345000000, new_date_created[0]);
    ASSERT_EQ(13047423600000000, new_date_created[1]);
  }

  if (version() >= 7 && version() <= 13) {
    // The "avatar_url" column first appeared in version 7. In version 14,
    // it was renamed to "icon_url". Migration from a version <= 13
    // to >= 14 should not break these URLs.
    std::vector<std::string> urls(GetValues<std::string>("icon_url"));
    if (version() == 10) {
      // The testcase for version 10 tests duplicate entries, so we only expect
      // one URL.
      EXPECT_THAT(urls, testing::ElementsAre("https://www.google.com/icon"));
    } else {
      // Otherwise, we expect one empty and one valid URL.
      EXPECT_THAT(urls,
                  testing::ElementsAre("", "https://www.google.com/icon"));
    }
  }

  {
    // On versions < 15 |kCompatibleVersionNumber| was set to 1, but
    // the migration should bring it to the correct value.
    sql::Connection db;
    sql::MetaTable meta_table;
    ASSERT_TRUE(db.Open(database_path_));
    ASSERT_TRUE(
        meta_table.Init(&db, kCurrentVersionNumber, kCompatibleVersionNumber));
    EXPECT_EQ(password_manager::kCompatibleVersionNumber,
              meta_table.GetCompatibleVersionNumber());
  }
  DestroyDatabase();
}

// Tests the migration of the login database from version() to
// kCurrentVersionNumber.
TEST_P(LoginDatabaseMigrationTest, MigrationToVCurrent) { MigrationToVCurrent(base::StringPrintf("login_db_v%d.sql", version())); } class LoginDatabaseMigrationTestV9 : public LoginDatabaseMigrationTest { }; // Tests migration from the alternative version #9, see crbug.com/423716. TEST_P(LoginDatabaseMigrationTestV9, V9WithoutUseAdditionalAuthField) { ASSERT_EQ(9, version()); MigrationToVCurrent("login_db_v9_without_use_additional_auth_field.sql"); } class LoginDatabaseMigrationTestBroken : public LoginDatabaseMigrationTest {}; // Test migrating certain databases with incorrect version. // http://crbug.com/295851 TEST_P(LoginDatabaseMigrationTestBroken, Broken) { MigrationToVCurrent(base::StringPrintf("login_db_v%d_broken.sql", version())); } INSTANTIATE_TEST_CASE_P(MigrationToVCurrent, LoginDatabaseMigrationTest, testing::Range(1, kCurrentVersionNumber + 1)); INSTANTIATE_TEST_CASE_P(MigrationToVCurrent, LoginDatabaseMigrationTestV9, testing::Values(9)); INSTANTIATE_TEST_CASE_P(MigrationToVCurrent, LoginDatabaseMigrationTestBroken, testing::Range(1, 4)); } // namespace password_manager
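// A worked check of the date constants asserted in MigrationToVCurrent above
// (assuming base::Time's internal value counts microseconds from the Windows
// epoch, 1601-01-01, which precedes the Unix epoch by 11644473600 seconds):
//   (1402955745 + 11644473600) * 1000000 == 13047429345000000
//   (1402950000 + 11644473600) * 1000000 == 13047423600000000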
/********************************************************************** * File: statistc.c (Formerly stats.c) * Description: Simple statistical package for integer values. * Author: Ray Smith * Created: Mon Feb 04 16:56:05 GMT 1991 * * (C) Copyright 1991, Hewlett-Packard Ltd. ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. ** You may obtain a copy of the License at ** http://www.apache.org/licenses/LICENSE-2.0 ** Unless required by applicable law or agreed to in writing, software ** distributed under the License is distributed on an "AS IS" BASIS, ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ** See the License for the specific language governing permissions and ** limitations under the License. * **********************************************************************/ // Include automatically generated configuration file if running autoconf. #ifdef HAVE_CONFIG_H #include "config_auto.h" #endif #include "statistc.h" #include <string.h> #include <math.h> #include <stdlib.h> #include "helpers.h" #include "scrollview.h" #include "tprintf.h" using tesseract::KDPairInc; /********************************************************************** * STATS::STATS * * Construct a new stats element by allocating and zeroing the memory. **********************************************************************/ STATS::STATS(inT32 min_bucket_value, inT32 max_bucket_value_plus_1) { if (max_bucket_value_plus_1 <= min_bucket_value) { min_bucket_value = 0; max_bucket_value_plus_1 = 1; } rangemin_ = min_bucket_value; // setup rangemax_ = max_bucket_value_plus_1; buckets_ = new inT32[rangemax_ - rangemin_]; clear(); } STATS::STATS() { rangemax_ = 0; rangemin_ = 0; buckets_ = NULL; } /********************************************************************** * STATS::set_range * * Alter the range on an existing stats element. **********************************************************************/ bool STATS::set_range(inT32 min_bucket_value, inT32 max_bucket_value_plus_1) { if (max_bucket_value_plus_1 <= min_bucket_value) { return false; } if (rangemax_ - rangemin_ != max_bucket_value_plus_1 - min_bucket_value) { delete [] buckets_; buckets_ = new inT32[max_bucket_value_plus_1 - min_bucket_value]; } rangemin_ = min_bucket_value; // setup rangemax_ = max_bucket_value_plus_1; clear(); // zero it return true; } /********************************************************************** * STATS::clear * * Clear out the STATS class by zeroing all the buckets. **********************************************************************/ void STATS::clear() { // clear out buckets total_count_ = 0; if (buckets_ != NULL) memset(buckets_, 0, (rangemax_ - rangemin_) * sizeof(buckets_[0])); } /********************************************************************** * STATS::~STATS * * Destructor for a stats class. **********************************************************************/ STATS::~STATS () { if (buckets_ != NULL) { delete [] buckets_; buckets_ = NULL; } } /********************************************************************** * STATS::add * * Add a set of samples to (or delete from) a pile. 
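 * A negative count removes samples; values outside the configured range are
 * clipped into the first or last bucket (see the ClipToRange call below).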
**********************************************************************/ void STATS::add(inT32 value, inT32 count) { if (buckets_ == NULL) { return; } value = ClipToRange(value, rangemin_, rangemax_ - 1); buckets_[value - rangemin_] += count; total_count_ += count; // keep count of total } /********************************************************************** * STATS::mode * * Find the mode of a stats class. **********************************************************************/ inT32 STATS::mode() const { // get mode of samples if (buckets_ == NULL) { return rangemin_; } inT32 max = buckets_[0]; // max cell count inT32 maxindex = 0; // index of max for (int index = rangemax_ - rangemin_ - 1; index > 0; --index) { if (buckets_[index] > max) { max = buckets_[index]; // find biggest maxindex = index; } } return maxindex + rangemin_; // index of biggest } /********************************************************************** * STATS::mean * * Find the mean of a stats class. **********************************************************************/ double STATS::mean() const { //get mean of samples if (buckets_ == NULL || total_count_ <= 0) { return static_cast<double>(rangemin_); } inT64 sum = 0; for (int index = rangemax_ - rangemin_ - 1; index >= 0; --index) { sum += static_cast<inT64>(index) * buckets_[index]; } return static_cast<double>(sum) / total_count_ + rangemin_; } /********************************************************************** * STATS::sd * * Find the standard deviation of a stats class. **********************************************************************/ double STATS::sd() const { //standard deviation if (buckets_ == NULL || total_count_ <= 0) { return 0.0; } inT64 sum = 0; double sqsum = 0.0; for (int index = rangemax_ - rangemin_ - 1; index >= 0; --index) { sum += static_cast<inT64>(index) * buckets_[index]; sqsum += static_cast<double>(index) * index * buckets_[index]; } double variance = static_cast<double>(sum) / total_count_; variance = sqsum / total_count_ - variance * variance; if (variance > 0.0) return sqrt(variance); return 0.0; } /********************************************************************** * STATS::ile * * Returns the fractile value such that frac fraction (in [0,1]) of samples * has a value less than the return value. **********************************************************************/ double STATS::ile(double frac) const { if (buckets_ == NULL || total_count_ == 0) { return static_cast<double>(rangemin_); } #if 0 // TODO(rays) The existing code doesn't seem to be doing the right thing // with target a double but this substitute crashes the code that uses it. // Investigate and fix properly. 
int target = IntCastRounded(frac * total_count_); target = ClipToRange(target, 1, total_count_); #else double target = frac * total_count_; target = ClipToRange(target, 1.0, static_cast<double>(total_count_)); #endif int sum = 0; int index = 0; for (index = 0; index < rangemax_ - rangemin_ && sum < target; sum += buckets_[index++]); if (index > 0) { ASSERT_HOST(buckets_[index - 1] > 0); return rangemin_ + index - static_cast<double>(sum - target) / buckets_[index - 1]; } else { return static_cast<double>(rangemin_); } } /********************************************************************** * STATS::min_bucket * * Find REAL minimum bucket - ile(0.0) isn't necessarily correct **********************************************************************/ inT32 STATS::min_bucket() const { // Find min if (buckets_ == NULL || total_count_ == 0) { return rangemin_; } inT32 min = 0; for (min = 0; (min < rangemax_ - rangemin_) && (buckets_[min] == 0); min++); return rangemin_ + min; } /********************************************************************** * STATS::max_bucket * * Find REAL maximum bucket - ile(1.0) isn't necessarily correct **********************************************************************/ inT32 STATS::max_bucket() const { // Find max if (buckets_ == NULL || total_count_ == 0) { return rangemin_; } inT32 max; for (max = rangemax_ - rangemin_ - 1; max > 0 && buckets_[max] == 0; max--); return rangemin_ + max; } /********************************************************************** * STATS::median * * Finds a more useful estimate of median than ile(0.5). * * Overcomes a problem with ile() - if the samples are, for example, * 6,6,13,14 ile(0.5) return 7.0 - when a more useful value would be midway * between 6 and 13 = 9.5 **********************************************************************/ double STATS::median() const { //get median if (buckets_ == NULL) { return static_cast<double>(rangemin_); } double median = ile(0.5); int median_pile = static_cast<int>(floor(median)); if ((total_count_ > 1) && (pile_count(median_pile) == 0)) { inT32 min_pile; inT32 max_pile; /* Find preceding non zero pile */ for (min_pile = median_pile; pile_count(min_pile) == 0; min_pile--); /* Find following non zero pile */ for (max_pile = median_pile; pile_count(max_pile) == 0; max_pile++); median = (min_pile + max_pile) / 2.0; } return median; } /********************************************************************** * STATS::local_min * * Return TRUE if this point is a local min. **********************************************************************/ bool STATS::local_min(inT32 x) const { if (buckets_ == NULL) { return false; } x = ClipToRange(x, rangemin_, rangemax_ - 1) - rangemin_; if (buckets_[x] == 0) return true; inT32 index; // table index for (index = x - 1; index >= 0 && buckets_[index] == buckets_[x]; --index); if (index >= 0 && buckets_[index] < buckets_[x]) return false; for (index = x + 1; index < rangemax_ - rangemin_ && buckets_[index] == buckets_[x]; ++index); if (index < rangemax_ - rangemin_ && buckets_[index] < buckets_[x]) return false; else return true; } /********************************************************************** * STATS::smooth * * Apply a triangular smoothing filter to the stats. * This makes the modes a bit more useful. * The factor gives the height of the triangle, i.e. the weight of the * centre. 
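 * For example, with factor 3 each bucket keeps weight 3 while its neighbours
 * at offsets 1 and 2 contribute with weights 2 and 1 (see the loop below).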
**********************************************************************/ void STATS::smooth(inT32 factor) { if (buckets_ == NULL || factor < 2) { return; } STATS result(rangemin_, rangemax_); int entrycount = rangemax_ - rangemin_; for (int entry = 0; entry < entrycount; entry++) { //centre weight int count = buckets_[entry] * factor; for (int offset = 1; offset < factor; offset++) { if (entry - offset >= 0) count += buckets_[entry - offset] * (factor - offset); if (entry + offset < entrycount) count += buckets_[entry + offset] * (factor - offset); } result.add(entry + rangemin_, count); } total_count_ = result.total_count_; memcpy(buckets_, result.buckets_, entrycount * sizeof(buckets_[0])); } /********************************************************************** * STATS::cluster * * Cluster the samples into max_cluster clusters. * Each call runs one iteration. The array of clusters must be * max_clusters+1 in size as cluster 0 is used to indicate which samples * have been used. * The return value is the current number of clusters. **********************************************************************/ inT32 STATS::cluster(float lower, // thresholds float upper, float multiple, // distance threshold inT32 max_clusters, // max no to make STATS *clusters) { // array of clusters BOOL8 new_cluster; // added one float *centres; // cluster centres inT32 entry; // bucket index inT32 cluster; // cluster index inT32 best_cluster; // one to assign to inT32 new_centre = 0; // residual mode inT32 new_mode; // pile count of new_centre inT32 count; // pile to place float dist; // from cluster float min_dist; // from best_cluster inT32 cluster_count; // no of clusters if (buckets_ == NULL || max_clusters < 1) return 0; centres = new float[max_clusters + 1]; for (cluster_count = 1; cluster_count <= max_clusters && clusters[cluster_count].buckets_ != NULL && clusters[cluster_count].total_count_ > 0; cluster_count++) { centres[cluster_count] = static_cast<float>(clusters[cluster_count].ile(0.5)); new_centre = clusters[cluster_count].mode(); for (entry = new_centre - 1; centres[cluster_count] - entry < lower && entry >= rangemin_ && pile_count(entry) <= pile_count(entry + 1); entry--) { count = pile_count(entry) - clusters[0].pile_count(entry); if (count > 0) { clusters[cluster_count].add(entry, count); clusters[0].add (entry, count); } } for (entry = new_centre + 1; entry - centres[cluster_count] < lower && entry < rangemax_ && pile_count(entry) <= pile_count(entry - 1); entry++) { count = pile_count(entry) - clusters[0].pile_count(entry); if (count > 0) { clusters[cluster_count].add(entry, count); clusters[0].add(entry, count); } } } cluster_count--; if (cluster_count == 0) { clusters[0].set_range(rangemin_, rangemax_); } do { new_cluster = FALSE; new_mode = 0; for (entry = 0; entry < rangemax_ - rangemin_; entry++) { count = buckets_[entry] - clusters[0].buckets_[entry]; //remaining pile if (count > 0) { //any to handle min_dist = static_cast<float>(MAX_INT32); best_cluster = 0; for (cluster = 1; cluster <= cluster_count; cluster++) { dist = entry + rangemin_ - centres[cluster]; //find distance if (dist < 0) dist = -dist; if (dist < min_dist) { min_dist = dist; //find least best_cluster = cluster; } } if (min_dist > upper //far enough for new && (best_cluster == 0 || entry + rangemin_ > centres[best_cluster] * multiple || entry + rangemin_ < centres[best_cluster] / multiple)) { if (count > new_mode) { new_mode = count; new_centre = entry + rangemin_; } } } } // need new and room if (new_mode > 0 && 
cluster_count < max_clusters) { cluster_count++; new_cluster = TRUE; if (!clusters[cluster_count].set_range(rangemin_, rangemax_)) { delete [] centres; return 0; } centres[cluster_count] = static_cast<float>(new_centre); clusters[cluster_count].add(new_centre, new_mode); clusters[0].add(new_centre, new_mode); for (entry = new_centre - 1; centres[cluster_count] - entry < lower && entry >= rangemin_ && pile_count (entry) <= pile_count(entry + 1); entry--) { count = pile_count(entry) - clusters[0].pile_count(entry); if (count > 0) { clusters[cluster_count].add(entry, count); clusters[0].add(entry, count); } } for (entry = new_centre + 1; entry - centres[cluster_count] < lower && entry < rangemax_ && pile_count (entry) <= pile_count(entry - 1); entry++) { count = pile_count(entry) - clusters[0].pile_count(entry); if (count > 0) { clusters[cluster_count].add(entry, count); clusters[0].add (entry, count); } } centres[cluster_count] = static_cast<float>(clusters[cluster_count].ile(0.5)); } } while (new_cluster && cluster_count < max_clusters); delete [] centres; return cluster_count; } // Helper tests that the current index is still part of the peak and gathers // the data into the peak, returning false when the peak is ended. // src_buckets[index] - used_buckets[index] is the unused part of the histogram. // prev_count is the histogram count of the previous index on entry and is // updated to the current index on return. // total_count and total_value are accumulating the mean of the peak. static bool GatherPeak(int index, const int* src_buckets, int* used_buckets, int* prev_count, int* total_count, double* total_value) { int pile_count = src_buckets[index] - used_buckets[index]; if (pile_count <= *prev_count && pile_count > 0) { // Accumulate count and index.count product. *total_count += pile_count; *total_value += index * pile_count; // Mark this index as used used_buckets[index] = src_buckets[index]; *prev_count = pile_count; return true; } else { return false; } } // Finds (at most) the top max_modes modes, well actually the whole peak around // each mode, returning them in the given modes vector as a <mean of peak, // total count of peak> pair in order of decreasing total count. // Since the mean is the key and the count the data in the pair, a single call // to sort on the output will re-sort by increasing mean of peak if that is // more useful than decreasing total count. // Returns the actual number of modes found. int STATS::top_n_modes(int max_modes, GenericVector<KDPairInc<float, int> >* modes) const { if (max_modes <= 0) return 0; int src_count = rangemax_ - rangemin_; // Used copies the counts in buckets_ as they get used. STATS used(rangemin_, rangemax_); modes->truncate(0); // Total count of the smallest peak found so far. int least_count = 1; // Mode that is used as a seed for each peak int max_count = 0; do { // Find an unused mode. max_count = 0; int max_index = 0; for (int src_index = 0; src_index < src_count; src_index++) { int pile_count = buckets_[src_index] - used.buckets_[src_index]; if (pile_count > max_count) { max_count = pile_count; max_index = src_index; } } if (max_count > 0) { // Copy the bucket count to used so it doesn't get found again. used.buckets_[max_index] = max_count; // Get the entire peak. 
double total_value = max_index * max_count; int total_count = max_count; int prev_pile = max_count; for (int offset = 1; max_index + offset < src_count; ++offset) { if (!GatherPeak(max_index + offset, buckets_, used.buckets_, &prev_pile, &total_count, &total_value)) break; } prev_pile = buckets_[max_index]; for (int offset = 1; max_index - offset >= 0; ++offset) { if (!GatherPeak(max_index - offset, buckets_, used.buckets_, &prev_pile, &total_count, &total_value)) break; } if (total_count > least_count || modes->size() < max_modes) { // We definitely want this mode, so if we have enough discard the least. if (modes->size() == max_modes) modes->truncate(max_modes - 1); int target_index = 0; // Linear search for the target insertion point. while (target_index < modes->size() && (*modes)[target_index].data >= total_count) ++target_index; float peak_mean = static_cast<float>(total_value / total_count + rangemin_); modes->insert(KDPairInc<float, int>(peak_mean, total_count), target_index); least_count = modes->back().data; } } } while (max_count > 0); return modes->size(); } /********************************************************************** * STATS::print * * Prints a summary and table of the histogram. **********************************************************************/ void STATS::print() const { if (buckets_ == NULL) { return; } inT32 min = min_bucket() - rangemin_; inT32 max = max_bucket() - rangemin_; int num_printed = 0; for (int index = min; index <= max; index++) { if (buckets_[index] != 0) { tprintf("%4d:%-3d ", rangemin_ + index, buckets_[index]); if (++num_printed % 8 == 0) tprintf ("\n"); } } tprintf ("\n"); print_summary(); } /********************************************************************** * STATS::print_summary * * Print a summary of the stats. **********************************************************************/ void STATS::print_summary() const { if (buckets_ == NULL) { return; } inT32 min = min_bucket(); inT32 max = max_bucket(); tprintf("Total count=%d\n", total_count_); tprintf("Min=%.2f Really=%d\n", ile(0.0), min); tprintf("Lower quartile=%.2f\n", ile(0.25)); tprintf("Median=%.2f, ile(0.5)=%.2f\n", median(), ile(0.5)); tprintf("Upper quartile=%.2f\n", ile(0.75)); tprintf("Max=%.2f Really=%d\n", ile(1.0), max); tprintf("Range=%d\n", max + 1 - min); tprintf("Mean= %.2f\n", mean()); tprintf("SD= %.2f\n", sd()); } /********************************************************************** * STATS::plot * * Draw a histogram of the stats table. **********************************************************************/ #ifndef GRAPHICS_DISABLED void STATS::plot(ScrollView* window, // to draw in float xorigin, // bottom left float yorigin, float xscale, // one x unit float yscale, // one y unit ScrollView::Color colour) const { // colour to draw in if (buckets_ == NULL) { return; } window->Pen(colour); for (int index = 0; index < rangemax_ - rangemin_; index++) { window->Rectangle( xorigin + xscale * index, yorigin, xorigin + xscale * (index + 1), yorigin + yscale * buckets_[index]); } } #endif /********************************************************************** * STATS::plotline * * Draw a histogram of the stats table. 
(Line only) **********************************************************************/ #ifndef GRAPHICS_DISABLED void STATS::plotline(ScrollView* window, // to draw in float xorigin, // bottom left float yorigin, float xscale, // one x unit float yscale, // one y unit ScrollView::Color colour) const { // colour to draw in if (buckets_ == NULL) { return; } window->Pen(colour); window->SetCursor(xorigin, yorigin + yscale * buckets_[0]); for (int index = 0; index < rangemax_ - rangemin_; index++) { window->DrawTo(xorigin + xscale * index, yorigin + yscale * buckets_[index]); } } #endif /********************************************************************** * choose_nth_item * * Returns the index of what would b the nth item in the array * if the members were sorted, without actually sorting. **********************************************************************/ inT32 choose_nth_item(inT32 index, float *array, inT32 count) { inT32 next_sample; // next one to do inT32 next_lesser; // space for new inT32 prev_greater; // last one saved inT32 equal_count; // no of equal ones float pivot; // proposed median float sample; // current sample if (count <= 1) return 0; if (count == 2) { if (array[0] < array[1]) { return index >= 1 ? 1 : 0; } else { return index >= 1 ? 0 : 1; } } else { if (index < 0) index = 0; // ensure legal else if (index >= count) index = count - 1; equal_count = (inT32) (rand() % count); pivot = array[equal_count]; // fill gap array[equal_count] = array[0]; next_lesser = 0; prev_greater = count; equal_count = 1; for (next_sample = 1; next_sample < prev_greater;) { sample = array[next_sample]; if (sample < pivot) { // shuffle array[next_lesser++] = sample; next_sample++; } else if (sample > pivot) { prev_greater--; // juggle array[next_sample] = array[prev_greater]; array[prev_greater] = sample; } else { equal_count++; next_sample++; } } for (next_sample = next_lesser; next_sample < prev_greater;) array[next_sample++] = pivot; if (index < next_lesser) return choose_nth_item (index, array, next_lesser); else if (index < prev_greater) return next_lesser; // in equal bracket else return choose_nth_item (index - prev_greater, array + prev_greater, count - prev_greater) + prev_greater; } } /********************************************************************** * choose_nth_item * * Returns the index of what would be the nth item in the array * if the members were sorted, without actually sorting. **********************************************************************/ inT32 choose_nth_item(inT32 index, void *array, inT32 count, size_t size, int (*compar)(const void*, const void*)) { int result; // of compar inT32 next_sample; // next one to do inT32 next_lesser; // space for new inT32 prev_greater; // last one saved inT32 equal_count; // no of equal ones inT32 pivot; // proposed median if (count <= 1) return 0; if (count == 2) { if (compar (array, (char *) array + size) < 0) { return index >= 1 ? 1 : 0; } else { return index >= 1 ? 
0 : 1; } } if (index < 0) index = 0; // ensure legal else if (index >= count) index = count - 1; pivot = (inT32) (rand () % count); swap_entries (array, size, pivot, 0); next_lesser = 0; prev_greater = count; equal_count = 1; for (next_sample = 1; next_sample < prev_greater;) { result = compar ((char *) array + size * next_sample, (char *) array + size * next_lesser); if (result < 0) { swap_entries (array, size, next_lesser++, next_sample++); // shuffle } else if (result > 0) { prev_greater--; swap_entries(array, size, prev_greater, next_sample); } else { equal_count++; next_sample++; } } if (index < next_lesser) return choose_nth_item (index, array, next_lesser, size, compar); else if (index < prev_greater) return next_lesser; // in equal bracket else return choose_nth_item (index - prev_greater, (char *) array + size * prev_greater, count - prev_greater, size, compar) + prev_greater; } /********************************************************************** * swap_entries * * Swap 2 entries of arbitrary size in-place in a table. **********************************************************************/ void swap_entries(void *array, // array of entries size_t size, // size of entry inT32 index1, // entries to swap inT32 index2) { char tmp; char *ptr1; // to entries char *ptr2; size_t count; // of bytes ptr1 = reinterpret_cast<char*>(array) + index1 * size; ptr2 = reinterpret_cast<char*>(array) + index2 * size; for (count = 0; count < size; count++) { tmp = *ptr1; *ptr1++ = *ptr2; *ptr2++ = tmp; // tedious! } }
27,437
8,664
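The STATS histogram above keeps its entire state in one integer bucket array, so the mean and every fractile fall out of a single pass over the counts. Below is a minimal standalone sketch of that idea; the Histogram class is a simplified stand-in written for this note, not the STATS class itself, though it keeps the same [rangemin, rangemax) bucket convention and the same interpolation inside the crossing bucket.

// Simplified stand-in for the bucket-histogram approach used above.
// Values are clipped into [rangemin, rangemax) and counted per integer
// bucket; mean() and ile() are then derived from the counts alone.
#include <algorithm>
#include <cstdio>
#include <initializer_list>
#include <vector>

class Histogram {
 public:
  Histogram(int rangemin, int rangemax)
      : rangemin_(rangemin), buckets_(rangemax - rangemin, 0), total_(0) {}

  void add(int value, int count = 1) {
    // Clip into the legal range, as STATS::add does.
    int hi = rangemin_ + static_cast<int>(buckets_.size()) - 1;
    value = std::max(rangemin_, std::min(value, hi));
    buckets_[value - rangemin_] += count;
    total_ += count;
  }

  double mean() const {
    if (total_ <= 0) return rangemin_;
    long long sum = 0;
    for (size_t i = 0; i < buckets_.size(); ++i)
      sum += static_cast<long long>(i) * buckets_[i];
    return static_cast<double>(sum) / total_ + rangemin_;
  }

  // Fractile: the value below which frac of the samples fall, interpolated
  // within the bucket where the running count crosses the target.
  double ile(double frac) const {
    if (total_ == 0) return rangemin_;
    double target = std::max(1.0, std::min(frac * total_, static_cast<double>(total_)));
    int sum = 0;
    size_t i = 0;
    for (; i < buckets_.size() && sum < target; sum += buckets_[i++]) {}
    if (i == 0) return rangemin_;
    return rangemin_ + static_cast<double>(i) - (sum - target) / buckets_[i - 1];
  }

 private:
  int rangemin_;
  std::vector<int> buckets_;
  long long total_;
};

int main() {
  Histogram h(0, 100);
  for (int v : {6, 6, 13, 14}) h.add(v);
  std::printf("mean=%.2f ile(0.5)=%.2f\n", h.mean(), h.ile(0.5));
}

On the sample 6, 6, 13, 14 from the median() comment above this prints mean=9.75 and ile(0.5)=7.00, which is exactly the behaviour STATS::median() compensates for by averaging the neighbouring non-empty piles.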
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int n;
char word[2000][10], sorted[2000][10];

// Orders single characters; used to build each word's sorted "signature".
int cmp_char(const void* _a, const void* _b) {
    char* a = (char*)_a;
    char* b = (char*)_b;
    return *a - *b;
}

int cmp_string(const void* _a, const void* _b) {
    char* a = (char*)_a;
    char* b = (char*)_b;
    return strcmp(a, b);
}

int main() {
    // Read the dictionary until the sentinel word (starts with 'X').
    n = 0;
    for (;;) {
        scanf("%s", word[n]);
        if (word[n][0] == 'X')
            break;
        n++;
    }
    // Sort the dictionary so matches are printed in alphabetical order.
    qsort(word, n, sizeof(word[0]), cmp_string);
    // Precompute each word's anagram signature: its letters in sorted order.
    for (int i = 0; i < n; i++) {
        strcpy(sorted[i], word[i]);
        qsort(sorted[i], strlen(sorted[i]), sizeof(char), cmp_char);
    }
    char s[10];
    while (scanf("%s", s) == 1) {
        if (s[0] == 'X')
            break;
        // Two words are anagrams iff their sorted signatures are equal.
        qsort(s, strlen(s), sizeof(char), cmp_char);
        int found = 0;
        for (int i = 0; i < n; i++)
            if (strcmp(sorted[i], s) == 0) {
                found = 1;
                printf("%s\n", word[i]);
            }
        if (!found)
            printf("NOT A VALID WORD\n");
        printf("******\n");
    }
    return 0;
}
980
458
#include <cmath> #include <iomanip> #include <iostream> #include <limits> #include <random> #include <string> #include <cblas.h> #include <halide_blas.h> #include "Halide.h" #define RUN_TEST(method) \ std::cout << std::setw(30) << ("Testing " #method ": ") << std::flush; \ if (test_##method(N)) { \ std::cout << "PASSED\n"; \ } \ #define L1_VECTOR_TEST(method, code) \ bool test_##method(int N) { \ Scalar alpha = random_scalar(); \ Vector ex(random_vector(N)); \ Vector ey(random_vector(N)); \ Vector ax(ex), ay(ey); \ \ { \ Scalar *x = &(ex[0]); \ Scalar *y = &(ey[0]); \ cblas_##code; \ } \ \ { \ Scalar *x = &(ax[0]); \ Scalar *y = &(ay[0]); \ hblas_##code; \ } \ \ return compareVectors(N, ey, ay); \ } #define L1_SCALAR_TEST(method, code) \ bool test_##method(int N) { \ Scalar alpha = random_scalar(); \ Vector ex(random_vector(N)); \ Vector ey(random_vector(N)); \ Vector ax(ex), ay(ey); \ Scalar er, ar; \ \ { \ Scalar *x = &(ex[0]); \ Scalar *y = &(ey[0]); \ er = cblas_##code; \ } \ \ { \ Scalar *x = &(ax[0]); \ Scalar *y = &(ay[0]); \ ar = hblas_##code; \ } \ \ return compareScalars(er, ar); \ } #define L2_TEST(method, cblas_code, hblas_code) \ bool test_##method(int N) { \ Scalar alpha = random_scalar(); \ Scalar beta = random_scalar(); \ Vector ex(random_vector(N)); \ Vector ey(random_vector(N)); \ Matrix eA(random_matrix(N)); \ Vector ax(ex), ay(ey); \ Matrix aA(eA); \ \ { \ Scalar *x = &(ex[0]); \ Scalar *y = &(ey[0]); \ Scalar *A = &(eA[0]); \ cblas_code; \ } \ \ { \ Scalar *x = &(ax[0]); \ Scalar *y = &(ay[0]); \ Scalar *A = &(aA[0]); \ hblas_code; \ } \ \ return compareVectors(N, ey, ay); \ } #define L3_TEST(method, cblas_code, hblas_code) \ bool test_##method(int N) { \ Scalar alpha = random_scalar(); \ Scalar beta = random_scalar(); \ Matrix eA(random_matrix(N)); \ Matrix eB(random_matrix(N)); \ Matrix eC(random_matrix(N)); \ Matrix aA(eA), aB(eB), aC(eC); \ \ { \ Scalar *A = &(eA[0]); \ Scalar *B = &(eB[0]); \ Scalar *C = &(eC[0]); \ cblas_code; \ } \ \ { \ Scalar *A = &(aA[0]); \ Scalar *B = &(aB[0]); \ Scalar *C = &(aC[0]); \ hblas_code; \ } \ \ return compareMatrices(N, eC, aC); \ } template<class T> struct BLASTestBase { typedef T Scalar; typedef std::vector<T> Vector; typedef std::vector<T> Matrix; std::random_device rand_dev; std::default_random_engine rand_eng; BLASTestBase() : rand_eng(rand_dev()) {} Scalar random_scalar() { std::uniform_real_distribution<T> uniform_dist(0.0, 1.0); return uniform_dist(rand_eng); } Vector random_vector(int N) { Vector buff(N); for (int i=0; i<N; ++i) { buff[i] = random_scalar(); } return buff; } Matrix random_matrix(int N) { Matrix buff(N * N); for (int i=0; i<N*N; ++i) { buff[i] = random_scalar(); } return buff; } bool compareScalars(Scalar x, Scalar y, Scalar epsilon = 4 * std::numeric_limits<Scalar>::epsilon()) { if (x == y) { return true; } else { const Scalar min_normal = std::numeric_limits<Scalar>::min(); Scalar ax = std::abs(x); Scalar ay = std::abs(y); Scalar diff = std::abs(x - y); bool equal = false; if (x == 0.0 || y == 0.0 || diff < min_normal) { equal = diff < (epsilon * min_normal); } else { equal = diff / (ax + ay) < epsilon; } if (!equal) { std::cerr << "FAIL! 
expected = " << x << ", actual = " << y << "\n"; } return equal; } } bool compareVectors(int N, const Vector &x, const Vector &y, Scalar epsilon = 16 * std::numeric_limits<Scalar>::epsilon()) { bool equal = true; for (int i = 0; i < N; ++i) { if (!compareScalars(x[i], y[i], epsilon)) { std::cerr << "Vectors differ at index: " << i << "\n"; equal = false; break; } } return equal; } bool compareMatrices(int N, const Matrix &A, const Matrix &B, Scalar epsilon = 16 * std::numeric_limits<Scalar>::epsilon()) { bool equal = true; for (int i = 0; i < N*N; ++i) { if (!compareScalars(A[i], A[i], epsilon)) { std::cerr << "Matrices differ at coords: (" << i%N << ", " << i/N << ")\n"; equal = false; break; } } return equal; } }; struct BLASFloatTests : public BLASTestBase<float> { void run_tests(int N) { RUN_TEST(scopy); RUN_TEST(sscal); RUN_TEST(saxpy); RUN_TEST(sdot); RUN_TEST(sasum); RUN_TEST(sgemv_notrans); RUN_TEST(sgemv_trans); RUN_TEST(sger); RUN_TEST(sgemm_notrans); RUN_TEST(sgemm_transA); RUN_TEST(sgemm_transB); RUN_TEST(sgemm_transAB); } L1_VECTOR_TEST(scopy, scopy(N, x, 1, y, 1)) L1_VECTOR_TEST(sscal, sscal(N, alpha, y, 1)) L1_VECTOR_TEST(saxpy, saxpy(N, alpha, x, 1, y, 1)) L1_SCALAR_TEST(sdot, sdot(N, x, 1, y, 1)) L1_SCALAR_TEST(sasum, sasum(N, x, 1)) L2_TEST(sgemv_notrans, cblas_sgemv(CblasColMajor, CblasNoTrans, N, N, alpha, A, N, x, 1, beta, y, 1), hblas_sgemv(HblasColMajor, HblasNoTrans, N, N, alpha, A, N, x, 1, beta, y, 1)); L2_TEST(sgemv_trans, cblas_sgemv(CblasColMajor, CblasTrans, N, N, alpha, A, N, x, 1, beta, y, 1), hblas_sgemv(HblasColMajor, HblasTrans, N, N, alpha, A, N, x, 1, beta, y, 1)); L2_TEST(sger, cblas_sger(CblasColMajor, N, N, alpha, x, 1, y, 1, A, N), hblas_sger(HblasColMajor, N, N, alpha, x, 1, y, 1, A, N)); L3_TEST(sgemm_notrans, cblas_sgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_sgemm(HblasColMajor, HblasNoTrans, HblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(sgemm_transA, cblas_sgemm(CblasColMajor, CblasTrans, CblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_sgemm(HblasColMajor, HblasTrans, HblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(sgemm_transB, cblas_sgemm(CblasColMajor, CblasNoTrans, CblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_sgemm(HblasColMajor, HblasNoTrans, HblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(sgemm_transAB, cblas_sgemm(CblasColMajor, CblasTrans, CblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_sgemm(HblasColMajor, HblasTrans, HblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); }; struct BLASDoubleTests : public BLASTestBase<double> { void run_tests(int N) { RUN_TEST(dcopy); RUN_TEST(dscal); RUN_TEST(daxpy); RUN_TEST(ddot); RUN_TEST(dasum); RUN_TEST(dgemv_notrans); RUN_TEST(dgemv_trans); RUN_TEST(dger); RUN_TEST(dgemm_notrans); RUN_TEST(dgemm_transA); RUN_TEST(dgemm_transB); RUN_TEST(dgemm_transAB); } L1_VECTOR_TEST(dcopy, dcopy(N, x, 1, y, 1)) L1_VECTOR_TEST(dscal, dscal(N, alpha, y, 1)) L1_VECTOR_TEST(daxpy, daxpy(N, alpha, x, 1, y, 1)) L1_SCALAR_TEST(ddot, ddot(N, x, 1, y, 1)) L1_SCALAR_TEST(dasum, dasum(N, x, 1)) L2_TEST(dgemv_notrans, cblas_dgemv(CblasColMajor, CblasNoTrans, N, N, alpha, A, N, x, 1, beta, y, 1), hblas_dgemv(HblasColMajor, HblasNoTrans, N, N, alpha, A, N, x, 1, beta, y, 1)); L2_TEST(dgemv_trans, cblas_dgemv(CblasColMajor, CblasTrans, N, N, alpha, A, N, x, 1, beta, y, 1), hblas_dgemv(HblasColMajor, HblasTrans, N, N, alpha, A, N, x, 1, beta, y, 1)); L2_TEST(dger, cblas_dger(CblasColMajor, 
N, N, alpha, x, 1, y, 1, A, N), hblas_dger(HblasColMajor, N, N, alpha, x, 1, y, 1, A, N)); L3_TEST(dgemm_notrans, cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_dgemm(HblasColMajor, HblasNoTrans, HblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(dgemm_transA, cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_dgemm(HblasColMajor, HblasTrans, HblasNoTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(dgemm_transB, cblas_dgemm(CblasColMajor, CblasNoTrans, CblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_dgemm(HblasColMajor, HblasNoTrans, HblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); L3_TEST(dgemm_transAB, cblas_dgemm(CblasColMajor, CblasTrans, CblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N), hblas_dgemm(HblasColMajor, HblasTrans, HblasTrans, N, N, N, alpha, A, N, B, N, beta, C, N)); }; int main(int argc, char *argv[]) { BLASFloatTests s; BLASDoubleTests d; if (argc > 1) { for (int i = 1; i < argc; ++i) { int size = std::stoi(argv[i]); std::cout << "Testing halide_blas with N = " << size << ":\n"; s.run_tests(size); d.run_tests(size); } } else { int size = 277; std::cout << "Testing halide_blas with N = " << size << ":\n"; s.run_tests(size); d.run_tests(size); } }
12,440
4,413
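Every test body in the file above is produced by one of the L1/L2/L3 preprocessor macros, which makes the control flow hard to read at a glance. The following is an approximate hand-expansion of a single invocation, L1_VECTOR_TEST(saxpy, saxpy(N, alpha, x, 1, y, 1)), written out as ordinary code; it is a readability sketch derived from the macro text, not compiler output, and it lives inside BLASFloatTests, so Scalar, Vector and the helper functions come from BLASTestBase<float>.

// Hand-expanded sketch of L1_VECTOR_TEST(saxpy, saxpy(N, alpha, x, 1, y, 1)).
bool test_saxpy(int N) {
  Scalar alpha = random_scalar();
  Vector ex(random_vector(N));   // inputs for the reference (CBLAS) run
  Vector ey(random_vector(N));
  Vector ax(ex), ay(ey);         // identical copies for the halide_blas run

  {  // reference result
    Scalar *x = &(ex[0]);
    Scalar *y = &(ey[0]);
    cblas_saxpy(N, alpha, x, 1, y, 1);
  }

  {  // result under test
    Scalar *x = &(ax[0]);
    Scalar *y = &(ay[0]);
    hblas_saxpy(N, alpha, x, 1, y, 1);
  }

  // The test passes if both output vectors agree to within epsilon.
  return compareVectors(N, ey, ay);
}

The L2_TEST and L3_TEST macros follow the same shape, adding a random matrix operand and comparing with compareVectors or compareMatrices respectively.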
#include "stdafx.h" #include "SRatingBar.h" namespace SOUI { SRatingBar::SRatingBar(void):m_pStar(NULL),m_nStars(5),m_fValue(0.0f) { } SRatingBar::~SRatingBar(void) { } void SRatingBar::OnPaint(IRenderTarget *pRT) { CRect rcClient = GetClientRect(); int nWid = (int)(rcClient.Width()*m_fValue/m_nStars); CRect rcFore = rcClient; rcFore.right = rcFore.left + nWid; pRT->PushClipRect(rcFore); DrawStars(pRT,rcClient,TRUE); pRT->PopClip(); CRect rcBack = rcClient; rcBack.left = rcFore.right; pRT->PushClipRect(rcBack); DrawStars(pRT,rcClient,FALSE); pRT->PopClip(); } CSize SRatingBar::GetDesiredSize(LPCRECT pRcContainer) { if (!m_pStar) return CSize(16,16); CSize szStar = m_pStar->GetSkinSize(); szStar.cx *= m_nStars; return szStar; } void SRatingBar::DrawStars(IRenderTarget *pRT,CRect rc,BOOL bForeground) { if (!m_pStar) return; CSize szStar = rc.Size(); szStar.cx/=m_nStars; CRect rcStar(rc.TopLeft(),szStar); for(int i=0;i<m_nStars;i++) { m_pStar->Draw(pRT,rcStar,bForeground?0:1); rcStar.OffsetRect(szStar.cx,0); } } void SRatingBar::SetValue(float fValue) { m_fValue = fValue; if(m_fValue>(float)m_nStars) m_fValue = (float)m_nStars; Invalidate(); } }
1,476
606
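The partial-star effect in SRatingBar::OnPaint above comes from drawing the same star row twice under complementary clip rectangles, split at a pixel position proportional to the rating. A tiny standalone sketch of just that split arithmetic, with made-up numbers (an 80-pixel control showing 3.5 of 5 stars):

// Split position used to clip the "lit" star row from the "unlit" one.
#include <cstdio>

int main() {
  const int clientWidth = 80;  // hypothetical control width in pixels
  const int nStars = 5;
  const float value = 3.5f;    // current rating
  const int nWid = static_cast<int>(clientWidth * value / nStars);
  std::printf("lit clip: [0, %d)  unlit clip: [%d, %d)\n", nWid, nWid, clientWidth);
}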
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstdlib>

#include "tpbst.hpp"

/*
 * Case 19 : Empty tree, insert many items, create primary node with empty
 * secondary tree, remove item with two children, print.
 */
int main() {
    TwoPhaseBST<int> tpbst;

    tpbst.insert("ceng351", "sec2", 32)
         .insert("ceng351", "sec1", 31)
         .insert("ceng351", "sec3", 33)
         .insert("ceng351", "sec4", 34)
         .insert("ceng213", "sec1", 21)
         .remove("ceng213", "sec1")
         .insert("ceng435", "sec1", 41)
         .insert("ceng435", "sec2", 42)
         .insert("ceng435", "sec4", 44)
         .insert("ceng435", "sec3", 43)
         .insert("ceng477", "sec3", 73)
         .insert("ceng477", "sec4", 74)
         .insert("ceng477", "sec1", 71)
         .insert("ceng477", "sec2", 72)
         .insert("ceng453", "sec1", 91)
         .remove("ceng351", "sec2")
         .remove("ceng477", "sec3")
         .print();

    return 0;
}
1,033
423
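The call chain in the test above only compiles because TwoPhaseBST's insert(), remove() and print() each return a reference to the tree itself. A minimal sketch of that chaining pattern follows; ChainDemo is a stand-in written for illustration and is not the TwoPhaseBST implementation.

// Each mutator returns *this, which is what allows a.insert(...).remove(...).print().
#include <iostream>
#include <map>
#include <string>

class ChainDemo {
 public:
  ChainDemo &insert(const std::string &primary, const std::string &secondary, int v) {
    data_[primary][secondary] = v;
    return *this;
  }
  ChainDemo &remove(const std::string &primary, const std::string &secondary) {
    auto it = data_.find(primary);
    if (it != data_.end()) it->second.erase(secondary);
    return *this;
  }
  ChainDemo &print() {
    for (const auto &p : data_)
      for (const auto &s : p.second)
        std::cout << p.first << ' ' << s.first << ' ' << s.second << '\n';
    return *this;
  }

 private:
  std::map<std::string, std::map<std::string, int>> data_;
};

int main() {
  ChainDemo d;
  d.insert("ceng351", "sec2", 32).insert("ceng351", "sec1", 31).remove("ceng351", "sec2").print();
}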
/* * Copyright (C) 2004-2015 ZNC, see the NOTICE file for details. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <znc/FileUtils.h> #include <znc/Utils.h> #include <znc/MD5.h> #include <znc/SHA256.h> #include <sstream> using std::stringstream; CString::CString(char c) : string() { stringstream s; s << c; *this = s.str(); } CString::CString(unsigned char c) : string() { stringstream s; s << c; *this = s.str(); } CString::CString(short i) : string() { stringstream s; s << i; *this = s.str(); } CString::CString(unsigned short i) : string() { stringstream s; s << i; *this = s.str(); } CString::CString(int i) : string() { stringstream s; s << i; *this = s.str(); } CString::CString(unsigned int i) : string() { stringstream s; s << i; *this = s.str(); } CString::CString(long i) : string() { stringstream s; s << i; *this = s.str(); } CString::CString(unsigned long i) : string() { stringstream s; s << i; *this = s.str(); } CString::CString(long long i) : string() { stringstream s; s << i; *this = s.str(); } CString::CString(unsigned long long i) : string() { stringstream s; s << i; *this = s.str(); } CString::CString(double i, int precision) : string() { stringstream s; s.precision(precision); s << std::fixed << i; *this = s.str(); } CString::CString(float i, int precision) : string() { stringstream s; s.precision(precision); s << std::fixed << i; *this = s.str(); } unsigned char* CString::strnchr(const unsigned char* src, unsigned char c, unsigned int iMaxBytes, unsigned char* pFill, unsigned int* piCount) const { for (unsigned int a = 0; a < iMaxBytes && *src; a++, src++) { if (pFill) { pFill[a] = *src; } if (*src == c) { if (pFill) { pFill[a +1] = 0; } if (piCount) { *piCount = a; } return (unsigned char*) src; } } if (pFill) { *pFill = 0; } if (piCount) { *piCount = 0; } return NULL; } int CString::CaseCmp(const CString& s, CString::size_type uLen) const { if (uLen != CString::npos) { return strncasecmp(c_str(), s.c_str(), uLen); } return strcasecmp(c_str(), s.c_str()); } int CString::StrCmp(const CString& s, CString::size_type uLen) const { if (uLen != CString::npos) { return strncmp(c_str(), s.c_str(), uLen); } return strcmp(c_str(), s.c_str()); } bool CString::Equals(const CString& s, CaseSensitivity cs) const { if (cs == CaseSensitive) { return (StrCmp(s) == 0); } else { return (CaseCmp(s) == 0); } } bool CString::Equals(const CString& s, bool bCaseSensitive, CString::size_type uLen) const { if (bCaseSensitive) { return (StrCmp(s, uLen) == 0); } else { return (CaseCmp(s, uLen) == 0); } } bool CString::WildCmp(const CString& sWild, const CString& sString) { // Written by Jack Handy - jakkhandy@hotmail.com const char *wild = sWild.c_str(), *CString = sString.c_str(); const char *cp = NULL, *mp = NULL; while ((*CString) && (*wild != '*')) { if ((*wild != *CString) && (*wild != '?')) { return false; } wild++; CString++; } while (*CString) { if (*wild == '*') { if (!*++wild) { return true; } mp = wild; cp = CString+1; } else if ((*wild == *CString) || (*wild == '?')) { wild++; CString++; } else { 
wild = mp; CString = cp++; } } while (*wild == '*') { wild++; } return (*wild == 0); } bool CString::WildCmp(const CString& sWild) const { return CString::WildCmp(sWild, *this); } CString& CString::MakeUpper() { for (size_type a = 0; a < length(); a++) { char& c = (*this)[a]; //TODO use unicode c = (char)toupper(c); } return *this; } CString& CString::MakeLower() { for (size_type a = 0; a < length(); a++) { char& c = (*this)[a]; //TODO use unicode c = (char)tolower(c); } return *this; } CString CString::AsUpper() const { CString sRet = *this; sRet.MakeUpper(); return sRet; } CString CString::AsLower() const { CString sRet = *this; sRet.MakeLower(); return sRet; } CString::EEscape CString::ToEscape(const CString& sEsc) { if (sEsc.Equals("ASCII")) { return EASCII; } else if (sEsc.Equals("HTML")) { return EHTML; } else if (sEsc.Equals("URL")) { return EURL; } else if (sEsc.Equals("SQL")) { return ESQL; } else if (sEsc.Equals("NAMEDFMT")) { return ENAMEDFMT; } else if (sEsc.Equals("DEBUG")) { return EDEBUG; } else if (sEsc.Equals("MSGTAG")) { return EMSGTAG; } else if (sEsc.Equals("HEXCOLON")) { return EHEXCOLON; } return EASCII; } CString CString::Escape_n(EEscape eFrom, EEscape eTo) const { CString sRet; const char szHex[] = "0123456789ABCDEF"; const unsigned char *pStart = (const unsigned char*) data(); const unsigned char *p = (const unsigned char*) data(); size_type iLength = length(); sRet.reserve(iLength *3); unsigned char pTmp[21]; unsigned int iCounted = 0; for (unsigned int a = 0; a < iLength; a++, p = pStart + a) { unsigned char ch = 0; switch (eFrom) { case EHTML: if ((*p == '&') && (strnchr((unsigned char*) p, ';', sizeof(pTmp) - 1, pTmp, &iCounted))) { // please note that we do not have any Unicode or UTF-8 support here at all. if ((iCounted >= 3) && (pTmp[1] == '#')) { // do XML and HTML &#97; &#x3c int base = 10; if ((pTmp[2] & 0xDF) == 'X') { base = 16; } char* endptr = NULL; unsigned long int b = strtol((const char*) (pTmp +2 + (base == 16)), &endptr, base); if ((*endptr == ';') && (b <= 255)) { // incase they do something like &#7777777777; ch = (unsigned char)b; a += iCounted; break; } } if (ch == 0) { if (!strncasecmp((const char*) &pTmp, "&lt;", 2)) ch = '<'; else if (!strncasecmp((const char*) &pTmp, "&gt;", 2)) ch = '>'; else if (!strncasecmp((const char*) &pTmp, "&quot;", 4)) ch = '"'; else if (!strncasecmp((const char*) &pTmp, "&amp;", 3)) ch = '&'; } if (ch > 0) { a += iCounted; } else { ch = *p; // Not a valid escape, just record the & } } else { ch = *p; } break; case EASCII: ch = *p; break; case EURL: if (*p == '%' && (a +2) < iLength && isxdigit(*(p +1)) && isxdigit(*(p +2))) { p++; if (isdigit(*p)) { ch = (unsigned char)((*p - '0') << 4); } else { ch = (unsigned char)((tolower(*p) - 'a' +10) << 4); } p++; if (isdigit(*p)) { ch |= (unsigned char)(*p - '0'); } else { ch |= (unsigned char)(tolower(*p) - 'a' +10); } a += 2; } else if (pStart[a] == '+') { ch = ' '; } else { ch = *p; } break; case ESQL: if (*p != '\\' || iLength < (a +1)) { ch = *p; } else { a++; p++; if (*p == 'n') { ch = '\n'; } else if (*p == 'r') { ch = '\r'; } else if (*p == '0') { ch = '\0'; } else if (*p == 't') { ch = '\t'; } else if (*p == 'b') { ch = '\b'; } else { ch = *p; } } break; case ENAMEDFMT: if (*p != '\\' || iLength < (a +1)) { ch = *p; } else { a++; p++; ch = *p; } break; case EDEBUG: if (*p == '\\' && (a +3) < iLength && *(p +1) == 'x' && isxdigit(*(p +2)) && isxdigit(*(p +3))) { p += 2; if (isdigit(*p)) { ch = (unsigned char)((*p - '0') << 4); } else { ch = (unsigned 
char)((tolower(*p) - 'a' +10) << 4); } p++; if (isdigit(*p)) { ch |= (unsigned char)(*p - '0'); } else { ch |= (unsigned char)(tolower(*p) - 'a' +10); } a += 3; } else if (*p == '\\' && a+1 < iLength && *(p+1) == '.') { a++; p++; ch = '\\'; } else { ch = *p; } break; case EMSGTAG: if (*p != '\\' || iLength < (a +1)) { ch = *p; } else { a++; p++; if (*p == ':') { ch = ';'; } else if (*p == 's') { ch = ' '; } else if (*p == '0') { ch = '\0'; } else if (*p == '\\') { ch = '\\'; } else if (*p == 'r') { ch = '\r'; } else if (*p == 'n') { ch = '\n'; } else { ch = *p; } } break; case EHEXCOLON: { while (!isxdigit(*p) && a < iLength) { a++; p++; } if (a == iLength) { continue; } if (isdigit(*p)) { ch = (unsigned char)((*p - '0') << 4); } else { ch = (unsigned char)((tolower(*p) - 'a' +10) << 4); } a++; p++; while (!isxdigit(*p) && a < iLength) { a++; p++; } if (a == iLength) { continue; } if (isdigit(*p)) { ch |= (unsigned char)(*p - '0'); } else { ch |= (unsigned char)(tolower(*p) - 'a' +10); } } break; } switch (eTo) { case EHTML: if (ch == '<') sRet += "&lt;"; else if (ch == '>') sRet += "&gt;"; else if (ch == '"') sRet += "&quot;"; else if (ch == '&') sRet += "&amp;"; else { sRet += ch; } break; case EASCII: sRet += ch; break; case EURL: if (isalnum(ch) || ch == '_' || ch == '.' || ch == '-') { sRet += ch; } else if (ch == ' ') { sRet += '+'; } else { sRet += '%'; sRet += szHex[ch >> 4]; sRet += szHex[ch & 0xf]; } break; case ESQL: if (ch == '\0') { sRet += '\\'; sRet += '0'; } else if (ch == '\n') { sRet += '\\'; sRet += 'n'; } else if (ch == '\t') { sRet += '\\'; sRet += 't'; } else if (ch == '\r') { sRet += '\\'; sRet += 'r'; } else if (ch == '\b') { sRet += '\\'; sRet += 'b'; } else if (ch == '\"') { sRet += '\\'; sRet += '\"'; } else if (ch == '\'') { sRet += '\\'; sRet += '\''; } else if (ch == '\\') { sRet += '\\'; sRet += '\\'; } else { sRet += ch; } break; case ENAMEDFMT: if (ch == '\\') { sRet += '\\'; sRet += '\\'; } else if (ch == '{') { sRet += '\\'; sRet += '{'; } else if (ch == '}') { sRet += '\\'; sRet += '}'; } else { sRet += ch; } break; case EDEBUG: if (ch < 0x20 || ch == 0x7F) { sRet += "\\x"; sRet += szHex[ch >> 4]; sRet += szHex[ch & 0xf]; } else if (ch == '\\') { sRet += "\\."; } else { sRet += ch; } break; case EMSGTAG: if (ch == ';') { sRet += '\\'; sRet += ':'; } else if (ch == ' ') { sRet += '\\'; sRet += 's'; } else if (ch == '\0') { sRet += '\\'; sRet += '0'; } else if (ch == '\\') { sRet += '\\'; sRet += '\\'; } else if (ch == '\r') { sRet += '\\'; sRet += 'r'; } else if (ch == '\n') { sRet += '\\'; sRet += 'n'; } else { sRet += ch; } break; case EHEXCOLON: { sRet += tolower(szHex[ch >> 4]); sRet += tolower(szHex[ch & 0xf]); sRet += ":"; } break; } } if (eTo == EHEXCOLON) { sRet.TrimRight(":"); } return sRet; } CString CString::Escape_n(EEscape eTo) const { return Escape_n(EASCII, eTo); } CString& CString::Escape(EEscape eFrom, EEscape eTo) { return (*this = Escape_n(eFrom, eTo)); } CString& CString::Escape(EEscape eTo) { return (*this = Escape_n(eTo)); } CString CString::Replace_n(const CString& sReplace, const CString& sWith, const CString& sLeft, const CString& sRight, bool bRemoveDelims) const { CString sRet = *this; CString::Replace(sRet, sReplace, sWith, sLeft, sRight, bRemoveDelims); return sRet; } unsigned int CString::Replace(const CString& sReplace, const CString& sWith, const CString& sLeft, const CString& sRight, bool bRemoveDelims) { return CString::Replace(*this, sReplace, sWith, sLeft, sRight, bRemoveDelims); } unsigned int CString::Replace(CString& 
sStr, const CString& sReplace, const CString& sWith, const CString& sLeft, const CString& sRight, bool bRemoveDelims) { unsigned int uRet = 0; CString sCopy = sStr; sStr.clear(); size_type uReplaceWidth = sReplace.length(); size_type uLeftWidth = sLeft.length(); size_type uRightWidth = sRight.length(); const char* p = sCopy.c_str(); bool bInside = false; while (*p) { if (!bInside && uLeftWidth && strncmp(p, sLeft.c_str(), uLeftWidth) == 0) { if (!bRemoveDelims) { sStr += sLeft; } p += uLeftWidth -1; bInside = true; } else if (bInside && uRightWidth && strncmp(p, sRight.c_str(), uRightWidth) == 0) { if (!bRemoveDelims) { sStr += sRight; } p += uRightWidth -1; bInside = false; } else if (!bInside && strncmp(p, sReplace.c_str(), uReplaceWidth) == 0) { sStr += sWith; p += uReplaceWidth -1; uRet++; } else { sStr.append(p, 1); } p++; } return uRet; } CString CString::Token(size_t uPos, bool bRest, const CString& sSep, bool bAllowEmpty, const CString& sLeft, const CString& sRight, bool bTrimQuotes) const { VCString vsTokens; if (Split(sSep, vsTokens, bAllowEmpty, sLeft, sRight, bTrimQuotes) > uPos) { CString sRet; for (size_t a = uPos; a < vsTokens.size(); a++) { if (a > uPos) { sRet += sSep; } sRet += vsTokens[a]; if (!bRest) { break; } } return sRet; } return Token(uPos, bRest, sSep, bAllowEmpty); } CString CString::Token(size_t uPos, bool bRest, const CString& sSep, bool bAllowEmpty) const { const char *sep_str = sSep.c_str(); size_t sep_len = sSep.length(); const char *str = c_str(); size_t str_len = length(); size_t start_pos = 0; size_t end_pos; if (!bAllowEmpty) { while (strncmp(&str[start_pos], sep_str, sep_len) == 0) { start_pos += sep_len; } } // First, find the start of our token while (uPos != 0 && start_pos < str_len) { bool bFoundSep = false; while (strncmp(&str[start_pos], sep_str, sep_len) == 0 && (!bFoundSep || !bAllowEmpty)) { start_pos += sep_len; bFoundSep = true; } if (bFoundSep) { uPos--; } else { start_pos++; } } // String is over? if (start_pos >= str_len) return ""; // If they want everything from here on, give it to them if (bRest) { return substr(start_pos); } // Now look for the end of the token they want end_pos = start_pos; while (end_pos < str_len) { if (strncmp(&str[end_pos], sep_str, sep_len) == 0) return substr(start_pos, end_pos - start_pos); end_pos++; } // They want the last token in the string, not something in between return substr(start_pos); } CString CString::Ellipsize(unsigned int uLen) const { if (uLen >= size()) { return *this; } string sRet; // @todo this looks suspect if (uLen < 4) { for (unsigned int a = 0; a < uLen; a++) { sRet += "."; } return sRet; } sRet = substr(0, uLen -3) + "..."; return sRet; } CString CString::Left(size_type uCount) const { uCount = (uCount > length()) ? length() : uCount; return substr(0, uCount); } CString CString::Right(size_type uCount) const { uCount = (uCount > length()) ? 
length() : uCount; return substr(length() - uCount, uCount); } CString::size_type CString::URLSplit(MCString& msRet) const { msRet.clear(); VCString vsPairs; Split("&", vsPairs); for (size_t a = 0; a < vsPairs.size(); a++) { const CString& sPair = vsPairs[a]; msRet[sPair.Token(0, false, "=").Escape(CString::EURL, CString::EASCII)] = sPair.Token(1, true, "=").Escape(CString::EURL, CString::EASCII); } return msRet.size(); } CString::size_type CString::OptionSplit(MCString& msRet, bool bUpperKeys) const { CString sName; CString sCopy(*this); msRet.clear(); while (!sCopy.empty()) { sName = sCopy.Token(0, false, "=", false, "\"", "\"", false).Trim_n(); sCopy = sCopy.Token(1, true, "=", false, "\"", "\"", false).TrimLeft_n(); if (sName.empty()) { continue; } VCString vsNames; sName.Split(" ", vsNames, false, "\"", "\""); for (unsigned int a = 0; a < vsNames.size(); a++) { CString sKeyName = vsNames[a]; if (bUpperKeys) { sKeyName.MakeUpper(); } if ((a +1) == vsNames.size()) { msRet[sKeyName] = sCopy.Token(0, false, " ", false, "\"", "\""); sCopy = sCopy.Token(1, true, " ", false, "\"", "\"", false); } else { msRet[sKeyName] = ""; } } } return msRet.size(); } CString::size_type CString::QuoteSplit(VCString& vsRet) const { vsRet.clear(); return Split(" ", vsRet, false, "\"", "\"", true); } CString::size_type CString::Split(const CString& sDelim, VCString& vsRet, bool bAllowEmpty, const CString& sLeft, const CString& sRight, bool bTrimQuotes, bool bTrimWhiteSpace) const { vsRet.clear(); if (empty()) { return 0; } CString sTmp; bool bInside = false; size_type uDelimLen = sDelim.length(); size_type uLeftLen = sLeft.length(); size_type uRightLen = sRight.length(); const char* p = c_str(); if (!bAllowEmpty) { while (strncasecmp(p, sDelim.c_str(), uDelimLen) == 0) { p += uDelimLen; } } while (*p) { if (uLeftLen && uRightLen && !bInside && strncasecmp(p, sLeft.c_str(), uLeftLen) == 0) { if (!bTrimQuotes) { sTmp += sLeft; } p += uLeftLen; bInside = true; continue; } if (uLeftLen && uRightLen && bInside && strncasecmp(p, sRight.c_str(), uRightLen) == 0) { if (!bTrimQuotes) { sTmp += sRight; } p += uRightLen; bInside = false; continue; } if (uDelimLen && !bInside && strncasecmp(p, sDelim.c_str(), uDelimLen) == 0) { if (bTrimWhiteSpace) { sTmp.Trim(); } vsRet.push_back(sTmp); sTmp.clear(); p += uDelimLen; if (!bAllowEmpty) { while (strncasecmp(p, sDelim.c_str(), uDelimLen) == 0) { p += uDelimLen; } } bInside = false; continue; } else { sTmp += *p; } p++; } if (!sTmp.empty()) { if (bTrimWhiteSpace) { sTmp.Trim(); } vsRet.push_back(sTmp); } return vsRet.size(); } CString::size_type CString::Split(const CString& sDelim, SCString& ssRet, bool bAllowEmpty, const CString& sLeft, const CString& sRight, bool bTrimQuotes, bool bTrimWhiteSpace) const { VCString vsTokens; Split(sDelim, vsTokens, bAllowEmpty, sLeft, sRight, bTrimQuotes, bTrimWhiteSpace); ssRet.clear(); for (size_t a = 0; a < vsTokens.size(); a++) { ssRet.insert(vsTokens[a]); } return ssRet.size(); } CString CString::NamedFormat(const CString& sFormat, const MCString& msValues) { CString sRet; CString sKey; bool bEscape = false; bool bParam = false; const char* p = sFormat.c_str(); while (*p) { if (!bParam) { if (bEscape) { sRet += *p; bEscape = false; } else if (*p == '\\') { bEscape = true; } else if (*p == '{') { bParam = true; sKey.clear(); } else { sRet += *p; } } else { if (bEscape) { sKey += *p; bEscape = false; } else if (*p == '\\') { bEscape = true; } else if (*p == '}') { bParam = false; MCString::const_iterator it = msValues.find(sKey); if (it 
!= msValues.end()) { sRet += (*it).second; } } else { sKey += *p; } } p++; } return sRet; } CString CString::RandomString(unsigned int uLength) { const char chars[] = "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789!?.,:;/*-+_()"; // -1 because sizeof() includes the trailing '\0' byte const size_t len = sizeof(chars) / sizeof(chars[0]) - 1; size_t p; CString sRet; for (unsigned int a = 0; a < uLength; a++) { p = (size_t) (len * (rand() / (RAND_MAX + 1.0))); sRet += chars[p]; } return sRet; } bool CString::Base64Encode(unsigned int uWrap) { CString sCopy(*this); return sCopy.Base64Encode(*this, uWrap); } unsigned long CString::Base64Decode() { CString sCopy(*this); return sCopy.Base64Decode(*this); } CString CString::Base64Encode_n(unsigned int uWrap) const { CString sRet; Base64Encode(sRet, uWrap); return sRet; } CString CString::Base64Decode_n() const { CString sRet; Base64Decode(sRet); return sRet; } bool CString::Base64Encode(CString& sRet, unsigned int uWrap) const { const char b64table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; sRet.clear(); size_t len = size(); const unsigned char* input = (const unsigned char*) c_str(); unsigned char *output, *p; size_t i = 0, mod = len % 3, toalloc; toalloc = (len / 3) * 4 + (3 - mod) % 3 + 1 + 8; if (uWrap) { toalloc += len / 57; if (len % 57) { toalloc++; } } if (toalloc < len) { return 0; } p = output = new unsigned char [toalloc]; while (i < len - mod) { *p++ = b64table[input[i++] >> 2]; *p++ = b64table[((input[i - 1] << 4) | (input[i] >> 4)) & 0x3f]; *p++ = b64table[((input[i] << 2) | (input[i + 1] >> 6)) & 0x3f]; *p++ = b64table[input[i + 1] & 0x3f]; i += 2; if (uWrap && !(i % 57)) { *p++ = '\n'; } } if (!mod) { if (uWrap && i % 57) { *p++ = '\n'; } } else { *p++ = b64table[input[i++] >> 2]; *p++ = b64table[((input[i - 1] << 4) | (input[i] >> 4)) & 0x3f]; if (mod == 1) { *p++ = '='; } else { *p++ = b64table[(input[i] << 2) & 0x3f]; } *p++ = '='; if (uWrap) { *p++ = '\n'; } } *p = 0; sRet = (char*) output; delete[] output; return true; } unsigned long CString::Base64Decode(CString& sRet) const { CString sTmp(*this); // remove new lines sTmp.Replace("\r", ""); sTmp.Replace("\n", ""); const char* in = sTmp.c_str(); char c, c1, *p; unsigned long i; unsigned long uLen = sTmp.size(); char* out = new char[uLen + 1]; for (i = 0, p = out; i < uLen; i++) { c = (char)base64_table[(unsigned char)in[i++]]; c1 = (char)base64_table[(unsigned char)in[i++]]; *p++ = char((c << 2) | ((c1 >> 4) & 0x3)); if (i < uLen) { if (in[i] == '=') { break; } c = (char)base64_table[(unsigned char)in[i]]; *p++ = char(((c1 << 4) & 0xf0) | ((c >> 2) & 0xf)); } if (++i < uLen) { if (in[i] == '=') { break; } *p++ = char(((c << 6) & 0xc0) | (char)base64_table[(unsigned char)in[i]]); } } *p = '\0'; unsigned long uRet = p - out; sRet.clear(); sRet.append(out, uRet); delete[] out; return uRet; } CString CString::MD5() const { return (const char*) CMD5(*this); } CString CString::SHA256() const { unsigned char digest[SHA256_DIGEST_SIZE]; char digest_hex[SHA256_DIGEST_SIZE * 2 + 1]; const unsigned char *message = (const unsigned char *) c_str(); sha256(message, length(), digest); snprintf(digest_hex, sizeof(digest_hex), "%02x%02x%02x%02x%02x%02x%02x%02x" "%02x%02x%02x%02x%02x%02x%02x%02x" "%02x%02x%02x%02x%02x%02x%02x%02x" "%02x%02x%02x%02x%02x%02x%02x%02x", digest[ 0], digest[ 1], digest[ 2], digest[ 3], digest[ 4], digest[ 5], digest[ 6], digest[ 7], digest[ 8], digest[ 9], digest[10], digest[11], digest[12], digest[13], digest[14], 
digest[15], digest[16], digest[17], digest[18], digest[19], digest[20], digest[21], digest[22], digest[23], digest[24], digest[25], digest[26], digest[27], digest[28], digest[29], digest[30], digest[31]); return digest_hex; } #ifdef HAVE_LIBSSL CString CString::Encrypt_n(const CString& sPass, const CString& sIvec) const { CString sRet; sRet.Encrypt(sPass, sIvec); return sRet; } CString CString::Decrypt_n(const CString& sPass, const CString& sIvec) const { CString sRet; sRet.Decrypt(sPass, sIvec); return sRet; } void CString::Encrypt(const CString& sPass, const CString& sIvec) { Crypt(sPass, true, sIvec); } void CString::Decrypt(const CString& sPass, const CString& sIvec) { Crypt(sPass, false, sIvec); } void CString::Crypt(const CString& sPass, bool bEncrypt, const CString& sIvec) { unsigned char szIvec[8] = {0,0,0,0,0,0,0,0}; BF_KEY bKey; if (sIvec.length() >= 8) { memcpy(szIvec, sIvec.data(), 8); } BF_set_key(&bKey, (unsigned int)sPass.length(), (unsigned char*) sPass.data()); unsigned int uPad = (length() % 8); if (uPad) { uPad = 8 - uPad; append(uPad, '\0'); } size_t uLen = length(); unsigned char* szBuff = (unsigned char*) malloc(uLen); BF_cbc_encrypt((const unsigned char*) data(), szBuff, uLen, &bKey, szIvec, ((bEncrypt) ? BF_ENCRYPT : BF_DECRYPT)); clear(); append((const char*) szBuff, uLen); free(szBuff); } #endif // HAVE_LIBSSL CString CString::ToPercent(double d) { char szRet[32]; snprintf(szRet, 32, "%.02f%%", d); return szRet; } CString CString::ToByteStr(unsigned long long d) { const unsigned long long KiB = 1024; const unsigned long long MiB = KiB * 1024; const unsigned long long GiB = MiB * 1024; const unsigned long long TiB = GiB * 1024; if (d > TiB) { return CString(d / TiB) + " TiB"; } else if (d > GiB) { return CString(d / GiB) + " GiB"; } else if (d > MiB) { return CString(d / MiB) + " MiB"; } else if (d > KiB) { return CString(d / KiB) + " KiB"; } return CString(d) + " B"; } CString CString::ToTimeStr(unsigned long s) { const unsigned long m = 60; const unsigned long h = m * 60; const unsigned long d = h * 24; const unsigned long w = d * 7; const unsigned long y = d * 365; CString sRet; #define TIMESPAN(time, str) \ if (s >= time) { \ sRet += CString(s / time) + str " "; \ s = s % time; \ } TIMESPAN(y, "y"); TIMESPAN(w, "w"); TIMESPAN(d, "d"); TIMESPAN(h, "h"); TIMESPAN(m, "m"); TIMESPAN(1, "s"); if (sRet.empty()) return "0s"; return sRet.RightChomp_n(); } bool CString::ToBool() const { CString sTrimmed = Trim_n(); return (!sTrimmed.Trim_n("0").empty() && !sTrimmed.Equals("false") && !sTrimmed.Equals("off") && !sTrimmed.Equals("no") && !sTrimmed.Equals("n")); } short CString::ToShort() const { return (short int)strtol(this->c_str(), (char**) NULL, 10); } unsigned short CString::ToUShort() const { return (unsigned short int)strtoul(this->c_str(), (char**) NULL, 10); } unsigned int CString::ToUInt() const { return (unsigned int)strtoul(this->c_str(), (char**) NULL, 10); } int CString::ToInt() const { return (int)strtol(this->c_str(), (char**) NULL, 10); } long CString::ToLong() const { return strtol(this->c_str(), (char**) NULL, 10); } unsigned long CString::ToULong() const { return strtoul(c_str(), NULL, 10); } unsigned long long CString::ToULongLong() const { return strtoull(c_str(), NULL, 10); } long long CString::ToLongLong() const { return strtoll(c_str(), NULL, 10); } double CString::ToDouble() const { return strtod(c_str(), NULL); } bool CString::Trim(const CString& s) { bool bLeft = TrimLeft(s); return (TrimRight(s) || bLeft); } bool CString::TrimLeft(const 
CString& s) { size_type i = find_first_not_of(s); if (i == 0) return false; if (i != npos) this->erase(0, i); else this->clear(); return true; } bool CString::TrimRight(const CString& s) { size_type i = find_last_not_of(s); if (i + 1 == length()) return false; if (i != npos) this->erase(i + 1, npos); else this->clear(); return true; } CString CString::Trim_n(const CString& s) const { CString sRet = *this; sRet.Trim(s); return sRet; } CString CString::TrimLeft_n(const CString& s) const { CString sRet = *this; sRet.TrimLeft(s); return sRet; } CString CString::TrimRight_n(const CString& s) const { CString sRet = *this; sRet.TrimRight(s); return sRet; } bool CString::TrimPrefix(const CString& sPrefix) { if (Equals(sPrefix, false, sPrefix.length())) { LeftChomp(sPrefix.length()); return true; } else { return false; } } bool CString::TrimSuffix(const CString& sSuffix) { if (Right(sSuffix.length()).Equals(sSuffix)) { RightChomp(sSuffix.length()); return true; } else { return false; } } size_t CString::Find(const CString& s, CaseSensitivity cs) const { if (cs == CaseSensitive) { return find(s); } else { return AsLower().find(s.AsLower()); } } bool CString::StartsWith(const CString& sPrefix, CaseSensitivity cs) const { return Left(sPrefix.length()).Equals(sPrefix, cs); } bool CString::EndsWith(const CString& sSuffix, CaseSensitivity cs) const { return Right(sSuffix.length()).Equals(sSuffix, cs); } bool CString::Contains(const CString& s, CaseSensitivity cs) const { return Find(s, cs) != npos; } CString CString::TrimPrefix_n(const CString& sPrefix) const { CString sRet = *this; sRet.TrimPrefix(sPrefix); return sRet; } CString CString::TrimSuffix_n(const CString& sSuffix) const { CString sRet = *this; sRet.TrimSuffix(sSuffix); return sRet; } CString CString::LeftChomp_n(size_type uLen) const { CString sRet = *this; sRet.LeftChomp(uLen); return sRet; } CString CString::RightChomp_n(size_type uLen) const { CString sRet = *this; sRet.RightChomp(uLen); return sRet; } bool CString::LeftChomp(size_type uLen) { bool bRet = false; while ((uLen--) && (length())) { erase(0, 1); bRet = true; } return bRet; } bool CString::RightChomp(size_type uLen) { bool bRet = false; while ((uLen--) && (length())) { erase(length() -1); bRet = true; } return bRet; } CString CString::StripControls_n() const { CString sRet; const unsigned char *pStart = (const unsigned char*) data(); unsigned char ch = *pStart; size_type iLength = length(); sRet.reserve(iLength); bool colorCode = false; unsigned int digits = 0; bool comma = false; for (unsigned int a = 0; a < iLength; a++, ch = pStart[a]) { // Color code. Format: \x03([0-9]{1,2}(,[0-9]{1,2})?)? if (ch == 0x03) { colorCode = true; digits = 0; comma = false; continue; } if (colorCode) { if (isdigit(ch) && digits < 2) { digits++; continue; } if (ch == ',' && !comma && digits > 0) { comma = true; digits = 0; continue; } colorCode = false; if (digits == 0 && comma) { // There was a ',' which wasn't followed by digits, we should print it. 
sRet += ','; } } // CO controls codes if (ch < 0x20 || ch == 0x7F) continue; sRet += ch; } if (colorCode && digits == 0 && comma) { sRet += ','; } sRet.reserve(0); return sRet; } CString& CString::StripControls() { return (*this = StripControls_n()); } //////////////// MCString //////////////// const MCString MCString::EmptyMap; MCString::status_t MCString::WriteToDisk(const CString& sPath, mode_t iMode) const { CFile cFile(sPath); if (this->empty()) { if (!cFile.Exists()) return MCS_SUCCESS; if (cFile.Delete()) return MCS_SUCCESS; } if (!cFile.Open(O_WRONLY|O_CREAT|O_TRUNC, iMode)) { return MCS_EOPEN; } for (MCString::const_iterator it = this->begin(); it != this->end(); ++it) { CString sKey = it->first; CString sValue = it->second; if (!WriteFilter(sKey, sValue)) { return MCS_EWRITEFIL; } if (sKey.empty()) { continue; } if (cFile.Write(Encode(sKey) + " " + Encode(sValue) + "\n") <= 0) { return MCS_EWRITE; } } cFile.Close(); return MCS_SUCCESS; } MCString::status_t MCString::ReadFromDisk(const CString& sPath) { clear(); CFile cFile(sPath); if (!cFile.Open(O_RDONLY)) { return MCS_EOPEN; } CString sBuffer; while (cFile.ReadLine(sBuffer)) { sBuffer.Trim(); CString sKey = sBuffer.Token(0); CString sValue = sBuffer.Token(1); Decode(sKey); Decode(sValue); if (!ReadFilter(sKey, sValue)) return MCS_EREADFIL; (*this)[sKey] = sValue; } cFile.Close(); return MCS_SUCCESS; } static const char hexdigits[] = "0123456789abcdef"; CString& MCString::Encode(CString& sValue) const { CString sTmp; for (CString::iterator it = sValue.begin(); it != sValue.end(); ++it) { // isalnum() needs unsigned char as argument and this code // assumes unsigned, too. unsigned char c = *it; if (isalnum(c)) { sTmp += c; } else { sTmp += "%"; sTmp += hexdigits[c >> 4]; sTmp += hexdigits[c & 0xf]; sTmp += ";"; } } sValue = sTmp; return sValue; } CString& MCString::Decode(CString& sValue) const { const char *pTmp = sValue.c_str(); char *endptr; CString sTmp; while (*pTmp) { if (*pTmp != '%') { sTmp += *pTmp++; } else { char ch = (char) strtol(pTmp + 1, &endptr, 16); if (*endptr == ';') { sTmp += ch; pTmp = ++endptr; } else { sTmp += *pTmp++; } } } sValue = sTmp; return sValue; }
32,159
15,138
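CString::WildCmp above is the wildcard matcher credited to Jack Handy in its comment: '*' matches any run of characters, '?' matches exactly one, and a mismatch after a '*' backtracks so the star can absorb one more character. Below is a standalone copy of the same algorithm with the backtracking commented, so it can be exercised outside ZNC.

// Standalone version of the wildcard matching algorithm used by CString::WildCmp.
#include <cstdio>

bool wildcmp(const char *wild, const char *str) {
  const char *cp = nullptr, *mp = nullptr;
  // Match literally until the first '*'.
  while (*str && *wild != '*') {
    if (*wild != *str && *wild != '?') return false;
    ++wild;
    ++str;
  }
  while (*str) {
    if (*wild == '*') {
      if (!*++wild) return true;   // a trailing '*' matches the rest
      mp = wild;                   // remember where to restart the pattern
      cp = str + 1;                // and the next candidate start in the text
    } else if (*wild == *str || *wild == '?') {
      ++wild;
      ++str;
    } else {
      wild = mp;                   // mismatch: let the last '*' absorb one more char
      str = cp++;
    }
  }
  while (*wild == '*') ++wild;     // trailing stars may match nothing
  return *wild == 0;
}

int main() {
  std::printf("%d %d %d\n",
              wildcmp("*.cpp", "Utils.cpp"),   // 1
              wildcmp("znc-??", "znc-15"),     // 1
              wildcmp("*.cpp", "Utils.h"));    // 0
}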
#include "sendmessagesdialog.h" #include "ui_sendmessagesdialog.h" //#include "init.h" #include "walletmodel.h" #include "messagemodel.h" #include "addressbookpage.h" #include "optionsmodel.h" #include "sendmessagesentry.h" //#include "guiutil.h" #include <QMessageBox> #include <QLocale> #include <QTextDocument> #include <QScrollBar> #include <QClipboard> #include <QDataWidgetMapper> SendMessagesDialog::SendMessagesDialog(Mode mode, Type type, QWidget *parent) : QDialog(parent), ui(new Ui::SendMessagesDialog), model(0), mode(mode), type(type) { ui->setupUi(this); #ifdef Q_OS_MAC // Icons on push buttons are very uncommon on Mac ui->addButton->setIcon(QIcon()); ui->clearButton->setIcon(QIcon()); ui->sendButton->setIcon(QIcon()); #endif #if QT_VERSION >= 0x040700 /* Do not move this to the XML file, Qt before 4.7 will choke on it */ if(mode == SendMessagesDialog::Encrypted) ui->addressFrom->setPlaceholderText(tr("Enter an address")); #endif addEntry(); connect(ui->addButton, SIGNAL(clicked()), this, SLOT(addEntry())); connect(ui->clearButton, SIGNAL(clicked()), this, SLOT(clear())); connect(ui->closeButton, SIGNAL(clicked()), this, SLOT(reject())); fNewRecipientAllowed = true; if(mode == SendMessagesDialog::Anonymous) ui->frameAddressFrom->hide(); if(type == SendMessagesDialog::Page) ui->closeButton->hide(); } void SendMessagesDialog::setModel(MessageModel *model) { this->model = model; for(int i = 0; i < ui->entries->count(); ++i) { SendMessagesEntry *entry = qobject_cast<SendMessagesEntry*>(ui->entries->itemAt(i)->widget()); if(entry) entry->setModel(model); } } void SendMessagesDialog::loadRow(int row) { if(model->data(model->index(row, model->Type, QModelIndex()), Qt::DisplayRole).toString() == MessageModel::Received) ui->addressFrom->setText(model->data(model->index(row, model->ToAddress, QModelIndex()), Qt::DisplayRole).toString()); else ui->addressFrom->setText(model->data(model->index(row, model->FromAddress, QModelIndex()), Qt::DisplayRole).toString()); for(int i = 0; i < ui->entries->count(); ++i) { SendMessagesEntry *entry = qobject_cast<SendMessagesEntry*>(ui->entries->itemAt(i)->widget()); if(entry) entry->loadRow(row); } } bool SendMessagesDialog::checkMode(Mode mode) { return (mode == this->mode); } bool SendMessagesDialog::validate() { if(mode == SendMessagesDialog::Encrypted && ui->addressFrom->text() == "") { ui->addressFrom->setValid(false); return false; } return true; } SendMessagesDialog::~SendMessagesDialog() { delete ui; } void SendMessagesDialog::on_pasteButton_clicked() { // Paste text from clipboard into recipient field ui->addressFrom->setText(QApplication::clipboard()->text()); } void SendMessagesDialog::on_addressBookButton_clicked() { if(!model) return; AddressBookPage dlg(AddressBookPage::ForSending, AddressBookPage::ReceivingTab, this); dlg.setModel(model->getWalletModel()->getAddressTableModel()); if(dlg.exec()) { ui->addressFrom->setText(dlg.getReturnValue()); SendMessagesEntry *entry = qobject_cast<SendMessagesEntry*>(ui->entries->itemAt(0)->widget()); entry->setFocus(); // findChild( const QString "sentTo")->setFocus(); } } void SendMessagesDialog::on_sendButton_clicked() { QList<SendMessagesRecipient> recipients; bool valid = true; if(!model) return; valid = validate(); for(int i = 0; i < ui->entries->count(); ++i) { SendMessagesEntry *entry = qobject_cast<SendMessagesEntry*>(ui->entries->itemAt(i)->widget()); if(entry) { if(entry->validate()) recipients.append(entry->getValue()); else valid = false; } } if(!valid || recipients.isEmpty()) return; // 
Format confirmation message QStringList formatted; foreach(const SendMessagesRecipient &rcp, recipients) { formatted.append(tr("<b>%1</b> to %2 (%3)").arg(rcp.message, Qt::escape(rcp.label), rcp.address)); } fNewRecipientAllowed = false; QMessageBox::StandardButton retval = QMessageBox::question(this, tr("Confirm send messages"), tr("Are you sure you want to send %1?").arg(formatted.join(tr(" and "))), QMessageBox::Yes|QMessageBox::Cancel, QMessageBox::Cancel); if(retval != QMessageBox::Yes) { fNewRecipientAllowed = true; return; } MessageModel::StatusCode sendstatus; if(mode == SendMessagesDialog::Anonymous) sendstatus = model->sendMessages(recipients); else sendstatus = model->sendMessages(recipients, ui->addressFrom->text()); switch(sendstatus) { case MessageModel::InvalidAddress: QMessageBox::warning(this, tr("Send Message"), tr("The recipient address is not valid, please recheck."), QMessageBox::Ok, QMessageBox::Ok); break; case MessageModel::InvalidMessage: QMessageBox::warning(this, tr("Send Message"), tr("The message can't be empty."), QMessageBox::Ok, QMessageBox::Ok); break; case MessageModel::DuplicateAddress: QMessageBox::warning(this, tr("Send Message"), tr("Duplicate address found, can only send to each address once per send operation."), QMessageBox::Ok, QMessageBox::Ok); break; case MessageModel::MessageCreationFailed: QMessageBox::warning(this, tr("Send Message"), tr("Error: Message creation failed."), QMessageBox::Ok, QMessageBox::Ok); break; case MessageModel::MessageCommitFailed: QMessageBox::warning(this, tr("Send Message"), tr("Error: The message was rejected."), QMessageBox::Ok, QMessageBox::Ok); break; case MessageModel::Aborted: // User aborted, nothing to do break; case MessageModel::FailedErrorShown: // Send failed, error message was displayed break; case MessageModel::OK: accept(); break; } fNewRecipientAllowed = true; } void SendMessagesDialog::clear() { // Remove entries until only one left while(ui->entries->count()) delete ui->entries->takeAt(0)->widget(); addEntry(); updateRemoveEnabled(); ui->sendButton->setDefault(true); } void SendMessagesDialog::reject() { if(type == SendMessagesDialog::Dialog) done(1); else clear(); } void SendMessagesDialog::accept() { if(type == SendMessagesDialog::Dialog) done(0); else clear(); } void SendMessagesDialog::done(int retval) { if(type == SendMessagesDialog::Dialog) QDialog::done(retval); else clear(); } SendMessagesEntry *SendMessagesDialog::addEntry() { SendMessagesEntry *entry = new SendMessagesEntry(this); entry->setModel(model); ui->entries->addWidget(entry); connect(entry, SIGNAL(removeEntry(SendMessagesEntry*)), this, SLOT(removeEntry(SendMessagesEntry*))); updateRemoveEnabled(); // Focus the field, so that entry can start immediately entry->clear(); entry->setFocus(); ui->scrollAreaWidgetContents->resize(ui->scrollAreaWidgetContents->sizeHint()); QCoreApplication::instance()->processEvents(); QScrollBar* bar = ui->scrollArea->verticalScrollBar(); if(bar) bar->setSliderPosition(bar->maximum()); return entry; } void SendMessagesDialog::updateRemoveEnabled() { // Remove buttons are enabled as soon as there is more than one send-entry bool enabled = (ui->entries->count() > 1); for(int i = 0; i < ui->entries->count(); ++i) { SendMessagesEntry *entry = qobject_cast<SendMessagesEntry*>(ui->entries->itemAt(i)->widget()); if(entry) entry->setRemoveEnabled(enabled); } setupTabChain(0); } void SendMessagesDialog::removeEntry(SendMessagesEntry* entry) { delete entry; updateRemoveEnabled(); } QWidget 
*SendMessagesDialog::setupTabChain(QWidget *prev) { for(int i = 0; i < ui->entries->count(); ++i) { SendMessagesEntry *entry = qobject_cast<SendMessagesEntry*>(ui->entries->itemAt(i)->widget()); if(entry) { prev = entry->setupTabChain(prev); } } QWidget::setTabOrder(prev, ui->addButton); QWidget::setTabOrder(ui->addButton, ui->sendButton); return ui->sendButton; } void SendMessagesDialog::pasteEntry(const SendMessagesRecipient &rv) { if(!fNewRecipientAllowed) return; SendMessagesEntry *entry = 0; // Replace the first entry if it is still unused if(ui->entries->count() == 1) { SendMessagesEntry *first = qobject_cast<SendMessagesEntry*>(ui->entries->itemAt(0)->widget()); if(first->isClear()) entry = first; } if(!entry) entry = addEntry(); entry->setValue(rv); }
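// Editor's note: a minimal usage sketch, not part of the original sendmessagesdialog.cpp.
// It only exercises the constructor, setModel() and checkMode() shown above; the helper name
// showSendMessagesDialog and the messageModel/parent arguments are placeholders assumed to be
// supplied by the calling code.
static void showSendMessagesDialog(MessageModel *messageModel, QWidget *parent)
{
    SendMessagesDialog dlg(SendMessagesDialog::Encrypted, SendMessagesDialog::Dialog, parent);
    dlg.setModel(messageModel);              // propagates the model to each SendMessagesEntry
    if (dlg.checkMode(SendMessagesDialog::Encrypted))
        dlg.exec();                          // modal; accept()/reject() map to done(0)/done(1)
}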
/* * Copyright (C) 2017-2020 Intel Corporation * * SPDX-License-Identifier: MIT * */ #include "opencl/source/helpers/task_information.h" #include "shared/source/command_stream/command_stream_receiver.h" #include "shared/source/command_stream/csr_deps.h" #include "shared/source/command_stream/linear_stream.h" #include "shared/source/command_stream/preemption.h" #include "shared/source/helpers/aligned_memory.h" #include "shared/source/helpers/engine_node_helper.h" #include "shared/source/helpers/string.h" #include "shared/source/memory_manager/internal_allocation_storage.h" #include "shared/source/memory_manager/surface.h" #include "opencl/source/built_ins/builtins_dispatch_builder.h" #include "opencl/source/cl_device/cl_device.h" #include "opencl/source/command_queue/command_queue.h" #include "opencl/source/command_queue/enqueue_common.h" #include "opencl/source/device_queue/device_queue.h" #include "opencl/source/gtpin/gtpin_notify.h" #include "opencl/source/helpers/enqueue_properties.h" #include "opencl/source/helpers/task_information.inl" #include "opencl/source/mem_obj/mem_obj.h" namespace NEO { template void KernelOperation::ResourceCleaner::operator()<LinearStream>(LinearStream *); template void KernelOperation::ResourceCleaner::operator()<IndirectHeap>(IndirectHeap *); CommandMapUnmap::CommandMapUnmap(MapOperationType operationType, MemObj &memObj, MemObjSizeArray &copySize, MemObjOffsetArray &copyOffset, bool readOnly, CommandQueue &commandQueue) : Command(commandQueue), memObj(memObj), copySize(copySize), copyOffset(copyOffset), readOnly(readOnly), operationType(operationType) { memObj.incRefInternal(); } CompletionStamp &CommandMapUnmap::submit(uint32_t taskLevel, bool terminated) { if (terminated) { memObj.decRefInternal(); return completionStamp; } auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver(); auto commandStreamReceiverOwnership = commandStreamReceiver.obtainUniqueOwnership(); auto &queueCommandStream = commandQueue.getCS(0); size_t offset = queueCommandStream.getUsed(); MultiDispatchInfo multiDispatch; Device &device = commandQueue.getDevice(); DispatchFlags dispatchFlags( {}, //csrDependencies nullptr, //barrierTimestampPacketNodes {}, //pipelineSelectArgs commandQueue.flushStamp->getStampReference(), //flushStampReference commandQueue.getThrottle(), //throttle PreemptionHelper::taskPreemptionMode(device, multiDispatch), //preemptionMode GrfConfig::DefaultGrfNumber, //numGrfRequired L3CachingSettings::l3CacheOn, //l3CacheSettings ThreadArbitrationPolicy::NotPresent, //threadArbitrationPolicy commandQueue.getSliceCount(), //sliceCount true, //blocking true, //dcFlush false, //useSLM true, //guardCommandBufferWithPipeControl false, //GSBA32BitRequired false, //requiresCoherency commandQueue.getPriority() == QueuePriority::LOW, //lowPriority false, //implicitFlush commandQueue.getGpgpuCommandStreamReceiver().isNTo1SubmissionModelEnabled(), //outOfOrderExecutionAllowed false, //epilogueRequired false //usePerDssBackedBuffer ); DEBUG_BREAK_IF(taskLevel >= CompletionStamp::levelNotReady); gtpinNotifyPreFlushTask(&commandQueue); completionStamp = commandStreamReceiver.flushTask(queueCommandStream, offset, commandQueue.getIndirectHeap(IndirectHeap::DYNAMIC_STATE, 0u), commandQueue.getIndirectHeap(IndirectHeap::INDIRECT_OBJECT, 0u), commandQueue.getIndirectHeap(IndirectHeap::SURFACE_STATE, 0u), taskLevel, dispatchFlags, commandQueue.getDevice()); if (!memObj.isMemObjZeroCopy()) { commandQueue.waitUntilComplete(completionStamp.taskCount, 
completionStamp.flushStamp, false); if (operationType == MAP) { memObj.transferDataToHostPtr(copySize, copyOffset); } else if (!readOnly) { DEBUG_BREAK_IF(operationType != UNMAP); memObj.transferDataFromHostPtr(copySize, copyOffset); } } memObj.decRefInternal(); return completionStamp; } CommandComputeKernel::CommandComputeKernel(CommandQueue &commandQueue, std::unique_ptr<KernelOperation> &kernelOperation, std::vector<Surface *> &surfaces, bool flushDC, bool usesSLM, bool ndRangeKernel, std::unique_ptr<PrintfHandler> printfHandler, PreemptionMode preemptionMode, Kernel *kernel, uint32_t kernelCount) : Command(commandQueue, kernelOperation), flushDC(flushDC), slmUsed(usesSLM), NDRangeKernel(ndRangeKernel), printfHandler(std::move(printfHandler)), kernel(kernel), kernelCount(kernelCount), preemptionMode(preemptionMode) { for (auto surface : surfaces) { this->surfaces.push_back(surface); } UNRECOVERABLE_IF(nullptr == this->kernel); kernel->incRefInternal(); } CommandComputeKernel::~CommandComputeKernel() { kernel->decRefInternal(); } CompletionStamp &CommandComputeKernel::submit(uint32_t taskLevel, bool terminated) { if (terminated) { for (auto surface : surfaces) { delete surface; } surfaces.clear(); return completionStamp; } auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver(); bool executionModelKernel = kernel->isParentKernel; auto devQueue = commandQueue.getContext().getDefaultDeviceQueue(); auto commandStreamReceiverOwnership = commandStreamReceiver.obtainUniqueOwnership(); bool isCcsUsed = EngineHelpers::isCcs(commandQueue.getGpgpuEngine().osContext->getEngineType()); if (executionModelKernel) { while (!devQueue->isEMCriticalSectionFree()) ; devQueue->resetDeviceQueue(); devQueue->acquireEMCriticalSection(); } IndirectHeap *dsh = kernelOperation->dsh.get(); IndirectHeap *ioh = kernelOperation->ioh.get(); IndirectHeap *ssh = kernelOperation->ssh.get(); auto requiresCoherency = false; auto anyUncacheableArgs = false; for (auto &surface : surfaces) { DEBUG_BREAK_IF(!surface); surface->makeResident(commandStreamReceiver); requiresCoherency |= surface->IsCoherent; if (!surface->allowsL3Caching()) { anyUncacheableArgs = true; } } if (printfHandler) { printfHandler.get()->makeResident(commandStreamReceiver); } makeTimestampPacketsResident(commandStreamReceiver); if (executionModelKernel) { uint32_t taskCount = commandStreamReceiver.peekTaskCount() + 1; devQueue->setupExecutionModelDispatch(*ssh, *dsh, kernel, kernelCount, commandStreamReceiver.getTagAllocation()->getGpuAddress(), taskCount, timestamp, isCcsUsed); SchedulerKernel &scheduler = commandQueue.getContext().getSchedulerKernel(); scheduler.setArgs(devQueue->getQueueBuffer(), devQueue->getStackBuffer(), devQueue->getEventPoolBuffer(), devQueue->getSlbBuffer(), dsh->getGraphicsAllocation(), kernel->getKernelReflectionSurface(), devQueue->getQueueStorageBuffer(), ssh->getGraphicsAllocation(), devQueue->getDebugQueue()); devQueue->dispatchScheduler( *kernelOperation->commandStream, scheduler, preemptionMode, ssh, dsh, isCcsUsed); scheduler.makeResident(commandStreamReceiver); // Update SLM usage slmUsed |= scheduler.slmTotalSize > 0; this->kernel->getProgram()->getBlockKernelManager()->makeInternalAllocationsResident(commandStreamReceiver); } if (kernelOperation->blitPropertiesContainer.size() > 0) { auto &bcsCsr = *commandQueue.getBcsCommandStreamReceiver(); CsrDependencies csrDeps; eventsRequest.fillCsrDependencies(csrDeps, bcsCsr, CsrDependencies::DependenciesType::All); 
BlitProperties::setupDependenciesForAuxTranslation(kernelOperation->blitPropertiesContainer, *timestampPacketDependencies, *currentTimestampPacketNodes, csrDeps, commandQueue.getGpgpuCommandStreamReceiver(), bcsCsr); auto bcsTaskCount = bcsCsr.blitBuffer(kernelOperation->blitPropertiesContainer, false); commandQueue.updateBcsTaskCount(bcsTaskCount); } DispatchFlags dispatchFlags( {}, //csrDependencies nullptr, //barrierTimestampPacketNodes {false, kernel->isVmeKernel()}, //pipelineSelectArgs commandQueue.flushStamp->getStampReference(), //flushStampReference commandQueue.getThrottle(), //throttle preemptionMode, //preemptionMode kernel->getKernelInfo().patchInfo.executionEnvironment->NumGRFRequired, //numGrfRequired L3CachingSettings::l3CacheOn, //l3CacheSettings kernel->getThreadArbitrationPolicy(), //threadArbitrationPolicy commandQueue.getSliceCount(), //sliceCount true, //blocking flushDC, //dcFlush slmUsed, //useSLM true, //guardCommandBufferWithPipeControl NDRangeKernel, //GSBA32BitRequired requiresCoherency, //requiresCoherency commandQueue.getPriority() == QueuePriority::LOW, //lowPriority false, //implicitFlush commandQueue.getGpgpuCommandStreamReceiver().isNTo1SubmissionModelEnabled(), //outOfOrderExecutionAllowed false, //epilogueRequired kernel->requiresPerDssBackedBuffer() //usePerDssBackedBuffer ); if (timestampPacketDependencies) { eventsRequest.fillCsrDependencies(dispatchFlags.csrDependencies, commandStreamReceiver, CsrDependencies::DependenciesType::OutOfCsr); dispatchFlags.barrierTimestampPacketNodes = &timestampPacketDependencies->barrierNodes; } dispatchFlags.pipelineSelectArgs.specialPipelineSelectMode = kernel->requiresSpecialPipelineSelectMode(); if (anyUncacheableArgs) { dispatchFlags.l3CacheSettings = L3CachingSettings::l3CacheOff; } else if (!kernel->areStatelessWritesUsed()) { dispatchFlags.l3CacheSettings = L3CachingSettings::l3AndL1On; } if (commandQueue.dispatchHints != 0) { dispatchFlags.engineHints = commandQueue.dispatchHints; dispatchFlags.epilogueRequired = true; } DEBUG_BREAK_IF(taskLevel >= CompletionStamp::levelNotReady); gtpinNotifyPreFlushTask(&commandQueue); completionStamp = commandStreamReceiver.flushTask(*kernelOperation->commandStream, 0, *dsh, *ioh, *ssh, taskLevel, dispatchFlags, commandQueue.getDevice()); if (gtpinIsGTPinInitialized()) { gtpinNotifyFlushTask(completionStamp.taskCount); } if (printfHandler) { commandQueue.waitUntilComplete(completionStamp.taskCount, completionStamp.flushStamp, false); printfHandler.get()->printEnqueueOutput(); } for (auto surface : surfaces) { delete surface; } surfaces.clear(); return completionStamp; } void CommandWithoutKernel::dispatchBlitOperation() { auto bcsCsr = commandQueue.getBcsCommandStreamReceiver(); UNRECOVERABLE_IF(bcsCsr == nullptr); UNRECOVERABLE_IF(kernelOperation->blitPropertiesContainer.size() != 1); auto &blitProperties = *kernelOperation->blitPropertiesContainer.begin(); eventsRequest.fillCsrDependencies(blitProperties.csrDependencies, *bcsCsr, CsrDependencies::DependenciesType::All); blitProperties.csrDependencies.push_back(&timestampPacketDependencies->cacheFlushNodes); blitProperties.csrDependencies.push_back(&timestampPacketDependencies->previousEnqueueNodes); blitProperties.csrDependencies.push_back(&timestampPacketDependencies->barrierNodes); blitProperties.outputTimestampPacket = currentTimestampPacketNodes->peekNodes()[0]; auto bcsTaskCount = bcsCsr->blitBuffer(kernelOperation->blitPropertiesContainer, false); commandQueue.updateBcsTaskCount(bcsTaskCount); } CompletionStamp 
&CommandWithoutKernel::submit(uint32_t taskLevel, bool terminated) { if (terminated) { return completionStamp; } auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver(); if (!kernelOperation) { completionStamp.taskCount = commandStreamReceiver.peekTaskCount(); completionStamp.taskLevel = commandStreamReceiver.peekTaskLevel(); completionStamp.flushStamp = commandStreamReceiver.obtainCurrentFlushStamp(); return completionStamp; } auto lockCSR = commandStreamReceiver.obtainUniqueOwnership(); if (kernelOperation->blitEnqueue) { if (commandStreamReceiver.isStallingPipeControlOnNextFlushRequired()) { timestampPacketDependencies->barrierNodes.add(commandStreamReceiver.getTimestampPacketAllocator()->getTag()); } dispatchBlitOperation(); } DispatchFlags dispatchFlags( {}, //csrDependencies &timestampPacketDependencies->barrierNodes, //barrierTimestampPacketNodes {}, //pipelineSelectArgs commandQueue.flushStamp->getStampReference(), //flushStampReference commandQueue.getThrottle(), //throttle commandQueue.getDevice().getPreemptionMode(), //preemptionMode GrfConfig::DefaultGrfNumber, //numGrfRequired L3CachingSettings::l3CacheOn, //l3CacheSettings ThreadArbitrationPolicy::NotPresent, //threadArbitrationPolicy commandQueue.getSliceCount(), //sliceCount true, //blocking false, //dcFlush false, //useSLM true, //guardCommandBufferWithPipeControl false, //GSBA32BitRequired false, //requiresCoherency commandQueue.getPriority() == QueuePriority::LOW, //lowPriority false, //implicitFlush commandStreamReceiver.isNTo1SubmissionModelEnabled(), //outOfOrderExecutionAllowed false, //epilogueRequired false //usePerDssBackedBuffer ); UNRECOVERABLE_IF(!commandStreamReceiver.peekTimestampPacketWriteEnabled()); eventsRequest.fillCsrDependencies(dispatchFlags.csrDependencies, commandStreamReceiver, CsrDependencies::DependenciesType::OutOfCsr); makeTimestampPacketsResident(commandStreamReceiver); gtpinNotifyPreFlushTask(&commandQueue); completionStamp = commandStreamReceiver.flushTask(*kernelOperation->commandStream, 0, commandQueue.getIndirectHeap(IndirectHeap::DYNAMIC_STATE, 0u), commandQueue.getIndirectHeap(IndirectHeap::INDIRECT_OBJECT, 0u), commandQueue.getIndirectHeap(IndirectHeap::SURFACE_STATE, 0u), taskLevel, dispatchFlags, commandQueue.getDevice()); return completionStamp; } void Command::setEventsRequest(EventsRequest &eventsRequest) { this->eventsRequest = eventsRequest; if (eventsRequest.numEventsInWaitList > 0) { eventsWaitlist.resize(eventsRequest.numEventsInWaitList); auto size = eventsRequest.numEventsInWaitList * sizeof(cl_event); memcpy_s(&eventsWaitlist[0], size, eventsRequest.eventWaitList, size); this->eventsRequest.eventWaitList = &eventsWaitlist[0]; } } void Command::setTimestampPacketNode(TimestampPacketContainer &current, TimestampPacketDependencies &&dependencies) { currentTimestampPacketNodes = std::make_unique<TimestampPacketContainer>(); currentTimestampPacketNodes->assignAndIncrementNodesRefCounts(current); timestampPacketDependencies = std::make_unique<TimestampPacketDependencies>(); *timestampPacketDependencies = std::move(dependencies); } Command::~Command() { auto &commandStreamReceiver = commandQueue.getGpgpuCommandStreamReceiver(); if (commandStreamReceiver.peekTimestampPacketWriteEnabled()) { for (cl_event &eventFromWaitList : eventsWaitlist) { auto event = castToObjectOrAbort<Event>(eventFromWaitList); event->decRefInternal(); } } } void Command::makeTimestampPacketsResident(CommandStreamReceiver &commandStreamReceiver) { if 
(commandStreamReceiver.peekTimestampPacketWriteEnabled()) { for (cl_event &eventFromWaitList : eventsWaitlist) { auto event = castToObjectOrAbort<Event>(eventFromWaitList); if (event->getTimestampPacketNodes()) { event->getTimestampPacketNodes()->makeResident(commandStreamReceiver); } } } if (currentTimestampPacketNodes) { currentTimestampPacketNodes->makeResident(commandStreamReceiver); } if (timestampPacketDependencies) { timestampPacketDependencies->cacheFlushNodes.makeResident(commandStreamReceiver); timestampPacketDependencies->previousEnqueueNodes.makeResident(commandStreamReceiver); } } Command::Command(CommandQueue &commandQueue) : commandQueue(commandQueue) {} Command::Command(CommandQueue &commandQueue, std::unique_ptr<KernelOperation> &kernelOperation) : commandQueue(commandQueue), kernelOperation(std::move(kernelOperation)) {} } // namespace NEO
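// Editor's note: a simplified, commented-out sketch (not from the original sources) of the
// submit() pattern shared by CommandMapUnmap, CommandComputeKernel and CommandWithoutKernel
// above. The name submitSkeleton is hypothetical and the body only restates calls already
// visible in this file; it is not a buildable NEO API example.
//
// CompletionStamp &submitSkeleton(uint32_t taskLevel, bool terminated) {
//     if (terminated) { /* release owned resources, skip the flush */ return completionStamp; }
//     auto &csr = commandQueue.getGpgpuCommandStreamReceiver();
//     auto ownership = csr.obtainUniqueOwnership();          // serialize access to the CSR
//     DispatchFlags dispatchFlags(/* positional flags, see the constructors above */);
//     completionStamp = csr.flushTask(/* command stream, heaps, offset, */
//                                     taskLevel, dispatchFlags, commandQueue.getDevice());
//     return completionStamp;                                // task count / level / flush stamp
// }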
/* * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Additional permission under GNU GPL version 3 section 7 * * If you modify this Program, or any covered work, by linking or combining * it with OpenSSL (or a modified version of that library), containing parts * covered by the terms of OpenSSL License and SSLeay License, the licensors * of this Program grant you additional permission to convey the resulting work. * */ extern "C" { #include "c_groestl.h" #include "c_blake256.h" #include "c_jh.h" #include "c_skein.h" } #include "cryptonight.h" #include "cryptonight_aesni.h" #include "xmrstak/backend/cryptonight.hpp" #include "xmrstak/jconf.hpp" #include <stdio.h> #include <stdlib.h> #ifdef __GNUC__ #include <mm_malloc.h> #else #include <malloc.h> #endif // __GNUC__ #if defined(__APPLE__) #include <mach/vm_statistics.h> #endif #ifdef _WIN32 #include <windows.h> #include <ntsecapi.h> #else #include <sys/mman.h> #include <errno.h> #include <string.h> #endif // _WIN32 void do_blake_hash(const void* input, size_t len, char* output) { blake256_hash((uint8_t*)output, (const uint8_t*)input, len); } void do_groestl_hash(const void* input, size_t len, char* output) { groestl((const uint8_t*)input, len * 8, (uint8_t*)output); } void do_jh_hash(const void* input, size_t len, char* output) { jh_hash(32 * 8, (const uint8_t*)input, 8 * len, (uint8_t*)output); } void do_skein_hash(const void* input, size_t len, char* output) { skein_hash(8 * 32, (const uint8_t*)input, 8 * len, (uint8_t*)output); } void (* const extra_hashes[4])(const void *, size_t, char *) = {do_blake_hash, do_groestl_hash, do_jh_hash, do_skein_hash}; #ifdef _WIN32 BOOL bRebootDesirable = FALSE; //If VirtualAlloc fails, suggest a reboot BOOL AddPrivilege(TCHAR* pszPrivilege) { HANDLE hToken; TOKEN_PRIVILEGES tp; BOOL status; if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hToken)) return FALSE; if (!LookupPrivilegeValue(NULL, pszPrivilege, &tp.Privileges[0].Luid)) return FALSE; tp.PrivilegeCount = 1; tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; status = AdjustTokenPrivileges(hToken, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0); if (!status || (GetLastError() != ERROR_SUCCESS)) return FALSE; CloseHandle(hToken); return TRUE; } BOOL AddLargePageRights() { HANDLE hToken; PTOKEN_USER user = NULL; if (OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &hToken) == TRUE) { TOKEN_ELEVATION Elevation; DWORD cbSize = sizeof(TOKEN_ELEVATION); BOOL bIsElevated = FALSE; if (GetTokenInformation(hToken, TokenElevation, &Elevation, sizeof(Elevation), &cbSize)) bIsElevated = Elevation.TokenIsElevated; DWORD size = 0; GetTokenInformation(hToken, TokenUser, NULL, 0, &size); if (size > 0 && bIsElevated) { user = (PTOKEN_USER)LocalAlloc(LPTR, size); GetTokenInformation(hToken, TokenUser, user, size, &size); } CloseHandle(hToken); } if (!user) return FALSE; LSA_HANDLE handle; LSA_OBJECT_ATTRIBUTES attributes; ZeroMemory(&attributes, sizeof(attributes)); BOOL 
result = FALSE; if (LsaOpenPolicy(NULL, &attributes, POLICY_ALL_ACCESS, &handle) == 0) { LSA_UNICODE_STRING lockmem; lockmem.Buffer = L"SeLockMemoryPrivilege"; lockmem.Length = 42; lockmem.MaximumLength = 44; PLSA_UNICODE_STRING rights = NULL; ULONG cnt = 0; BOOL bHasRights = FALSE; if (LsaEnumerateAccountRights(handle, user->User.Sid, &rights, &cnt) == 0) { for (size_t i = 0; i < cnt; i++) { if (rights[i].Length == lockmem.Length && memcmp(rights[i].Buffer, lockmem.Buffer, 42) == 0) { bHasRights = TRUE; break; } } LsaFreeMemory(rights); } if(!bHasRights) result = LsaAddAccountRights(handle, user->User.Sid, &lockmem, 1) == 0; LsaClose(handle); } LocalFree(user); return result; } #endif size_t cryptonight_init(size_t use_fast_mem, size_t use_mlock, alloc_msg* msg) { #ifdef _WIN32 if(use_fast_mem == 0) return 1; if(AddPrivilege(TEXT("SeLockMemoryPrivilege")) == 0) { if(AddLargePageRights()) { msg->warning = "Added SeLockMemoryPrivilege to the current account. You need to reboot for it to work"; bRebootDesirable = TRUE; } else msg->warning = "Obtaning SeLockMemoryPrivilege failed."; return 0; } bRebootDesirable = TRUE; return 1; #else return 1; #endif // _WIN32 } cryptonight_ctx* cryptonight_alloc_ctx(size_t use_fast_mem, size_t use_mlock, alloc_msg* msg) { size_t hashMemSize; if(::jconf::inst()->IsCurrencyMonero()) { hashMemSize = MONERO_MEMORY; } else { hashMemSize = AEON_MEMORY; } cryptonight_ctx* ptr = (cryptonight_ctx*)_mm_malloc(sizeof(cryptonight_ctx), 4096); if(use_fast_mem == 0) { // use 2MiB aligned memory ptr->long_state = (uint8_t*)_mm_malloc(hashMemSize, hashMemSize); ptr->ctx_info[0] = 0; ptr->ctx_info[1] = 0; return ptr; } #ifdef _WIN32 SIZE_T iLargePageMin = GetLargePageMinimum(); if(hashMemSize > iLargePageMin) iLargePageMin *= 2; ptr->long_state = (uint8_t*)VirtualAlloc(NULL, iLargePageMin, MEM_COMMIT | MEM_RESERVE | MEM_LARGE_PAGES, PAGE_READWRITE); if(ptr->long_state == NULL) { _mm_free(ptr); if(bRebootDesirable) msg->warning = "VirtualAlloc failed. Reboot might help."; else msg->warning = "VirtualAlloc failed."; return NULL; } else { ptr->ctx_info[0] = 1; return ptr; } #else #if defined(__APPLE__) ptr->long_state = (uint8_t*)mmap(0, hashMemSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, VM_FLAGS_SUPERPAGE_SIZE_2MB, 0); #elif defined(__FreeBSD__) ptr->long_state = (uint8_t*)mmap(0, hashMemSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_ALIGNED_SUPER | MAP_PREFAULT_READ, -1, 0); #else ptr->long_state = (uint8_t*)mmap(0, hashMemSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, 0, 0); #endif if (ptr->long_state == MAP_FAILED) { _mm_free(ptr); msg->warning = "mmap failed"; return NULL; } ptr->ctx_info[0] = 1; if(madvise(ptr->long_state, hashMemSize, MADV_RANDOM|MADV_WILLNEED) != 0) msg->warning = "madvise failed"; ptr->ctx_info[1] = 0; if(use_mlock != 0 && mlock(ptr->long_state, hashMemSize) != 0) msg->warning = "mlock failed"; else ptr->ctx_info[1] = 1; return ptr; #endif // _WIN32 } void cryptonight_free_ctx(cryptonight_ctx* ctx) { size_t hashMemSize; if(::jconf::inst()->IsCurrencyMonero()) { hashMemSize = MONERO_MEMORY; } else { hashMemSize = AEON_MEMORY; } if(ctx->ctx_info[0] != 0) { #ifdef _WIN32 VirtualFree(ctx->long_state, 0, MEM_RELEASE); #else if(ctx->ctx_info[1] != 0) munlock(ctx->long_state, hashMemSize); munmap(ctx->long_state, hashMemSize); #endif // _WIN32 } else _mm_free(ctx->long_state); _mm_free(ctx); }
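// Editor's note: the Linux branch of cryptonight_alloc_ctx above requests 2 MiB huge pages via
// MAP_HUGETLB and reports an error when mmap fails. Below is a minimal, self-contained sketch of
// the same idea with an explicit fallback to regular pages, using only standard POSIX calls
// (MAP_HUGETLB itself is Linux-specific). The function alloc_scratchpad is illustrative and not
// part of xmr-stak.
#include <sys/mman.h>
#include <cstddef>

static void* alloc_scratchpad(size_t size, bool* used_huge_pages)
{
    // First try huge pages; MAP_POPULATE pre-faults the mapping so the first hash is not slowed
    // down by page faults.
    void* p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1, 0);
    if (p != MAP_FAILED) { *used_huge_pages = true; return p; }

    // Fall back to normal pages; madvise is only a hint, so its result is ignored here.
    p = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return nullptr;
    madvise(p, size, MADV_RANDOM | MADV_WILLNEED);
    *used_huge_pages = false;
    return p;
    // The caller is expected to release the mapping with munmap(p, size), mirroring
    // cryptonight_free_ctx above.
}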
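// Editor's note: the following CloakCompiler file builds triangle strips over a half-edge
// structure in which every triangle owns three consecutive edge slots. As a preface, a tiny
// self-contained sketch of that indexing convention; the lambdas mirror the TriangleByEdge /
// FirstEdgeOfTrinalge / NextEdge / PrevEdge helpers defined below but are illustrative only.
#include <cassert>
#include <cstddef>

int main()
{
    // Edges of triangle t occupy slots 3*t, 3*t+1, 3*t+2; Next/Prev cycle inside one triangle.
    auto triangleByEdge = [](size_t e) { return e / 3; };
    auto nextEdge = [](size_t e) { return 3 * (e / 3) + ((e + 1) % 3); };
    auto prevEdge = [](size_t e) { return 3 * (e / 3) + ((e + 2) % 3); };

    assert(triangleByEdge(7) == 2);   // edge 7 belongs to triangle 2 (slots 6, 7, 8)
    assert(nextEdge(8) == 6);         // wraps around within triangle 2
    assert(prevEdge(6) == 8);
    return 0;
}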
#include "stdafx.h" #include "CloakCompiler/Mesh.h" #include "CloakEngine/Files/ExtendedBuffers.h" #include "Engine/TempHandler.h" #include "Engine/BoundingVolume.h" #include <assert.h> #include <sstream> //#define DEBUG_ENFORCE_STRIPS //#define DEBUG_ENFORCE_NO_STRIPS #define ALLOW_STRIP_LIST_MIX #define STRIP_FUNC(name) size_t name(In size_t triID, In size_t swapEdge, In const CE::List<HalfEdge>& edges, In const CE::List<Triangle>& faces, In uint64_t floodFillVisited, In bool firstCall) #define PRINT_STRIPS namespace CloakCompiler { namespace API { namespace Mesh { typedef std::function<void(const RespondeInfo&)> RespondeFunc; constexpr float g_floatDelta = 0.0001f; constexpr size_t STRIP_CUT_VALUE = 0xFFFFFFFF; constexpr size_t STRIP_CUT_VALUE_16 = 0xFFFF; constexpr size_t MAX_IB16_SIZE = 1 << 16; //Minimum required number of single triangles to not use them as strip // Since draw calls have a cost, we prefere to have a bit longer index buffers than additional // draw calls with a few triangles. This value is just a guess, so it might change in future constexpr size_t MIN_TRI_COUNT = 128; enum class IndexBufferType { IB16, IB16ShortCut, IB32, }; struct FinalVertex { CloakEngine::Global::Math::Vector Position; CloakEngine::Global::Math::Vector Normal; CloakEngine::Global::Math::Vector Binormal; CloakEngine::Global::Math::Vector Tangent; size_t MaterialID; TexCoord TexCoord; Bone Bones[4]; bool CLOAK_CALL CompareForIndex(In const FinalVertex& b) const { bool r = Position == b.Position; r = r && Normal == b.Normal; r = r && Binormal == b.Binormal; r = r && Tangent == b.Tangent; r = r && abs(TexCoord.U - b.TexCoord.U) < g_floatDelta; r = r && abs(TexCoord.V - b.TexCoord.V) < g_floatDelta; for (size_t a = 0; a < 4 && r; a++) { r = r && Bones[a].BoneID == b.Bones[a].BoneID; r = r && abs(Bones[a].Weight - b.Bones[a].Weight) < g_floatDelta; } return r; } }; struct MaterialRange { size_t MaterialID; size_t ListCount; size_t StripCount; }; constexpr size_t VertexSize = sizeof(FinalVertex) - sizeof(size_t); constexpr CloakEngine::Files::FileType g_TempFileType{ "MeshTemp","CEMT",1000 }; namespace Strips { //Number of indices the triangle-list index buffer should at least include to justify the additional draw call: constexpr size_t MIN_TRIANGLE_LIST_SIZE = 128; //Percentage of indices (relative to the triangle-strip index buffer) the triangle-list index buffer should at least include to justify the additional draw call: constexpr float MIN_TRIANGLE_LIST_PERCENTAGE = 0.1f; struct HalfEdge { size_t Vertex; size_t OppositeEdge; }; struct Triangle { size_t Edge; size_t AdjacentCount; size_t MaterialID; uint64_t FloodFillVisited; }; CLOAK_FORCEINLINE constexpr size_t TriangleByEdge(In size_t edgeID) { return edgeID / 3; } CLOAK_FORCEINLINE constexpr size_t FirstEdgeOfTrinalge(In size_t edgeID) { return edgeID * 3; } CLOAK_FORCEINLINE constexpr size_t NextEdge(In size_t edgeID) { const size_t tID = TriangleByEdge(edgeID); return (3 * tID) + ((edgeID + 1) % 3); } CLOAK_FORCEINLINE constexpr size_t PrevEdge(In size_t edgeID) { const size_t tID = TriangleByEdge(edgeID); return (3 * tID) + ((edgeID + 2) % 3); } inline size_t CLOAK_CALL AdjacentTriangleCount(In size_t triangleID, In const CE::List<HalfEdge>& edges, In const CE::List<Triangle>& faces, In uint64_t floodFillVisited) { if (triangleID >= edges.size() / 3) { return 0; } size_t res = 0; for (size_t a = 0; a < 3; a++) { const size_t i = (3 * triangleID) + a; const size_t j = edges[i].OppositeEdge; if (j < edges.size() && 
faces[TriangleByEdge(j)].MaterialID == faces[TriangleByEdge(j)].MaterialID && faces[TriangleByEdge(j)].FloodFillVisited != floodFillVisited) { res++; } } return res; } inline void CLOAK_CALL UpdateVisited(In size_t triangleID, In const CE::List<HalfEdge>& edges, Inout CE::List<Triangle>* faces, In uint64_t floodFillVisited) { faces->at(triangleID).FloodFillVisited = floodFillVisited; for (size_t a = 0, b = FirstEdgeOfTrinalge(triangleID); a < 3; a++, b = NextEdge(b)) { const size_t e = edges[b].OppositeEdge; if (e != edges.size()) { const size_t t = TriangleByEdge(e); faces->at(t).AdjacentCount = AdjacentTriangleCount(t, edges, *faces, floodFillVisited); } } } typedef STRIP_FUNC((*StripFunc)); STRIP_FUNC(LNLS) { size_t bstEdge = edges.size(); size_t adjNum = static_cast<size_t>(-1); for (size_t a = FirstEdgeOfTrinalge(triID), b = 0; b < 3; b++, a = NextEdge(a)) { const size_t opEdge = edges[a].OppositeEdge; if (opEdge < edges.size() && faces[TriangleByEdge(opEdge)].FloodFillVisited != floodFillVisited) { const size_t adjTri = TriangleByEdge(opEdge); const size_t adj = AdjacentTriangleCount(adjTri, edges, faces, floodFillVisited); //Check if edge require swap: const bool swap = !firstCall && a == swapEdge; if (adj < adjNum || (adj == adjNum && swap == false)) { adjNum = adj; bstEdge = a; } } } return bstEdge; } STRIP_FUNC(LNLN) { size_t bstEdge = edges.size(); size_t adjNum = static_cast<size_t>(-1); size_t adjNumSec = static_cast<size_t>(-1); for (size_t a = FirstEdgeOfTrinalge(triID), b = 0; b < 3; b++, a = NextEdge(a)) { const size_t opEdge = edges[a].OppositeEdge; if (opEdge < edges.size() && faces[TriangleByEdge(opEdge)].FloodFillVisited != floodFillVisited) { const size_t adjTri = TriangleByEdge(opEdge); const size_t adj = AdjacentTriangleCount(adjTri, edges, faces, floodFillVisited); const bool swap = !firstCall && a == swapEdge; size_t adjSec = static_cast<size_t>(-1); if (adj <= adjNum) { //Look one step ahead: for (size_t c = adjTri * 3, d = 0; d < 3; d++, c = NextEdge(c)) { const size_t opSec = edges[c].OppositeEdge; if (opSec < edges.size() && opSec != a && faces[TriangleByEdge(opSec)].FloodFillVisited != floodFillVisited) { const size_t adjTriSec = TriangleByEdge(opSec); const size_t sadj = AdjacentTriangleCount(adjTriSec, edges, faces, floodFillVisited); adjSec = min(adjSec, sadj); } } } if (adj < adjNum || (adj == adjNum && adjSec < adjNumSec) || (adj == adjNum && adjSec == adjNumSec && swap == false)) { adjNum = adj; adjNumSec = adjSec; bstEdge = a; } } } return bstEdge; } constexpr StripFunc Functions[] = { LNLS, LNLN }; inline bool CLOAK_CALL CalculateIndexBufferStrips(In const CE::List<size_t>& indexBaseBuffer, Inout CE::List<size_t>* indexBuffer, Inout CloakEngine::List<MaterialRange>* materialRanges) { bool res = false; size_t firstIndexPos = 0; #ifndef DEBUG_ENFORCE_NO_STRIPS #ifdef DEBUG_ENFORCE_STRIPS indexBuffer->clear(); #endif //Create half-edge structure: CE::List<Triangle> faces(indexBaseBuffer.size() / 3); CE::List<HalfEdge> edges(faces.size() * 3); //This array allows to find an edge by two entries of an index buffer: CE::FlatMap<std::pair<size_t, size_t>, size_t> vertexToEdge; //To test whether we included an face already in our new index buffer, we use a flood fill counter uint64_t floodFillVisited = 1; #define VERTEX_TO_EDGE(v0, v1) vertexToEdge[std::make_pair((v0),(v1))] #define VERTEX_EDGE_EXIST(v0, v1) (vertexToEdge.find(std::make_pair((v0), (v1))) != vertexToEdge.end()) //Calculate edges: for (size_t a = 0; a < indexBaseBuffer.size(); a += 3) { size_t 
vli = a + 2; for (size_t b = 0; b < 3; b++) { const size_t vni = a + b; const size_t vl = indexBaseBuffer[vli]; const size_t vn = indexBaseBuffer[vni]; edges[vni].Vertex = vl; edges[vni].OppositeEdge = edges.size(); VERTEX_TO_EDGE(vl, vn) = vni; vli = vni; } } //Initialize face values: for (size_t a = 0; a < faces.size(); a++) { faces[a].Edge = a * 3; faces[a].FloodFillVisited = 0; faces[a].AdjacentCount = 0; faces[a].MaterialID = ~0; } //Initialize material IDs: size_t firstEdge = 0; for (size_t a = 0; a < materialRanges->size(); a++) { CLOAK_ASSUME(materialRanges->at(a).StripCount == 0); const size_t lastEdge = min(firstEdge + materialRanges->at(a).ListCount, edges.size()); CLOAK_ASSUME(lastEdge % 3 == 0); for (size_t c = firstEdge; c < lastEdge; c += 3) { const size_t t = TriangleByEdge(c); faces[t].MaterialID = materialRanges->at(a).MaterialID; } firstEdge = lastEdge; } //Calculate opposite edges: for (size_t a = 0; a < indexBaseBuffer.size(); a += 3) { size_t vli = a + 2; for (size_t b = 0; b < 3; b++) { const size_t vni = a + b; const size_t vl = indexBaseBuffer[vli]; const size_t vn = indexBaseBuffer[vni]; const size_t eM = VERTEX_TO_EDGE(vl, vn); if (VERTEX_EDGE_EXIST(vn, vl)) { const size_t eO = VERTEX_TO_EDGE(vn, vl); const size_t tM = TriangleByEdge(eM); const size_t tO = TriangleByEdge(eO); if (faces[tM].MaterialID == faces[tO].MaterialID) { edges[eM].OppositeEdge = eO; vli = vni; continue; } } edges[eM].OppositeEdge = edges.size(); vli = vni; } } CE::List<size_t> newIBStrip; CE::List<size_t> newIBList; firstEdge = 0; for (size_t a = 0; a < materialRanges->size(); a++) { CLOAK_ASSUME(materialRanges->at(a).StripCount == 0); if (materialRanges->at(a).ListCount < 3) { continue; } const size_t lastEdge = min(firstEdge + materialRanges->at(a).ListCount, edges.size()); CLOAK_ASSUME(lastEdge % 3 == 0); //Calculate triangle strips: for (size_t b = 0; b < ARRAYSIZE(Functions); b++) { //Main Algorithm: do { //Find best face (with lowest adjacent count) to start with: size_t face = faces.size(); size_t bstAdjC = ~0; for (size_t c = firstEdge; c < lastEdge; c += 3) { const size_t t = TriangleByEdge(c); faces[t].AdjacentCount = AdjacentTriangleCount(t, edges, faces, floodFillVisited); if (faces[t].FloodFillVisited != floodFillVisited && faces[t].AdjacentCount < bstAdjC) { bstAdjC = faces[t].AdjacentCount; face = t; } } if (face == faces.size()) { break; } // No more faces UpdateVisited(face, edges, &faces, floodFillVisited); size_t edge = Functions[b](face, edges.size(), edges, faces, floodFillVisited, true); if (edge == edges.size()) { //Single triangle, all adjacent triangles were already used: edge = FirstEdgeOfTrinalge(face); #ifdef ALLOW_STRIP_LIST_MIX newIBList.push_back(edges[edge].Vertex); edge = NextEdge(edge); newIBList.push_back(edges[edge].Vertex); edge = NextEdge(edge); newIBList.push_back(edges[edge].Vertex); #else newIBStrip.push_back(edges[edge].Vertex); edge = NextEdge(edge); newIBStrip.push_back(edges[edge].Vertex); edge = NextEdge(edge); newIBStrip.push_back(edges[edge].Vertex); newIBStrip.push_back(STRIP_CUT_VALUE); #endif } else { const size_t startEdge = edge; bool extendSecondDir = false; //Insert first triangle: newIBStrip.push_back(edges[PrevEdge(edge)].Vertex); newIBStrip.push_back(edges[edge].Vertex); size_t curEdge = edges[edge].OppositeEdge; CLOAK_ASSUME(curEdge < edges.size()); newIBStrip.push_back(edges[curEdge].Vertex); //Walk along strip: do { insert_ccw_triangle: size_t swapEdge = NextEdge(curEdge); face = TriangleByEdge(curEdge); UpdateVisited(face, edges, 
&faces, floodFillVisited); edge = Functions[b](face, swapEdge, edges, faces, floodFillVisited, false); if (edge == edges.size()) { //Strip ended (insert strip cut twice to enforce even number of indices) newIBStrip.push_back(edges[PrevEdge(curEdge)].Vertex); newIBStrip.push_back(STRIP_CUT_VALUE); newIBStrip.push_back(STRIP_CUT_VALUE); break; } else if (edge == swapEdge) { //Swap newIBStrip.push_back(edges[edge].Vertex); curEdge = edges[edge].OppositeEdge; CLOAK_ASSUME(curEdge < edges.size()); newIBStrip.push_back(edges[curEdge].Vertex); goto insert_ccw_triangle; } else { newIBStrip.push_back(edges[edge].Vertex); curEdge = edges[edge].OppositeEdge; CLOAK_ASSUME(curEdge < edges.size()); } insert_cw_triangle: swapEdge = PrevEdge(curEdge); face = TriangleByEdge(curEdge); UpdateVisited(face, edges, &faces, floodFillVisited); edge = Functions[b](face, swapEdge, edges, faces, floodFillVisited, false); if (edge == edges.size()) { //Strip ended newIBStrip.push_back(edges[PrevEdge(curEdge)].Vertex); newIBStrip.push_back(STRIP_CUT_VALUE); break; } else if (edge == swapEdge) { //Swap newIBStrip.push_back(edges[curEdge].Vertex); newIBStrip.push_back(edges[edge].Vertex); curEdge = edges[edge].OppositeEdge; CLOAK_ASSUME(curEdge < edges.size()); goto insert_cw_triangle; } else { curEdge = edges[edge].OppositeEdge; CLOAK_ASSUME(curEdge < edges.size()); newIBStrip.push_back(edges[curEdge].Vertex); } } while (true); //Try to extend in second direction: if (extendSecondDir == false) { extendSecondDir = true; size_t swapEdge = NextEdge(startEdge); face = TriangleByEdge(startEdge); edge = Functions[b](face, swapEdge, edges, faces, floodFillVisited, false); if (edge < edges.size()) { //Remove last strip cuts from strip: while (newIBStrip.empty() == false && newIBStrip.back() == STRIP_CUT_VALUE) { newIBStrip.pop_back(); } //Reverse strip: // To reverse the strip, we need an even amount of indices in the strip. 
Otherwise, we would also reverse face directions if (newIBStrip.size() % 2 != 0) { newIBStrip.push_back(newIBStrip.back()); } for (size_t a = 0; a < newIBStrip.size() >> 1; a++) { std::swap(newIBStrip[a], newIBStrip[newIBStrip.size() - (a + 1)]); } if (edge == swapEdge) { newIBStrip.pop_back(); newIBStrip.push_back(edges[edge].Vertex); curEdge = edges[edge].OppositeEdge; CLOAK_ASSUME(curEdge < edges.size()); newIBStrip.push_back(edges[curEdge].Vertex); goto insert_ccw_triangle; } else { curEdge = edges[edge].OppositeEdge; CLOAK_ASSUME(curEdge < edges.size()); goto insert_cw_triangle; } } } } } while (true); //If the number of single triangles in the list does not justify a second draw call, we will merge it into the strips: if (newIBStrip.size() > 0 && (newIBList.size() < MIN_TRIANGLE_LIST_SIZE || newIBList.size() < static_cast<size_t>(newIBStrip.size() * MIN_TRIANGLE_LIST_PERCENTAGE))) { for (size_t a = 0; a < newIBList.size(); a += 3) { newIBStrip.push_back(newIBList[a + 0]); newIBStrip.push_back(newIBList[a + 1]); newIBStrip.push_back(newIBList[a + 2]); newIBStrip.push_back(STRIP_CUT_VALUE); } newIBList.clear(); } //Remove last strip cuts from strip: while (newIBStrip.empty() == false && newIBStrip.back() == STRIP_CUT_VALUE) { newIBStrip.pop_back(); } //Compare and copy new index buffer: #ifndef DEBUG_ENFORCE_STRIPS if (newIBStrip.size() + newIBList.size() < materialRanges->at(a).ListCount + materialRanges->at(a).StripCount) #else if (b == 0 || newIBStrip.size() + newIBList.size() < materialRanges->at(a).ListCount + materialRanges->at(a).StripCount) #endif { res = true; materialRanges->at(a).ListCount = newIBList.size(); materialRanges->at(a).StripCount = newIBStrip.size(); CLOAK_ASSUME(firstIndexPos + newIBList.size() + newIBStrip.size() <= indexBuffer->size()); #ifndef DEBUG_ENFORCE_STRIPS size_t p = firstIndexPos; for (size_t b = 0; b < newIBStrip.size(); b++, p++) { indexBuffer->at(p) = newIBStrip[b]; } for (size_t b = 0; b < newIBList.size(); b++, p++) { indexBuffer->at(p) = newIBList[b]; } #else indexBuffer->resize(firstIndexPos); for (size_t b = 0; b < newIBStrip.size(); b++) { indexBuffer->push_back(newIBStrip[b]); } for (size_t b = 0; b < newIBList.size(); b++) { indexBuffer->push_back(newIBList[b]); } #endif } newIBStrip.clear(); newIBList.clear(); floodFillVisited++; } firstIndexPos += materialRanges->at(a).ListCount + materialRanges->at(a).StripCount; firstEdge = lastEdge; } indexBuffer->resize(firstIndexPos); #undef VERTEX_TO_EDGE #endif #ifdef PRINT_STRIPS CE::Global::Log::WriteToLog("Final index buffer (" + std::to_string(indexBuffer->size()) + " Indices):"); firstIndexPos = 0; for (size_t a = 0; a < materialRanges->size(); a++) { CE::Global::Log::WriteToLog("\tMaterial " + std::to_string(a)); for (size_t b = 0, s = 0; b < materialRanges->at(a).StripCount; s++) { std::stringstream r; r << "\t\tStrip " << s << ": "; if (indexBuffer->at(firstIndexPos + b) != STRIP_CUT_VALUE) { r << indexBuffer->at(firstIndexPos + b++); while (b < materialRanges->at(a).StripCount && indexBuffer->at(firstIndexPos + b) != STRIP_CUT_VALUE) { r << " | " << indexBuffer->at(firstIndexPos + b++); } } while (b < materialRanges->at(a).StripCount && indexBuffer->at(firstIndexPos + b) == STRIP_CUT_VALUE) { r << " | CUT"; b++; } CE::Global::Log::WriteToLog(r.str()); } if (materialRanges->at(a).ListCount > 0) { std::stringstream r; r << "\t\tList: " << indexBuffer->at(firstIndexPos + materialRanges->at(a).StripCount); for (size_t b = 1; b < materialRanges->at(a).ListCount; b++) { r << " | " << 
indexBuffer->at(firstIndexPos + materialRanges->at(a).StripCount + b); } CE::Global::Log::WriteToLog(r.str()); } firstIndexPos += materialRanges->at(a).ListCount + materialRanges->at(a).StripCount; } #endif return res; } } inline void CLOAK_CALL SendResponse(In RespondeFunc func, In RespondeCode code, In size_t Polygon, In_opt size_t Vertex = 0, In_opt std::string msg = "") { RespondeInfo info; info.Code = code; info.Polygon = Polygon; info.Vertex = Vertex; info.Msg = msg; func(info); } inline void CLOAK_CALL SendResponse(In RespondeFunc func, In RespondeCode code, In_opt std::string msg = "") { SendResponse(func, code, 0, 0, msg); } inline void CLOAK_CALL CopyVertex(In const Vertex& a, In const CloakEngine::Global::Math::Vector& normal, In const CloakEngine::Global::Math::Vector& binormal, In const CloakEngine::Global::Math::Vector& tangent, In uint32_t material, Out FinalVertex* b) { Bone tb[4]; //Remove bones with same ID for (size_t c = 0; c < 4; c++) { if (a.Bones[c].Weight > 0) { bool f = false; for (size_t d = 0; d < c && f == false; d++) { if (tb[d].Weight > 0 && tb[d].BoneID == a.Bones[c].BoneID) { f = true; tb[d].Weight += a.Bones[c].Weight; } } if (f == false) { tb[c].BoneID = a.Bones[c].BoneID; tb[c].Weight = a.Bones[c].Weight; } else { tb[c].BoneID = 0; tb[c].Weight = 0; } } else { tb[c].BoneID = 0; tb[c].Weight = 0; } } float wNorm = 0; //Normalize bone weights for (size_t c = 0; c < 4; c++) { if (tb[c].Weight > 0) { wNorm += tb[c].Weight; } } //Set final vertex b->Position = a.Position; b->TexCoord = a.TexCoord; b->Normal = normal; b->Binormal = binormal; b->Tangent = tangent; b->MaterialID = material; for (size_t c = 0; c < 4; c++) { if (tb[c].Weight > 0) { b->Bones[c].BoneID = tb[c].BoneID; b->Bones[c].Weight = tb[c].Weight / wNorm; } else { b->Bones[c].BoneID = 0; b->Bones[c].Weight = 0; } } } inline void CLOAK_CALL CalculatePolygonVertices(In const Vertex& a, In const Vertex& b, In const Vertex& c, In bool calcNorms, In size_t material, Out FinalVertex res[3]) { TexCoord tex[2]; tex[0].U = b.TexCoord.U - a.TexCoord.U; tex[0].V = b.TexCoord.V - a.TexCoord.V; tex[1].U = c.TexCoord.U - a.TexCoord.U; tex[1].V = c.TexCoord.V - a.TexCoord.V; CloakEngine::Global::Math::Vector vec[2]; vec[0] = static_cast<const CloakEngine::Global::Math::Vector>(b.Position) - a.Position; vec[1] = static_cast<const CloakEngine::Global::Math::Vector>(c.Position) - a.Position; float det = (tex[0].U*tex[1].V) - (tex[1].U*tex[0].V); if (fabsf(det) < 1e-4f) { tex[0].U = 1; tex[0].V = 0; tex[1].U = 1; tex[1].V = -1; det = -1; } const float den = 1.0f / det; CloakEngine::Global::Math::Vector tangent = (den * ((tex[1].V * vec[0]) - (tex[0].V * vec[1]))).Normalize(); CloakEngine::Global::Math::Vector binormal = (den * ((tex[0].U * vec[1]) - (tex[1].U * vec[0]))).Normalize(); if (calcNorms) { CloakEngine::Global::Math::Vector normal = tangent.Cross(binormal).Normalize(); tangent -= binormal * (binormal.Dot(tangent)); if (normal.Cross(tangent).Dot(binormal) < 0) { tangent = -tangent; } tangent = tangent.Normalize(); CopyVertex(a, normal, binormal, tangent, static_cast<uint32_t>(material), &res[0]); CopyVertex(b, normal, binormal, tangent, static_cast<uint32_t>(material), &res[1]); CopyVertex(c, normal, binormal, tangent, static_cast<uint32_t>(material), &res[2]); } else { const CloakEngine::Global::Math::Vector an = static_cast<CloakEngine::Global::Math::Vector>(a.Normal).Normalize(); const CloakEngine::Global::Math::Vector bn = static_cast<CloakEngine::Global::Math::Vector>(b.Normal).Normalize(); const 
CloakEngine::Global::Math::Vector cn = static_cast<CloakEngine::Global::Math::Vector>(c.Normal).Normalize(); const CloakEngine::Global::Math::Vector at = (tangent - (an.Dot(tangent)*an)).Normalize(); const CloakEngine::Global::Math::Vector bt = (tangent - (bn.Dot(tangent)*bn)).Normalize(); const CloakEngine::Global::Math::Vector ct = (tangent - (cn.Dot(tangent)*cn)).Normalize(); const CloakEngine::Global::Math::Vector ab = (binormal - ((an.Dot(binormal)*an) + (at.Dot(binormal)*at))).Normalize(); const CloakEngine::Global::Math::Vector bb = (binormal - ((bn.Dot(binormal)*bn) + (bt.Dot(binormal)*bt))).Normalize(); const CloakEngine::Global::Math::Vector cb = (binormal - ((cn.Dot(binormal)*cn) + (ct.Dot(binormal)*ct))).Normalize(); CopyVertex(a, a.Normal, ab, at, static_cast<uint32_t>(material), &res[0]); CopyVertex(b, b.Normal, bb, bt, static_cast<uint32_t>(material), &res[1]); CopyVertex(c, c.Normal, cb, ct, static_cast<uint32_t>(material), &res[2]); } } inline IndexBufferType CLOAK_CALL CalculateIndexBuffer(In const FinalVertex* vb, In size_t vbs, Out CE::List<size_t>* ib, Out CE::List<bool>* usedVB, Out CloakEngine::List<MaterialRange>* materialRanges) { usedVB->resize(vbs); ib->clear(); ib->reserve(vbs); materialRanges->clear(); //Create Index Reference Buffer: CE::List<size_t> irb(vbs); for (size_t a = 0; a < vbs; a++) { const FinalVertex& v = vb[a]; for (size_t b = 0; b < a; b++) { const FinalVertex& i = vb[irb[b]]; if (v.CompareForIndex(i)) { usedVB->at(a) = false; irb[a] = irb[b]; goto irb_found_index; } } usedVB->at(a) = true; irb[a] = a; irb_found_index: continue; } //Create rebased index buffer: CE::List<size_t> ibb(vbs); ibb[0] = 0; for (size_t a = 1; a < vbs; a++) { if (usedVB->at(a) == true) { ibb[a] = ibb[a - 1] + 1; } else { ibb[a] = ibb[a - 1]; } } for (size_t a = 0; a < vbs; a++) { size_t p = a; while (p != irb[p]) { CLOAK_ASSUME(usedVB->at(p) == false); p = irb[p]; } ibb[a] = ibb[p]; } #ifdef PRINT_STRIPS CE::Global::Log::WriteToLog("Simple Index Buffer ("+std::to_string(ibb.size())+" Indices):"); #endif size_t ibStart = 0; for (size_t a = 0; a < vbs; a++) { ib->push_back(ibb[a]); #ifdef PRINT_STRIPS CE::Global::Log::WriteToLog("\tIB[" + std::to_string(a) + "] = " + std::to_string(ibb[a])); #endif if (a > 0 && vb[a].MaterialID != vb[a - 1].MaterialID) { MaterialRange mr; mr.ListCount = a - ibStart; mr.StripCount = 0; mr.MaterialID = vb[a - 1].MaterialID; materialRanges->push_back(mr); ibStart = a; } } //Add last material range: MaterialRange mr; mr.ListCount = vbs - ibStart; mr.StripCount = 0; mr.MaterialID = vb[vbs - 1].MaterialID; materialRanges->push_back(mr); //Calculate triangle strips: IndexBufferType res; if (Strips::CalculateIndexBufferStrips(ibb, ib, materialRanges) == false) { res = ib->size() < MAX_IB16_SIZE ? IndexBufferType::IB16 : IndexBufferType::IB32; } else { res = ib->size() < MAX_IB16_SIZE ? 
IndexBufferType::IB16 : IndexBufferType::IB32; if (res == IndexBufferType::IB16) { for (size_t a = 0; a < ib->size(); a++) { if (ib->at(a) == STRIP_CUT_VALUE) { res = IndexBufferType::IB16ShortCut; } } } } return res; } inline bool CLOAK_CALL CheckFloat(In CloakEngine::Files::IReader* r, In const float& f) { const float i = static_cast<float>(r->ReadDouble(32)); return abs(i - f) < g_floatDelta; } inline bool CLOAK_CALL CheckVector(In CloakEngine::Files::IReader* r, In const CloakEngine::Global::Math::Vector& v) { CloakEngine::Global::Math::Point p(v); return CheckFloat(r, p.X) && CheckFloat(r, p.Y) && CheckFloat(r, p.Z); } inline void CLOAK_CALL WriteVector(In CloakEngine::Files::IWriter* w, In const CloakEngine::Global::Math::Vector& v) { CloakEngine::Global::Math::Point p(v); w->WriteDouble(32, p.X); w->WriteDouble(32, p.Y); w->WriteDouble(32, p.Z); } inline bool CLOAK_CALL CheckTemp(In CloakEngine::Files::IWriter* output, In const EncodeDesc& encode, In const Desc& desc, In RespondeFunc func) { bool suc = false; if ((encode.flags & EncodeFlags::NO_TEMP_READ) == EncodeFlags::NONE) { CloakEngine::Files::IReader* read = nullptr; CREATE_INTERFACE(CE_QUERY_ARGS(&read)); if (read != nullptr) { const std::u16string tmpPath = encode.tempPath; suc = read->SetTarget(tmpPath, g_TempFileType, false, true) == g_TempFileType.Version; if (suc) { SendResponse(func, RespondeCode::CHECK_TMP); } if (suc) { suc = !Engine::TempHandler::CheckGameID(read, encode.targetGameID); } if (suc) { suc = static_cast<BoundingVolume>(read->ReadBits(8)) == desc.Bounding; } if (suc) { suc = read->ReadDynamic() == desc.Vertices.size(); } if (suc) { suc = read->ReadDynamic() == desc.Polygons.size(); } if (suc) { for (size_t a = 0; a < desc.Polygons.size() && suc; a++) { const Polygon& p = desc.Polygons[a]; suc = suc && ((read->ReadBits(1) == 1) == p.AutoGenerateNormal); suc = suc && (read->ReadBits(32) == p.Material); suc = suc && (read->ReadBits(32) == p.Point[0]); suc = suc && (read->ReadBits(32) == p.Point[1]); suc = suc && (read->ReadBits(32) == p.Point[2]); } } if (suc) { for (size_t a = 0; a < desc.Vertices.size() && suc; a++) { const Vertex& v = desc.Vertices[a]; suc = suc && CheckVector(read, v.Position); suc = suc && CheckVector(read, v.Normal); suc = suc && CheckFloat(read, v.TexCoord.U); suc = suc && CheckFloat(read, v.TexCoord.V); for (size_t b = 0; b < 4 && suc; b++) { suc = suc && (read->ReadBits(32) == v.Bones[b].BoneID); suc = suc && CheckFloat(read, v.Bones[b].Weight); } } } if (suc) { uint32_t bys = static_cast<uint32_t>(read->ReadBits(32)); uint8_t bis = static_cast<uint8_t>(read->ReadBits(3)); for (uint32_t a = 0; a < bys; a++) { output->WriteBits(8, read->ReadBits(8)); } if (bis > 0) { output->WriteBits(bis, read->ReadBits(bis)); } } } SAVE_RELEASE(read); } return !suc; } inline void CLOAK_CALL WriteTemp(In CloakEngine::Files::IVirtualWriteBuffer* data, In uint32_t bys, In uint8_t bis, In const EncodeDesc& encode, In const Desc& desc, In RespondeFunc response) { if ((encode.flags & EncodeFlags::NO_TEMP_WRITE) == EncodeFlags::NONE) { SendResponse(response, RespondeCode::WRITE_TMP); CloakEngine::Files::IWriter* write = nullptr; CREATE_INTERFACE(CE_QUERY_ARGS(&write)); write->SetTarget(encode.tempPath, g_TempFileType, CloakEngine::Files::CompressType::NONE); Engine::TempHandler::WriteGameID(write, encode.targetGameID); write->WriteBits(8, static_cast<uint8_t>(desc.Bounding)); write->WriteDynamic(desc.Vertices.size()); write->WriteDynamic(desc.Polygons.size()); for (size_t a = 0; a < desc.Polygons.size(); 
a++) { const Polygon& p = desc.Polygons[a]; write->WriteBits(1, p.AutoGenerateNormal ? 1 : 0); write->WriteBits(32, p.Material); write->WriteBits(32, p.Point[0]); write->WriteBits(32, p.Point[1]); write->WriteBits(32, p.Point[2]); } for (size_t a = 0; a < desc.Vertices.size(); a++) { const Vertex& v = desc.Vertices[a]; WriteVector(write, v.Position); WriteVector(write, v.Normal); write->WriteDouble(32, v.TexCoord.U); write->WriteDouble(32, v.TexCoord.V); for (size_t b = 0; b < 4; b++) { write->WriteBits(32, v.Bones[b].BoneID); write->WriteDouble(32, v.Bones[b].Weight); } } write->WriteBits(32, bys); write->WriteBits(3, bis); write->WriteBuffer(data, bys, bis); SAVE_RELEASE(write); } } inline void CLOAK_CALL WriteSingleVertex(In CloakEngine::Files::IWriter* write, In const FinalVertex& v, In bool saveBones) { WriteVector(write, v.Position); WriteVector(write, v.Normal); WriteVector(write, v.Binormal); WriteVector(write, v.Tangent); write->WriteDouble(32, v.TexCoord.U); write->WriteDouble(32, v.TexCoord.V); if (saveBones) { size_t bc = 0; for (size_t a = 0; a < 4; a++) { if (v.Bones[a].Weight > 0) { bc++; } } write->WriteBits(2, bc); for (size_t a = 0; a < 4; a++) { if (v.Bones[a].Weight > 0) { write->WriteBits(32, v.Bones[a].BoneID); write->WriteDouble(32, v.Bones[a].Weight); } } } } #ifdef _DEBUG inline void CLOAK_CALL __DBG_PrintVertex(In const FinalVertex& v) { CloakEngine::Global::Math::Point h(v.Position); CloakDebugLog("\tPosition = [" + std::to_string(h.X) + "|" + std::to_string(h.Y) + "|" + std::to_string(h.Z) + "]"); h = static_cast<CloakEngine::Global::Math::Point>(v.Normal); CloakDebugLog("\tNormal = [" + std::to_string(h.X) + "|" + std::to_string(h.Y) + "|" + std::to_string(h.Z) + "]"); h = static_cast<CloakEngine::Global::Math::Point>(v.Binormal); CloakDebugLog("\tBinormal = [" + std::to_string(h.X) + "|" + std::to_string(h.Y) + "|" + std::to_string(h.Z) + "]"); h = static_cast<CloakEngine::Global::Math::Point>(v.Tangent); CloakDebugLog("\tTangent = [" + std::to_string(h.X) + "|" + std::to_string(h.Y) + "|" + std::to_string(h.Z) + "]"); CloakDebugLog("\tTexCoord = [" + std::to_string(v.TexCoord.U) + "|" + std::to_string(v.TexCoord.V) + "]"); } #define PrintVertex(v) __DBG_PrintVertex(v) #else #define PrintVertex(v) #endif inline void CLOAK_CALL WriteIndexVertexBuffer(In CloakEngine::Files::IWriter* write, In size_t size, In size_t boneCount, In_reads(size) const FinalVertex* vertexBuffer, In_reads(size) const CE::List<bool>& used) { #ifdef _DEBUG size_t vertI = 0; #endif for (size_t a = 0; a < size; a++) { if (used[a] == true) { #ifdef _DEBUG CloakDebugLog("Write vertex " + std::to_string(a) + " at index " + std::to_string(vertI)); PrintVertex(vertexBuffer[a]); vertI++; #endif WriteSingleVertex(write, vertexBuffer[a], boneCount > 0); } } } inline void CLOAK_CALL WriteBoneUsage(In CloakEngine::Files::IWriter* write, In size_t begin, In size_t end, In size_t boneCount, In_reads(end) const FinalVertex* vertexBuffer) { if (boneCount > 0) { bool* usage = new bool[boneCount]; for (size_t a = 0; a < boneCount; a++) { usage[a] = false; } for (size_t a = begin; a < end; a++) { const FinalVertex& v = vertexBuffer[a]; for (size_t b = 0; b < 4; b++) { if (v.Bones[b].Weight > 0) { usage[v.Bones[b].BoneID] = true; } } } for (size_t a = 0; a < boneCount; a++) { write->WriteBits(1, usage[a] ? 
1 : 0); } delete[] usage; } } inline void CLOAK_CALL WriteIndexBuffer(In CloakEngine::Files::IWriter* write, In size_t ibsl, In bool shortStripCut, In size_t boneCount, In_reads(size) const FinalVertex* vertexBuffer, In_reads(size) const CE::List<size_t>& indexBuffer, In const CloakEngine::List<MaterialRange>& matRanges) { for (size_t a = 0, b = 0; a < indexBuffer.size() && b < matRanges.size(); b++) { const MaterialRange& mr = matRanges[b]; const size_t cS = min(mr.StripCount, indexBuffer.size() - a); const size_t cL = min(mr.ListCount, indexBuffer.size() - (a + cS)); CloakDebugLog("Write material " + std::to_string(mr.MaterialID) + " (" + std::to_string(cS) + " triangle strip vertices, " + std::to_string(cL) + " triangle list vertices)"); write->WriteBits(32, cS); write->WriteBits(32, cL); write->WriteBits(32, mr.MaterialID); WriteBoneUsage(write, a, a + cS + cL, boneCount, vertexBuffer); for (size_t c = 0; c < cS; a++, c++) { CLOAK_ASSUME(a < indexBuffer.size()); CloakDebugLog("Write Index[" + std::to_string(a) + "]: " + (indexBuffer[a] == STRIP_CUT_VALUE ? "Strip Cut" : std::to_string(indexBuffer[a]))); if (shortStripCut == true && indexBuffer[a] == STRIP_CUT_VALUE) { write->WriteBits(ibsl, STRIP_CUT_VALUE_16); } else { write->WriteBits(ibsl, indexBuffer[a]); } } for (size_t c = 0; c < cL; a++, c++) { CLOAK_ASSUME(a < indexBuffer.size()); CloakDebugLog("Write Index[" + std::to_string(a) + "]: " + (indexBuffer[a] == STRIP_CUT_VALUE ? "Strip Cut" : std::to_string(indexBuffer[a]))); write->WriteBits(ibsl, indexBuffer[a]); } } } inline void CLOAK_CALL WriteRawVertexBuffer(In CloakEngine::Files::IWriter* write, In size_t size, In size_t boneCount, In_reads(size) const FinalVertex* vertexBuffer) { size_t s = 0; size_t lMat = 0; for (size_t a = 0; a < size; a++, s++) { if (a == 0) { lMat = vertexBuffer[a].MaterialID; } else if (lMat != vertexBuffer[a].MaterialID) { CloakDebugLog("Write material " + std::to_string(lMat) + " (" + std::to_string(s) + " vertices)"); write->WriteBits(32, s); write->WriteBits(32, lMat); WriteBoneUsage(write, a - s, a, boneCount, vertexBuffer); for (size_t b = a - s; b < a; b++) { CloakDebugLog("Write vertex " + std::to_string(b)); PrintVertex(vertexBuffer[b]); const FinalVertex& v = vertexBuffer[b]; WriteSingleVertex(write, v, boneCount > 0); } lMat = vertexBuffer[a].MaterialID; s = 0; } } CloakDebugLog("Write material " + std::to_string(lMat) + " (" + std::to_string(s) + " vertices)"); write->WriteBits(32, s); write->WriteBits(32, lMat); WriteBoneUsage(write, size - s, size, boneCount, vertexBuffer); for (size_t b = size - s; b < size; b++) { CloakDebugLog("Write vertex " + std::to_string(b)); PrintVertex(vertexBuffer[b]); const FinalVertex& v = vertexBuffer[b]; WriteSingleVertex(write, v, boneCount > 0); } } CLOAKCOMPILER_API Vector::Vector() { X = Y = Z = W = 0; } CLOAKCOMPILER_API Vector::Vector(In const CloakEngine::Global::Math::Vector& v) { CloakEngine::Global::Math::Point p(v); X = p.X; Y = p.Y; Z = p.Z; W = p.W; } CLOAKCOMPILER_API Vector& Vector::operator=(In const Vector& p) { X = p.X; Y = p.Y; Z = p.Z; W = p.W; return *this; } CLOAKCOMPILER_API Vector& Vector::operator=(In const CloakEngine::Global::Math::Vector& v) { CloakEngine::Global::Math::Point p(v); X = p.X; Y = p.Y; Z = p.Z; W = p.W; return *this; } CLOAKCOMPILER_API Vector::operator CloakEngine::Global::Math::Vector() { return CloakEngine::Global::Math::Vector(X, Y, Z, W); } CLOAKCOMPILER_API Vector::operator const CloakEngine::Global::Math::Vector() const { return 
CloakEngine::Global::Math::Vector(X, Y, Z, W); } CLOAKCOMPILER_API void CLOAK_CALL EncodeToFile(In CloakEngine::Files::IWriter* output, In const EncodeDesc& encode, In const Desc& desc, In std::function<void(const RespondeInfo&)> response) { bool suc = true; if (CheckTemp(output, encode, desc, response)) { CloakEngine::Files::IVirtualWriteBuffer* wrBuf = CloakEngine::Files::CreateVirtualWriteBuffer(); CloakEngine::Files::IWriter* write = nullptr; CREATE_INTERFACE(CE_QUERY_ARGS(&write)); write->SetTarget(wrBuf); const size_t vbs = desc.Polygons.size() * 3; FinalVertex* vb = NewArray(FinalVertex, vbs); size_t matCount = 0; const size_t polS = desc.Polygons.size(); //Check polygon-vertex aviability for (size_t a = 0; a < desc.Polygons.size() && suc == true; a++) { const Polygon& p = desc.Polygons[a]; for (size_t b = 0; b < 3 && suc == true; b++) { if (p.Point[b] >= desc.Vertices.size()) { SendResponse(response, RespondeCode::ERROR_VERTEXREF, a, p.Point[b], "Polygon " + std::to_string(a) + " refers to vertex " + std::to_string(p.Point[b]) + " but there are only " + std::to_string(desc.Vertices.size()) + " vertices!"); suc = false; } } } if (suc == true) { //Write required minimum of bones in animation skeleton SendResponse(response, RespondeCode::CALC_BONES); size_t maxBone = 0; for (size_t a = 0; a < desc.Vertices.size(); a++) { const Vertex& v = desc.Vertices[a]; for (size_t b = 0; b < 4; b++) { if (v.Bones[b].Weight > 0) { maxBone = max(maxBone, v.Bones[b].BoneID); } } } const size_t boneCount = maxBone > 0 ? maxBone + 1 : 0; write->WriteBits(32, boneCount); //Sort polygons by material SendResponse(response, RespondeCode::CALC_SORT); size_t* const sortHeap = NewArray(size_t, polS * 2); size_t* sorted[2] = { &sortHeap[0],&sortHeap[polS] }; size_t count[16]; for (size_t a = 0; a < polS; a++) { sorted[0][a] = a; } for (uint32_t a = 0xf, b = 0; b < 8; a <<= 4, b++) { for (size_t c = 0; c < 16; c++) { count[c] = 0; } for (size_t c = 0; c < polS; c++) { count[(desc.Polygons[sorted[0][c]].Material & a) >> (4 * b)]++; } for (size_t c = 1; c < 16; c++) { count[c] += count[c - 1]; } for (size_t c = polS; c > 0; c--) { const size_t p = (desc.Polygons[sorted[0][c - 1]].Material & a) >> (4 * b); sorted[1][count[p] - 1] = sorted[0][c - 1]; count[p]--; } size_t* t = sorted[0]; sorted[0] = sorted[1]; sorted[1] = t; } //Remap material ids to zero-based array indices SendResponse(response, RespondeCode::CALC_MATERIALS); uint32_t lastMat; for (size_t a = 0, b = 0; a < polS; a++) { const Polygon& p = desc.Polygons[sorted[0][a]]; if (a == 0) { lastMat = p.Material; } else if (lastMat != p.Material) { lastMat = p.Material; b++; matCount = max(matCount, b + 1); } } matCount = max(1, matCount); //Calculate binormals/tangents & copy vertex data SendResponse(response, RespondeCode::CALC_BINORMALS); for (size_t a = 0; a < polS; a++) { const Polygon& p = desc.Polygons[sorted[0][a]]; CalculatePolygonVertices(desc.Vertices[p.Point[0]], desc.Vertices[p.Point[1]], desc.Vertices[p.Point[2]], p.AutoGenerateNormal, p.Material, &vb[3 * a]); } if (suc == true) { if (boneCount == 0) { //Calculate bounding volume for full mesh if (desc.Bounding != BoundingVolume::None) { SendResponse(response, RespondeCode::CALC_BOUNDING); } const size_t posSize = desc.Vertices.size(); uint8_t* poslHeap = new uint8_t[(sizeof(CloakEngine::Global::Math::Vector)*posSize) + 15]; CloakEngine::Global::Math::Vector* posl = reinterpret_cast<CloakEngine::Global::Math::Vector*>((reinterpret_cast<uintptr_t>(poslHeap) + 15) & ~static_cast<uintptr_t>(0xF)); 
for (size_t a = 0; a < posSize; a++) { posl[a] = desc.Vertices[a].Position; } switch (desc.Bounding) { case BoundingVolume::None: { write->WriteBits(2, 0); break; } case BoundingVolume::OOBB: { Engine::BoundingVolume::BoundingOOBB bound = Engine::BoundingVolume::CalculateBoundingOOBB(posl, posSize); if (bound.Enabled) { SendResponse(response, RespondeCode::WRITE_BOUNDING); write->WriteBits(2, 1); WriteVector(write, bound.Center); for (size_t a = 0; a < 3; a++) { WriteVector(write, bound.Axis[a]); } for (size_t a = 0; a < 3; a++) { write->WriteDouble(32, bound.HalfSize[a]); } CloakEngine::Global::Math::Point cen(bound.Center); CloakDebugLog("Bounding Box center: " + std::to_string(cen.X) + " | " + std::to_string(cen.Y) + " | " + std::to_string(cen.Z)); for (size_t a = 0; a < 3; a++) { CloakEngine::Global::Math::Point p(bound.Axis[a]); CloakDebugLog("Bounding Box axis[" + std::to_string(a) + "]: " + std::to_string(p.X) + " | " + std::to_string(p.Y) + " | " + std::to_string(p.Z)); } for (size_t a = 0; a < 3; a++) { CloakDebugLog("Bounding Box axis[" + std::to_string(a) + "] scale: " + std::to_string(bound.HalfSize[a])); } break; } else { SendResponse(response, RespondeCode::ERROR_BOUNDING, "Failed to calculate OOBB, switch to bounding-Volume: sphere"); } //Fall through } case BoundingVolume::Sphere: { Engine::BoundingVolume::BoundingSphere bound = Engine::BoundingVolume::CalculateBoundingSphere(posl, posSize); if (bound.Enabled) { SendResponse(response, RespondeCode::WRITE_BOUNDING); write->WriteBits(2, 2); WriteVector(write, bound.Center); write->WriteDouble(32, bound.Radius); CloakEngine::Global::Math::Point cen(bound.Center); CloakDebugLog("Bounding Sphere center: " + std::to_string(cen.X) + " | " + std::to_string(cen.Y) + " | " + std::to_string(cen.Z)); CloakDebugLog("Bounding Sphere radius: " + std::to_string(bound.Radius)); break; } else { SendResponse(response, RespondeCode::ERROR_BOUNDING, "Failed to calculate bounding sphere!"); suc = false; } break; } default: suc = false; SendResponse(response, RespondeCode::ERROR_BOUNDING, "Unknown bounding volume type"); break; } delete[] poslHeap; } else { std::vector<std::vector<size_t>> boneToVertex(boneCount); for (size_t a = 0; a < vbs; a++) { const FinalVertex& v = vb[a]; for (size_t b = 0; b < 4; b++) { if (v.Bones[b].Weight > 0) { boneToVertex[v.Bones[b].BoneID].push_back(a); } } } for (size_t a = 0; a < boneCount; a++) { if (boneToVertex[a].size() == 0) { continue; } //TODO: per-Bone bounding volume //TODO: Find solution to interpolated vertices (vertices between bones -> weight of one bone < 1) SendResponse(response, RespondeCode::ERROR_BOUNDING, "Bounding volume calculation for animated objects is not yet implemented!"); suc = false; } } } if (suc == true) { bool useIndexBuffer = false; if (desc.UseIndexBuffer) { //Calculate index buffer SendResponse(response, RespondeCode::CALC_INDICES); CE::List<bool> vbUsed(vbs); CE::List<size_t> ib; CloakEngine::List<MaterialRange> matRanges; const IndexBufferType stripCut = CalculateIndexBuffer(vb, vbs, &ib, &vbUsed, &matRanges); size_t finVbs = 0; for (size_t a = 0; a < vbs; a++) { if (vbUsed[a] == true) { finVbs++; } } const size_t ibsL = stripCut == IndexBufferType::IB32 ? 
4 : 2; CloakDebugLog("IB Comparison: IB = " + std::to_string((finVbs*VertexSize) + (ib.size()*ibsL)) + " (" + std::to_string(finVbs) + " Vertices) Raw = " + std::to_string(vbs*VertexSize) + " (" + std::to_string(vbs) + " Vertices)"); //Test whether we use an index buffer at all: if ((vbs - finVbs)*VertexSize > (ib.size()*ibsL)) { useIndexBuffer = true; //Write all vertices + sorted per material indices SendResponse(response, RespondeCode::WRITE_VERTICES); write->WriteBits(16, matRanges.size() - 1); write->WriteBits(32, finVbs); write->WriteBits(1, 1); //Use index buffer write->WriteBits(1, ibsL == 2 ? 0 : 1); //16 or 32 bit index buffer size if (ibsL == 2) { write->WriteBits(1, stripCut == IndexBufferType::IB16ShortCut ? 1 : 0); } //short strip cuts? write->WriteBits(32, ib.size()); CloakDebugLog("Write Index + Vertex Buffer"); WriteIndexVertexBuffer(write, vbs, boneCount, vb, vbUsed); WriteIndexBuffer(write, ibsL << 3, stripCut == IndexBufferType::IB16ShortCut, boneCount, vb, ib, matRanges); } } if (useIndexBuffer == false) { //Write vertices, sorted and seperated by materials SendResponse(response, RespondeCode::WRITE_VERTICES); write->WriteBits(16, matCount - 1); write->WriteBits(32, vbs); write->WriteBits(1, 0); CloakDebugLog("Write raw vertex buffer"); WriteRawVertexBuffer(write, vbs, boneCount, vb); } } DeleteArray(sortHeap); } DeleteArray(vb); const uint32_t bys = static_cast<uint32_t>(write->GetPosition()); const uint8_t bis = static_cast<uint8_t>(write->GetBitPosition()); write->Save(); if (suc) { output->WriteBuffer(wrBuf, bys, bis); WriteTemp(wrBuf, bys, bis, encode, desc, response); } SAVE_RELEASE(write); SAVE_RELEASE(wrBuf); } SendResponse(response, suc ? RespondeCode::FINISH_SUCCESS : RespondeCode::FINISH_ERROR); } } } }
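// The material sort in EncodeToFile above is a least-significant-digit radix sort over the
// 32-bit material ID, handling 4 bits per pass (8 passes, 16 counting buckets). A minimal
// standalone sketch of that pass structure, using plain std::vector buffers instead of the
// engine's sortHeap allocation (an illustration, not the engine code itself):
#include <cstddef>
#include <cstdint>
#include <vector>

// Returns the permutation that orders `keys` ascending, built from 8 stable
// counting-sort passes over 4-bit digits.
std::vector<std::size_t> RadixSortIndices(const std::vector<std::uint32_t>& keys)
{
    const std::size_t n = keys.size();
    std::vector<std::size_t> cur(n), next(n);
    for (std::size_t i = 0; i < n; i++) { cur[i] = i; }
    for (std::uint32_t shift = 0; shift < 32; shift += 4)
    {
        std::size_t count[16] = {};
        for (std::size_t i = 0; i < n; i++) { count[(keys[cur[i]] >> shift) & 0xF]++; }
        for (std::size_t d = 1; d < 16; d++) { count[d] += count[d - 1]; }
        // Walk backwards so equal digits keep their relative order (stability).
        for (std::size_t i = n; i > 0; i--)
        {
            const std::size_t digit = (keys[cur[i - 1]] >> shift) & 0xF;
            next[--count[digit]] = cur[i - 1];
        }
        cur.swap(next);
    }
    return cur;
}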
#include <iostream> #include "mdc2250/mdc2250.h" #include "mdc2250/decode.h" void telemetry_callback(const std::string &telemetry) { std::cout << "Got telemetry: " << telemetry << std::endl; } int run() { mdc2250::MDC2250 my_mdc2250(true); my_mdc2250.connect("/dev/tty.USA49Wfd122P1.1"); // Disable echo my_mdc2250.setEcho(true); // Disable watchdog my_mdc2250.setWatchdog(10000); // Setup telemetry size_t period = 25; my_mdc2250.setTelemetry("C,V,C,A", period, telemetry_callback); my_mdc2250.setTelemetry("C,V,C,A", period, telemetry_callback); // Move both motors for 4 seconds my_mdc2250.commandMotor(1, 1000); my_mdc2250.commandMotor(2, -1000); boost::this_thread::sleep(boost::posix_time::milliseconds(4000)); // Stop both motors for 1 second my_mdc2250.commandMotor(1); my_mdc2250.commandMotor(2, 0); boost::this_thread::sleep(boost::posix_time::milliseconds(1000)); // Move both motors for 1 second, but estop (they shouldn't move) my_mdc2250.commandMotors(-1000, 1000); std::cout << "E-stopping!" << std::endl; my_mdc2250.estop(); boost::this_thread::sleep(boost::posix_time::milliseconds(1000)); // Stop both motors for 1 second, and clear the estop my_mdc2250.commandMotors(); my_mdc2250.commandMotors(0); // Same thing my_mdc2250.clearEstop(); // Have to redo telemetry after an estop my_mdc2250.setTelemetry("C,V,C,A", period, telemetry_callback); boost::this_thread::sleep(boost::posix_time::milliseconds(1000)); // Move both motors for 4 seconds my_mdc2250.commandMotors(-1000, 1000); boost::this_thread::sleep(boost::posix_time::milliseconds(4000)); // Stop both motors my_mdc2250.commandMotors(); return 0; } int main(void) { try { return run(); } catch (std::exception &e) { std::cerr << "Unhandled Exception: " << e.what() << std::endl; return 1; } }
#include <algorithm> #include <iostream> #include <utility> using namespace std; int main(){ int n; while (true) { cin>>n; cin.ignore(255,'\n'); if(n == 0){ break; } string s; pair<string,int> arr[n]; string raw[n]; for (int i = 0; i < n; i++) { getline(cin,s); raw[i] = s; if(s.length() == 9){ s = "0"+s; } if(s[0] == '1' && s[1] == '2'){ s[0] = '0'; s[1] = '0'; } string dpn = s.substr(0,5); string blkg = s.substr(6,1); s = blkg+dpn; arr[i] = make_pair(s,i); } sort(arr,arr+n); for (int i = 0; i < n; i++) { cout<<raw[arr[i].second]<<"\n"; } cout<<'\n'; } return 0; }
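// The solution above relies on variable-length arrays (pair<string,int> arr[n]), which are a
// compiler extension rather than standard C++. The same key construction (pad 9-character
// lines to 10, map a leading "12" to "00", then sort by character 6 followed by the first
// five characters) can be written with std::vector and std::stable_sort; a sketch:
#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
    int n;
    while (std::cin >> n && n != 0) {
        std::cin.ignore(255, '\n');
        std::vector<std::pair<std::string, std::string>> rows; // (sort key, raw line)
        rows.reserve(n);
        for (int i = 0; i < n; i++) {
            std::string raw;
            std::getline(std::cin, raw);
            std::string s = raw;
            if (s.length() == 9) { s = "0" + s; }                        // pad to fixed width
            if (s[0] == '1' && s[1] == '2') { s[0] = '0'; s[1] = '0'; }  // hour 12 -> 00
            rows.emplace_back(s.substr(6, 1) + s.substr(0, 5), raw);
        }
        std::stable_sort(rows.begin(), rows.end(),
                         [](const auto &a, const auto &b) { return a.first < b.first; });
        for (const auto &r : rows) { std::cout << r.second << "\n"; }
        std::cout << "\n";
    }
    return 0;
}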
// Copyright (c) 2017 Denis Blank // // SPDX-License-Identifier: BSL-1.0 // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #pragma once #include <hpx/functional/deferred_call.hpp> #include <hpx/futures/traits/acquire_future.hpp> #include <hpx/futures/traits/acquire_shared_state.hpp> #include <hpx/futures/traits/detail/future_traits.hpp> #include <hpx/futures/traits/is_future.hpp> #include <hpx/util/detail/reserve.hpp> #include <algorithm> #include <cstddef> #include <iterator> #include <type_traits> #include <utility> #include <vector> namespace hpx { namespace lcos { namespace detail { // Returns true when the given future is ready, // the future is deferred executed if possible first. template <typename T, typename std::enable_if<traits::is_future< typename std::decay<T>::type>::value>::type* = nullptr> bool async_visit_future(T&& current) { // Check for state right away as the element might not be able to // produce a shared state (even if it's ready). if (current.is_ready()) { return true; } auto const& state = traits::detail::get_shared_state(std::forward<T>(current)); if (state.get() == nullptr) { return true; } // Execute_deferred might make the future ready state->execute_deferred(); // Detach the context if the future isn't ready return state->is_ready(); } // Attach the continuation next to the given future template <typename T, typename N, typename std::enable_if<traits::is_future< typename std::decay<T>::type>::value>::type* = nullptr> void async_detach_future(T&& current, N&& next) { auto const& state = traits::detail::get_shared_state(std::forward<T>(current)); // Attach a continuation to this future which will // re-evaluate it and continue to the next argument (if any). state->set_on_completed(util::deferred_call(std::forward<N>(next))); } // Acquire a future range from the given begin and end iterator template <typename Iterator, typename Container = std::vector<typename future_iterator_traits<Iterator>::type>> Container acquire_future_iterators(Iterator begin, Iterator end) { Container lazy_values; auto difference = std::distance(begin, end); if (difference > 0) traits::detail::reserve_if_reservable( lazy_values, static_cast<std::size_t>(difference)); std::transform(begin, end, std::back_inserter(lazy_values), traits::acquire_future_disp()); return lazy_values; // Should be optimized by RVO } // Acquire a future range from the given // begin iterator and count template <typename Iterator, typename Container = std::vector<typename future_iterator_traits<Iterator>::type>> Container acquire_future_n(Iterator begin, std::size_t count) { Container values; traits::detail::reserve_if_reservable(values, count); traits::acquire_future_disp func; for (std::size_t i = 0; i != count; ++i) values.push_back(func(*begin++)); return values; // Should be optimized by RVO } }}} // namespace hpx::lcos::detail
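// acquire_future_iterators and acquire_future_n follow a common pattern: reserve the output
// container when the element count is known, then fill it through std::back_inserter so the
// result benefits from move semantics / RVO. The pattern in isolation, with plain
// standard-library types and no HPX dependencies (an illustration only):
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <string>
#include <vector>

template <typename Iterator>
std::vector<std::string> stringify_range(Iterator begin, Iterator end)
{
    std::vector<std::string> out;
    auto const difference = std::distance(begin, end);
    if (difference > 0)
        out.reserve(static_cast<std::size_t>(difference));
    std::transform(begin, end, std::back_inserter(out),
        [](auto const& value) { return std::to_string(value); });
    return out;    // moved / elided, as in acquire_future_iterators
}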
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/browser/devtools/devtools_manager.h" #include "base/bind.h" #include "base/message_loop/message_loop.h" #include "content/browser/devtools/devtools_agent_host_impl.h" #include "content/browser/devtools/devtools_netlog_observer.h" #include "content/public/browser/browser_thread.h" #include "content/public/browser/content_browser_client.h" namespace content { // static DevToolsManager* DevToolsManager::GetInstance() { return base::Singleton<DevToolsManager>::get(); } DevToolsManager::DevToolsManager() : delegate_(GetContentClient()->browser()->GetDevToolsManagerDelegate()), attached_hosts_count_(0) { } DevToolsManager::~DevToolsManager() { DCHECK(!attached_hosts_count_); } void DevToolsManager::AgentHostStateChanged( DevToolsAgentHostImpl* agent_host, bool attached) { if (attached) { if (!attached_hosts_count_) { BrowserThread::PostTask( BrowserThread::IO, FROM_HERE, base::Bind(&DevToolsNetLogObserver::Attach)); } ++attached_hosts_count_; } else { --attached_hosts_count_; if (!attached_hosts_count_) { BrowserThread::PostTask( BrowserThread::IO, FROM_HERE, base::Bind(&DevToolsNetLogObserver::Detach)); } } } } // namespace content
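// AgentHostStateChanged only attaches the net-log observer on the 0 -> 1 transition of the
// attached-host count and detaches it on the 1 -> 0 transition. The counting pattern in
// isolation, with stand-in Attach/Detach actions instead of the BrowserThread posts:
#include <cstdio>

class AttachGate {
 public:
  void OnHostStateChanged(bool attached) {
    if (attached) {
      if (attached_hosts_count_ == 0)
        std::puts("Attach observer");   // first host attached
      ++attached_hosts_count_;
    } else {
      --attached_hosts_count_;
      if (attached_hosts_count_ == 0)
        std::puts("Detach observer");   // last host detached
    }
  }

 private:
  int attached_hosts_count_ = 0;
};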
/* * Copyright (c) 2013-2014, Julien Bernard * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <tmx/TileSet.h> namespace tmx { const Tile *TileSet::getTile(unsigned id) const noexcept { for (auto tile : *this) { if (tile->getId() == id) { return tile; } } return nullptr; } Rect TileSet::getCoords(unsigned id, Size size) const noexcept { unsigned width = (size.width - 2 * m_margin + m_spacing) / (m_tilewidth + m_spacing); // number of tiles unsigned height = (size.height - 2 * m_margin + m_spacing) / (m_tileheight + m_spacing); // number of tiles unsigned tu = id % width; unsigned tv = id / width; assert(tv < height); unsigned du = m_margin + tu * m_spacing + m_x; unsigned dv = m_margin + tv * m_spacing + m_y; assert((tu + 1) * m_tilewidth + du <= size.width); assert((tv + 1) * m_tileheight + dv <= size.height); return { tu * m_tilewidth + du, tv * m_tileheight + dv, m_tilewidth, m_tileheight }; } }
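// getCoords() maps a flat tile id to pixel coordinates using the tileset margin and spacing.
// A quick standalone check of the same arithmetic, assuming a hypothetical 120-pixel-wide
// sheet of 16x16 tiles with margin = 1 and spacing = 1 (so 7 tiles per row):
#include <cassert>

int main() {
    const unsigned tilewidth = 16, tileheight = 16, margin = 1, spacing = 1;
    const unsigned sheetWidth = 120, x = 0, y = 0;
    const unsigned id = 10;

    const unsigned width = (sheetWidth - 2 * margin + spacing) / (tilewidth + spacing); // 7 tiles per row
    const unsigned tu = id % width;                   // column 3
    const unsigned tv = id / width;                   // row 1
    const unsigned du = margin + tu * spacing + x;    // 4
    const unsigned dv = margin + tv * spacing + y;    // 2

    assert(tu * tilewidth + du == 52);   // pixel x of tile 10
    assert(tv * tileheight + dv == 18);  // pixel y of tile 10
    return 0;
}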
/******************************************************* * Copyright (C) 2022, Chen Jianqu, Shanghai University * * This file is part of is_slam. * * Licensed under the MIT License; * you may not use this file except in compliance with the License. *******************************************************/ #include <stdio.h> #include <iostream> #include <algorithm> #include <fstream> #include <chrono> #include <ros/ros.h> #include <image_transport/image_transport.h> #include <cv_bridge/cv_bridge.h> #include <message_filters/subscriber.h> #include <message_filters/time_synchronizer.h> #include <message_filters/sync_policies/approximate_time.h> #include <tf/transform_broadcaster.h> #include <tf_conversions/tf_eigen.h> #include <std_msgs/Time.h> #include <opencv2/core/core.hpp> #include <opencv2/core/eigen.hpp> #include <Eigen/Core> #include <Eigen/Geometry> #include <Eigen/Dense> #include"System.h" using namespace std; class ImageGrabber { public: ImageGrabber(ORB_SLAM2::System* pSLAM); void GrabRGBD(const sensor_msgs::ImageConstPtr& msgRGB,const sensor_msgs::ImageConstPtr& msgD); void SetPublisher(image_transport::Publisher* pub_rgb_,image_transport::Publisher* pub_depth_); protected: ORB_SLAM2::System* mpSLAM; tf::TransformBroadcaster* br; image_transport::Publisher* pub_rgb; image_transport::Publisher* pub_depth; unsigned long counter; void MatToTransform(cv::Mat &Tcw,tf::Transform &m); }; int main(int argc, char** argv) { ros::init(argc, argv, "orb_slam_node");//初始化节点 ros::start();//启动节点 if(argc != 3) { cout<<"需要传入参数:视觉词典路径 配置文件路径" << endl; ros::shutdown();//关闭节点 return 1; } //初始化ORB-SLAM2 ORB_SLAM2::System SLAM(argv[1],argv[2],ORB_SLAM2::System::RGBD,true); ImageGrabber igb(&SLAM); ros::NodeHandle nh; //接受RGB图和深度图 message_filters::Subscriber<sensor_msgs::Image> rgb_sub(nh, "/camera/rgb/image_raw", 1); message_filters::Subscriber<sensor_msgs::Image> depth_sub(nh, "/camera/depth_registered/image_raw", 1); typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image> sync_pol; message_filters::Synchronizer<sync_pol> sync(sync_pol(10), rgb_sub,depth_sub); sync.registerCallback(boost::bind(&ImageGrabber::GrabRGBD,&igb,_1,_2)); image_transport::ImageTransport it(nh); image_transport::Publisher pub_rgb = it.advertise("/orbslam2/rgb", 1); image_transport::Publisher pub_depth = it.advertise("/orbslam2/depth", 1); igb.SetPublisher(&pub_rgb,&pub_depth); ros::spin(); SLAM.Shutdown(); SLAM.SaveKeyFrameTrajectoryTUM("KeyFrameTrajectory.txt"); ros::shutdown(); return 0; } ImageGrabber::ImageGrabber(ORB_SLAM2::System* pSLAM): mpSLAM(pSLAM), counter(0) { br=new tf::TransformBroadcaster(); } void ImageGrabber::SetPublisher(image_transport::Publisher* pub_rgb_,image_transport::Publisher* pub_depth_) { pub_rgb=pub_rgb_; pub_depth=pub_depth_; } void ImageGrabber::MatToTransform(cv::Mat &Tcw,tf::Transform &m) { //设置平移 m.setOrigin( tf::Vector3( Tcw.at<float>(0,3), Tcw.at<float>(1,3), Tcw.at<float>(2,3) ) ); //设置旋转 tf::Matrix3x3 Rcw; Rcw.setValue( //Mat转换为Matrix Tcw.at<float>(0,0),Tcw.at<float>(0,1),Tcw.at<float>(0,2), Tcw.at<float>(1,0),Tcw.at<float>(1,1),Tcw.at<float>(1,2), Tcw.at<float>(2,0),Tcw.at<float>(2,1),Tcw.at<float>(2,2) ); tf::Quaternion q; Rcw.getRotation(q); m.setRotation(q); } void ImageGrabber::GrabRGBD(const sensor_msgs::ImageConstPtr& msgRGB,const sensor_msgs::ImageConstPtr& msgD) { ros::Time timestamp= msgRGB->header.stamp; // Copy the ros image message to cv::Mat. 
cv_bridge::CvImageConstPtr cv_ptrRGB;
    try{
        cv_ptrRGB = cv_bridge::toCvShare(msgRGB);
    }
    catch (cv_bridge::Exception& e){
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }
    cv_bridge::CvImageConstPtr cv_ptrD;
    try{
        cv_ptrD = cv_bridge::toCvShare(msgD);
    }
    catch (cv_bridge::Exception& e){
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    //Call ORB-SLAM2
    cv::Mat Tcw=mpSLAM->TrackRGBD(cv_ptrRGB->image,cv_ptrD->image,cv_ptrRGB->header.stamp.toSec());

    //If this frame is not a keyframe, return
    if(!mpSLAM->GetIsKeyFrame())
        return;

    tf::Transform m;
    MatToTransform(Tcw,m);

    //Publish the RGB and depth images
    pub_rgb->publish(msgRGB);
    pub_depth->publish(msgD);

    //cout<<"RGB Time:"<<msgRGB->header.stamp<<endl;
    //cout<<"Depth Time:"<<msgD->header.stamp<<endl;
    //cout<<Tcw<<endl<<endl;

    //Publish the camera pose transform
    br->sendTransform(tf::StampedTransform(m, timestamp, "world", "orb_slam2"));

    counter++;
    cout<<"Published keyframe number: "<<counter<<endl;
}
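// TrackRGBD returns Tcw, the transform that maps world coordinates into the camera frame.
// If the camera pose expressed in the world frame (Twc) is needed instead, it is obtained as
// Rwc = Rcw^T and twc = -Rwc * tcw. A minimal sketch of that inversion on a 4x4 CV_32F pose
// matrix (an illustration assuming the same layout as above, not code from this node):
#include <opencv2/core/core.hpp>

cv::Mat InvertTcw(const cv::Mat &Tcw)
{
    cv::Mat Rcw = Tcw.rowRange(0, 3).colRange(0, 3);
    cv::Mat tcw = Tcw.rowRange(0, 3).col(3);
    cv::Mat Rwc = Rcw.t();               // inverse of a rotation is its transpose
    cv::Mat twc = -Rwc * tcw;            // camera position in the world frame
    cv::Mat Twc = cv::Mat::eye(4, 4, CV_32F);
    Rwc.copyTo(Twc.rowRange(0, 3).colRange(0, 3));
    twc.copyTo(Twc.rowRange(0, 3).col(3));
    return Twc;
}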
// Copyright (c) 2009-2022 The Regents of the University of Michigan. // Part of HOOMD-blue, released under the BSD 3-Clause License. #include "UpdaterBoxMC.h" #include "hoomd/RNGIdentifiers.h" #include <numeric> #include <vector> /*! \file UpdaterBoxMC.cc \brief Definition of UpdaterBoxMC */ namespace hoomd { namespace hpmc { UpdaterBoxMC::UpdaterBoxMC(std::shared_ptr<SystemDefinition> sysdef, std::shared_ptr<Trigger> trigger, std::shared_ptr<IntegratorHPMC> mc, std::shared_ptr<Variant> P) : Updater(sysdef, trigger), m_mc(mc), m_beta_P(P), m_volume_delta(0.0), m_volume_weight(0.0), m_ln_volume_delta(0.0), m_ln_volume_weight(0.0), m_volume_mode("standard"), m_volume_A1(0.0), m_volume_A2(0.0), m_length_delta {0.0, 0.0, 0.0}, m_length_weight(0.0), m_shear_delta {0.0, 0.0, 0.0}, m_shear_weight(0.0), m_shear_reduce(0.0), m_aspect_delta(0.0), m_aspect_weight(0.0) { m_exec_conf->msg->notice(5) << "Constructing UpdaterBoxMC" << std::endl; // initialize stats resetStats(); // allocate memory for m_pos_backup unsigned int MaxN = m_pdata->getMaxN(); GPUArray<Scalar4>(MaxN, m_exec_conf).swap(m_pos_backup); // Connect to the MaxParticleNumberChange signal m_pdata->getMaxParticleNumberChangeSignal() .connect<UpdaterBoxMC, &UpdaterBoxMC::slotMaxNChange>(this); updateChangedWeights(); } UpdaterBoxMC::~UpdaterBoxMC() { m_exec_conf->msg->notice(5) << "Destroying UpdaterBoxMC" << std::endl; m_pdata->getMaxParticleNumberChangeSignal() .disconnect<UpdaterBoxMC, &UpdaterBoxMC::slotMaxNChange>(this); } /*! Determine if box exceeds a shearing threshold and needs to be lattice reduced. The maximum amount of shear to allow is somewhat arbitrary, but must be > 0.5. Small values mean the box is reconstructed more often, making it more confusing to track particle diffusion. Larger shear values mean parallel box planes can get closer together, reducing the number of cells possible in the cell list or increasing the number of images that must be checked for small boxes. Box is oversheared in direction \f$ \hat{e}_i \f$ if \f$ \bar{e}_j \cdot \hat{e}_i >= reduce * \left| \bar{e}_i \right| \f$ or \f$ \bar{e}_j \cdot \bar{e}_i >= reduce * \left| \bar{e}_i \right| ^2 \f$ \f$ = reduce * \bar{e}_i \cdot \bar{e}_i \f$ \returns bool true if box is overly sheared */ inline bool UpdaterBoxMC::is_oversheared() { if (m_shear_reduce <= 0.5) return false; const BoxDim curBox = m_pdata->getGlobalBox(); const Scalar3 x = curBox.getLatticeVector(0); const Scalar3 y = curBox.getLatticeVector(1); const Scalar3 z = curBox.getLatticeVector(2); const Scalar y_x = y.x; // x component of y vector const Scalar max_y_x = x.x * m_shear_reduce; const Scalar z_x = z.x; // x component of z vector const Scalar max_z_x = x.x * m_shear_reduce; // z_y \left| y \right| const Scalar z_yy = dot(z, y); // MAX_SHEAR * left| y \right| ^2 const Scalar max_z_y_2 = dot(y, y) * m_shear_reduce; if (fabs(y_x) > max_y_x || fabs(z_x) > max_z_x || fabs(z_yy) > max_z_y_2) return true; else return false; } /*! Perform lattice reduction. Remove excessive box shearing by finding a more cubic degenerate lattice when shearing is more half a lattice vector from cubic. The lattice reduction could make data needlessly complicated and may break detailed balance, use judiciously. 
\returns true if overshear was removed */ inline bool UpdaterBoxMC::remove_overshear() { bool overshear = false; // initialize return value const Scalar MAX_SHEAR = Scalar(0.5f); // lattice can be reduced if shearing exceeds this value BoxDim newBox = m_pdata->getGlobalBox(); Scalar3 x = newBox.getLatticeVector(0); Scalar3 y = newBox.getLatticeVector(1); Scalar3 z = newBox.getLatticeVector(2); Scalar xy = newBox.getTiltFactorXY(); Scalar xz = newBox.getTiltFactorXZ(); Scalar yz = newBox.getTiltFactorYZ(); // Remove one lattice vector of shear if necessary. Only apply once so image doesn't change more // than one. const Scalar y_x = y.x; // x component of y vector const Scalar max_y_x = x.x * MAX_SHEAR; if (y_x > max_y_x) { // Ly * xy_new = Ly * xy_old + sign*Lx --> xy_new = xy_old + sign*Lx/Ly xy -= x.x / y.y; y.x = xy * y.y; overshear = true; } if (y_x < -max_y_x) { xy += x.x / y.y; y.x = xy * y.y; overshear = true; } const Scalar z_x = z.x; // x component of z vector const Scalar max_z_x = x.x * MAX_SHEAR; if (z_x > max_z_x) { // Lz * xz_new = Lz * xz_old + sign*Lx --> xz_new = xz_old + sign*Lx/Lz xz -= x.x / z.z; z.x = xz * z.z; overshear = true; } if (z_x < -max_z_x) { // Lz * xz_new = Lz * xz_old + sign*Lx --> xz_new = xz_old + sign*Lx/Lz xz += x.x / z.z; z.x = xz * z.z; overshear = true; } // z_y \left| y \right| const Scalar z_yy = dot(z, y); // MAX_SHEAR * left| y \right| ^2 const Scalar max_z_y_2 = dot(y, y) * MAX_SHEAR; if (z_yy > max_z_y_2) { // Lz * xz_new = Lz * xz_old + sign * y.x --> xz_new = = xz_old + sign * y.x / Lz xz -= y.x / z.z; // Lz * yz_new = Lz * yz_old + sign * y.y --> yz_new = yz_old + sign y.y /Lz yz -= y.y / z.z; overshear = true; } if (z_yy < -max_z_y_2) { // Lz * xz_new = Lz * xz_old + sign * y.x --> xz_new = = xz_old + sign * y.x / Lz xz += y.x / z.z; // Lz * yz_new = Lz * yz_old + sign * y.y --> yz_new = yz_old + sign y.y /Lz yz += y.y / z.z; overshear = true; } if (overshear) { newBox.setTiltFactors(xy, xz, yz); m_pdata->setGlobalBox(newBox); // Use lexical scope to make sure ArrayHandles get cleaned up { // Get particle positions and images ArrayHandle<Scalar4> h_pos(m_pdata->getPositions(), access_location::host, access_mode::readwrite); ArrayHandle<int3> h_image(m_pdata->getImages(), access_location::host, access_mode::readwrite); unsigned int N = m_pdata->getN(); // move the particles to be inside the new box for (unsigned int i = 0; i < N; i++) { Scalar4 pos = h_pos.data[i]; int3 image = h_image.data[i]; newBox.wrap(pos, image); h_pos.data[i] = pos; h_image.data[i] = image; } } // end lexical scope // To get particles into the right domain in MPI, we will store and then reload a snapshot SnapshotParticleData<Scalar> snap; m_pdata->takeSnapshot(snap); // loading from snapshot will load particles into the proper MPI domain m_pdata->initializeFromSnapshot(snap); // we've moved the particles, communicate those changes m_mc->communicate(true); } return overshear; } //! Try new box with particle positions scaled from previous box. /*! If new box generates overlaps, restore original box and particle positions. \param Lx new Lx value \param Ly new Ly value \param Lz new Lz value \param xy new xy value \param xz new xz value \param yz new yz value \param timestep current simulation step \returns bool True if box resize was accepted If box is excessively sheared, subtract lattice vectors to make box more cubic. 
*/ inline bool UpdaterBoxMC::box_resize_trial(Scalar Lx, Scalar Ly, Scalar Lz, Scalar xy, Scalar xz, Scalar yz, uint64_t timestep, Scalar deltaE, hoomd::RandomGenerator& rng) { // Make a backup copy of position data unsigned int N_backup = m_pdata->getN(); { ArrayHandle<Scalar4> h_pos(m_pdata->getPositions(), access_location::host, access_mode::read); ArrayHandle<Scalar4> h_pos_backup(m_pos_backup, access_location::host, access_mode::overwrite); memcpy(h_pos_backup.data, h_pos.data, sizeof(Scalar4) * N_backup); } BoxDim curBox = m_pdata->getGlobalBox(); if (m_mc->getPatchEnergy()) { // energy of old configuration deltaE -= m_mc->computePatchEnergy(timestep); } // Attempt box resize and check for overlaps BoxDim newBox = m_pdata->getGlobalBox(); newBox.setL(make_scalar3(Lx, Ly, Lz)); newBox.setTiltFactors(xy, xz, yz); bool allowed = m_mc->attemptBoxResize(timestep, newBox); if (allowed && m_mc->getPatchEnergy()) { deltaE += m_mc->computePatchEnergy(timestep); } if (allowed && m_mc->getExternalField()) { ArrayHandle<Scalar4> h_pos_backup(m_pos_backup, access_location::host, access_mode::readwrite); Scalar ext_energy = m_mc->getExternalField()->calculateDeltaE(timestep, h_pos_backup.data, NULL, curBox); // The exponential is a very fast function and we may do better to add pseudo-Hamiltonians // and exponentiate only once... deltaE += ext_energy; } double p = hoomd::detail::generate_canonical<double>(rng); if (allowed && p < fast::exp(-deltaE)) { return true; } else { // Restore original box and particle positions { ArrayHandle<Scalar4> h_pos(m_pdata->getPositions(), access_location::host, access_mode::readwrite); ArrayHandle<Scalar4> h_pos_backup(m_pos_backup, access_location::host, access_mode::read); unsigned int N = m_pdata->getN(); if (N != N_backup) { this->m_exec_conf->msg->error() << "update.boxmc" << ": Number of particles mismatch when rejecting box resize" << std::endl; throw std::runtime_error("Error resizing box"); // note, this error should never appear (because particles are not migrated after a // box resize), but is left here as a sanity check } memcpy(h_pos.data, h_pos_backup.data, sizeof(Scalar4) * N); } m_pdata->setGlobalBox(curBox); // we have moved particles, communicate those changes m_mc->communicate(false); return false; } } inline bool UpdaterBoxMC::safe_box(const Scalar newL[3], const unsigned int& Ndim) { // Scalar min_allowed_size = m_mc->getMaxTransMoveSize(); // This is dealt with elsewhere const Scalar min_allowed_size(0.0); // volume must be kept positive for (unsigned int j = 0; j < Ndim; j++) { if ((newL[j]) < min_allowed_size) { // volume must be kept positive m_exec_conf->msg->notice(10) << "Box unsafe because dimension " << j << " would be negative." << std::endl; return false; } } return true; } /*! Perform Metropolis Monte Carlo box resizes and shearing \param timestep Current time step of the simulation */ void UpdaterBoxMC::update(uint64_t timestep) { Updater::update(timestep); m_count_step_start = m_count_total; m_exec_conf->msg->notice(10) << "UpdaterBoxMC: " << timestep << std::endl; // Create a prng instance for this timestep hoomd::RandomGenerator rng( hoomd::Seed(hoomd::RNGIdentifier::UpdaterBoxMC, timestep, m_sysdef->getSeed()), hoomd::Counter(m_instance)); // Choose a move type auto const weight_total = m_weight_partial_sums.back(); if (weight_total == 0.0) { // Attempt to execute with all move weights equal to zero. m_exec_conf->msg->warning() << "No move types with non-zero weight. UpdaterBoxMC has nothing to do." 
<< std::endl; return; } // Generate a number between (0, weight_total] auto const selected = hoomd::detail::generate_canonical<Scalar>(rng) * weight_total; // Select the first move type whose partial sum of weights is greater than // or equal to the generated value. auto const move_type_select = std::distance( m_weight_partial_sums.cbegin(), std::lower_bound(m_weight_partial_sums.cbegin(), m_weight_partial_sums.cend(), selected)); // Attempt and evaluate a move // This section will need to be updated when move types are added. if (move_type_select == 0) { // Isotropic volume change m_exec_conf->msg->notice(8) << "Volume move performed at step " << timestep << std::endl; update_V(timestep, rng); } else if (move_type_select == 1) { // Isotropic volume change in logarithmic steps m_exec_conf->msg->notice(8) << "lnV move performed at step " << timestep << std::endl; update_lnV(timestep, rng); } else if (move_type_select == 2) { // Volume change in distribution of box lengths m_exec_conf->msg->notice(8) << "Box length move performed at step " << timestep << std::endl; update_L(timestep, rng); } else if (move_type_select == 3) { // Shear change m_exec_conf->msg->notice(8) << "Box shear move performed at step " << timestep << std::endl; update_shear(timestep, rng); } else if (move_type_select == 4) { // Volume conserving aspect change m_exec_conf->msg->notice(8) << "Box aspect move performed at step " << timestep << std::endl; update_aspect(timestep, rng); } else { // Should not reach this point m_exec_conf->msg->warning() << "UpdaterBoxMC selected an unassigned move type. Selected " << move_type_select << " from range " << weight_total << std::endl; return; } if (is_oversheared()) { while (remove_overshear()) { }; // lattice reduction, possibly in several steps m_exec_conf->msg->notice(5) << "Lattice reduction performed at step " << timestep << std::endl; } } void UpdaterBoxMC::update_L(uint64_t timestep, hoomd::RandomGenerator& rng) { // Get updater parameters for current timestep Scalar P = (*m_beta_P)(timestep); // Get current particle data and box lattice parameters assert(m_pdata); unsigned int Ndim = m_sysdef->getNDimensions(); unsigned int Nglobal = m_pdata->getNGlobal(); BoxDim curBox = m_pdata->getGlobalBox(); Scalar curL[3]; Scalar newL[3]; // Lx, Ly, Lz newL[0] = curL[0] = curBox.getLatticeVector(0).x; newL[1] = curL[1] = curBox.getLatticeVector(1).y; newL[2] = curL[2] = curBox.getLatticeVector(2).z; Scalar newShear[3]; // xy, xz, yz newShear[0] = curBox.getTiltFactorXY(); newShear[1] = curBox.getTiltFactorXZ(); newShear[2] = curBox.getTiltFactorYZ(); // Volume change // Choose a lattice vector if non-isotropic volume changes unsigned int nonzero_dim = 0; for (unsigned int i = 0; i < Ndim; ++i) if (m_length_delta[i] != 0.0) nonzero_dim++; if (nonzero_dim == 0) { // all dimensions have delta==0, just count as accepted and return m_count_total.volume_accept_count++; return; } unsigned int chosen_nonzero_dim = hoomd::UniformIntDistribution(nonzero_dim - 1)(rng); unsigned int nonzero_dim_count = 0; unsigned int i = 0; for (unsigned int j = 0; j < Ndim; ++j) { if (m_length_delta[j] != 0.0) { if (nonzero_dim_count == chosen_nonzero_dim) { i = j; break; } ++nonzero_dim_count; } } Scalar dL_max(m_length_delta[i]); // Choose a length change Scalar dL = hoomd::UniformDistribution<Scalar>(-dL_max, dL_max)(rng); // perform volume change by applying a delta to one dimension newL[i] += dL; if (!safe_box(newL, Ndim)) { m_count_total.volume_reject_count++; } else { // Calculate volume change for 2 or 
3 dimensions. double Vold, dV, Vnew; Vold = curL[0] * curL[1]; if (Ndim == 3) Vold *= curL[2]; Vnew = newL[0] * newL[1]; if (Ndim == 3) Vnew *= newL[2]; dV = Vnew - Vold; // Calculate Boltzmann factor double dBetaH = P * dV - Nglobal * log(Vnew / Vold); // attempt box change bool accept = box_resize_trial(newL[0], newL[1], newL[2], newShear[0], newShear[1], newShear[2], timestep, dBetaH, rng); if (accept) { m_count_total.volume_accept_count++; } else { m_count_total.volume_reject_count++; } } } //! Update the box volume in logarithmic steps void UpdaterBoxMC::update_lnV(uint64_t timestep, hoomd::RandomGenerator& rng) { // Get updater parameters for current timestep Scalar P = (*m_beta_P)(timestep); // Get current particle data and box lattice parameters assert(m_pdata); unsigned int Ndim = m_sysdef->getNDimensions(); unsigned int Nglobal = m_pdata->getNGlobal(); BoxDim curBox = m_pdata->getGlobalBox(); Scalar curL[3]; Scalar newL[3]; // Lx, Ly, Lz newL[0] = curL[0] = curBox.getLatticeVector(0).x; newL[1] = curL[1] = curBox.getLatticeVector(1).y; newL[2] = curL[2] = curBox.getLatticeVector(2).z; Scalar newShear[3]; // xy, xz, yz newShear[0] = curBox.getTiltFactorXY(); newShear[1] = curBox.getTiltFactorXZ(); newShear[2] = curBox.getTiltFactorYZ(); // original volume double V = curL[0] * curL[1]; if (Ndim == 3) { V *= curL[2]; } // Aspect ratios Scalar A1 = m_volume_A1; Scalar A2 = m_volume_A2; // Volume change Scalar dlnV_max(m_ln_volume_delta); // Choose a volume change Scalar dlnV = hoomd::UniformDistribution<Scalar>(-dlnV_max, dlnV_max)(rng); Scalar new_V = V * exp(dlnV); // perform isotropic volume change if (Ndim == 3) { newL[0] = pow(A1 * A2 * new_V, (1. / 3.)); newL[1] = newL[0] / A1; newL[2] = newL[0] / A2; } else // Ndim ==2 { newL[0] = pow(A1 * new_V, (1. / 2.)); newL[1] = newL[0] / A1; // newL[2] is already assigned to curL[2] } if (!safe_box(newL, Ndim)) { m_count_total.ln_volume_reject_count++; } else { // Calculate Boltzmann factor double dBetaH = P * (new_V - V) - (Nglobal + 1) * log(new_V / V); // attempt box change bool accept = box_resize_trial(newL[0], newL[1], newL[2], newShear[0], newShear[1], newShear[2], timestep, dBetaH, rng); if (accept) { m_count_total.ln_volume_accept_count++; } else { m_count_total.ln_volume_reject_count++; } } } void UpdaterBoxMC::update_V(uint64_t timestep, hoomd::RandomGenerator& rng) { // Get updater parameters for current timestep Scalar P = (*m_beta_P)(timestep); // Get current particle data and box lattice parameters assert(m_pdata); unsigned int Ndim = m_sysdef->getNDimensions(); unsigned int Nglobal = m_pdata->getNGlobal(); BoxDim curBox = m_pdata->getGlobalBox(); Scalar curL[3]; Scalar newL[3]; // Lx, Ly, Lz newL[0] = curL[0] = curBox.getLatticeVector(0).x; newL[1] = curL[1] = curBox.getLatticeVector(1).y; newL[2] = curL[2] = curBox.getLatticeVector(2).z; Scalar newShear[3]; // xy, xz, yz newShear[0] = curBox.getTiltFactorXY(); newShear[1] = curBox.getTiltFactorXZ(); newShear[2] = curBox.getTiltFactorYZ(); // original volume double V = curL[0] * curL[1]; if (Ndim == 3) { V *= curL[2]; } // Aspect ratios Scalar A1 = m_volume_A1; Scalar A2 = m_volume_A2; // Volume change Scalar dV_max(m_volume_delta); // Choose a volume change Scalar dV = hoomd::UniformDistribution<Scalar>(-dV_max, dV_max)(rng); // perform isotropic volume change if (Ndim == 3) { newL[0] = pow((A1 * A2 * (V + dV)), (1. / 3.)); newL[1] = newL[0] / A1; newL[2] = newL[0] / A2; } else // Ndim ==2 { newL[0] = pow((A1 * (V + dV)), (1. 
/ 2.)); newL[1] = newL[0] / A1; // newL[2] is already assigned to curL[2] } if (!safe_box(newL, Ndim)) { m_count_total.volume_reject_count++; } else { // Calculate new volume double Vnew = newL[0] * newL[1]; if (Ndim == 3) { Vnew *= newL[2]; } // Calculate Boltzmann factor double dBetaH = P * dV - Nglobal * log(Vnew / V); // attempt box change bool accept = box_resize_trial(newL[0], newL[1], newL[2], newShear[0], newShear[1], newShear[2], timestep, dBetaH, rng); if (accept) { m_count_total.volume_accept_count++; } else { m_count_total.volume_reject_count++; } } } void UpdaterBoxMC::update_shear(uint64_t timestep, hoomd::RandomGenerator& rng) { // Get updater parameters for current timestep // Get current particle data and box lattice parameters assert(m_pdata); unsigned int Ndim = m_sysdef->getNDimensions(); // unsigned int Nglobal = m_pdata->getNGlobal(); BoxDim curBox = m_pdata->getGlobalBox(); Scalar curL[3]; Scalar newL[3]; // Lx, Ly, Lz newL[0] = curL[0] = curBox.getLatticeVector(0).x; newL[1] = curL[1] = curBox.getLatticeVector(1).y; newL[2] = curL[2] = curBox.getLatticeVector(2).z; Scalar newShear[3]; // xy, xz, yz newShear[0] = curBox.getTiltFactorXY(); newShear[1] = curBox.getTiltFactorXZ(); newShear[2] = curBox.getTiltFactorYZ(); Scalar dA, dA_max; // Choose a tilt factor and randomly perturb it unsigned int i(0); if (Ndim == 3) { i = hoomd::UniformIntDistribution(2)(rng); } dA_max = m_shear_delta[i]; dA = hoomd::UniformDistribution<Scalar>(-dA_max, dA_max)(rng); newShear[i] += dA; // Attempt box resize bool trial_success = box_resize_trial(newL[0], newL[1], newL[2], newShear[0], newShear[1], newShear[2], timestep, Scalar(0.0), rng); if (trial_success) { m_count_total.shear_accept_count++; } else { m_count_total.shear_reject_count++; } } void UpdaterBoxMC::update_aspect(uint64_t timestep, hoomd::RandomGenerator& rng) { // We have not established what ensemble this samples: // This is not a thermodynamic updater. // There is also room for improvement in enforcing volume conservation. // Get updater parameters for current timestep // Get current particle data and box lattice parameters assert(m_pdata); unsigned int Ndim = m_sysdef->getNDimensions(); // unsigned int Nglobal = m_pdata->getNGlobal(); BoxDim curBox = m_pdata->getGlobalBox(); Scalar curL[3]; Scalar newL[3]; // Lx, Ly, Lz newL[0] = curL[0] = curBox.getLatticeVector(0).x; newL[1] = curL[1] = curBox.getLatticeVector(1).y; newL[2] = curL[2] = curBox.getLatticeVector(2).z; Scalar newShear[3]; // xy, xz, yz newShear[0] = curBox.getTiltFactorXY(); newShear[1] = curBox.getTiltFactorXZ(); newShear[2] = curBox.getTiltFactorYZ(); // Choose an aspect ratio and randomly perturb it unsigned int i = hoomd::UniformIntDistribution(Ndim - 1)(rng); Scalar dA = Scalar(1.0) + hoomd::UniformDistribution<Scalar>(Scalar(0.0), m_aspect_delta)(rng); if (hoomd::UniformIntDistribution(1)(rng)) { dA = Scalar(1.0) / dA; } newL[i] *= dA; Scalar lambda = curL[i] / newL[i]; if (Ndim == 3) { lambda = sqrt(lambda); } for (unsigned int j = 0; j < Ndim; j++) { if (i != j) { newL[j] = lambda * curL[j]; } } // Attempt box resize bool trial_success = box_resize_trial(newL[0], newL[1], newL[2], newShear[0], newShear[1], newShear[2], timestep, Scalar(0.0), rng); if (trial_success) { m_count_total.aspect_accept_count++; } else { m_count_total.aspect_reject_count++; } } /*! 
\param mode 0 -> Absolute count, 1 -> relative to the start of the run, 2 -> relative to the last executed step \return The current state of the acceptance counters UpdaterBoxMC maintains a count of the number of accepted and rejected moves since instantiation. getCounters() provides the current value. The parameter *mode* controls whether the returned counts are absolute, relative to the start of the run, or relative to the start of the last executed step. */ hpmc_boxmc_counters_t UpdaterBoxMC::getCounters(unsigned int mode) { hpmc_boxmc_counters_t result; if (mode == 0) result = m_count_total; else if (mode == 1) result = m_count_total - m_count_run_start; else result = m_count_total - m_count_step_start; // don't MPI_AllReduce counters because all ranks count the same thing return result; } namespace detail { void export_UpdaterBoxMC(pybind11::module& m) { pybind11::class_<UpdaterBoxMC, Updater, std::shared_ptr<UpdaterBoxMC>>(m, "UpdaterBoxMC") .def(pybind11::init<std::shared_ptr<SystemDefinition>, std::shared_ptr<Trigger>, std::shared_ptr<IntegratorHPMC>, std::shared_ptr<Variant>>()) .def_property("volume", &UpdaterBoxMC::getVolumeParams, &UpdaterBoxMC::setVolumeParams) .def_property("length", &UpdaterBoxMC::getLengthParams, &UpdaterBoxMC::setLengthParams) .def_property("shear", &UpdaterBoxMC::getShearParams, &UpdaterBoxMC::setShearParams) .def_property("aspect", &UpdaterBoxMC::getAspectParams, &UpdaterBoxMC::setAspectParams) .def_property("betaP", &UpdaterBoxMC::getBetaP, &UpdaterBoxMC::setBetaP) .def("getCounters", &UpdaterBoxMC::getCounters) .def_property("instance", &UpdaterBoxMC::getInstance, &UpdaterBoxMC::setInstance); pybind11::class_<hpmc_boxmc_counters_t>(m, "hpmc_boxmc_counters_t") .def_property_readonly("volume", [](const hpmc_boxmc_counters_t& a) { pybind11::tuple result; result = pybind11::make_tuple(a.volume_accept_count, a.volume_reject_count); return result; }) .def_property_readonly("ln_volume", [](const hpmc_boxmc_counters_t& a) { pybind11::tuple result; result = pybind11::make_tuple(a.ln_volume_accept_count, a.ln_volume_reject_count); return result; }) .def_property_readonly("aspect", [](const hpmc_boxmc_counters_t& a) { pybind11::tuple result; result = pybind11::make_tuple(a.aspect_accept_count, a.aspect_reject_count); return result; }) .def_property_readonly("shear", [](const hpmc_boxmc_counters_t& a) { pybind11::tuple result; result = pybind11::make_tuple(a.shear_accept_count, a.shear_reject_count); return result; }); } } // end namespace detail void UpdaterBoxMC::updateChangedWeights() { // This line will need to be rewritten or updated when move types are added to the updater. auto const weights = std::vector<Scalar> {m_volume_weight, m_ln_volume_weight, m_length_weight, m_shear_weight, m_aspect_weight}; m_weight_partial_sums = std::vector<Scalar>(weights.size()); std::partial_sum(weights.cbegin(), weights.cend(), m_weight_partial_sums.begin()); } } // end namespace hpmc } // end namespace hoomd
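// update_V builds the Boltzmann argument dBetaH = betaP * dV - N * ln(Vnew/Vold) and
// box_resize_trial accepts the move when a canonical uniform random number is below
// exp(-dBetaH). The Metropolis acceptance rule in isolation, using the standard <random>
// generator in place of hoomd::RandomGenerator (a sketch, not the updater's code path):
#include <cmath>
#include <random>

bool accept_volume_move(double betaP, double Vold, double Vnew, unsigned int N,
                        std::mt19937_64& rng)
    {
    const double dBetaH
        = betaP * (Vnew - Vold) - static_cast<double>(N) * std::log(Vnew / Vold);
    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    return uniform(rng) < std::exp(-dBetaH); // Metropolis criterion
    }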
#if !defined(BE_UTIL_STRING_BASE64_DECODE_HPP_) && !defined(DOXYGEN) #include "base64_decode.hpp" #elif !defined(BE_UTIL_STRING_BASE64_DECODE_INL_) #define BE_UTIL_STRING_BASE64_DECODE_INL_ namespace be::util { namespace detail { /////////////////////////////////////////////////////////////////////////////// template <char S62, char S63, char P> UC base64_index(char symbol) { if (symbol >= 'A' && symbol <= 'Z') { return UC(symbol - 'A'); } else if (symbol >= 'a' && symbol <= 'z') { return UC(26 + symbol - 'a'); } else if (symbol >= '0' && symbol <= '9') { return UC(52 + symbol - '0'); } else if (symbol == S62) { return 62u; } else if (symbol == S63) { return 63u; } else if (symbol == P) { return UC(-2); } else { return UC(-1); } } /////////////////////////////////////////////////////////////////////////////// inline void base64_decode_3_bytes(UC a, UC b, UC c, UC d, UC* out) { out[0] = (a << 2) | (b >> 4); out[1] = (b << 4) | (c >> 2); out[2] = (c << 6) | d; } /////////////////////////////////////////////////////////////////////////////// inline void base64_decode_2_bytes(UC a, UC b, UC c, UC* out) { out[0] = (a << 2) | (b >> 4); out[1] = (b << 4) | (c >> 2); } /////////////////////////////////////////////////////////////////////////////// inline void base64_decode_1_byte(UC a, UC b, UC* out) { out[0] = (a << 2) | (b >> 4); } /////////////////////////////////////////////////////////////////////////////// template <char S62, char S63, char P> std::size_t base64_decode(SV encoded_data, UC* out) { std::size_t remaining_bytes = encoded_data.size(); const char* ptr = &(*encoded_data.begin()); UC* begin = out; UC indices[4]; UC n_indices = 0; while (remaining_bytes > 0) { UC index = base64_index<S62, S63, P>(*ptr); indices[n_indices] = index; ++ptr; if (index <= 63u) { ++n_indices; if (n_indices == 4) { base64_decode_3_bytes(indices[0], indices[1], indices[2], indices[3], out); out += 3; n_indices = 0; } } else if (index == UC(-2)) { break; // if we find a pad character, ignore the rest of the input } --remaining_bytes; } if (n_indices == 3) { base64_decode_2_bytes(indices[0], indices[1], indices[2], out); out += 2; } else if (n_indices == 2) { base64_decode_1_byte(indices[0], indices[1], out); ++out; } return std::size_t(out - begin); } } // be::util::detail /////////////////////////////////////////////////////////////////////////////// template <char S62, char S63, char P> S base64_decode_string(SV encoded_data) { S decoded; if (encoded_data.empty()) { return decoded; } decoded.resize((encoded_data.size() / 4) * 3 + 3); std::size_t size = detail::base64_decode<S62, S63, P>(encoded_data, reinterpret_cast<UC*>(&(decoded[0]))); decoded.resize(size); return decoded; } /////////////////////////////////////////////////////////////////////////////// template <char S62, char S63, char P> Buf<UC> base64_decode_buf(SV encoded_data) { if (encoded_data.empty()) { return Buf<UC>(); } Buf<UC> buf = make_buf<UC>((encoded_data.size() / 4) * 3 + 3); std::size_t size = detail::base64_decode<S62, S63, P>(encoded_data, buf.get()); buf.release(); return Buf<UC>(buf.get(), size, be::detail::delete_array); } } // be::util #endif
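// base64_decode maps each input symbol to a 6-bit index and packs four indices into three
// bytes via base64_decode_3_bytes. A quick standalone check of that bit packing for the
// classic example "TWFu" -> "Man" (symbol indices 19, 22, 5, 46):
#include <cassert>

int main() {
   using UC = unsigned char;
   const UC a = 19, b = 22, c = 5, d = 46;        // 'T', 'W', 'F', 'u'
   UC out[3];
   out[0] = static_cast<UC>((a << 2) | (b >> 4)); // 'M'
   out[1] = static_cast<UC>((b << 4) | (c >> 2)); // 'a'
   out[2] = static_cast<UC>((c << 6) | d);        // 'n'
   assert(out[0] == 'M' && out[1] == 'a' && out[2] == 'n');
   return 0;
}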
// Copyright (c) 2017-2021, Mudita Sp. z.o.o. All rights reserved. // For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md #include "endpoints/developerMode/event/ATRequest.hpp" #include "handler/RawATHandler.hpp" #include "CellularUrcHandler.hpp" #include "service-cellular/CellularCall.hpp" #include "service-cellular/CellularMessage.hpp" #include "service-cellular/CellularServiceAPI.hpp" #include "service-cellular/ServiceCellular.hpp" #include "service-cellular/SignalStrength.hpp" #include "service-cellular/State.hpp" #include "service-cellular/USSD.hpp" #include "service-cellular/MessageConstants.hpp" #include "service-cellular/connection-manager/ConnectionManagerCellularCommands.hpp" #include "SimCard.hpp" #include "NetworkSettings.hpp" #include "service-cellular/RequestFactory.hpp" #include "service-cellular/CellularRequestHandler.hpp" #include "system/messages/SentinelRegistrationMessage.hpp" #include <Audio/AudioCommon.hpp> #include <BaseInterface.hpp> #include <CalllogRecord.hpp> #include <Commands.hpp> #include <at/ATFactory.hpp> #include <Common/Common.hpp> #include <Common/Query.hpp> #include <MessageType.hpp> #include <modem/ATCommon.hpp> #include <modem/ATParser.hpp> #include <modem/mux/DLCChannel.h> #include <modem/mux/CellularMux.h> #include <NotificationsRecord.hpp> #include <PhoneNumber.hpp> #include <Result.hpp> #include <Service/Message.hpp> #include <Service/Service.hpp> #include <Timers/TimerFactory.hpp> #include <Tables/CalllogTable.hpp> #include <Tables/Record.hpp> #include <Utils.hpp> #include <Utility.hpp> #include <at/cmd/CLCC.hpp> #include <at/cmd/CFUN.hpp> #include <at/UrcClip.hpp> #include <at/UrcCmti.hpp> #include <at/UrcCreg.hpp> #include <at/UrcCtze.hpp> #include <at/UrcCusd.hpp> #include <at/UrcQind.hpp> #include <at/UrcCpin.hpp> // for Cpin #include <at/response.hpp> #include <bsp/cellular/bsp_cellular.hpp> #include <EventStore.hpp> #include <country.hpp> #include <log/log.hpp> #include <at/UrcFactory.hpp> #include <queries/messages/sms/QuerySMSSearchByType.hpp> #include <queries/notifications/QueryNotificationsIncrement.hpp> #include <queries/notifications/QueryNotificationsMultipleIncrement.hpp> #include <projdefs.h> #include <service-antenna/AntennaMessage.hpp> #include <service-antenna/AntennaServiceAPI.hpp> #include <service-antenna/ServiceAntenna.hpp> #include <service-appmgr/Constants.hpp> #include <service-appmgr/Controller.hpp> #include <service-db/agents/settings/SystemSettings.hpp> #include <service-db/DBServiceAPI.hpp> #include <service-db/DBNotificationMessage.hpp> #include <service-db/QueryMessage.hpp> #include <service-evtmgr/Constants.hpp> #include <service-evtmgr/EventManagerServiceAPI.hpp> #include <service-evtmgr/EVMessages.hpp> #include <service-desktop/DesktopMessages.hpp> #include <service-desktop/DeveloperModeMessage.hpp> #include <service-time/service-time/TimeMessage.hpp> #include <task.h> #include <ucs2/UCS2.hpp> #include <utf8/UTF8.hpp> #include <queries/messages/sms/QuerySMSUpdate.hpp> #include <queries/messages/sms/QuerySMSAdd.hpp> #include <algorithm> #include <bits/exception.h> #include <cassert> #include <iostream> #include <map> #include <optional> #include <string> #include <utility> #include <vector> #include "checkSmsCenter.hpp" #include <service-desktop/Constants.hpp> #include <gsl/util> #include <ticks.hpp> #include "ServiceCellularPriv.hpp" #include <service-cellular/api/request/sim.hpp> #include <service-cellular/api/notification/notification.hpp> #include <ctime> #include <at/cmd/QCFGUsbnet.hpp> const 
char *ServiceCellular::serviceName = cellular::service::name; inline constexpr auto cellularStack = 8000; using namespace cellular; using namespace cellular::msg; using cellular::service::State; ServiceCellular::ServiceCellular() : sys::Service(serviceName, "", cellularStack, sys::ServicePriority::Idle), phoneModeObserver{std::make_unique<sys::phone_modes::Observer>()}, priv{std::make_unique<internal::ServiceCellularPriv>(this)} { LOG_INFO("[ServiceCellular] Initializing"); bus.channels.push_back(sys::BusChannel::ServiceCellularNotifications); bus.channels.push_back(sys::BusChannel::ServiceDBNotifications); bus.channels.push_back(sys::BusChannel::ServiceEvtmgrNotifications); bus.channels.push_back(sys::BusChannel::PhoneModeChanges); callStateTimer = sys::TimerFactory::createPeriodicTimer( this, "call_state", std::chrono::milliseconds{1000}, [this](sys::Timer &) { CallStateTimerHandler(); }); callEndedRecentlyTimer = sys::TimerFactory::createSingleShotTimer( this, "callEndedRecentlyTimer", std::chrono::seconds{5}, [this](sys::Timer &timer) { priv->outSMSHandler.sendMessageIfDelayed(); }); stateTimer = sys::TimerFactory::createPeriodicTimer( this, "state", std::chrono::milliseconds{1000}, [&](sys::Timer &) { handleStateTimer(); }); ussdTimer = sys::TimerFactory::createPeriodicTimer( this, "ussd", std::chrono::milliseconds{1000}, [this](sys::Timer &) { handleUSSDTimer(); }); sleepTimer = sys::TimerFactory::createPeriodicTimer( this, "sleep", constants::sleepTimerInterval, [this](sys::Timer &) { SleepTimerHandler(); }); connectionTimer = sys::TimerFactory::createPeriodicTimer(this, "connection", std::chrono::seconds{60}, [this](sys::Timer &) { utility::conditionally_invoke( [this]() { return phoneModeObserver->isInMode(sys::phone_modes::PhoneMode::Offline); }, [this]() { if (connectionManager != nullptr) connectionManager->onTimerTick(); }); }); simTimer = sys::TimerFactory::createSingleShotTimer( this, "simTimer", std::chrono::milliseconds{6000}, [this](sys::Timer &) { priv->simCard->handleSimTimer(); }); ongoingCall.setStartCallAction([=](const CalllogRecord &rec) { auto call = DBServiceAPI::CalllogAdd(this, rec); if (call.ID == DB_ID_NONE) { LOG_ERROR("CalllogAdd failed"); } return call; }); ongoingCall.setEndCallAction([=](const CalllogRecord &rec) { if (DBServiceAPI::CalllogUpdate(this, rec) && rec.type == CallType::CT_MISSED) { DBServiceAPI::GetQuery(this, db::Interface::Name::Notifications, std::make_unique<db::query::notifications::Increment>( NotificationsRecord::Key::Calls, rec.phoneNumber)); } return true; }); notificationCallback = [this](std::string &data) { LOG_DEBUG("Notifications callback called with %u data bytes", static_cast<unsigned int>(data.size())); std::string logStr = utils::removeNewLines(data); LOG_SENSITIVE(LOGDEBUG, "Data: %s", logStr.c_str()); atURCStream.write(data); auto vUrc = atURCStream.getURCList(); for (const auto &urc : vUrc) { std::string message; auto msg = identifyNotification(urc); if (msg != std::nullopt) { bus.sendMulticast(msg.value(), sys::BusChannel::ServiceCellularNotifications); } } }; packetData = std::make_unique<packet_data::PacketData>(*this); /// call in apnListChanged handler registerMessageHandlers(); } ServiceCellular::~ServiceCellular() { LOG_INFO("[ServiceCellular] Cleaning resources"); } void ServiceCellular::SleepTimerHandler() { auto currentTime = cpp_freertos::Ticks::TicksToMs(cpp_freertos::Ticks::GetTicks()); auto lastCommunicationTimestamp = cmux->getLastCommunicationTimestamp(); auto timeOfInactivity = currentTime >= 
lastCommunicationTimestamp ? currentTime - lastCommunicationTimestamp : std::numeric_limits<TickType_t>::max() - lastCommunicationTimestamp + currentTime; if (!ongoingCall.isValid() && priv->state->get() == State::ST::Ready && timeOfInactivity >= constants::enterSleepModeTime.count()) { cmux->enterSleepMode(); cpuSentinel->ReleaseMinimumFrequency(); } } void ServiceCellular::CallStateTimerHandler() { LOG_DEBUG("CallStateTimerHandler"); auto msg = std::make_shared<CellularListCallsMessage>(); bus.sendUnicast(std::move(msg), ServiceCellular::serviceName); } sys::ReturnCodes ServiceCellular::InitHandler() { board = cmux->getBoard(); settings = std::make_unique<settings::Settings>(); settings->init(::service::ServiceProxy(shared_from_this())); connectionManager = std::make_unique<ConnectionManager>( utils::getNumericValue<bool>( settings->getValue(settings::Cellular::offlineMode, settings::SettingsScope::Global)), static_cast<std::chrono::minutes>(utils::getNumericValue<int>(settings->getValue( settings->getValue(settings::Offline::connectionFrequency, settings::SettingsScope::Global)))), std::make_shared<ConnectionManagerCellularCommands>(*this)); priv->state->set(State::ST::WaitForStartPermission); settings->registerValueChange( settings::Cellular::volte_on, [this](const std::string &value) { volteChanged(value); }, ::settings::SettingsScope::Global); settings->registerValueChange( settings::Cellular::apn_list, [this](const std::string &value) { apnListChanged(value); }, ::settings::SettingsScope::Global); priv->setInitialMultiPartSMSUID(static_cast<std::uint8_t>(utils::getNumericValue<int>( settings->getValue(settings::Cellular::currentUID, settings::SettingsScope::Global)))); priv->saveNewMultiPartSMSUIDCallback = [this](std::uint8_t uid) -> void { settings->setValue( settings::Cellular::currentUID, std::to_string(static_cast<int>(uid)), settings::SettingsScope::Global); }; cpuSentinel = std::make_shared<sys::CpuSentinel>(serviceName, this); ongoingCall.setCpuSentinel(cpuSentinel); auto sentinelRegistrationMsg = std::make_shared<sys::SentinelRegistrationMessage>(cpuSentinel); bus.sendUnicast(sentinelRegistrationMsg, ::service::name::system_manager); cmux->registerCellularDevice(); return sys::ReturnCodes::Success; } sys::ReturnCodes ServiceCellular::DeinitHandler() { settings->deinit(); return sys::ReturnCodes::Success; } void ServiceCellular::ProcessCloseReason(sys::CloseReason closeReason) { sendCloseReadyMessage(this); } sys::ReturnCodes ServiceCellular::SwitchPowerModeHandler(const sys::ServicePowerMode mode) { LOG_INFO("[ServiceCellular] PowerModeHandler: %s", c_str(mode)); switch (mode) { case sys::ServicePowerMode ::Active: cmux->exitSleepMode(); break; case sys::ServicePowerMode ::SuspendToRAM: case sys::ServicePowerMode ::SuspendToNVM: cmux->enterSleepMode(); break; } return sys::ReturnCodes::Success; } void ServiceCellular::registerMessageHandlers() { phoneModeObserver->connect(this); phoneModeObserver->subscribe( [this](sys::phone_modes::PhoneMode mode) { connectionManager->onPhoneModeChange(mode); }); phoneModeObserver->subscribe([&](sys::phone_modes::Tethering tethering) { if (tethering == sys::phone_modes::Tethering::On) { priv->tetheringHandler->enable(); } else { priv->tetheringHandler->disable(); logTetheringCalls(); } }); priv->connectSimCard(); priv->connectNetworkTime(); priv->connectSimContacts(); priv->connectImeiGetHandler(); connect(typeid(CellularStartOperatorsScanMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = 
static_cast<CellularStartOperatorsScanMessage *>(request); return handleCellularStartOperatorsScan(msg); }); connect(typeid(CellularGetActiveContextsMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularGetActiveContextsMessage *>(request); return handleCellularGetActiveContextsMessage(msg); }); connect(typeid(CellularRequestCurrentOperatorNameMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularRequestCurrentOperatorNameMessage *>(request); return handleCellularRequestCurrentOperatorName(msg); }); connect(typeid(CellularGetAPNMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularGetAPNMessage *>(request); return handleCellularGetAPNMessage(msg); }); connect(typeid(CellularSetAPNMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularSetAPNMessage *>(request); return handleCellularSetAPNMessage(msg); }); connect(typeid(CellularNewAPNMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularNewAPNMessage *>(request); return handleCellularNewAPNMessage(msg); }); connect(typeid(CellularSetDataTransferMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularSetDataTransferMessage *>(request); return handleCellularSetDataTransferMessage(msg); }); connect(typeid(CellularGetDataTransferMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularGetDataTransferMessage *>(request); return handleCellularGetDataTransferMessage(msg); }); connect(typeid(CellularActivateContextMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularActivateContextMessage *>(request); return handleCellularActivateContextMessage(msg); }); connect(typeid(CellularDeactivateContextMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularDeactivateContextMessage *>(request); return handleCellularDeactivateContextMessage(msg); }); connect(typeid(CellularChangeVoLTEDataMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularChangeVoLTEDataMessage *>(request); volteOn = msg->getVoLTEon(); settings->setValue(settings::Cellular::volte_on, std::to_string(volteOn), settings::SettingsScope::Global); NetworkSettings networkSettings(*this); auto vstate = networkSettings.getVoLTEConfigurationState(); if ((vstate != VoLTEState::On) && volteOn) { LOG_DEBUG("VoLTE On"); if (networkSettings.setVoLTEState(VoLTEState::On) == at::Result::Code::OK) { priv->modemResetHandler->performSoftReset(); } } else if (!volteOn) { LOG_DEBUG("VoLTE Off"); if (networkSettings.setVoLTEState(VoLTEState::Off) == at::Result::Code::OK) { priv->modemResetHandler->performSoftReset(); } } return std::make_shared<CellularResponseMessage>(true); }); connect(typeid(CellularSetFlightModeMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularSetFlightModeMessage *>(request); return handleCellularSetFlightModeMessage(msg); }); connect(typeid(CellularPowerStateChange), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularPowerStateChange *>(request); priv->nextPowerState = msg->getNewState(); handle_power_state_change(); return sys::MessageNone{}; }); connect(typeid(sdesktop::developerMode::DeveloperModeRequest), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<sdesktop::developerMode::DeveloperModeRequest *>(request); 
if (typeid(*msg->event.get()) == typeid(sdesktop::developerMode::CellularHotStartEvent)) { priv->simCard->setChannel(nullptr); priv->networkTime->setChannel(nullptr); priv->simContacts->setChannel(nullptr); priv->imeiGetHandler->setChannel(nullptr); cmux->closeChannels(); ///> change state - simulate hot start handle_power_up_request(); } if (typeid(*msg->event.get()) == typeid(sdesktop::developerMode::CellularStateInfoRequestEvent)) { auto event = std::make_unique<sdesktop::developerMode::CellularStateInfoRequestEvent>(priv->state->c_str()); auto message = std::make_shared<sdesktop::developerMode::DeveloperModeRequest>(std::move(event)); bus.sendUnicast(std::move(message), ::service::name::service_desktop); } if (typeid(*msg->event.get()) == typeid(sdesktop::developerMode::CellularSleepModeInfoRequestEvent)) { auto event = std::make_unique<sdesktop::developerMode::CellularSleepModeInfoRequestEvent>( cmux->isCellularInSleepMode()); auto message = std::make_shared<sdesktop::developerMode::DeveloperModeRequest>(std::move(event)); bus.sendUnicast(std::move(message), ::service::name::service_desktop); } if (typeid(*msg->event.get()) == typeid(sdesktop::developerMode::ATResponseEvent)) { auto channel = cmux->get(CellularMux::Channel::Commands); assert(channel); auto handler = cellular::RawATHandler(*channel); return handler.handle(msg); } return sys::MessageNone{}; }); connect(typeid(CellularNewIncomingSMSMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularNewIncomingSMSMessage *>(request); auto ret = receiveSMS(msg->getData()); return std::make_shared<CellularResponseMessage>(ret); }); connect(typeid(CellularAnswerIncomingCallMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularAnswerIncomingCallMessage *>(request); return handleCellularAnswerIncomingCallMessage(msg); }); connect(typeid(CellularCallRequestMessage), [&](sys::Message *request) -> sys::MessagePointer { if (phoneModeObserver->isInMode(sys::phone_modes::PhoneMode::Offline)) { this->bus.sendUnicast(std::make_shared<CellularCallRejectedByOfflineNotification>(), ::service::name::appmgr); return std::make_shared<CellularResponseMessage>(true); } auto msg = static_cast<CellularCallRequestMessage *>(request); return handleCellularCallRequestMessage(msg); }); connect(typeid(CellularHangupCallMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularHangupCallMessage *>(request); handleCellularHangupCallMessage(msg); return sys::MessageNone{}; }); connect(typeid(CellularDismissCallMessage), [&](sys::Message *request) -> sys::MessagePointer { handleCellularDismissCallMessage(request); return sys::MessageNone{}; }); connect(typeid(db::QueryResponse), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<db::QueryResponse *>(request); return handleDBQueryResponseMessage(msg); }); connect(typeid(CellularListCallsMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularListCallsMessage *>(request); return handleCellularListCallsMessage(msg); }); connect(typeid(db::NotificationMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<db::NotificationMessage *>(request); return handleDBNotificationMessage(msg); }); connect(typeid(CellularRingingMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularRingingMessage *>(request); return handleCellularRingingMessage(msg); }); 
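// The registrations below handle incoming-call and caller-ID messages, SIM/IMSI and own-number queries, network information, antenna and scan-mode control, firmware version, DTMF, USSD, state-change requests and modem power/call notifications.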
connect(typeid(CellularIncominCallMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularIncomingCallMessage(request); }); connect(typeid(CellularCallerIdMessage), [&](sys::Message *request) -> sys::MessagePointer { auto msg = static_cast<CellularCallerIdMessage *>(request); return handleCellularCallerIdMessage(msg); }); connect(typeid(CellularGetIMSIMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetIMSIMessage(request); }); connect(typeid(CellularGetOwnNumberMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetOwnNumberMessage(request); }); connect(typeid(CellularGetNetworkInfoMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetNetworkInfoMessage(request); }); connect(typeid(CellularAntennaRequestMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularSelectAntennaMessage(request); }); connect(typeid(CellularSetScanModeMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularSetScanModeMessage(request); }); connect(typeid(CellularGetScanModeMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetScanModeMessage(request); }); connect(typeid(CellularGetFirmwareVersionMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetFirmwareVersionMessage(request); }); connect(typeid(sevm::StatusStateMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleEVMStatusMessage(request); }); connect(typeid(CellularGetCsqMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetCsqMessage(request); }); connect(typeid(CellularGetCregMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetCregMessage(request); }); connect(typeid(CellularGetNwinfoMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetNwinfoMessage(request); }); connect(typeid(CellularGetAntennaMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularGetAntennaMessage(request); }); connect(typeid(CellularDtmfRequestMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularDtmfRequestMessage(request); }); connect(typeid(CellularUSSDMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularUSSDMessage(request); }); connect(typeid(cellular::StateChange), [&](sys::Message *request) -> sys::MessagePointer { return handleStateRequestMessage(request); }); connect(typeid(CellularCallActiveNotification), [&](sys::Message *request) -> sys::MessagePointer { return handleCallActiveNotification(request); }); connect(typeid(CellularCallAbortedNotification), [&](sys::Message *request) -> sys::MessagePointer { return handleCallAbortedNotification(request); }); connect(typeid(CellularPowerUpProcedureCompleteNotification), [&](sys::Message *request) -> sys::MessagePointer { return handlePowerUpProcedureCompleteNotification(request); }); connect(typeid(CellularPowerDownDeregisteringNotification), [&](sys::Message *request) -> sys::MessagePointer { return handlePowerDownDeregisteringNotification(request); }); connect(typeid(CellularPowerDownDeregisteredNotification), [&](sys::Message *request) -> sys::MessagePointer { return handlePowerDownDeregisteredNotification(request); }); connect(typeid(CellularNewIncomingSMSNotification), [&](sys::Message *request) -> sys::MessagePointer { return handleNewIncomingSMSNotification(request); }); 
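// The remaining registrations react to modem-driven notifications: SMS done, signal strength and network status updates, incoming URCs, RING/caller-ID events and connection-frequency setting changes.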
connect(typeid(CellularSmsDoneNotification), [&](sys::Message *request) -> sys::MessagePointer { return handleSmsDoneNotification(request); }); connect(typeid(CellularSignalStrengthUpdateNotification), [&](sys::Message *request) -> sys::MessagePointer { return handleSignalStrengthUpdateNotification(request); }); connect(typeid(CellularNetworkStatusUpdateNotification), [&](sys::Message *request) -> sys::MessagePointer { return handleNetworkStatusUpdateNotification(request); }); connect(typeid(CellularUrcIncomingNotification), [&](sys::Message *request) -> sys::MessagePointer { return handleUrcIncomingNotification(request); }); connect(typeid(CellularRingNotification), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularRingNotification(request); }); connect(typeid(CellularCallerIdNotification), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularCallerIdNotification(request); }); connect(typeid(CellularSetConnectionFrequencyMessage), [&](sys::Message *request) -> sys::MessagePointer { return handleCellularSetConnectionFrequencyMessage(request); }); handle_CellularGetChannelMessage(); } void ServiceCellular::change_state(cellular::StateChange *msg) { assert(msg); switch (msg->request) { case State::ST::Idle: handle_idle(); break; case State::ST::WaitForStartPermission: handle_wait_for_start_permission(); break; case State::ST::PowerUpRequest: handle_power_up_request(); break; case State::ST::StatusCheck: handle_status_check(); break; case State::ST::PowerUpInProgress: handle_power_up_in_progress_procedure(); break; case State::ST::PowerUpProcedure: handle_power_up_procedure(); break; case State::ST::BaudDetect: if (nextPowerStateChangeAwaiting) { handle_power_state_change(); } else { handle_baud_detect(); } break; case State::ST::AudioConfigurationProcedure: handle_audio_conf_procedure(); break; case State::ST::CellularPrivInit: handle_cellular_priv_init(); break; case State::ST::CellularConfProcedure: handle_start_conf_procedure(); break; case State::ST::APNConfProcedure: handle_apn_conf_procedure(); break; case State::ST::SanityCheck: handle_sim_sanity_check(); break; case State::ST::ModemOn: handle_modem_on(); break; case State::ST::URCReady: handle_URCReady(); break; case State::ST::ModemFatalFailure: handle_fatal_failure(); break; case State::ST::Failed: handle_failure(); break; case State::ST::Ready: handle_ready(); break; case State::ST::PowerDownStarted: handle_power_down_started(); break; case State::ST::PowerDownWaiting: handle_power_down_waiting(); break; case State::ST::PowerDown: handle_power_down(); if (nextPowerStateChangeAwaiting) { handle_power_state_change(); } break; }; } bool ServiceCellular::handle_idle() { LOG_DEBUG("Idle"); return true; } bool ServiceCellular::handle_wait_for_start_permission() { auto msg = std::make_shared<CellularCheckIfStartAllowedMessage>(); bus.sendUnicast(msg, ::service::name::system_manager); return true; } bool ServiceCellular::handle_power_up_request() { cmux->selectAntenna(bsp::cellular::antenna::lowBand); switch (board) { case bsp::Board::RT1051: priv->state->set(State::ST::StatusCheck); break; case bsp::Board::Linux: priv->state->set(State::ST::PowerUpProcedure); break; case bsp::Board::none: return false; break; } return true; } bool ServiceCellular::handle_power_up_procedure() { switch (board) { case bsp::Board::RT1051: { LOG_DEBUG("RT1051 - cold start"); cmux->turnOnModem(); // wait for status pin change to change state break; } case bsp::Board::Linux: { // check baud once to determine if it's 
already turned on auto ret = cmux->baudDetectOnce(); if (ret == CellularMux::ConfState::Success) { // it's on aka hot start. LOG_DEBUG("Linux - hot start"); priv->state->set(State::ST::CellularConfProcedure); break; } else { // it's off aka cold start LOG_DEBUG("Linux - cold start"); LOG_WARN("Press PWR_KEY for 2 sec on modem eval board!"); vTaskDelay(pdMS_TO_TICKS(2000)); // give some 2 secs more for user input // if it's Linux, then wait for status pin to become active, to align its starting position with RT1051 vTaskDelay(pdMS_TO_TICKS(8000)); priv->state->set(State::ST::PowerUpInProgress); break; } } case bsp::Board::none: default: LOG_FATAL("Board not known!"); assert(0); break; } return true; } bool ServiceCellular::handle_power_up_in_progress_procedure(void) { if (priv->modemResetHandler->isResetInProgress()) { constexpr auto msModemUartInitTime = 12000; vTaskDelay(pdMS_TO_TICKS(msModemUartInitTime)); } priv->state->set(State::ST::BaudDetect); return true; } bool ServiceCellular::handle_baud_detect() { auto ret = cmux->baudDetectProcedure(); if (ret == CellularMux::ConfState::Success) { priv->state->set(State::ST::CellularConfProcedure); return true; } else { priv->state->set(State::ST::ModemFatalFailure); return false; } } bool ServiceCellular::handle_power_down_started() { /// we should not send anything to the modem from now on return true; } bool ServiceCellular::handle_power_down_waiting() { switch (board) { case bsp::Board::RT1051: // wait for pin status become inactive (handled elsewhere) break; case bsp::Board::Linux: // if it's Linux, then wait for status pin to become inactive, to align with RT1051 vTaskDelay(pdMS_TO_TICKS(17000)); // according to docs this shouldn't be needed, but better be safe than Quectel priv->state->set(State::ST::PowerDown); break; default: LOG_ERROR("Powering 'down an unknown device not handled"); return false; } return true; } bool ServiceCellular::handle_power_down() { LOG_DEBUG("Powered Down"); cmux->closeChannels(); cmux.reset(); cmux = std::make_unique<CellularMux>(PortSpeed_e::PS460800, this); if (priv->modemResetHandler->isResetInProgress()) { priv->state->set(State::ST::Idle); } return true; } bool ServiceCellular::handle_start_conf_procedure() { // Start configuration procedure, if it's first run modem will be restarted auto confRet = cmux->confProcedure(); if (confRet == CellularMux::ConfState::Success) { priv->state->set(State::ST::AudioConfigurationProcedure); return true; } priv->state->set(State::ST::Failed); return false; } bool ServiceCellular::handle_audio_conf_procedure() { auto audioRet = cmux->audioConfProcedure(); if (audioRet == CellularMux::ConfState::ModemNeedsReset) { priv->modemResetHandler->performReboot(); return false; } if (audioRet == CellularMux::ConfState::Success) { auto cmd = at::factory(at::AT::IPR) + std::to_string(ATPortSpeeds_text[cmux->getStartParams().PortSpeed]); LOG_DEBUG("Setting baudrate %i baud", ATPortSpeeds_text[cmux->getStartParams().PortSpeed]); if (!cmux->getParser()->cmd(cmd)) { LOG_ERROR("Baudrate setup error"); priv->state->set(State::ST::Failed); return false; } cmux->getCellular()->setSpeed(ATPortSpeeds_text[cmux->getStartParams().PortSpeed]); vTaskDelay(1000); if (cmux->startMultiplexer() == CellularMux::ConfState::Success) { LOG_DEBUG("[ServiceCellular] Modem is fully operational"); // open channel - notifications DLCChannel *notificationsChannel = cmux->get(CellularMux::Channel::Notifications); if (notificationsChannel != nullptr) { LOG_DEBUG("Setting up notifications callback"); 
notificationsChannel->setCallback(notificationCallback); } priv->state->set(State::ST::CellularPrivInit); return true; } else { priv->state->set(State::ST::Failed); return false; } } else if (audioRet == CellularMux::ConfState::Failure) { /// restart priv->state->set(State::ST::AudioConfigurationProcedure); return true; } // Reset procedure started, do nothing here priv->state->set(State::ST::Idle); return true; } bool ServiceCellular::handle_cellular_priv_init() { auto channel = cmux->get(CellularMux::Channel::Commands); priv->simCard->setChannel(channel); priv->networkTime->setChannel(channel); priv->simContacts->setChannel(channel); priv->imeiGetHandler->setChannel(channel); if (!priv->tetheringHandler->configure()) { priv->modemResetHandler->performHardReset(); return true; } auto flightMode = settings->getValue(settings::Cellular::offlineMode, settings::SettingsScope::Global) == "1" ? true : false; connectionManager->setFlightMode(flightMode); auto interval = 0; if (utils::toNumeric(settings->getValue(settings::Offline::connectionFrequency, settings::SettingsScope::Global), interval)) { connectionManager->setInterval(std::chrono::minutes{interval}); } if (!connectionManager->onPhoneModeChange(phoneModeObserver->getCurrentPhoneMode())) { priv->state->set(State::ST::Failed); LOG_ERROR("Failed to handle phone mode"); return false; } priv->state->set(State::ST::APNConfProcedure); return true; } auto ServiceCellular::handle(db::query::SMSSearchByTypeResult *response) -> bool { if (response->getResults().empty()) { priv->outSMSHandler.handleNoMoreDbRecords(); } else { for (auto &rec : response->getResults()) { if (rec.type == SMSType::QUEUED) { priv->outSMSHandler.handleIncomingDbRecord(rec, callEndedRecentlyTimer.isActive()); } } } return true; } /** * NOTICE: URC handling function identifyNotification works on different thread, so sending * any AT commands is not allowed here (also in URC handlers and other functions called from here) * @return */ std::optional<std::shared_ptr<sys::Message>> ServiceCellular::identifyNotification(const std::string &data) { CellularUrcHandler urcHandler(*this); std::string str(data.begin(), data.end()); std::string logStr = utils::removeNewLines(str); LOG_SENSITIVE(LOGDEBUG, "Notification:: %s", logStr.c_str()); auto urc = at::urc::UrcFactory::Create(str); urc->Handle(urcHandler); if (!urc->isHandled()) { LOG_SENSITIVE(LOGWARN, "Unhandled notification: %s", logStr.c_str()); } return urcHandler.getResponse(); } auto ServiceCellular::receiveSMS(std::string messageNumber) -> bool { constexpr auto ucscSetMaxRetries = 3; auto retVal = true; auto channel = cmux->get(CellularMux::Channel::Commands); if (channel == nullptr) { retVal = false; return retVal; } auto ucscSetRetries = 0; while (ucscSetRetries < ucscSetMaxRetries) { if (!channel->cmd(at::AT::SMS_UCSC2)) { ++ucscSetRetries; LOG_ERROR("Could not set UCS2 charset mode for TE. Retry %d", ucscSetRetries); } else { break; } } auto _ = gsl::finally([&channel, &retVal, &messageNumber] { if (!channel->cmd(at::AT::SMS_GSM)) { LOG_ERROR("Could not set GSM (default) charset mode for TE"); } // delete message from modem memory if (retVal && !channel->cmd(at::factory(at::AT::CMGD) + messageNumber)) { LOG_ERROR("Could not delete SMS from modem"); } }); bool messageParsed = false; std::string messageRawBody; UTF8 receivedNumber; const auto &cmd = at::factory(at::AT::QCMGR); auto ret = channel->cmd(cmd + messageNumber, cmd.getTimeout()); if (!ret) { LOG_ERROR("!!!! 
Could not read text message !!!!"); retVal = false; } else { for (std::size_t i = 0; i < ret.response.size(); i++) { if (ret.response[i].find("QCMGR") != std::string::npos) { std::istringstream ss(ret.response[i]); std::string token; std::vector<std::string> tokens; while (std::getline(ss, token, ',')) { tokens.push_back(token); } tokens[1].erase(std::remove(tokens[1].begin(), tokens[1].end(), '\"'), tokens[1].end()); /* * tokens: * [0] - +QCMGR * [1] - sender number * [2] - none * [3] - date YY/MM/DD * [4] - hour HH/MM/SS/timezone * concatenaded messages * [5] - unique concatenated message id * [6] - current message number * [7] - total messages count * */ // parse sender number receivedNumber = UCS2(tokens[1]).toUTF8(); // parse date tokens[3].erase(std::remove(tokens[3].begin(), tokens[3].end(), '\"'), tokens[3].end()); auto messageDate = std::time(nullptr); if (tokens.size() == 5) { LOG_DEBUG("Single message"); messageRawBody = ret.response[i + 1]; messageParsed = true; } else if (tokens.size() == 8) { LOG_DEBUG("Concatenated message"); uint32_t last = 0; uint32_t current = 0; try { last = std::stoi(tokens[7]); current = std::stoi(tokens[6]); } catch (const std::exception &e) { LOG_ERROR("ServiceCellular::receiveSMS error %s", e.what()); retVal = false; break; } LOG_DEBUG("part %" PRIu32 "from %" PRIu32, current, last); if (current == last) { messageParts.push_back(ret.response[i + 1]); for (std::size_t j = 0; j < messageParts.size(); j++) { messageRawBody += messageParts[j]; } messageParts.clear(); messageParsed = true; } else { messageParts.push_back(ret.response[i + 1]); } } if (messageParsed) { messageParsed = false; const auto decodedMessage = UCS2(messageRawBody).toUTF8(); const auto record = createSMSRecord(decodedMessage, receivedNumber, messageDate); if (!dbAddSMSRecord(record)) { LOG_ERROR("Failed to add text message to db"); retVal = false; break; } } } } } return retVal; } bool ServiceCellular::getOwnNumber(std::string &destination) { auto ret = cmux->get(CellularMux::Channel::Commands)->cmd(at::AT::CNUM); if (ret) { auto begin = ret.response[0].find(','); auto end = ret.response[0].rfind(','); if (begin != std::string::npos && end != std::string::npos) { std::string number; try { number = ret.response[0].substr(begin, end - begin); } catch (std::exception &e) { LOG_ERROR("ServiceCellular::getOwnNumber exception: %s", e.what()); return false; } number.erase(std::remove(number.begin(), number.end(), '"'), number.end()); number.erase(std::remove(number.begin(), number.end(), ','), number.end()); destination = number; return true; } } LOG_ERROR("ServiceCellular::getOwnNumber failed."); return false; } bool ServiceCellular::getIMSI(std::string &destination, bool fullNumber) { auto ret = cmux->get(CellularMux::Channel::Commands)->cmd(at::AT::CIMI); if (ret) { if (fullNumber) { destination = ret.response[0]; } else { try { destination = ret.response[0].substr(0, 3); } catch (std::exception &e) { LOG_ERROR("ServiceCellular::getIMSI exception: %s", e.what()); return false; } } return true; } LOG_ERROR("ServiceCellular::getIMSI failed."); return false; } std::vector<std::string> ServiceCellular::getNetworkInfo(void) { std::vector<std::string> data; auto channel = cmux->get(CellularMux::Channel::Commands); if (channel) { auto resp = channel->cmd(at::AT::CSQ); if (resp.code == at::Result::Code::OK) { data.push_back(resp.response[0]); } else { LOG_ERROR("CSQ Error"); data.push_back(""); } resp = channel->cmd(at::AT::CREG); if (resp.code == at::Result::Code::OK) { 
data.push_back(resp.response[0]); } else { LOG_ERROR("CREG Error"); data.push_back(""); } resp = channel->cmd(at::AT::QNWINFO); if (resp.code == at::Result::Code::OK) { std::string ret; if (at::response::parseQNWINFO(resp.response[0], ret)) { data.push_back(ret); } else { data.push_back(""); } } else { LOG_ERROR("QNWINFO Error"); data.push_back(""); } } return data; } std::vector<std::string> get_last_AT_error(DLCChannel *channel) { auto ret = channel->cmd(at::AT::CEER); return std::move(ret.response); } void log_last_AT_error(DLCChannel *channel) { std::vector<std::string> atErrors(get_last_AT_error(channel)); int i = 1; for (auto &msg_line : atErrors) { LOG_ERROR("%d/%d: %s", i, static_cast<int>(atErrors.size()), msg_line.c_str()); i++; } } bool is_SIM_detection_enabled(DLCChannel *channel) { auto ret = channel->cmd(at::AT::SIM_DET); if (ret) { if (ret.response[0].find("+QSIMDET: 1") != std::string::npos) { LOG_DEBUG("SIM detection enabled!"); return true; } } else { LOG_FATAL("Can't check SIM detection status!"); log_last_AT_error(channel); } return false; } bool enable_SIM_detection(DLCChannel *channel) { auto ret = channel->cmd(at::AT::SIM_DET_ON); if (!ret) { log_last_AT_error(channel); return false; } return true; } bool is_SIM_status_enabled(DLCChannel *channel) { auto ret = channel->cmd(at::AT::QSIMSTAT); if (ret) { if (ret.response[0].find("+QSIMSTAT: 1") != std::string::npos) { LOG_DEBUG("SIM swap enabled!"); return true; } } else { LOG_FATAL("SIM swap status failure! %s", ret.response[0].c_str()); log_last_AT_error(channel); } return false; } bool enable_SIM_status(DLCChannel *channel) { auto ret = channel->cmd(at::AT::SIMSTAT_ON); if (!ret) { log_last_AT_error(channel); return false; } return true; } void save_SIM_detection_status(DLCChannel *channel) { auto ret = channel->cmd(at::AT::STORE_SETTINGS_ATW); if (!ret) { log_last_AT_error(channel); } } bool sim_check_hot_swap(DLCChannel *channel) { assert(channel); bool reboot_needed = false; if (!is_SIM_detection_enabled(channel)) { reboot_needed = true; } if (!is_SIM_status_enabled(channel)) { reboot_needed = true; } if (reboot_needed) { enable_SIM_detection(channel); enable_SIM_status(channel); save_SIM_detection_status(channel); LOG_FATAL("Modem reboot required, please remove battery!"); } return !reboot_needed; } bool ServiceCellular::handle_sim_sanity_check() { auto ret = sim_check_hot_swap(cmux->get(CellularMux::Channel::Commands)); if (ret) { priv->state->set(State::ST::ModemOn); } else { LOG_ERROR("Sanity check failure - modem has to be rebooted"); priv->modemResetHandler->performHardReset(); } return ret; } bool ServiceCellular::handle_modem_on() { auto channel = cmux->get(CellularMux::Channel::Commands); channel->cmd("AT+CCLK?"); // inform host ap ready cmux->informModemHostWakeup(); tetheringTurnOnURC(); priv->state->set(State::ST::URCReady); LOG_DEBUG("AP ready"); return true; } bool ServiceCellular::handle_URCReady() { auto channel = cmux->get(CellularMux::Channel::Commands); bool ret = true; priv->requestNetworkTimeSettings(); ret = ret && channel->cmd(at::AT::ENABLE_NETWORK_REGISTRATION_URC); bus.sendMulticast<cellular::msg::notification::ModemStateChanged>(cellular::api::ModemState::Ready); LOG_DEBUG("%s", priv->state->c_str()); return ret; } bool ServiceCellular::handleTextMessagesInit() { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel == nullptr) { LOG_ERROR("Can't configure SIM!
no Commands channel!"); return false; } auto commands = at::getCommadsSet(at::commadsSet::smsInit); for (const auto &command : commands) { if (!channel->cmd(command)) { LOG_ERROR("Text messages init failed!"); return false; } } if (!receiveAllMessages()) { LOG_ERROR("Receiving all messages from modem failed"); return true; // this is not a blocking issue } return true; } SMSRecord ServiceCellular::createSMSRecord(const UTF8 &decodedMessage, const UTF8 &receivedNumber, const time_t messageDate, const SMSType &smsType) const noexcept { SMSRecord record{}; record.body = decodedMessage; record.number = utils::PhoneNumber::getReceivedNumberView(receivedNumber); record.type = SMSType::INBOX; record.date = messageDate; return record; } bool ServiceCellular::dbAddSMSRecord(const SMSRecord &record) { return DBServiceAPI::AddSMS( this, record, db::QueryCallback::fromFunction([this, number = record.number](auto response) { auto result = dynamic_cast<db::query::SMSAddResult *>(response); if (result == nullptr || !result->result) { return false; } onSMSReceived(number); return true; })); } void ServiceCellular::onSMSReceived(const utils::PhoneNumber::View &number) { DBServiceAPI::GetQuery( this, db::Interface::Name::Notifications, std::make_unique<db::query::notifications::Increment>(NotificationsRecord::Key::Sms, number)); bus.sendMulticast(std::make_shared<CellularIncomingSMSNotificationMessage>(), sys::BusChannel::ServiceCellularNotifications); } bool ServiceCellular::receiveAllMessages() { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel == nullptr) { return false; } constexpr std::string_view cmd = "CMGL: "; if (auto ret = channel->cmd(at::AT::LIST_MESSAGES)) { for (std::size_t i = 0; i < ret.response.size(); i++) { if (auto pos = ret.response[i].find(cmd); pos != std::string::npos) { auto startPos = pos + cmd.size(); auto endPos = ret.response[i].find_first_of(','); if (!receiveSMS(ret.response[i].substr(startPos, endPos - startPos))) { LOG_WARN("Cannot receive text message - %" PRIu32 " / %" PRIu32, static_cast<uint32_t>(i), static_cast<uint32_t>(ret.response.size())); } } } return true; } else { return false; } } bool ServiceCellular::handle_failure() { priv->state->set(State::ST::Idle); bus.sendMulticast<cellular::msg::notification::ModemStateChanged>(cellular::api::ModemState::Fail); return true; } bool ServiceCellular::handle_fatal_failure() { LOG_FATAL("Awaiting death!"); bus.sendMulticast<cellular::msg::notification::ModemStateChanged>(cellular::api::ModemState::Fatal); while (true) { vTaskDelay(500); } return true; } bool ServiceCellular::handle_ready() { LOG_DEBUG("%s", priv->state->c_str()); sleepTimer.start(); return true; } bool ServiceCellular::SetScanMode(std::string mode) { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel) { auto command = at::factory(at::AT::SET_SCANMODE); auto resp = channel->cmd(command.getCmd() + mode + ",1", command.getTimeout(), 1); if (resp.code == at::Result::Code::OK) { return true; } } return false; } std::string ServiceCellular::GetScanMode(void) { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel) { auto resp = channel->cmd(at::AT::GET_SCANMODE); if (resp.code == at::Result::Code::OK) { auto beg = resp.response[0].find(","); if (beg != std::string::npos) { auto response = resp.response[0].substr(beg + 1, 1); return response; } } else { LOG_ERROR("Unable to get network search mode configuration"); } } return {}; } bool ServiceCellular::transmitDtmfTone(uint32_t digit) { auto channel =
cmux->get(CellularMux::Channel::Commands); at::Result resp; if (channel) { auto command = at::factory(at::AT::QLDTMF); std::string dtmfString = "\"" + std::string(1, digit) + "\""; resp = channel->cmd(command.getCmd() + dtmfString); if (resp) { command = at::factory(at::AT::VTS); resp = channel->cmd(command.getCmd() + dtmfString); } } return resp.code == at::Result::Code::OK; } void ServiceCellular::handle_CellularGetChannelMessage() { connect(CellularGetChannelMessage(), [&](sys::Message *req) { auto getChannelMsg = static_cast<CellularGetChannelMessage *>(req); LOG_DEBUG("Handle request for channel: %s", CellularMux::name(getChannelMsg->dataChannel).c_str()); std::shared_ptr<CellularGetChannelResponseMessage> channelResponseMessage = std::make_shared<CellularGetChannelResponseMessage>(cmux->get(getChannelMsg->dataChannel)); LOG_DEBUG("channel ptr: %p", channelResponseMessage->dataChannelPtr); bus.sendUnicast(std::move(channelResponseMessage), req->sender); return sys::MessageNone{}; }); } bool ServiceCellular::handle_status_check(void) { LOG_INFO("Checking modem status."); auto modemActive = cmux->isModemActive(); if (modemActive) { // modem is already turned on, call configuration procedure LOG_INFO("Modem is already turned on."); LOG_DEBUG("RT1051 - hot start"); priv->state->set(State::ST::PowerUpInProgress); } else { priv->state->set(State::ST::PowerUpProcedure); } return true; } void ServiceCellular::startStateTimer(uint32_t timeout) { stateTimeout = timeout; stateTimer.start(); } void ServiceCellular::stopStateTimer() { stateTimeout = 0; stateTimer.stop(); } void ServiceCellular::handleStateTimer(void) { stateTimeout--; if (stateTimeout == 0) { stopStateTimer(); LOG_FATAL("State %s timeout occurred!", priv->state->c_str()); priv->state->set(State::ST::ModemFatalFailure); } } void ServiceCellular::handle_power_state_change() { nextPowerStateChangeAwaiting = false; auto modemActive = cmux->isModemActive(); if (priv->nextPowerState == State::PowerState::On) { if (priv->state->get() == State::ST::PowerDownWaiting) { LOG_DEBUG("Powerdown in progress. Powerup request queued."); nextPowerStateChangeAwaiting = true; } else if (priv->state->get() == State::ST::PowerUpProcedure || priv->state->get() == State::ST::PowerUpInProgress) { LOG_DEBUG("Powerup already in progress"); } else if (priv->state->get() == State::ST::PowerDown || priv->state->get() == State::ST::WaitForStartPermission) { LOG_INFO("Modem Power UP."); priv->state->set(State::ST::PowerUpRequest); } else { LOG_DEBUG("Modem already powered up."); } } else { if (priv->state->get() == State::ST::PowerUpProcedure || priv->state->get() == State::ST::PowerUpInProgress) { LOG_DEBUG("Powerup in progress.
Powerdown request queued."); nextPowerStateChangeAwaiting = true; } else if (priv->state->get() == State::ST::PowerDownWaiting) { LOG_DEBUG("Powerdown already in progress."); } else if (priv->state->get() == State::ST::PowerDown) { LOG_DEBUG("Modem already powered down."); } else if (priv->state->get() == State::ST::WaitForStartPermission && !modemActive) { LOG_DEBUG("Modem already powered down."); priv->state->set(State::ST::PowerDown); } else { LOG_INFO("Modem Power DOWN."); cmux->turnOffModem(); priv->state->set(State::ST::PowerDownWaiting); } } } bool ServiceCellular::handleUSSDRequest(CellularUSSDMessage::RequestType requestType, const std::string &request) { constexpr uint32_t commandTimeout = 120000; auto channel = cmux->get(CellularMux::Channel::Commands); if (channel != nullptr) { if (requestType == CellularUSSDMessage::RequestType::pullSesionRequest) { channel->cmd(at::AT::SMS_GSM); std::string command = at::factory(at::AT::CUSD_SEND) + request + ",15"; auto result = channel->cmd(command, std::chrono::milliseconds(commandTimeout)); if (result.code == at::Result::Code::OK) { ussdState = ussd::State::pullRequestSent; setUSSDTimer(); } } else if (requestType == CellularUSSDMessage::RequestType::abortSesion) { ussdState = ussd::State::sesionAborted; auto result = channel->cmd(at::AT::CUSD_CLOSE_SESSION); if (result.code == at::Result::Code::OK) { CellularServiceAPI::USSDRequest(this, CellularUSSDMessage::RequestType::pushSesionRequest); } else { CellularServiceAPI::USSDRequest(this, CellularUSSDMessage::RequestType::abortSesion); } } else if (requestType == CellularUSSDMessage::RequestType::pushSesionRequest) { ussdState = ussd::State::pushSesion; auto result = channel->cmd(at::AT::CUSD_OPEN_SESSION); if (result.code == at::Result::Code::OK) {} } return true; } return false; } void ServiceCellular::handleUSSDTimer(void) { if (ussdTimeout > 0) { ussdTimeout -= 1; } else { LOG_WARN("USSD timeout occurred, aborting current session"); ussdTimer.stop(); CellularServiceAPI::USSDRequest(this, CellularUSSDMessage::RequestType::abortSesion); } } void ServiceCellular::setUSSDTimer(void) { switch (ussdState) { case ussd::State::pullRequestSent: ussdTimeout = ussd::pullResponseTimeout; break; case ussd::State::pullResponseReceived: ussdTimeout = ussd::pullSesionTimeout; break; case ussd::State::pushSesion: case ussd::State::sesionAborted: case ussd::State::none: ussdTimeout = ussd::noTimeout; break; } if (ussdTimeout == ussd::noTimeout) { ussdTimer.stop(); return; } ussdTimer.start(); } std::shared_ptr<cellular::RawCommandRespAsync> ServiceCellular::handleCellularStartOperatorsScan( CellularStartOperatorsScanMessage *msg) { LOG_INFO("CellularStartOperatorsScan handled"); auto ret = std::make_shared<cellular::RawCommandRespAsync>(CellularMessage::Type::OperatorsScanResult); NetworkSettings networkSettings(*this); ret->data = networkSettings.scanOperators(msg->getFullInfo()); bus.sendUnicast(ret, msg->sender); return ret; } bool ServiceCellular::handle_apn_conf_procedure() { LOG_DEBUG("APN on modem configuration"); packetData->setupAPNSettings(); priv->state->set(State::ST::SanityCheck); return true; } std::shared_ptr<CellularCurrentOperatorNameResponse> ServiceCellular::handleCellularRequestCurrentOperatorName( CellularRequestCurrentOperatorNameMessage *msg) { LOG_INFO("CellularRequestCurrentOperatorName handled"); NetworkSettings networkSettings(*this); const auto currentNetworkOperatorName = networkSettings.getCurrentOperatorName();
Store::GSM::get()->setNetworkOperatorName(currentNetworkOperatorName); return std::make_shared<CellularCurrentOperatorNameResponse>(currentNetworkOperatorName); } std::shared_ptr<CellularGetAPNResponse> ServiceCellular::handleCellularGetAPNMessage(CellularGetAPNMessage *msg) { std::vector<std::shared_ptr<packet_data::APN::Config>> apns; if (auto type = msg->getAPNType(); type) { if (auto apn = packetData->getAPNFirst(*type); apn) { apns.push_back(*apn); } return std::make_shared<CellularGetAPNResponse>(apns); } if (auto ctxid = msg->getContextId(); ctxid) { if (auto apn = packetData->getAPN(*ctxid); apn) { apns.push_back(*apn); } return std::make_shared<CellularGetAPNResponse>(apns); } return std::make_shared<CellularGetAPNResponse>(packetData->getAPNs()); } std::shared_ptr<CellularSetAPNResponse> ServiceCellular::handleCellularSetAPNMessage(CellularSetAPNMessage *msg) { auto apn = msg->getAPNConfig(); auto ret = packetData->setAPN(apn); settings->setValue(settings::Cellular::apn_list, packetData->saveAPNSettings(), settings::SettingsScope::Global); return std::make_shared<CellularSetAPNResponse>(ret); } std::shared_ptr<CellularNewAPNResponse> ServiceCellular::handleCellularNewAPNMessage(CellularNewAPNMessage *msg) { auto apn = msg->getAPNConfig(); std::uint8_t newId = 0; auto ret = packetData->newAPN(apn, newId); settings->setValue(settings::Cellular::apn_list, packetData->saveAPNSettings(), settings::SettingsScope::Global); return std::make_shared<CellularNewAPNResponse>(ret, newId); } std::shared_ptr<CellularSetDataTransferResponse> ServiceCellular::handleCellularSetDataTransferMessage( CellularSetDataTransferMessage *msg) { packetData->setDataTransfer(msg->getDataTransfer()); return std::make_shared<CellularSetDataTransferResponse>(at::Result::Code::OK); } std::shared_ptr<CellularGetDataTransferResponse> ServiceCellular::handleCellularGetDataTransferMessage( CellularGetDataTransferMessage *msg) { return std::make_shared<CellularGetDataTransferResponse>(packetData->getDataTransfer()); } std::shared_ptr<CellularActivateContextResponse> ServiceCellular::handleCellularActivateContextMessage( CellularActivateContextMessage *msg) { return std::make_shared<CellularActivateContextResponse>(packetData->activateContext(msg->getContextId()), msg->getContextId()); } std::shared_ptr<CellularDeactivateContextResponse> ServiceCellular::handleCellularDeactivateContextMessage( CellularDeactivateContextMessage *msg) { return std::make_shared<CellularDeactivateContextResponse>(packetData->deactivateContext(msg->getContextId()), msg->getContextId()); } std::shared_ptr<CellularGetActiveContextsResponse> ServiceCellular::handleCellularGetActiveContextsMessage( CellularGetActiveContextsMessage *msg) { return std::make_shared<CellularGetActiveContextsResponse>(packetData->getActiveContexts()); } std::shared_ptr<CellularSetOperatorAutoSelectResponse> ServiceCellular::handleCellularSetOperatorAutoSelect( CellularSetOperatorAutoSelectMessage *msg) { LOG_INFO("CellularSetOperatorAutoSelect handled"); NetworkSettings networkSettings(*this); return std::make_shared<CellularSetOperatorAutoSelectResponse>(networkSettings.setOperatorAutoSelect()); } std::shared_ptr<CellularSetOperatorResponse> ServiceCellular::handleCellularSetOperator(CellularSetOperatorMessage *msg) { LOG_INFO("CellularSetOperatorAutoSelect handled"); NetworkSettings networkSettings(*this); return std::make_shared<CellularSetOperatorResponse>( networkSettings.setOperator(msg->getMode(), msg->getFormat(), msg->getName())); } void 
ServiceCellular::volteChanged(const std::string &value) { if (!value.empty()) { LOG_INFO("VoLTE setting state changed to '%s'.", value.c_str()); volteOn = utils::getNumericValue<bool>(value); } } void ServiceCellular::apnListChanged(const std::string &value) { if (!value.empty()) { LOG_INFO("apn_list setting state changed to '%s'.", value.c_str()); packetData->loadAPNSettings(value); } } auto ServiceCellular::handleCellularAnswerIncomingCallMessage(CellularMessage *msg) -> std::shared_ptr<CellularResponseMessage> { LOG_INFO("%s", __PRETTY_FUNCTION__); if (ongoingCall.getType() != CallType::CT_INCOMING) { return std::make_shared<CellularResponseMessage>(true); } auto channel = cmux->get(CellularMux::Channel::Commands); auto ret = false; if (channel) { auto response = channel->cmd(at::AT::ATA); if (response) { // Propagate "CallActive" notification into system bus.sendMulticast(std::make_shared<CellularCallActiveNotification>(), sys::BusChannel::ServiceCellularNotifications); ret = true; } } return std::make_shared<CellularResponseMessage>(ret); } auto ServiceCellular::handleCellularCallRequestMessage(CellularCallRequestMessage *msg) -> std::shared_ptr<CellularResponseMessage> { LOG_INFO("%s", __PRETTY_FUNCTION__); auto channel = cmux->get(CellularMux::Channel::Commands); if (channel == nullptr) { return std::make_shared<CellularResponseMessage>(false); } cellular::RequestFactory factory( msg->number.getEntered(), *channel, msg->callMode, Store::GSM::get()->simCardInserted()); auto request = factory.create(); CellularRequestHandler handler(*this); auto result = channel->cmd(request->command()); request->handle(handler, result); return std::make_shared<CellularResponseMessage>(request->isHandled()); } void ServiceCellular::handleCellularHangupCallMessage(CellularHangupCallMessage *msg) { LOG_INFO("%s", __PRETTY_FUNCTION__); auto channel = cmux->get(CellularMux::Channel::Commands); if (channel) { if (channel->cmd(at::AT::ATH)) { callManager.hangUp(); callStateTimer.stop(); callEndedRecentlyTimer.start(); if (!ongoingCall.endCall(CellularCall::Forced::True)) { LOG_ERROR("Failed to end ongoing call"); } bus.sendMulticast(std::make_shared<CellularResponseMessage>(true, msg->type), sys::BusChannel::ServiceCellularNotifications); } else { LOG_ERROR("Call not aborted"); bus.sendMulticast(std::make_shared<CellularResponseMessage>(false, msg->type), sys::BusChannel::ServiceCellularNotifications); } } bus.sendMulticast(std::make_shared<CellularResponseMessage>(false, msg->type), sys::BusChannel::ServiceCellularNotifications); } void ServiceCellular::handleCellularDismissCallMessage(sys::Message *msg) { LOG_INFO("%s", __PRETTY_FUNCTION__); auto message = static_cast<CellularDismissCallMessage *>(msg); hangUpCall(); if (message->addNotificationRequired()) { handleCallAbortedNotification(msg); } } auto ServiceCellular::handleDBQueryResponseMessage(db::QueryResponse *msg) -> std::shared_ptr<sys::ResponseMessage> { bool responseHandled = false; auto result = msg->getResult(); if (auto response = dynamic_cast<db::query::SMSSearchByTypeResult *>(result.get())) { responseHandled = handle(response); } else if (result->hasListener()) { responseHandled = result->handle(); } if (responseHandled) { return std::make_shared<sys::ResponseMessage>(); } else { return std::make_shared<sys::ResponseMessage>(sys::ReturnCodes::Unresolved); } } auto ServiceCellular::handleCellularListCallsMessage(CellularMessage *msg) -> std::shared_ptr<sys::ResponseMessage> { at::cmd::CLCC cmd; auto base = 
cmux->get(CellularMux::Channel::Commands)->cmd(cmd); if (auto response = cmd.parseCLCC(base); response) { const auto &data = response.getData(); auto it = std::find_if(std::begin(data), std::end(data), [&](const auto &entry) { return entry.stateOfCall == ModemCall::CallState::Active && entry.mode == ModemCall::CallMode::Voice; }); if (it != std::end(data)) { auto notification = std::make_shared<CellularCallActiveNotification>(); bus.sendMulticast(std::move(notification), sys::BusChannel::ServiceCellularNotifications); callStateTimer.stop(); return std::make_shared<CellularResponseMessage>(true); } } return std::make_shared<CellularResponseMessage>(false); } auto ServiceCellular::handleDBNotificationMessage(db::NotificationMessage *msg) -> std::shared_ptr<sys::ResponseMessage> { if (msg->interface == db::Interface::Name::SMS && (msg->type == db::Query::Type::Create || msg->type == db::Query::Type::Update)) { priv->outSMSHandler.handleDBNotification(); return std::make_shared<sys::ResponseMessage>(); } return std::make_shared<sys::ResponseMessage>(sys::ReturnCodes::Failure); } auto ServiceCellular::handleCellularRingingMessage(CellularRingingMessage *msg) -> std::shared_ptr<sys::ResponseMessage> { LOG_INFO("%s", __PRETTY_FUNCTION__); return std::make_shared<CellularResponseMessage>(ongoingCall.startCall(msg->number, CallType::CT_OUTGOING)); } auto ServiceCellular::handleCellularIncomingCallMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { LOG_INFO("%s", __PRETTY_FUNCTION__); auto ret = true; auto message = static_cast<CellularIncominCallMessage *>(msg); if (!ongoingCall.isValid()) { ret = ongoingCall.startCall(message->number, CallType::CT_INCOMING); } return std::make_shared<CellularResponseMessage>(ret); } auto ServiceCellular::handleCellularCallerIdMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto message = static_cast<CellularCallerIdMessage *>(msg); ongoingCall.setNumber(message->number); return sys::MessageNone{}; } auto ServiceCellular::handleCellularGetIMSIMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { std::string temp; if (getIMSI(temp)) { return std::make_shared<CellularResponseMessage>(true, temp); } return std::make_shared<CellularResponseMessage>(false); } auto ServiceCellular::handleCellularGetOwnNumberMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { std::string temp; if (getOwnNumber(temp)) { return std::make_shared<CellularGetOwnNumberResponseMessage>(true, temp); } return std::make_shared<CellularGetOwnNumberResponseMessage>(false); } auto ServiceCellular::handleCellularGetNetworkInfoMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto message = std::make_shared<cellular::RawCommandRespAsync>(CellularMessage::Type::NetworkInfoResult); message->data = getNetworkInfo(); bus.sendUnicast(message, msg->sender); return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::handleCellularSelectAntennaMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto message = static_cast<CellularAntennaRequestMessage *>(msg); cmux->selectAntenna(message->antenna); vTaskDelay(50); // sleep for 50 ms... 
auto actualAntenna = cmux->getAntenna(); if (actualAntenna == bsp::cellular::antenna::lowBand) { LOG_INFO("Low band antenna set"); } else { LOG_INFO("High band antenna set"); } bool changedAntenna = (actualAntenna == message->antenna); auto notification = std::make_shared<AntennaChangedMessage>(); bus.sendMulticast(notification, sys::BusChannel::AntennaNotifications); return std::make_shared<CellularResponseMessage>(changedAntenna); } auto ServiceCellular::handleCellularSetScanModeMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto message = static_cast<CellularSetScanModeMessage *>(msg); bool ret = SetScanMode(message->data); return std::make_shared<CellularResponseMessage>(ret); } auto ServiceCellular::handleCellularGetScanModeMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto mode = GetScanMode(); if (mode != "") { auto response = std::make_shared<cellular::RawCommandRespAsync>(CellularMessage::Type::GetScanModeResult); response->data.push_back(mode); bus.sendUnicast(response, msg->sender); return std::make_shared<CellularResponseMessage>(true); } return std::make_shared<CellularResponseMessage>(false); } auto ServiceCellular::handleCellularGetFirmwareVersionMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { std::string response; auto channel = cmux->get(CellularMux::Channel::Commands); if (channel) { auto resp = channel->cmd(at::AT::QGMR); if (resp.code == at::Result::Code::OK) { response = resp.response[0]; return std::make_shared<CellularResponseMessage>(true, response); } } return std::make_shared<CellularResponseMessage>(false); } auto ServiceCellular::handleEVMStatusMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { using namespace bsp::cellular::status; auto message = static_cast<sevm::StatusStateMessage *>(msg); auto status_pin = message->state; if (priv->modemResetHandler->handleStatusPinEvent(status_pin == value::ACTIVE)) { return std::make_shared<CellularResponseMessage>(true); } if (status_pin == value::ACTIVE) { if (priv->state->get() == State::ST::PowerUpProcedure) { priv->state->set(State::ST::PowerUpInProgress); // and go to baud detect as usual } } if (status_pin == value::INACTIVE) { if (priv->state->get() == State::ST::PowerDownWaiting) { priv->state->set(State::ST::PowerDown); } } return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::handleCellularGetCsqMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel) { auto modemResponse = channel->cmd(at::AT::CSQ); if (modemResponse.code == at::Result::Code::OK) { return std::make_shared<CellularResponseMessage>(true, modemResponse.response[0]); } } return std::make_shared<CellularResponseMessage>(false); } auto ServiceCellular::handleCellularGetCregMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel) { auto resp = channel->cmd(at::AT::CREG); if (resp.code == at::Result::Code::OK) { return std::make_shared<CellularResponseMessage>(true, resp.response[0]); } } return std::make_shared<CellularResponseMessage>(false); } auto ServiceCellular::handleCellularGetNwinfoMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel) { auto resp = channel->cmd(at::AT::QNWINFO); if (resp.code == at::Result::Code::OK) { return std::make_shared<CellularResponseMessage>(true, 
resp.response[0]); } } return std::make_shared<CellularResponseMessage>(false); } auto ServiceCellular::handleCellularGetAntennaMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto antenna = cmux->getAntenna(); return std::make_shared<CellularAntennaResponseMessage>(true, antenna, CellularMessage::Type::GetAntenna); } auto ServiceCellular::handleCellularDtmfRequestMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto message = static_cast<CellularDtmfRequestMessage *>(msg); auto resp = transmitDtmfTone(message->getDigit()); return std::make_shared<CellularResponseMessage>(resp); } auto ServiceCellular::handleCellularUSSDMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto message = static_cast<CellularUSSDMessage *>(msg); return std::make_shared<CellularResponseMessage>(handleUSSDRequest(message->type, message->data)); } auto ServiceCellular::handleStateRequestMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { change_state(dynamic_cast<cellular::StateChange *>(msg)); return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::handleCallActiveNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto ret = std::make_shared<CellularResponseMessage>(ongoingCall.setActive()); NetworkSettings networkSettings(*this); auto currentNAT = networkSettings.getCurrentNAT(); if (currentNAT) { auto currentSimpleNAT = NetworkSettings::toSimpleNAT(*currentNAT); LOG_INFO("Current NAT %s(%s)", utils::enumToString(*currentNAT).c_str(), utils::enumToString(currentSimpleNAT).c_str()); if (currentSimpleNAT == NetworkSettings::SimpleNAT::LTE) { LOG_INFO("VoLTE call"); } else { LOG_INFO("Non VoLTE call"); } } else { LOG_WARN("Cannot get current NAT"); } return ret; } auto ServiceCellular::handleCallAbortedNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { callStateTimer.stop(); auto ret = ongoingCall.endCall(); callManager.hangUp(); return std::make_shared<CellularResponseMessage>(ret); } auto ServiceCellular::handlePowerUpProcedureCompleteNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { if (board == bsp::Board::Linux) { priv->state->set(State::ST::CellularConfProcedure); } return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::handlePowerDownDeregisteringNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { if (priv->state->get() != State::ST::PowerDownWaiting) { priv->state->set(State::ST::PowerDownStarted); return std::make_shared<CellularResponseMessage>(true); } return std::make_shared<CellularResponseMessage>(false); } auto ServiceCellular::handlePowerDownDeregisteredNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { priv->state->set(State::ST::PowerDownWaiting); return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::handleNewIncomingSMSNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto message = static_cast<CellularNewIncomingSMSNotification *>(msg); auto notification = std::make_shared<CellularNewIncomingSMSMessage>(message->data); bus.sendUnicast(std::move(notification), msg->sender); return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::handleSmsDoneNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto resp = handleTextMessagesInit(); return std::make_shared<CellularResponseMessage>(resp); } auto 
ServiceCellular::handleSignalStrengthUpdateNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { return std::make_shared<CellularResponseMessage>(false); } auto ServiceCellular::handleNetworkStatusUpdateNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { return std::make_shared<CellularResponseMessage>(false); } auto ServiceCellular::handleUrcIncomingNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { // when handling URC, the CPU frequency does not go below a certain level cpuSentinel->HoldMinimumFrequency(bsp::CpuFrequencyHz::Level_4); cmux->exitSleepMode(); return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::handleCellularSetFlightModeMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto setMsg = static_cast<CellularSetFlightModeMessage *>(msg); settings->setValue( settings::Cellular::offlineMode, std::to_string(setMsg->flightModeOn), settings::SettingsScope::Global); connectionManager->setFlightMode(setMsg->flightModeOn); connectionManager->onPhoneModeChange(phoneModeObserver->getCurrentPhoneMode()); return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::handleCellularRingNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { LOG_INFO("%s", __PRETTY_FUNCTION__); if (phoneModeObserver->isTetheringOn() || connectionManager->forceDismissCalls()) { return std::make_shared<CellularResponseMessage>(hangUpCall()); } if (!callManager.isIncomingCallPropagated()) { auto message = static_cast<CellularRingNotification *>(msg); bus.sendMulticast(std::make_shared<CellularIncominCallMessage>(message->getNubmer()), sys::BusChannel::ServiceCellularNotifications); callManager.ring(); } return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::handleCellularCallerIdNotification(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { if (connectionManager->forceDismissCalls()) { return std::make_shared<CellularResponseMessage>(hangUpCall()); } auto message = static_cast<CellularCallerIdNotification *>(msg); if (phoneModeObserver->isTetheringOn()) { tetheringCalllog.push_back(CalllogRecord{CallType::CT_MISSED, message->getNubmer()}); return std::make_shared<CellularResponseMessage>(hangUpCallBusy()); } if (!callManager.isCallerInfoComplete()) { bus.sendMulticast(std::make_shared<CellularCallerIdMessage>(message->getNubmer()), sys::BusChannel::ServiceCellularNotifications); callManager.completeCallerInfo(); } return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::handleCellularSetConnectionFrequencyMessage(sys::Message *msg) -> std::shared_ptr<sys::ResponseMessage> { auto setMsg = static_cast<CellularSetConnectionFrequencyMessage *>(msg); settings->setValue(settings::Offline::connectionFrequency, std::to_string(setMsg->getConnectionFrequency()), settings::SettingsScope::Global); connectionManager->setInterval(std::chrono::minutes{setMsg->getConnectionFrequency()}); connectionManager->onPhoneModeChange(phoneModeObserver->getCurrentPhoneMode()); return std::make_shared<CellularResponseMessage>(true); } auto ServiceCellular::hangUpCall() -> bool { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel != nullptr) { if (channel->cmd(at::factory(at::AT::ATH))) { callManager.hangUp(); return true; } } LOG_ERROR("Failed to hang up call"); return false; } auto ServiceCellular::hangUpCallBusy() -> bool { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel != nullptr) { if 
(channel->cmd(at::factory(at::AT::QHUP_BUSY))) { return true; } } LOG_ERROR("Failed to hang up call"); return false; } auto ServiceCellular::tetheringTurnOffURC() -> bool { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel != nullptr) { if (!channel->cmd(at::factory(at::AT::CSQ_URC_OFF))) { LOG_ERROR("Failed to stop CSQ URC"); return false; } if (!channel->cmd(at::factory(at::AT::ACT_URC_OFF))) { LOG_ERROR("Failed to stop ACT URC"); return false; } if (!channel->cmd(at::factory(at::AT::SMS_URC_OFF))) { LOG_ERROR("Failed to stop SMS URC"); return false; } if (!channel->cmd(at::factory(at::AT::RING_URC_OFF))) { LOG_ERROR("Failed to stop RING URC"); return false; } } return true; } auto ServiceCellular::tetheringTurnOnURC() -> bool { auto channel = cmux->get(CellularMux::Channel::Commands); if (channel != nullptr) { if (!channel->cmd(at::factory(at::AT::CSQ_URC_ON))) { LOG_ERROR("Failed to start CSQ URC"); return false; } if (!channel->cmd(at::factory(at::AT::ACT_URC_ON))) { LOG_ERROR("Failed to start ACT URC"); return false; } if (!channel->cmd(at::factory(at::AT::SMS_URC_ON))) { LOG_ERROR("Failed to start SMS URC"); return false; } if (!channel->cmd(at::factory(at::AT::RING_URC_ON))) { LOG_ERROR("Failed to start RING URC"); return false; } } return true; } auto ServiceCellular::logTetheringCalls() -> void { if (!tetheringCalllog.empty()) { for (auto callRecord : tetheringCalllog) { auto call = DBServiceAPI::CalllogAdd(this, callRecord); if (call.ID == DB_ID_NONE) { LOG_ERROR("CalllogAdd failed"); } } std::vector<utils::PhoneNumber::View> numbers; for (auto calllogRecord : tetheringCalllog) { numbers.push_back(calllogRecord.phoneNumber); } DBServiceAPI::GetQuery( this, db::Interface::Name::Notifications, std::make_unique<db::query::notifications::MultipleIncrement>(NotificationsRecord::Key::Calls, numbers)); tetheringCalllog.clear(); } } TaskHandle_t ServiceCellular::getTaskHandle() { return xTaskGetCurrentTaskHandle(); }
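// ---------------------------------------------------------------------------
// Hedged illustration, not taken from ServiceCellular or its framework: a
// minimal, self-contained sketch of the request/response pattern the handlers
// above follow -- each handler receives a base message pointer, downcasts to
// the concrete request type, and returns a shared_ptr to a response object.
// All names below (DemoMessage, DemoService, DtmfRequest, ...) are
// hypothetical and exist only for this example.
// ---------------------------------------------------------------------------
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <typeindex>

struct DemoMessage { virtual ~DemoMessage() = default; };
struct DemoResponse { explicit DemoResponse(bool ok) : ok(ok) {} bool ok; };
struct DtmfRequest : DemoMessage { explicit DtmfRequest(char d) : digit(d) {} char digit; };

class DemoService
{
  public:
    DemoService()
    {
        // Register one handler per concrete message type, keyed by its dynamic type.
        handlers[typeid(DtmfRequest)] = [this](DemoMessage *msg) { return handleDtmf(msg); };
    }

    std::shared_ptr<DemoResponse> dispatch(DemoMessage *msg)
    {
        auto it = handlers.find(typeid(*msg));
        if (it == handlers.end()) {
            return std::make_shared<DemoResponse>(false); // unknown message -> failure response
        }
        return it->second(msg);
    }

  private:
    std::shared_ptr<DemoResponse> handleDtmf(DemoMessage *msg)
    {
        // Safe downcast: dispatch() only routes here for DtmfRequest objects.
        auto request = static_cast<DtmfRequest *>(msg);
        std::cout << "transmitting DTMF tone for digit " << request->digit << "\n";
        return std::make_shared<DemoResponse>(true);
    }

    std::map<std::type_index, std::function<std::shared_ptr<DemoResponse>(DemoMessage *)>> handlers;
};

int main()
{
    DemoService service;
    DtmfRequest request{'5'};
    auto response = service.dispatch(&request);
    std::cout << "handled: " << std::boolalpha << response->ok << "\n";
    return 0;
}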
// // impl/compose.hpp // ~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2020 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #ifndef BOOST_ASIO_IMPL_COMPOSE_HPP #define BOOST_ASIO_IMPL_COMPOSE_HPP #if defined(_MSC_VER) && (_MSC_VER >= 1200) # pragma once #endif // defined(_MSC_VER) && (_MSC_VER >= 1200) #include <boost/asio/detail/config.hpp> #include <boost/asio/associated_executor.hpp> #include <boost/asio/detail/handler_alloc_helpers.hpp> #include <boost/asio/detail/handler_cont_helpers.hpp> #include <boost/asio/detail/handler_invoke_helpers.hpp> #include <boost/asio/detail/type_traits.hpp> #include <boost/asio/detail/variadic_templates.hpp> #include <boost/asio/execution/executor.hpp> #include <boost/asio/execution/outstanding_work.hpp> #include <boost/asio/executor_work_guard.hpp> #include <boost/asio/is_executor.hpp> #include <boost/asio/system_executor.hpp> #include <boost/asio/detail/push_options.hpp> namespace boost { namespace asio { namespace detail { template <typename Executor, typename = void> class composed_work_guard { public: typedef typename decay< typename prefer_result<Executor, execution::outstanding_work_t::tracked_t >::type >::type executor_type; composed_work_guard(const Executor& ex) : executor_(boost::asio::prefer(ex, execution::outstanding_work.tracked)) { } void reset() { } executor_type get_executor() const BOOST_ASIO_NOEXCEPT { return executor_; } private: executor_type executor_; }; #if !defined(BOOST_ASIO_NO_TS_EXECUTORS) template <typename Executor> struct composed_work_guard<Executor, typename enable_if< !execution::is_executor<Executor>::value >::type> : executor_work_guard<Executor> { composed_work_guard(const Executor& ex) : executor_work_guard<Executor>(ex) { } }; #endif // !defined(BOOST_ASIO_NO_TS_EXECUTORS) template <typename> struct composed_io_executors; template <> struct composed_io_executors<void()> { composed_io_executors() BOOST_ASIO_NOEXCEPT : head_(system_executor()) { } typedef system_executor head_type; system_executor head_; }; inline composed_io_executors<void()> make_composed_io_executors() { return composed_io_executors<void()>(); } template <typename Head> struct composed_io_executors<void(Head)> { explicit composed_io_executors(const Head& ex) BOOST_ASIO_NOEXCEPT : head_(ex) { } typedef Head head_type; Head head_; }; template <typename Head> inline composed_io_executors<void(Head)> make_composed_io_executors(const Head& head) { return composed_io_executors<void(Head)>(head); } #if defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) template <typename Head, typename... Tail> struct composed_io_executors<void(Head, Tail...)> { explicit composed_io_executors(const Head& head, const Tail&... tail) BOOST_ASIO_NOEXCEPT : head_(head), tail_(tail...) { } void reset() { head_.reset(); tail_.reset(); } typedef Head head_type; Head head_; composed_io_executors<void(Tail...)> tail_; }; template <typename Head, typename... Tail> inline composed_io_executors<void(Head, Tail...)> make_composed_io_executors(const Head& head, const Tail&... 
tail) { return composed_io_executors<void(Head, Tail...)>(head, tail...); } #else // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) #define BOOST_ASIO_PRIVATE_COMPOSED_IO_EXECUTORS_DEF(n) \ template <typename Head, BOOST_ASIO_VARIADIC_TPARAMS(n)> \ struct composed_io_executors<void(Head, BOOST_ASIO_VARIADIC_TARGS(n))> \ { \ explicit composed_io_executors(const Head& head, \ BOOST_ASIO_VARIADIC_CONSTREF_PARAMS(n)) BOOST_ASIO_NOEXCEPT \ : head_(head), \ tail_(BOOST_ASIO_VARIADIC_BYVAL_ARGS(n)) \ { \ } \ \ void reset() \ { \ head_.reset(); \ tail_.reset(); \ } \ \ typedef Head head_type; \ Head head_; \ composed_io_executors<void(BOOST_ASIO_VARIADIC_TARGS(n))> tail_; \ }; \ \ template <typename Head, BOOST_ASIO_VARIADIC_TPARAMS(n)> \ inline composed_io_executors<void(Head, BOOST_ASIO_VARIADIC_TARGS(n))> \ make_composed_io_executors(const Head& head, \ BOOST_ASIO_VARIADIC_CONSTREF_PARAMS(n)) \ { \ return composed_io_executors< \ void(Head, BOOST_ASIO_VARIADIC_TARGS(n))>( \ head, BOOST_ASIO_VARIADIC_BYVAL_ARGS(n)); \ } \ /**/ BOOST_ASIO_VARIADIC_GENERATE(BOOST_ASIO_PRIVATE_COMPOSED_IO_EXECUTORS_DEF) #undef BOOST_ASIO_PRIVATE_COMPOSED_IO_EXECUTORS_DEF #endif // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) template <typename> struct composed_work; template <> struct composed_work<void()> { typedef composed_io_executors<void()> executors_type; composed_work(const executors_type&) BOOST_ASIO_NOEXCEPT : head_(system_executor()) { } void reset() { head_.reset(); } typedef system_executor head_type; composed_work_guard<system_executor> head_; }; template <typename Head> struct composed_work<void(Head)> { typedef composed_io_executors<void(Head)> executors_type; explicit composed_work(const executors_type& ex) BOOST_ASIO_NOEXCEPT : head_(ex.head_) { } void reset() { head_.reset(); } typedef Head head_type; composed_work_guard<Head> head_; }; #if defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) template <typename Head, typename... Tail> struct composed_work<void(Head, Tail...)> { typedef composed_io_executors<void(Head, Tail...)> executors_type; explicit composed_work(const executors_type& ex) BOOST_ASIO_NOEXCEPT : head_(ex.head_), tail_(ex.tail_) { } void reset() { head_.reset(); tail_.reset(); } typedef Head head_type; composed_work_guard<Head> head_; composed_work<void(Tail...)> tail_; }; #else // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) #define BOOST_ASIO_PRIVATE_COMPOSED_WORK_DEF(n) \ template <typename Head, BOOST_ASIO_VARIADIC_TPARAMS(n)> \ struct composed_work<void(Head, BOOST_ASIO_VARIADIC_TARGS(n))> \ { \ typedef composed_io_executors<void(Head, \ BOOST_ASIO_VARIADIC_TARGS(n))> executors_type; \ \ explicit composed_work(const executors_type& ex) BOOST_ASIO_NOEXCEPT \ : head_(ex.head_), \ tail_(ex.tail_) \ { \ } \ \ void reset() \ { \ head_.reset(); \ tail_.reset(); \ } \ \ typedef Head head_type; \ composed_work_guard<Head> head_; \ composed_work<void(BOOST_ASIO_VARIADIC_TARGS(n))> tail_; \ }; \ /**/ BOOST_ASIO_VARIADIC_GENERATE(BOOST_ASIO_PRIVATE_COMPOSED_WORK_DEF) #undef BOOST_ASIO_PRIVATE_COMPOSED_WORK_DEF #endif // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) #if defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) template <typename Impl, typename Work, typename Handler, typename Signature> class composed_op; template <typename Impl, typename Work, typename Handler, typename R, typename... 
Args> class composed_op<Impl, Work, Handler, R(Args...)> #else // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) template <typename Impl, typename Work, typename Handler, typename Signature> class composed_op #endif // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) { public: template <typename I, typename W, typename H> composed_op(BOOST_ASIO_MOVE_ARG(I) impl, BOOST_ASIO_MOVE_ARG(W) work, BOOST_ASIO_MOVE_ARG(H) handler) : impl_(BOOST_ASIO_MOVE_CAST(I)(impl)), work_(BOOST_ASIO_MOVE_CAST(W)(work)), handler_(BOOST_ASIO_MOVE_CAST(H)(handler)), invocations_(0) { } #if defined(BOOST_ASIO_HAS_MOVE) composed_op(composed_op&& other) : impl_(BOOST_ASIO_MOVE_CAST(Impl)(other.impl_)), work_(BOOST_ASIO_MOVE_CAST(Work)(other.work_)), handler_(BOOST_ASIO_MOVE_CAST(Handler)(other.handler_)), invocations_(other.invocations_) { } #endif // defined(BOOST_ASIO_HAS_MOVE) typedef typename associated_executor<Handler, typename composed_work_guard< typename Work::head_type >::executor_type >::type executor_type; executor_type get_executor() const BOOST_ASIO_NOEXCEPT { return (get_associated_executor)(handler_, work_.head_.get_executor()); } typedef typename associated_allocator<Handler, std::allocator<void> >::type allocator_type; allocator_type get_allocator() const BOOST_ASIO_NOEXCEPT { return (get_associated_allocator)(handler_, std::allocator<void>()); } #if defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) template<typename... T> void operator()(BOOST_ASIO_MOVE_ARG(T)... t) { if (invocations_ < ~0u) ++invocations_; impl_(*this, BOOST_ASIO_MOVE_CAST(T)(t)...); } void complete(Args... args) { this->work_.reset(); this->handler_(BOOST_ASIO_MOVE_CAST(Args)(args)...); } #else // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) void operator()() { if (invocations_ < ~0u) ++invocations_; impl_(*this); } void complete() { this->work_.reset(); this->handler_(); } #define BOOST_ASIO_PRIVATE_COMPOSED_OP_DEF(n) \ template<BOOST_ASIO_VARIADIC_TPARAMS(n)> \ void operator()(BOOST_ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ if (invocations_ < ~0u) \ ++invocations_; \ impl_(*this, BOOST_ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ \ template<BOOST_ASIO_VARIADIC_TPARAMS(n)> \ void complete(BOOST_ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ this->work_.reset(); \ this->handler_(BOOST_ASIO_VARIADIC_MOVE_ARGS(n)); \ } \ /**/ BOOST_ASIO_VARIADIC_GENERATE(BOOST_ASIO_PRIVATE_COMPOSED_OP_DEF) #undef BOOST_ASIO_PRIVATE_COMPOSED_OP_DEF #endif // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) //private: Impl impl_; Work work_; Handler handler_; unsigned invocations_; }; template <typename Impl, typename Work, typename Handler, typename Signature> inline asio_handler_allocate_is_deprecated asio_handler_allocate(std::size_t size, composed_op<Impl, Work, Handler, Signature>* this_handler) { #if defined(BOOST_ASIO_NO_DEPRECATED) boost_asio_handler_alloc_helpers::allocate(size, this_handler->handler_); return asio_handler_allocate_is_no_longer_used(); #else // defined(BOOST_ASIO_NO_DEPRECATED) return boost_asio_handler_alloc_helpers::allocate( size, this_handler->handler_); #endif // defined(BOOST_ASIO_NO_DEPRECATED) } template <typename Impl, typename Work, typename Handler, typename Signature> inline asio_handler_deallocate_is_deprecated asio_handler_deallocate(void* pointer, std::size_t size, composed_op<Impl, Work, Handler, Signature>* this_handler) { boost_asio_handler_alloc_helpers::deallocate( pointer, size, this_handler->handler_); #if defined(BOOST_ASIO_NO_DEPRECATED) return asio_handler_deallocate_is_no_longer_used(); #endif // defined(BOOST_ASIO_NO_DEPRECATED) } template <typename 
Impl, typename Work, typename Handler, typename Signature> inline bool asio_handler_is_continuation( composed_op<Impl, Work, Handler, Signature>* this_handler) { return this_handler->invocations_ > 1 ? true : boost_asio_handler_cont_helpers::is_continuation( this_handler->handler_); } template <typename Function, typename Impl, typename Work, typename Handler, typename Signature> inline asio_handler_invoke_is_deprecated asio_handler_invoke(Function& function, composed_op<Impl, Work, Handler, Signature>* this_handler) { boost_asio_handler_invoke_helpers::invoke( function, this_handler->handler_); #if defined(BOOST_ASIO_NO_DEPRECATED) return asio_handler_invoke_is_no_longer_used(); #endif // defined(BOOST_ASIO_NO_DEPRECATED) } template <typename Function, typename Impl, typename Work, typename Handler, typename Signature> inline asio_handler_invoke_is_deprecated asio_handler_invoke(const Function& function, composed_op<Impl, Work, Handler, Signature>* this_handler) { boost_asio_handler_invoke_helpers::invoke( function, this_handler->handler_); #if defined(BOOST_ASIO_NO_DEPRECATED) return asio_handler_invoke_is_no_longer_used(); #endif // defined(BOOST_ASIO_NO_DEPRECATED) } template <typename Signature, typename Executors> class initiate_composed_op { public: typedef typename composed_io_executors<Executors>::head_type executor_type; template <typename T> explicit initiate_composed_op(int, BOOST_ASIO_MOVE_ARG(T) executors) : executors_(BOOST_ASIO_MOVE_CAST(T)(executors)) { } executor_type get_executor() const BOOST_ASIO_NOEXCEPT { return executors_.head_; } template <typename Handler, typename Impl> void operator()(BOOST_ASIO_MOVE_ARG(Handler) handler, BOOST_ASIO_MOVE_ARG(Impl) impl) const { composed_op<typename decay<Impl>::type, composed_work<Executors>, typename decay<Handler>::type, Signature>( BOOST_ASIO_MOVE_CAST(Impl)(impl), composed_work<Executors>(executors_), BOOST_ASIO_MOVE_CAST(Handler)(handler))(); } private: composed_io_executors<Executors> executors_; }; template <typename Signature, typename Executors> inline initiate_composed_op<Signature, Executors> make_initiate_composed_op( BOOST_ASIO_MOVE_ARG(composed_io_executors<Executors>) executors) { return initiate_composed_op<Signature, Executors>(0, BOOST_ASIO_MOVE_CAST(composed_io_executors<Executors>)(executors)); } template <typename IoObject> inline typename IoObject::executor_type get_composed_io_executor(IoObject& io_object, typename enable_if< !is_executor<IoObject>::value && !execution::is_executor<IoObject>::value >::type* = 0) { return io_object.get_executor(); } template <typename Executor> inline const Executor& get_composed_io_executor(const Executor& ex, typename enable_if< is_executor<Executor>::value || execution::is_executor<Executor>::value >::type* = 0) { return ex; } } // namespace detail #if !defined(GENERATING_DOCUMENTATION) #if defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) template <typename CompletionToken, typename Signature, typename Implementation, typename... IoObjectsOrExecutors> BOOST_ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature) async_compose(BOOST_ASIO_MOVE_ARG(Implementation) implementation, BOOST_ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, BOOST_ASIO_MOVE_ARG(IoObjectsOrExecutors)... 
io_objects_or_executors) { return async_initiate<CompletionToken, Signature>( detail::make_initiate_composed_op<Signature>( detail::make_composed_io_executors( detail::get_composed_io_executor( BOOST_ASIO_MOVE_CAST(IoObjectsOrExecutors)( io_objects_or_executors))...)), token, BOOST_ASIO_MOVE_CAST(Implementation)(implementation)); } #else // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) template <typename CompletionToken, typename Signature, typename Implementation> BOOST_ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature) async_compose(BOOST_ASIO_MOVE_ARG(Implementation) implementation, BOOST_ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token) { return async_initiate<CompletionToken, Signature>( detail::make_initiate_composed_op<Signature>( detail::make_composed_io_executors()), token, BOOST_ASIO_MOVE_CAST(Implementation)(implementation)); } # define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR(n) \ BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_##n # define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_1 \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)) # define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_2 \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)) # define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_3 \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)) # define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_4 \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T4)(x4)) # define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_5 \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T4)(x4)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T5)(x5)) # define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_6 \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T4)(x4)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T5)(x5)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T6)(x6)) # define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_7 \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T4)(x4)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T5)(x5)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T6)(x6)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T7)(x7)) # define BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_8 \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T1)(x1)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T2)(x2)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T3)(x3)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T4)(x4)), \ 
detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T5)(x5)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T6)(x6)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T7)(x7)), \ detail::get_composed_io_executor(BOOST_ASIO_MOVE_CAST(T8)(x8)) #define BOOST_ASIO_PRIVATE_ASYNC_COMPOSE_DEF(n) \ template <typename CompletionToken, typename Signature, \ typename Implementation, BOOST_ASIO_VARIADIC_TPARAMS(n)> \ BOOST_ASIO_INITFN_AUTO_RESULT_TYPE(CompletionToken, Signature) \ async_compose(BOOST_ASIO_MOVE_ARG(Implementation) implementation, \ BOOST_ASIO_NONDEDUCED_MOVE_ARG(CompletionToken) token, \ BOOST_ASIO_VARIADIC_MOVE_PARAMS(n)) \ { \ return async_initiate<CompletionToken, Signature>( \ detail::make_initiate_composed_op<Signature>( \ detail::make_composed_io_executors( \ BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR(n))), \ token, BOOST_ASIO_MOVE_CAST(Implementation)(implementation)); \ } \ /**/ BOOST_ASIO_VARIADIC_GENERATE(BOOST_ASIO_PRIVATE_ASYNC_COMPOSE_DEF) #undef BOOST_ASIO_PRIVATE_ASYNC_COMPOSE_DEF #undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR #undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_1 #undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_2 #undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_3 #undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_4 #undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_5 #undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_6 #undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_7 #undef BOOST_ASIO_PRIVATE_GET_COMPOSED_IO_EXECUTOR_8 #endif // defined(BOOST_ASIO_HAS_VARIADIC_TEMPLATES) #endif // !defined(GENERATING_DOCUMENTATION) } // namespace asio } // namespace boost #include <boost/asio/detail/pop_options.hpp> #endif // BOOST_ASIO_IMPL_COMPOSE_HPP
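// ---------------------------------------------------------------------------
// Hedged usage sketch, not part of the header above: one common way to build a
// composed asynchronous operation with boost::asio::async_compose. The name
// async_wait_twice and the timing values are made up for this illustration;
// async_compose, steady_timer and io_context are the real Asio facilities.
// ---------------------------------------------------------------------------
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

// Starts two consecutive timer waits and invokes the completion handler once,
// after both have finished (or immediately after the first error).
template <typename CompletionToken>
auto async_wait_twice(boost::asio::steady_timer& timer, CompletionToken&& token)
{
    return boost::asio::async_compose<CompletionToken, void(boost::system::error_code)>(
        [&timer, state = 0](auto& self, boost::system::error_code ec = {}) mutable
        {
            if (ec || state == 2)
            {
                // A wait failed or both waits completed: finish the composed operation.
                self.complete(ec);
                return;
            }
            ++state;
            timer.expires_after(std::chrono::milliseconds(10));
            timer.async_wait(std::move(self)); // re-enters this lambda when the wait ends
        },
        token, timer);
}

int main()
{
    boost::asio::io_context ctx;
    boost::asio::steady_timer timer(ctx);

    async_wait_twice(timer, [](const boost::system::error_code& ec) {
        std::cout << "both waits done: " << ec.message() << "\n";
    });

    ctx.run();
    return 0;
}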
#include <fc/real128.hpp> #include <wallet/Config.hpp> #include <wallet/Exceptions.hpp> #include <wallet/Wallet.hpp> #include <wallet/WalletImpl.hpp> #include <blockchain/Time.hpp> #include <cli/Pretty.hpp> #include <utilities/GitRevision.hpp> #include <utilities/KeyConversion.hpp> #include <algorithm> #include <fstream> #include <thread> //#include <fc/io/json.hpp>//add for fc::json::to_pretty_string(builder) ??? #include <blockchain/ForkBlocks.hpp> #include "blockchain/ImessageOperations.hpp" #include "blockchain/Exceptions.hpp" #include "boost/filesystem/path.hpp" #include "boost/filesystem/operations.hpp" #include <blockchain/ContractOperations.hpp> #include <utilities/CommonApi.hpp> namespace thinkyoung { namespace wallet { namespace detail { WalletImpl::WalletImpl() { _num_scanner_threads = std::max(_num_scanner_threads, std::thread::hardware_concurrency()); _scanner_threads.reserve(_num_scanner_threads); for (uint32_t i = 0; i < _num_scanner_threads; ++i) _scanner_threads.push_back(std::unique_ptr<fc::thread>(new fc::thread("wallet_scanner_" + std::to_string(i)))); } WalletImpl::~WalletImpl() { } void WalletImpl::state_changed(const PendingChainStatePtr& state) { if (!self->is_open() || !self->is_unlocked()) return; const auto last_unlocked_scanned_number = self->get_last_scanned_block_number(); if (_blockchain->get_head_block_num() < last_unlocked_scanned_number) { self->set_last_scanned_block_number(_blockchain->get_head_block_num()); self->set_last_scanned_block_number_for_alp(_blockchain->get_head_block_num()); } } void WalletImpl::block_applied(const BlockSummary& summary) { if (!self->is_open() || !self->is_unlocked()) return; auto ntptime = blockchain::ntp_time(); fc::time_point t = (ntptime.valid() ? *ntp_time() : fc::time_point::now()); if ((t - _blockchain->get_head_block_timestamp()) <= fc::seconds(ALP_BLOCKCHAIN_BLOCK_INTERVAL_SEC * 2)) handle_events(summary.applied_changes->event_vector); if (!self->get_transaction_scanning()) return; if (summary.block_data.block_num <= self->get_last_scanned_block_number()) return; self->start_scan(std::min(self->get_last_scanned_block_number() + 1, summary.block_data.block_num), -1); } void WalletImpl::handle_events(const vector<EventOperation>& event_vector) { auto map_end = contract_id_event_to_script_id_vector_db.unordered_end(); for (auto i : event_vector) { ScriptRelationKey key(i.id, i.event_type); auto res = contract_id_event_to_script_id_vector_db.unordered_find(key); if (res == map_end) continue; vector < ScriptIdType > vec_for_remove = res->second; for (auto sid : res->second) { oScriptEntry script = self->get_script_entry(sid); if (!script.valid()) { auto it = vec_for_remove.begin(); while (it != vec_for_remove.end()) { if (sid == *it) { it = vec_for_remove.erase(it); } else it++; } continue; } if (script->enable == false) continue; lua::lib::GluaStateScope scope; auto code_stream = lua::api::global_glua_chain_api->get_bytestream_from_code(scope.L(), script->code); if (!code_stream) continue; try { lua::lib::add_global_bool_variable(scope.L(), "truncated", i.is_truncated); lua::lib::add_global_string_variable(scope.L(), "event_type", i.event_type.c_str()); lua::lib::add_global_string_variable(scope.L(), "param", i.event_param.c_str()); lua::lib::add_global_string_variable(scope.L(), "contract_id", i.id.AddressToString().c_str()); lua::lib::run_compiled_bytestream(scope.L(), code_stream.get()); } catch (fc::exception e) { printf("Exception:%s\n", e.to_detail_string().c_str()); } } if (vec_for_remove.size() < 
res->second.size()) contract_id_event_to_script_id_vector_db.store(key, vec_for_remove); } } vector<WalletTransactionEntry> WalletImpl::get_pending_transactions()const { return _wallet_db.get_pending_transactions(); } void WalletImpl::transfer_to_contract_trx(SignedTransaction& trx, const Address& to_contract_address, const Asset& asset_to_transfer, const Asset& asset_for_exec, const Asset& transaction_fee, const PublicKeyType& from, const map<BalanceIdType, ShareType>& balances) { trx.operations.emplace_back(TransferContractOperation(to_contract_address, asset_to_transfer, asset_for_exec, transaction_fee, from, balances)); } /* void WalletImpl::withdraw_to_transaction( const Asset& amount_to_withdraw, const string& from_account_name, SignedTransaction& trx, unordered_set<Address>& required_signatures, const Asset& amount_for_refund ) { try { FC_ASSERT(!from_account_name.empty()); bool deal_for_refund = true; auto amount_remaining = amount_for_refund; if (amount_remaining == Asset()) amount_remaining.asset_id = amount_to_withdraw.asset_id; const AccountBalanceEntrySummaryType balance_entrys = self->get_spendable_account_balance_entrys(from_account_name); if (balance_entrys.find(from_account_name) == balance_entrys.end()) FC_CAPTURE_AND_THROW(insufficient_funds, (from_account_name)(amount_to_withdraw)(balance_entrys)); for (const auto& entry : balance_entrys.at(from_account_name)) { Asset balance = entry.get_spendable_balance(_blockchain->get_pending_state()->now()); if (balance.amount <= 0 || balance.asset_id != amount_remaining.asset_id) continue; const auto owner = entry.owner(); if (!owner.valid()) continue; //先处理可以退的withdraw,再处理不可以退的withdraw if (deal_for_refund) { if (amount_remaining.amount > balance.amount) { trx.withdraw(entry.id(), balance.amount, true); required_signatures.insert(*owner); amount_remaining -= balance; } else { if (amount_remaining.amount > 0) { trx.withdraw(entry.id(), amount_remaining.amount, true); required_signatures.insert(*owner); balance -= amount_remaining; } deal_for_refund = false; amount_remaining = amount_to_withdraw - amount_for_refund; } } if (!deal_for_refund) { if (amount_remaining.amount > balance.amount) { trx.withdraw(entry.id(), balance.amount, false); required_signatures.insert(*owner); amount_remaining -= balance; } else { if (amount_remaining.amount > 0) { trx.withdraw(entry.id(), amount_remaining.amount, false); required_signatures.insert(*owner); } return; } } } const string required = _blockchain->to_pretty_asset(amount_to_withdraw); const string available = _blockchain->to_pretty_asset(amount_to_withdraw - amount_remaining); FC_CAPTURE_AND_THROW(insufficient_funds, (required)(available)(balance_entrys)); } FC_CAPTURE_AND_RETHROW((amount_to_withdraw)(from_account_name)(trx)(required_signatures)(amount_for_refund)) } */ void WalletImpl::withdraw_to_transaction( const Asset& amount_to_withdraw, const string& from_account_name, SignedTransaction& trx, unordered_set<Address>& required_signatures ) { try { FC_ASSERT(!from_account_name.empty()); auto amount_remaining = amount_to_withdraw; const AccountBalanceEntrySummaryType balance_entrys = self->get_spendable_account_balance_entrys(from_account_name); if (balance_entrys.find(from_account_name) == balance_entrys.end()) FC_CAPTURE_AND_THROW(insufficient_funds, (from_account_name)(amount_to_withdraw)(balance_entrys)); for (const auto& entry : balance_entrys.at(from_account_name)) { const Asset balance = entry.get_spendable_balance(_blockchain->get_pending_state()->now()); if (balance.amount <= 0 
|| balance.asset_id != amount_remaining.asset_id) continue; const auto owner = entry.owner(); if (!owner.valid()) continue; if (amount_remaining.amount > balance.amount) { trx.withdraw(entry.id(), balance.amount); required_signatures.insert(*owner); amount_remaining -= balance; } else { trx.withdraw(entry.id(), amount_remaining.amount); required_signatures.insert(*owner); return; } } const string required = _blockchain->to_pretty_asset(amount_to_withdraw); const string available = _blockchain->to_pretty_asset(amount_to_withdraw - amount_remaining); FC_CAPTURE_AND_THROW(insufficient_funds, (required)(available)(balance_entrys)); } FC_CAPTURE_AND_RETHROW((amount_to_withdraw)(from_account_name)(trx)(required_signatures)) } // TODO: What about retracted accounts? void WalletImpl::authorize_update(unordered_set<Address>& required_signatures, oAccountEntry account, bool need_owner_key) { oWalletKeyEntry oauthority_key = _wallet_db.lookup_key(account->owner_key); // We do this check a lot and it doesn't fit conveniently into a loop because we're interested in two types of keys. // Instead, we extract it into a function. auto accept_key = [&]()->bool { if (oauthority_key.valid() && oauthority_key->has_private_key()) { required_signatures.insert(oauthority_key->get_address()); return true; } return false; }; if (accept_key()) return; if (!need_owner_key) { oauthority_key = _wallet_db.lookup_key(account->active_address()); if (accept_key()) return; } auto dot = account->name.find('.'); while (dot != string::npos) { account = _blockchain->get_account_entry(account->name.substr(dot + 1)); FC_ASSERT(account.valid(), "Parent account is not valid; this should never happen."); oauthority_key = _wallet_db.lookup_key(account->active_address()); if (accept_key()) return; oauthority_key = _wallet_db.lookup_key(account->owner_key); if (accept_key()) return; dot = account->name.find('.'); } } SecretHashType WalletImpl::get_secret(uint32_t block_num, const PrivateKeyType& delegate_key)const { BlockIdType header_id; if (block_num != uint32_t(-1) && block_num > 1) { auto block_header = _blockchain->get_block_header(block_num - 1); header_id = block_header.id(); } fc::sha512::encoder key_enc; fc::raw::pack(key_enc, delegate_key); fc::sha512::encoder enc; fc::raw::pack(enc, key_enc.result()); fc::raw::pack(enc, header_id); return fc::ripemd160::hash(enc.result()); } void WalletImpl::reschedule_relocker() { if (!_relocker_done.valid() || _relocker_done.ready()) _relocker_done = fc::async([this]() { relocker(); }, "wallet_relocker"); } void WalletImpl::relocker() { fc::time_point now = fc::time_point::now(); ilog("Starting wallet relocker task at time: ${t}", ("t", now)); if (!_scheduled_lock_time.valid() || now >= *_scheduled_lock_time) { /* Don't relock if we have enabled delegates */ if (!self->get_my_delegates(enabled_delegate_status).empty()) { ulog("Wallet not automatically relocking because there are enabled delegates!"); return; } self->lock(); } else { if (!_relocker_done.canceled()) { ilog("Scheduling wallet relocker task for time: ${t}", ("t", *_scheduled_lock_time)); _relocker_done = fc::schedule([this]() { relocker(); }, *_scheduled_lock_time, "wallet_relocker"); } } } void WalletImpl::start_scan_task(const uint32_t start_block_num, const uint32_t limit) { try { fc::oexception scan_exception; try { const uint32_t head_block_num = _blockchain->get_head_block_num(); uint32_t current_block_num = std::max(start_block_num, 1u); uint32_t prev_block_num = current_block_num - 1; uint32_t count = 0; const bool 
track_progress = current_block_num < head_block_num && limit > 0; if (track_progress) { ulog("Beginning scan at block ${n}...", ("n", current_block_num)); _scan_progress = 0; } const auto update_progress = [=](const uint32_t count) { if (!track_progress) return; const uint32_t total = std::min({ limit, head_block_num, head_block_num - current_block_num + 1 }); if (total == 0) return; _scan_progress = float(count) / total; if (count % 10000 == 0) ulog("Scanning ${p} done...", ("p", cli::pretty_percent(_scan_progress, 1))); }; if (start_block_num == 0) { scan_balances(); scan_accounts(); scan_contracts(); } else if (_dirty_accounts) { scan_accounts(); } else if (_dirty_contracts) { scan_contracts(); } while (current_block_num <= head_block_num && count < limit && !_scan_in_progress.canceled()) { try { scan_block(current_block_num); } catch (const fc::exception& e) { elog("Error scanning block ${n}: ${e}", ("n", current_block_num)("e", e.to_detail_string())); } ++count; prev_block_num = current_block_num; self->set_last_scanned_block_number_for_alp(prev_block_num); ++current_block_num; if (count > 1) { update_progress(count); if (count % 10 == 0) fc::usleep(fc::microseconds(1)); } } self->set_last_scanned_block_number(std::max(prev_block_num, self->get_last_scanned_block_number())); if (track_progress) { _scan_progress = 1; ulog("Scan complete."); } if (_dirty_balances) scan_balances_experimental(); if (_dirty_accounts) scan_accounts(); if (_dirty_contracts) scan_contracts(); } catch (const fc::exception& e) { scan_exception = e; } if (scan_exception.valid()) { _scan_progress = -1; ulog("Scan failure."); throw *scan_exception; } } FC_CAPTURE_AND_RETHROW((start_block_num)(limit)) } void WalletImpl::upgrade_version() { const uint32_t current_version = self->get_version(); if (current_version > ALP_WALLET_VERSION) { FC_THROW_EXCEPTION(unsupported_version, "Wallet version newer than client supports!", ("wallet_version", current_version)("supported_version", ALP_WALLET_VERSION)); } else if (current_version == ALP_WALLET_VERSION) { return; } ulog("Upgrading wallet..."); std::exception_ptr upgrade_failure_exception; try { self->auto_backup("version_upgrade"); if (current_version < 100) { self->set_automatic_backups(true); self->set_transaction_scanning(self->get_my_delegates(enabled_delegate_status).empty()); /* Check for old index format genesis claim virtual transactions */ auto present = false; _blockchain->scan_balances([&](const BalanceEntry& bal_rec) { if (!bal_rec.snapshot_info.valid()) return; const auto id = bal_rec.id().addr; present |= _wallet_db.lookup_transaction(id).valid(); }); if (present) { const function<void(void)> rescan = [&]() { /* Upgrade genesis claim virtual transaction indexes */ _blockchain->scan_balances([&](const BalanceEntry& bal_rec) { if (!bal_rec.snapshot_info.valid()) return; const auto id = bal_rec.id().addr; _wallet_db.remove_transaction(id); }); scan_balances(); }; _unlocked_upgrade_tasks.push_back(rescan); } } if (current_version < 101) { /* Check for old index format market order virtual transactions */ auto present = false; const auto items = _wallet_db.get_transactions(); for (const auto& item : items) { const auto id = item.first; const auto trx_rec = item.second; if (trx_rec.is_virtual && trx_rec.is_market) { present = true; _wallet_db.remove_transaction(id); } } if (present) { const auto start = 1; const auto end = _blockchain->get_head_block_num(); /* Upgrade market order virtual transaction indexes */ //for( auto block_num = start; block_num <= end; 
block_num++ ) //{ //const auto block_timestamp = _blockchain->get_block_header( block_num ).timestamp; // const auto market_trxs = _blockchain->get_market_transactions( block_num ); //for( const auto& market_trx : market_trxs ) //scan_market_transaction( market_trx, block_num, block_timestamp ); //} } } if (current_version < 102) { self->set_transaction_fee(Asset(ALP_WALLET_DEFAULT_TRANSACTION_FEE)); self->set_transaction_imessage_fee_coe(ALP_BLOCKCHAIN_MIN_MESSAGE_FEE_COE); self->set_transaction_imessage_soft_max_length(ALP_BLOCKCHAIN_MAX_SOFT_MAX_MESSAGE_SIZE); } if (current_version < 106) { self->set_transaction_expiration(ALP_WALLET_DEFAULT_TRANSACTION_EXPIRATION_SEC); } if (current_version < 107) { const auto items = _wallet_db.get_transactions(); for (const auto& item : items) { const auto id = item.first; const auto trx_rec = item.second; if (trx_rec.is_virtual && trx_rec.is_market && trx_rec.block_num == 554801) _wallet_db.remove_transaction(id); } } if (current_version < 111) { self->set_transaction_imessage_fee_coe(ALP_BLOCKCHAIN_MIN_MESSAGE_FEE_COE); self->set_transaction_imessage_soft_max_length(ALP_BLOCKCHAIN_MAX_SOFT_MAX_MESSAGE_SIZE); const function<void(void)> repair = [&]() { self->repair_entrys(optional<string>()); self->start_scan(0, -1); }; _unlocked_upgrade_tasks.push_back(repair); } if (current_version < 112) { self->set_last_scanned_block_number_for_alp(0); } if (_unlocked_upgrade_tasks.empty()) { self->set_version(ALP_WALLET_VERSION); ulog("Wallet successfully upgraded."); } else { ulog("Please unlock your wallet to complete the upgrade..."); } } catch (...) { upgrade_failure_exception = std::current_exception(); } if (upgrade_failure_exception) { ulog("Wallet upgrade failure."); std::rethrow_exception(upgrade_failure_exception); } } void WalletImpl::upgrade_version_unlocked() { if (_unlocked_upgrade_tasks.empty()) return; ulog("Continuing wallet upgrade..."); std::exception_ptr upgrade_failure_exception; try { for (const auto& task : _unlocked_upgrade_tasks) task(); _unlocked_upgrade_tasks.clear(); self->set_version(ALP_WALLET_VERSION); ulog("Wallet successfully upgraded."); } catch (...) 
{ upgrade_failure_exception = std::current_exception(); } if (upgrade_failure_exception) { ulog("Wallet upgrade failure."); std::rethrow_exception(upgrade_failure_exception); } } void WalletImpl::create_file(const path& wallet_file_path, const string& password, const string& brainkey) { try { FC_ASSERT(self->is_enabled(), "Wallet is disabled in this client!"); if (fc::exists(wallet_file_path)) FC_THROW_EXCEPTION(wallet_already_exists, "Wallet file already exists!", ("wallet_file_path", wallet_file_path)); if (password.size() < ALP_WALLET_MIN_PASSWORD_LENGTH) FC_THROW_EXCEPTION(password_too_short, "Password too short!", ("size", password.size())); std::exception_ptr create_file_failure; try { self->close(); _wallet_db.open(wallet_file_path); _wallet_password = fc::sha512::hash(password.c_str(), password.size()); MasterKey new_master_key; ExtendedPrivateKey epk; if (!brainkey.empty()) { auto base = fc::sha512::hash(brainkey.c_str(), brainkey.size()); /* strengthen the key a bit */ for (uint32_t i = 0; i < 100ll * 1000ll; ++i) base = fc::sha512::hash(base); epk = ExtendedPrivateKey(base); } else { wlog("generating random"); epk = ExtendedPrivateKey(PrivateKeyType::generate()); } _wallet_db.set_master_key(epk, _wallet_password); self->set_version(ALP_WALLET_VERSION); self->set_automatic_backups(true); self->set_transaction_scanning(true); self->set_last_scanned_block_number(_blockchain->get_head_block_num()); self->set_last_scanned_block_number_for_alp(0); self->set_transaction_fee(Asset(ALP_WALLET_DEFAULT_TRANSACTION_FEE)); self->set_transaction_expiration(ALP_WALLET_DEFAULT_TRANSACTION_EXPIRATION_SEC); self->set_transaction_imessage_fee_coe(ALP_BLOCKCHAIN_MIN_MESSAGE_FEE_COE); self->set_transaction_imessage_soft_max_length(ALP_BLOCKCHAIN_MAX_SOFT_MAX_MESSAGE_SIZE); _wallet_db.close(); _wallet_db.open(wallet_file_path); _current_wallet_path = wallet_file_path; script_id_to_script_entry_db.close(); script_id_to_script_entry_db.open(wallet_file_path / "script_id_to_script_entry_db"); contract_id_event_to_script_id_vector_db.close(); contract_id_event_to_script_id_vector_db.open(wallet_file_path / "contract_id_event_to_script_id_vector_db"); FC_ASSERT(_wallet_db.validate_password(_wallet_password)); } catch (...) { create_file_failure = std::current_exception(); } if (create_file_failure) { self->close(); fc::remove_all(wallet_file_path); std::rethrow_exception(create_file_failure); } } FC_RETHROW_EXCEPTIONS(warn, "Unable to create wallet '${wallet_file_path}'", ("wallet_file_path", wallet_file_path)) } void WalletImpl::open_file(const path& wallet_file_path) { try { FC_ASSERT(self->is_enabled(), "Wallet is disabled in this client!"); if (!fc::exists(wallet_file_path)) FC_THROW_EXCEPTION(no_such_wallet, "No such wallet exists!", ("wallet_file_path", wallet_file_path)); if (self->is_open() && _current_wallet_path == wallet_file_path) return; std::exception_ptr open_file_failure; try { self->close(); _current_wallet_path = wallet_file_path; _wallet_db.open(wallet_file_path); upgrade_version(); self->set_data_directory(fc::absolute(wallet_file_path.parent_path())); script_id_to_script_entry_db.close(); script_id_to_script_entry_db.open(wallet_file_path / "script_id_to_script_entry_db"); contract_id_event_to_script_id_vector_db.close(); contract_id_event_to_script_id_vector_db.open(wallet_file_path / "contract_id_event_to_script_id_vector_db"); } catch (...) 
{ open_file_failure = std::current_exception(); } if (open_file_failure) { self->close(); std::rethrow_exception(open_file_failure); } } FC_RETHROW_EXCEPTIONS(warn, "Unable to open wallet ${wallet_file_path}", ("wallet_file_path", wallet_file_path)) } /** * Creates a new private key under the specified account. This key * will not be valid for sending TITAN transactions to, but will * be able to receive payments directly. */ PrivateKeyType WalletImpl::get_new_private_key(const string& account_name) { try { if (NOT self->is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (NOT self->is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); if (NOT is_receive_account(account_name)) FC_CAPTURE_AND_THROW(unknown_receive_account, (account_name)); const auto current_account = _wallet_db.lookup_account(account_name); FC_ASSERT(current_account.valid()); return _wallet_db.generate_new_account_child_key(_wallet_password, account_name); } FC_CAPTURE_AND_RETHROW((account_name)) } PublicKeyType WalletImpl::get_new_public_key(const string& account_name) { try { return get_new_private_key(account_name).get_public_key(); } FC_CAPTURE_AND_RETHROW((account_name)) } Address WalletImpl::get_new_address(const string& account_name, const string& label) { try { auto addr = Address(get_new_public_key(account_name)); auto okey = _wallet_db.lookup_key(addr); FC_ASSERT(okey.valid(), "Key I just created does not exist"); _wallet_db.store_key(*okey); return addr; } FC_CAPTURE_AND_RETHROW((account_name)) } SlateIdType WalletImpl::set_delegate_slate(SignedTransaction& transaction, const VoteStrategy strategy)const { try { SlateIdType slate_id = 0; if (strategy == vote_none) return slate_id; const SlateEntry entry = get_delegate_slate(strategy); slate_id = entry.id(); if (slate_id == 0) return slate_id; if (!_blockchain->get_slate_entry(slate_id).valid()) transaction.define_slate(entry.slate); transaction.set_slates(slate_id); return slate_id; } FC_CAPTURE_AND_RETHROW((transaction)(strategy)) } /** * Any account for which this wallet owns the active private key. */ bool WalletImpl::is_receive_account(const string& account_name)const { FC_ASSERT(self->is_open()); if (!_blockchain->is_valid_account_name(account_name)) return false; auto opt_account = _wallet_db.lookup_account(account_name); if (!opt_account.valid()) return false; auto opt_key = _wallet_db.lookup_key(opt_account->active_address()); if (!opt_key.valid()) return false; return opt_key->has_private_key(); } bool WalletImpl::is_unique_account(const string& account_name)const { //There are two possibilities here. First, the wallet has multiple entrys named account_name //Second, the wallet has a different entry named account_name than the blockchain does. //Check that the wallet has at most one account named account_name auto known_accounts = _wallet_db.get_accounts(); bool found = false; for (const auto& known_account : known_accounts) { if (known_account.second.name == account_name) { if (found) return false; found = true; } } if (!found) //The wallet does not contain an account with this name. No conflict is possible. return true; //The wallet has an account named account_name. 
Check that it matches with the blockchain auto local_account = _wallet_db.lookup_account(account_name); auto registered_account = _blockchain->get_account_entry(account_name); if (local_account && registered_account) return local_account->owner_key == registered_account->owner_key; return local_account || registered_account; } /** * Select a slate of delegates from those approved by this wallet. Specify * strategy as vote_none, vote_all, or vote_random. The slate * returned will contain no more than ALP_BLOCKCHAIN_MAX_SLATE_SIZE delegates. */ SlateEntry WalletImpl::get_delegate_slate(const VoteStrategy strategy)const { if (strategy == vote_none) return SlateEntry(); vector<AccountIdType> for_candidates; const auto account_items = _wallet_db.get_accounts(); for (const auto& item : account_items) { const auto account_entry = item.second; if (!account_entry.is_delegate() && strategy != vote_recommended) continue; if (account_entry.approved <= 0) continue; for_candidates.push_back(account_entry.id); } std::random_shuffle(for_candidates.begin(), for_candidates.end()); size_t slate_size = 0; if (strategy == vote_all) { slate_size = std::min<size_t>(ALP_BLOCKCHAIN_MAX_SLATE_SIZE, for_candidates.size()); } else if (strategy == vote_random) { slate_size = std::min<size_t>(ALP_BLOCKCHAIN_MAX_SLATE_SIZE / 3, for_candidates.size()); slate_size = rand() % (slate_size + 1); } else if (strategy == vote_recommended && for_candidates.size() < ALP_BLOCKCHAIN_MAX_SLATE_SIZE) { unordered_map<AccountIdType, int> recommended_candidate_ranks; //Tally up the recommendation count for all delegates recommended by delegates I approve of for (const auto approved_candidate : for_candidates) { oAccountEntry candidate_entry = _blockchain->get_account_entry(approved_candidate); if (!candidate_entry.valid()) continue; if (candidate_entry->is_retracted()) continue; if (!candidate_entry->public_data.is_object() || !candidate_entry->public_data.get_object().contains("slate_id")) continue; if (!candidate_entry->public_data.get_object()["slate_id"].is_uint64()) { //Delegate is doing something non-kosher with their slate_id. Disapprove of them. self->set_account_approval(candidate_entry->name, -1); continue; } oSlateEntry recommendations = _blockchain->get_slate_entry(candidate_entry->public_data.get_object()["slate_id"].as<SlateIdType>()); if (!recommendations.valid()) { //Delegate is doing something non-kosher with their slate_id. Disapprove of them. self->set_account_approval(candidate_entry->name, -1); continue; } for (const auto recommended_candidate : recommendations->slate) ++recommended_candidate_ranks[recommended_candidate]; } //Disqualify non-delegates and delegates I actively disapprove of for (const auto& acct_rec : account_items) if (!acct_rec.second.is_delegate() || acct_rec.second.approved < 0) recommended_candidate_ranks.erase(acct_rec.second.id); //Remove from rankings candidates I already approve of for (const auto approved_id : for_candidates) if (recommended_candidate_ranks.find(approved_id) != recommended_candidate_ranks.end()) recommended_candidate_ranks.erase(approved_id); //Remove non-delegates from for_candidates vector<AccountIdType> delegates; for (const auto id : for_candidates) { const oAccountEntry entry = _blockchain->get_account_entry(id); if (!entry.valid()) continue; if (!entry->is_delegate()) continue; if (entry->is_retracted()) continue; delegates.push_back(id); } for_candidates = delegates; //While I can vote for more candidates, and there are more recommendations to vote for... 
while (for_candidates.size() < ALP_BLOCKCHAIN_MAX_SLATE_SIZE && recommended_candidate_ranks.size() > 0) { int best_rank = 0; AccountIdType best_ranked_candidate; //Add highest-ranked candidate to my list to vote for and remove him from rankings for (const auto& ranked_candidate : recommended_candidate_ranks) if (ranked_candidate.second > best_rank) { best_rank = ranked_candidate.second; best_ranked_candidate = ranked_candidate.first; } for_candidates.push_back(best_ranked_candidate); recommended_candidate_ranks.erase(best_ranked_candidate); } slate_size = for_candidates.size(); } SlateEntry entry; for (const AccountIdType id : for_candidates) entry.slate.insert(id); FC_ASSERT(entry.slate.size() <= ALP_BLOCKCHAIN_MAX_SLATE_SIZE); return entry; } } // thinkyoung::Wallet::detail Wallet::Wallet(ChainDatabasePtr blockchain, bool enabled) : my(new detail::WalletImpl()) { my->self = this; my->_is_enabled = enabled; my->_blockchain = blockchain; my->_blockchain->add_observer(my.get()); this->_generating_block = false; } Wallet::~Wallet() { close(); } void Wallet::set_data_directory(const path& data_dir) { my->_data_directory = data_dir; } path Wallet::get_data_directory()const { return my->_data_directory; } WalletDb& Wallet::get_wallet_db() const { return my->_wallet_db; } void Wallet::scan_contracts() { my->scan_contracts(); } void Wallet::create(const string& wallet_name, const string& password, const string& brainkey) { try { FC_ASSERT(is_enabled(), "Wallet is disabled in this client!"); if (!my->_blockchain->is_valid_account_name(wallet_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid name for a wallet!", ("wallet_name", wallet_name)); auto wallet_file_path = fc::absolute(get_data_directory()) / wallet_name; if (fc::exists(wallet_file_path)) FC_THROW_EXCEPTION(wallet_already_exists, "Wallet name already exists!", ("wallet_name", wallet_name)); if (password.size() < ALP_WALLET_MIN_PASSWORD_LENGTH) FC_THROW_EXCEPTION(password_too_short, "Password too short!", ("size", password.size())); std::exception_ptr wallet_create_failure; try { my->create_file(wallet_file_path, password, brainkey); open(wallet_name); unlock(password, ALP_WALLET_DEFAULT_UNLOCK_TIME_SEC); } catch (...) { wallet_create_failure = std::current_exception(); } if (wallet_create_failure) { close(); std::rethrow_exception(wallet_create_failure); } } FC_RETHROW_EXCEPTIONS(warn, "Unable to create wallet '${wallet_name}'", ("wallet_name", wallet_name)) } void Wallet::open(const string& wallet_name) { try { FC_ASSERT(is_enabled(), "Wallet is disabled in this client!"); if (!my->_blockchain->is_valid_account_name(wallet_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid name for a wallet!", ("wallet_name", wallet_name)); auto wallet_file_path = fc::absolute(get_data_directory()) / wallet_name; if (!fc::exists(wallet_file_path)) FC_THROW_EXCEPTION(no_such_wallet, "No such wallet exists!", ("wallet_name", wallet_name)); std::exception_ptr open_file_failure; try { my->open_file(wallet_file_path); } catch (...) 
{ open_file_failure = std::current_exception(); } if (open_file_failure) { close(); std::rethrow_exception(open_file_failure); } my->scan_accounts(); my->scan_balances_experimental(); my->scan_contracts(); } FC_CAPTURE_AND_RETHROW((wallet_name)) } void Wallet::close() { try { lock(); try { ilog("Canceling wallet relocker task..."); my->_relocker_done.cancel_and_wait("Wallet::close()"); ilog("Wallet relocker task canceled"); } catch (const fc::exception& e) { wlog("Unexpected exception from wallet's relocker() : ${e}", ("e", e)); } catch (...) { wlog("Unexpected exception from wallet's relocker()"); } my->_balance_entrys.clear(); my->_dirty_balances = true; my->_wallet_db.close(); my->script_id_to_script_entry_db.close(); my->contract_id_event_to_script_id_vector_db.close(); my->_current_wallet_path = fc::path(); } FC_CAPTURE_AND_RETHROW() } bool Wallet::is_enabled() const { return my->_is_enabled; } bool Wallet::is_open()const { return my->_wallet_db.is_open(); } string Wallet::get_wallet_name()const { return my->_current_wallet_path.filename().generic_string(); } void Wallet::export_to_json(const path& filename)const { try { if (fc::exists(filename)) FC_THROW_EXCEPTION(file_already_exists, "Filename to export to already exists!", ("filename", filename)); if (filename == "") FC_THROW_EXCEPTION(filename_not_regular, "Filename isn't a regular name!", ("filename", filename)); FC_ASSERT(is_open(), "Wallet not open!"); my->_wallet_db.export_to_json(filename); } FC_CAPTURE_AND_RETHROW((filename)) } void Wallet::create_from_json(const path& filename, const string& wallet_name, const string& passphrase) { try { FC_ASSERT(is_enabled(), "Wallet is disabled in this client!"); if (!fc::exists(filename)) FC_THROW_EXCEPTION(file_not_found, "Filename to import from could not be found!", ("filename", filename)); if (!my->_blockchain->is_valid_account_name(wallet_name)) FC_THROW_EXCEPTION(invalid_wallet_name, "Invalid name for a wallet!", ("wallet_name", wallet_name)); create(wallet_name, passphrase); std::exception_ptr import_failure; try { set_version(0); my->_wallet_db.import_from_json(filename); close(); open(wallet_name); unlock(passphrase, ALP_WALLET_DEFAULT_UNLOCK_TIME_SEC); } catch (...) 
{ import_failure = std::current_exception(); } if (import_failure) { close(); fc::path wallet_file_path = fc::absolute(get_data_directory()) / wallet_name; fc::remove_all(wallet_file_path); std::rethrow_exception(import_failure); } } FC_CAPTURE_AND_RETHROW((filename)(wallet_name)) } void Wallet::auto_backup(const string& reason)const { try { if (!get_automatic_backups()) return; ulog("Backing up wallet..."); fc::path wallet_path = my->_current_wallet_path; std::string wallet_name = wallet_path.filename().string(); fc::path wallet_dir = wallet_path.parent_path(); fc::path backup_path; while (true) { fc::time_point_sec now(time_point::now()); std::string backup_filename = wallet_name + "-" + now.to_non_delimited_iso_string(); if (!reason.empty()) backup_filename += "-" + reason; backup_filename += ".json"; backup_path = wallet_dir / ".backups" / wallet_name / backup_filename; if (!fc::exists(backup_path)) break; fc::usleep(fc::seconds(1)); } export_to_json(backup_path); ulog("Wallet automatically backed up to: ${f}", ("f", backup_path)); } FC_CAPTURE_AND_RETHROW() } void Wallet::set_version(uint32_t v) { try { FC_ASSERT(is_open(), "Wallet not open!"); my->_wallet_db.set_property(version, variant(v)); } FC_CAPTURE_AND_RETHROW() } uint32_t Wallet::get_version()const { try { FC_ASSERT(is_open(), "Wallet not open!"); try { return my->_wallet_db.get_property(version).as<uint32_t>(); } catch (...) { } return 0; } FC_CAPTURE_AND_RETHROW() } void Wallet::set_automatic_backups(bool enabled) { try { FC_ASSERT(is_open(), "Wallet not open!"); my->_wallet_db.set_property(automatic_backups, variant(enabled)); } FC_CAPTURE_AND_RETHROW() } bool Wallet::get_automatic_backups()const { try { FC_ASSERT(is_open(), "Wallet not open!"); try { return my->_wallet_db.get_property(automatic_backups).as<bool>(); } catch (...) { } return true; } FC_CAPTURE_AND_RETHROW() } void Wallet::set_transaction_scanning(bool enabled) { try { FC_ASSERT(is_open(), "Wallet not open!"); my->_wallet_db.set_property(transaction_scanning, variant(enabled)); } FC_CAPTURE_AND_RETHROW() } bool Wallet::get_transaction_scanning()const { try { FC_ASSERT(is_open(), "Wallet not open!"); if (list_my_accounts().empty()) return false; try { return my->_wallet_db.get_property(transaction_scanning).as<bool>(); } catch (...) 
{ } return true; } FC_CAPTURE_AND_RETHROW() } void Wallet::unlock(const string& password, uint32_t timeout_seconds) { try { std::exception_ptr unlock_error; try { FC_ASSERT(is_open(), "Need open"); if (timeout_seconds < 1) FC_THROW_EXCEPTION(invalid_timeout, "Invalid timeout!"); const uint32_t max_timeout_seconds = std::numeric_limits<uint32_t>::max() - thinkyoung::blockchain::now().sec_since_epoch(); if (timeout_seconds > max_timeout_seconds) FC_THROW_EXCEPTION(invalid_timeout, "Timeout too large!", ("max_timeout_seconds", max_timeout_seconds)); fc::time_point now = fc::time_point::now(); fc::time_point new_lock_time = now + fc::seconds(timeout_seconds); if (new_lock_time.sec_since_epoch() <= now.sec_since_epoch()) FC_THROW_EXCEPTION(invalid_timeout, "Invalid timeout!"); if (password.size() < ALP_WALLET_MIN_PASSWORD_LENGTH) FC_THROW_EXCEPTION(password_too_short, "Invalid password!"); my->_wallet_password = fc::sha512::hash(password.c_str(), password.size()); if (!my->_wallet_db.validate_password(my->_wallet_password)) FC_THROW_EXCEPTION(invalid_password, "Invalid password!"); my->upgrade_version_unlocked(); my->_scheduled_lock_time = new_lock_time; ilog("Wallet unlocked at time: ${t}", ("t", fc::time_point_sec(now))); my->reschedule_relocker(); wallet_lock_state_changed(false); ilog("Wallet unlocked until time: ${t}", ("t", fc::time_point_sec(*my->_scheduled_lock_time))); my->scan_accounts(); } catch (...) { unlock_error = std::current_exception(); } if (unlock_error) { lock(); std::rethrow_exception(unlock_error); } } FC_CAPTURE_AND_RETHROW((timeout_seconds)) } void Wallet::lock() { cancel_scan(); try { my->_login_map_cleaner_done.cancel_and_wait("wallet::lock()"); } catch (const fc::exception& e) { wlog("Unexpected exception from wallet's login_map_cleaner() : ${e}", ("e", e)); } catch (...) 
{ wlog("Unexpected exception from wallet's login_map_cleaner()"); } my->_stealth_private_keys.clear(); my->_dirty_accounts = true; my->_dirty_contracts = true; my->_wallet_password = fc::sha512(); my->_scheduled_lock_time = fc::optional<fc::time_point>(); wallet_lock_state_changed(true); ilog("Wallet locked at time: ${t}", ("t", blockchain::now())); if (my->_blockchain->get_is_in_sandbox()) { my->_blockchain->set_is_in_sandbox(false); my->_blockchain->clear_sandbox_pending_state(); } } bool Wallet::check_passphrase(const string& passphrase) { try { if (NOT is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (NOT is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); if (passphrase.size() < ALP_WALLET_MIN_PASSWORD_LENGTH) FC_CAPTURE_AND_THROW(password_too_short); fc::sha512 password_input = fc::sha512::hash(passphrase.c_str(), passphrase.size()); return my->_wallet_db.validate_password(password_input); } FC_CAPTURE_AND_RETHROW() } void Wallet::change_passphrase(const string & old_passphrase, const string& new_passphrase) { try { if (NOT is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (NOT is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); if (new_passphrase.size() < ALP_WALLET_MIN_PASSWORD_LENGTH) FC_CAPTURE_AND_THROW(password_too_short); fc::sha512 password_input = fc::sha512::hash(old_passphrase.c_str(), old_passphrase.size()); if (!my->_wallet_db.validate_password(password_input)) { FC_THROW_EXCEPTION(invalid_password, "Invalid password!"); } auto new_password = fc::sha512::hash(new_passphrase.c_str(), new_passphrase.size()); my->_wallet_db.change_password(my->_wallet_password, new_password); my->_wallet_password = new_password; my->_dirty_accounts = true; } FC_CAPTURE_AND_RETHROW() } bool Wallet::is_unlocked()const { FC_ASSERT(is_open(), "Wallet not open!"); return !Wallet::is_locked(); } bool Wallet::is_locked()const { FC_ASSERT(is_open(), "Wallet not open!"); return my->_wallet_password == fc::sha512(); } fc::optional<fc::time_point_sec> Wallet::unlocked_until()const { FC_ASSERT(is_open(), "Wallet not open!"); return my->_scheduled_lock_time ? 
*my->_scheduled_lock_time : fc::optional<fc::time_point_sec>(); } void Wallet::set_setting(const string& name, const variant& value) { my->_wallet_db.store_setting(name, value); } fc::optional<variant> Wallet::get_setting(const string& name)const { return my->_wallet_db.lookup_setting(name); } bool Wallet::delete_account(const string& account_name) { FC_ASSERT(is_open()); FC_ASSERT(is_unlocked()); const auto current_account = my->_wallet_db.lookup_account(account_name); if (!current_account.valid()) FC_THROW_EXCEPTION(invalid_name, "No account with this name in wallet!"); my->_wallet_db.remove_account(account_name); start_scan(0, -1); return true; } PublicKeyType Wallet::create_account(const string& account_name, const variant& private_data) { try { if (!my->_blockchain->is_valid_account_name(account_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid account name!", ("account_name", account_name)); FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!!"); const auto num_accounts_before = list_my_accounts().size(); const auto current_account = my->_wallet_db.lookup_account(account_name); if (current_account.valid()) FC_THROW_EXCEPTION(invalid_name, "This name is already in your wallet!"); const auto existing_registered_account = my->_blockchain->get_account_entry(account_name); if (existing_registered_account.valid()) FC_THROW_EXCEPTION(invalid_name, "This name is already registered with the blockchain!"); const PublicKeyType account_public_key = my->_wallet_db.generate_new_account(my->_wallet_password, account_name, private_data); if (num_accounts_before == 0) { set_last_scanned_block_number(my->_blockchain->get_head_block_num()); set_last_scanned_block_number_for_alp(0); } my->_dirty_accounts = true; return account_public_key; } FC_CAPTURE_AND_RETHROW((account_name)) } void Wallet::account_set_favorite(const string& account_name, const bool is_favorite) { try { FC_ASSERT(is_open(), "Wallet not open!"); auto judged_account = my->_wallet_db.lookup_account(account_name); if (!judged_account.valid()) { const auto blockchain_acct = my->_blockchain->get_account_entry(account_name); if (!blockchain_acct.valid()) FC_THROW_EXCEPTION(unknown_account, "Unknown account name!"); add_contact_account(account_name, blockchain_acct->owner_key); return account_set_favorite(account_name, is_favorite); } judged_account->is_favorite = is_favorite; my->_wallet_db.store_account(*judged_account); } FC_CAPTURE_AND_RETHROW((account_name)(is_favorite)) } /** * A contact is an account for which this wallet does not have the private * key. If account_name is globally registered then this call will assume * it is the same account and will fail if the key is not the same. * * @param account_name - the name the account is known by to this wallet * @param key - the public key that will be used for sending TITAN transactions * to the account. */ void Wallet::add_contact_account(const string& account_name, const PublicKeyType& key, const variant& private_data) { try { FC_ASSERT(is_open(), "Wallet not open!"); if (!my->_blockchain->is_valid_account_name(account_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid account name!", ("account_name", account_name)); auto current_registered_account = my->_blockchain->get_account_entry(account_name); if (current_registered_account.valid() && current_registered_account->owner_key != key) FC_THROW_EXCEPTION(invalid_name, "Account name is already registered under a different key! 
Provided: ${p}, registered: ${r}", ("p", key)("r", current_registered_account->active_key())); if (current_registered_account.valid() && current_registered_account->is_retracted()) FC_CAPTURE_AND_THROW(account_retracted, (current_registered_account)); auto current_account = my->_wallet_db.lookup_account(account_name); if (current_account.valid()) { wlog("current account is valid... ${account}", ("account", *current_account)); FC_ASSERT(current_account->owner_address() == Address(key), "Account with ${name} already exists", ("name", account_name)); if (current_registered_account.valid()) { blockchain::AccountEntry& as_blockchain_account_entry = *current_account; as_blockchain_account_entry = *current_registered_account; } if (!private_data.is_null()) current_account->private_data = private_data; my->_wallet_db.store_account(*current_account); return; } else { current_account = my->_wallet_db.lookup_account(Address(key)); if (current_account.valid()) { if (current_account->name != account_name) FC_THROW_EXCEPTION(duplicate_key, "Provided key already belongs to another wallet account! Provided: ${p}, existing: ${e}", ("p", account_name)("e", current_account->name)); if (current_registered_account.valid()) { blockchain::AccountEntry& as_blockchain_account_entry = *current_account; as_blockchain_account_entry = *current_registered_account; } if (!private_data.is_null()) current_account->private_data = private_data; my->_wallet_db.store_account(*current_account); return; } if (!current_registered_account.valid()) { const time_point_sec now = blockchain::now(); current_registered_account = AccountEntry(); current_registered_account->name = account_name; current_registered_account->owner_key = key; current_registered_account->set_active_key(now, key); current_registered_account->last_update = now; } my->_wallet_db.add_contact_account(*current_registered_account, private_data); } } FC_CAPTURE_AND_RETHROW((account_name)(key)) } // TODO: This function is sometimes used purely for error checking of the account_name; refactor WalletAccountEntry Wallet::get_account(const string& account_name)const { try { FC_ASSERT(is_open(), "Wallet not open!"); if (!my->_blockchain->is_valid_account_name(account_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid account name!", ("account_name", account_name)); auto local_account = my->_wallet_db.lookup_account(account_name); auto chain_account = my->_blockchain->get_account_entry(account_name); if (!local_account.valid() && !chain_account.valid()) FC_THROW_EXCEPTION(unknown_account, "Unknown account name!", ("account_name", account_name)); if (local_account.valid() && chain_account.valid()) { if (local_account->owner_key != chain_account->owner_key) { wlog("local account is owned by someone different public key than blockchain account"); wdump((local_account)(chain_account)); } } if (chain_account.valid()) { my->_wallet_db.store_account(*chain_account); local_account = my->_wallet_db.lookup_account(account_name); } FC_ASSERT(local_account.valid()); return *local_account; } FC_CAPTURE_AND_RETHROW() } void Wallet::accountsplit(const string & original, string & to_account, string & sub_account) { if (original.size() < 66) { to_account = original; return; } to_account = original.substr(0, original.size() - 32); sub_account = original.substr(original.size() - 32); if (INVALIDE_SUB_ADDRESS == sub_account) { sub_account = ""; } else { sub_account = original; } } bool Wallet::is_valid_account_name(const string & accountname) { try { return 
my->_blockchain->is_valid_account_name(accountname); } FC_CAPTURE_AND_RETHROW() } void Wallet::remove_contact_account(const string& account_name) { try { if (!my->is_unique_account(account_name)) FC_THROW_EXCEPTION(duplicate_account_name, "Local account name conflicts with registered name. Please rename your local account first.", ("account_name", account_name)); const auto oaccount = my->_wallet_db.lookup_account(account_name); if (my->_wallet_db.has_private_key(Address(oaccount->owner_key))) FC_THROW_EXCEPTION(not_contact_account, "You can only remove contact accounts!", ("account_name", account_name)); my->_wallet_db.remove_contact_account(account_name); } FC_CAPTURE_AND_RETHROW((account_name)) } void Wallet::rename_account(const string& old_account_name, const string& new_account_name) { try { FC_ASSERT(is_open(), "Wallet not open!"); if (!my->_blockchain->is_valid_account_name(old_account_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid old account name!", ("old_account_name", old_account_name)); if (!my->_blockchain->is_valid_account_name(new_account_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid new account name!", ("new_account_name", new_account_name)); optional<PublicKeyType> old_key; auto registered_account = my->_blockchain->get_account_entry(old_account_name); bool have_registered = false; for (const auto& item : my->_wallet_db.get_accounts()) { const WalletAccountEntry& local_account = item.second; if (local_account.name != old_account_name) continue; if (!registered_account.valid() || registered_account->owner_key != local_account.owner_key) { old_key = local_account.owner_key; break; } have_registered |= registered_account.valid() && registered_account->owner_key == local_account.owner_key; } if (!old_key.valid()) { if (registered_account.valid()) FC_THROW_EXCEPTION(key_already_registered, "You cannot rename a registered account!"); FC_THROW_EXCEPTION(unknown_account, "Unknown account name!", ("old_account_name", old_account_name)); } registered_account = my->_blockchain->get_account_entry(*old_key); if (registered_account.valid() && registered_account->name != new_account_name) { FC_THROW_EXCEPTION(key_already_registered, "That account is already registered to a different name!", ("desired_name", new_account_name)("registered_name", registered_account->name)); } registered_account = my->_blockchain->get_account_entry(new_account_name); if (registered_account.valid()) FC_THROW_EXCEPTION(duplicate_account_name, "Your new account name is already registered!"); const auto new_account = my->_wallet_db.lookup_account(new_account_name); if (new_account.valid()) FC_THROW_EXCEPTION(duplicate_account_name, "You already have the new account name in your wallet!"); my->_wallet_db.rename_account(*old_key, new_account_name); my->_dirty_accounts = true; } FC_CAPTURE_AND_RETHROW((old_account_name)(new_account_name)) } bool Wallet::friendly_import_private_key(const PrivateKeyType& key, const string& account_name) { try { const auto addr = Address(key.get_public_key()); try { get_private_key(addr); // We already have this key and import_private_key would fail if we tried. Do nothing. 
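// (get_private_key() throws for unknown keys, so reaching this point means the key is already imported.)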
return false; } catch (const fc::exception&) { } const oAccountEntry blockchain_account_entry = my->_blockchain->get_account_entry(addr); if (blockchain_account_entry.valid() && blockchain_account_entry->name != account_name) { // This key exists on the blockchain and I don't have it - don't associate it with a name when you import it import_private_key(key, optional<string>(), false); } else { import_private_key(key, account_name, false); } return true; } FC_CAPTURE_AND_RETHROW((account_name)) } PublicKeyType Wallet::import_private_key(const PrivateKeyType& new_private_key, const optional<string>& account_name, bool create_account) { try { if (NOT is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (NOT is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); const PublicKeyType new_public_key = new_private_key.get_public_key(); const Address new_address = Address(new_public_key); // Try to associate with an existing registered account const oAccountEntry blockchain_account_entry = my->_blockchain->get_account_entry(new_address); if (blockchain_account_entry.valid()) { if (account_name.valid()) { FC_ASSERT(*account_name == blockchain_account_entry->name, "That key already belongs to a registered account with a different name!", ("blockchain_account_entry", *blockchain_account_entry) ("account_name", *account_name)); } my->_wallet_db.store_account(*blockchain_account_entry); my->_wallet_db.import_key(my->_wallet_password, blockchain_account_entry->name, new_private_key, true); PrivateKeyType active_private_key; PublicKeyType active_public_key; Address active_address; active_private_key = my->_wallet_db.get_account_child_key(new_private_key, 0); active_public_key = active_private_key.get_public_key(); active_address = Address(active_public_key); KeyData active_key; active_key.account_address = Address(new_public_key); active_key.public_key = active_public_key; active_key.encrypt_private_key(my->_wallet_password, active_private_key); my->_wallet_db.store_key(active_key); oWalletAccountEntry account_entry = my->_wallet_db.lookup_account(blockchain_account_entry->name); FC_ASSERT(account_entry.valid(), "Account name not found!"); account_entry->set_active_key(blockchain::now(), active_public_key); account_entry->last_update = blockchain::now(); my->_wallet_db.store_account(*account_entry); return new_public_key; } // Try to associate with an existing local account oWalletAccountEntry account_entry = my->_wallet_db.lookup_account(new_address); if (account_entry.valid()) { if (account_name.valid()) { FC_ASSERT(*account_name == account_entry->name, "That key already belongs to a local account with a different name!", ("account_entry", *account_entry) ("account_name", *account_name)); } my->_wallet_db.import_key(my->_wallet_password, account_entry->name, new_private_key, true); return new_public_key; } FC_ASSERT(account_name.valid(), "Unknown key! 
You must specify an account name!"); // Check if key is already associated with an existing local account const oWalletKeyEntry key_entry = my->_wallet_db.lookup_key(new_address); if (key_entry.valid() && key_entry->has_private_key()) { account_entry = my->_wallet_db.lookup_account(key_entry->account_address); if (account_entry.valid()) { FC_ASSERT(*account_name == account_entry->name, "That key already belongs to a local account with a different name!", ("account_entry", *account_entry) ("account_name", *account_name)); } my->_wallet_db.import_key(my->_wallet_password, account_entry->name, new_private_key, true); return new_public_key; } account_entry = my->_wallet_db.lookup_account(*account_name); if (!account_entry.valid()) { FC_ASSERT(create_account, "Could not find an account with that name!", ("account_name", *account_name)); // TODO: Replace with wallet_db.add_account add_contact_account(*account_name, new_public_key); account_entry = my->_wallet_db.lookup_account(*account_name); FC_ASSERT(account_entry.valid(), "Error creating new account!"); } my->_wallet_db.import_key(my->_wallet_password, account_entry->name, new_private_key, true); return new_public_key; } FC_CAPTURE_AND_RETHROW((account_name)(create_account)) } PublicKeyType Wallet::import_wif_private_key(const string& wif_key, const optional<string>& account_name, bool create_account) { try { FC_ASSERT(is_open(), "Need open"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto key = thinkyoung::utilities::wif_to_key(wif_key); if (key.valid()) { import_private_key(*key, account_name, create_account); my->_dirty_accounts = true; return key->get_public_key(); } FC_ASSERT(false, "Error parsing WIF private key"); } FC_CAPTURE_AND_RETHROW((account_name)) } void Wallet::start_scan(const uint32_t start_block_num, const uint32_t limit) { try { if (NOT is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (NOT is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); if (my->_scan_in_progress.valid() && !my->_scan_in_progress.ready()) return; if (!get_transaction_scanning()) { my->_scan_progress = 1; ulog("Wallet transaction scanning is disabled!"); return; } const auto scan_chain_task = [=]() { my->start_scan_task(start_block_num, limit); }; my->_scan_in_progress = fc::async(scan_chain_task, "scan_chain_task"); my->_scan_in_progress.on_complete([](fc::exception_ptr ep) { if (ep) elog("Error during scanning: ${e}", ("e", ep->to_detail_string())); }); } FC_CAPTURE_AND_RETHROW((start_block_num)(limit)) } void Wallet::cancel_scan() { try { try { ilog("Canceling wallet scan_chain_task..."); my->_scan_in_progress.cancel_and_wait("wallet::cancel_scan()"); ilog("Wallet scan_chain_task canceled..."); } catch (const fc::exception& e) { wlog("Unexpected exception from wallet's scan_chain_task: ${e}", ("e", e.to_detail_string())); } catch (...) 
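// Catch-all so cancel_scan() never lets an exception from the scan task escape to the caller.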
{ wlog("Unexpected exception from wallet's scan_chain_task"); } my->_scan_progress = 1; } FC_CAPTURE_AND_RETHROW() } vector<string> Wallet::get_contracts(const string &account_name) { try { vector<string> result; // get all the contract in the accounts of the wallet auto id_account = my->_wallet_db.get_id_contract_map(); if ("" == account_name) { for (auto iter = id_account.begin(); iter != id_account.end(); ++iter) { result.push_back(iter->first.AddressToString(AddressType::contract_address)); } } else { oWalletAccountEntry account_entry = my->_wallet_db.lookup_account(account_name); if (!account_entry.valid()) FC_THROW_EXCEPTION(invalid_account_name, "the account is not in this wallet!"); for (auto iter = id_account.begin(); iter != id_account.end(); ++iter) { if (iter->second.owner == account_entry->owner_key) result.push_back(iter->first.AddressToString(AddressType::contract_address)); } } return result; } FC_CAPTURE_AND_RETHROW() } VoteSummary Wallet::get_vote_status(const string& account_name) { uint64_t total_possible = 0; uint64_t total = 0; auto summary = VoteSummary(); summary.up_to_date_with_recommendation = true; auto my_slate = my->get_delegate_slate(vote_recommended); const AccountBalanceEntrySummaryType items = get_spendable_account_balance_entrys(account_name); for (const auto& item : items) { const auto& entrys = item.second; for (const auto& entry : entrys) { if (entry.asset_id() != 0) continue; if (entry.slate_id() != my_slate.id() && entry.balance > 1 * ALP_BLOCKCHAIN_PRECISION) // ignore dust summary.up_to_date_with_recommendation = false; auto oslate = my->_blockchain->get_slate_entry(entry.slate_id()); if (oslate.valid()) total += entry.get_spendable_balance(my->_blockchain->now()).amount * oslate->slate.size(); total_possible += entry.get_spendable_balance(my->_blockchain->now()).amount * ALP_BLOCKCHAIN_MAX_SLATE_SIZE; } } if (total_possible == 0) summary.utilization = 0; else summary.utilization = float(total) / float(total_possible); summary.negative_utilization = 0; return summary; } PrivateKeyType Wallet::get_private_key(const Address& addr)const { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto key = my->_wallet_db.lookup_key(addr); FC_ASSERT(key.valid()); FC_ASSERT(key->has_private_key()); return key->decrypt_private_key(my->_wallet_password); } FC_CAPTURE_AND_RETHROW((addr)) } PublicKeyType Wallet::get_public_key(const Address& addr) const { FC_ASSERT(is_open(), "Wallet not open!"); auto key = my->_wallet_db.lookup_key(addr); FC_ASSERT(key.valid(), "No known key for this address."); return key->public_key; } bool Wallet::wallet_get_delegate_statue(const string & account) { FC_ASSERT(is_open()); auto delegate_entry = get_account(account); FC_ASSERT(delegate_entry.is_delegate(), "${name} is not a delegate.", ("name", account)); auto key = my->_wallet_db.lookup_key(delegate_entry.signing_address()); FC_ASSERT(key.valid() && key->has_private_key(), "Unable to find private key for ${name}.", ("name", account)); return delegate_entry.block_production_enabled; } void Wallet::set_delegate_block_production(const string& delegate_name, bool enabled) { FC_ASSERT(is_open(), "Wallet not open!"); std::vector<WalletAccountEntry> delegate_entrys; const auto empty_before = get_my_delegates(enabled_delegate_status).empty(); if (delegate_name != "ALL") { if (!my->_blockchain->is_valid_account_name(delegate_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid delegate name!", ("delegate_name", delegate_name)); auto delegate_entry 
= get_account(delegate_name); FC_ASSERT(delegate_entry.is_delegate(), "${name} is not a delegate.", ("name", delegate_name)); auto key = my->_wallet_db.lookup_key(delegate_entry.signing_address()); FC_ASSERT(key.valid() && key->has_private_key(), "Unable to find private key for ${name}.", ("name", delegate_name)); delegate_entrys.push_back(delegate_entry); } else { delegate_entrys = get_my_delegates(any_delegate_status); } for (auto& delegate_entry : delegate_entrys) { delegate_entry.block_production_enabled = enabled; my->_wallet_db.store_account(delegate_entry); } const auto empty_after = get_my_delegates(enabled_delegate_status).empty(); if (empty_before == empty_after) { return; } else if (empty_before) { ulog("Wallet transaction scanning has been automatically disabled due to enabled delegates!"); set_transaction_scanning(false); } else { /* if( empty_after ) */ ulog("Wallet transaction scanning has been automatically re-enabled!"); set_transaction_scanning(true); } } vector<WalletAccountEntry> Wallet::get_my_delegates(uint32_t delegates_to_retrieve)const { FC_ASSERT(is_open(), "Wallet not open!"); vector<WalletAccountEntry> delegate_entrys; const auto& account_entrys = list_my_accounts(); for (const auto& account_entry : account_entrys) { if (!account_entry.is_delegate()) continue; if (delegates_to_retrieve & enabled_delegate_status && !account_entry.block_production_enabled) continue; if (delegates_to_retrieve & disabled_delegate_status && account_entry.block_production_enabled) continue; if (delegates_to_retrieve & active_delegate_status && !my->_blockchain->is_active_delegate(account_entry.id)) continue; if (delegates_to_retrieve & inactive_delegate_status && my->_blockchain->is_active_delegate(account_entry.id)) continue; delegate_entrys.push_back(account_entry); } return delegate_entrys; } optional<time_point_sec> Wallet::get_next_producible_block_timestamp(const vector<WalletAccountEntry>& delegate_entrys)const { try { if (!is_open() || is_locked()) return optional<time_point_sec>(); vector<AccountIdType> delegate_ids; delegate_ids.reserve(delegate_entrys.size()); for (const auto& delegate_entry : delegate_entrys) delegate_ids.push_back(delegate_entry.id); return my->_blockchain->get_next_producible_block_timestamp(delegate_ids); } FC_CAPTURE_AND_RETHROW() } fc::ecc::compact_signature Wallet::sign_hash(const string& signer, const fc::sha256& hash)const { try { auto key = PublicKeyType(signer); auto privkey = get_private_key(Address(key)); return privkey.sign_compact(hash); } catch (...) { try { auto addr = Address(signer); auto privkey = get_private_key(addr); return privkey.sign_compact(hash); } catch (...) 
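// Neither a public key string nor an address: as a last resort, treat `signer` as an account
// name and sign with that account's active key. Illustrative call (hypothetical account name):
//   wallet.sign_hash("delegate-0", digest);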
{ return get_active_private_key(signer).sign_compact(hash); } } } void Wallet::sign_block(SignedBlockHeader& header)const { try { if (NOT is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (NOT is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); const vector<AccountIdType>& active_delegate_ids = my->_blockchain->get_active_delegates(); const AccountEntry delegate_entry = my->_blockchain->get_slot_signee(header.timestamp, active_delegate_ids); FC_ASSERT(delegate_entry.is_delegate()); const PublicKeyType public_signing_key = delegate_entry.signing_key(); const PrivateKeyType private_signing_key = get_private_key(Address(public_signing_key)); FC_ASSERT(delegate_entry.delegate_info.valid()); const uint32_t last_produced_block_num = delegate_entry.delegate_info->last_block_num_produced; const optional<SecretHashType>& prev_secret_hash = delegate_entry.delegate_info->next_secret_hash; if (prev_secret_hash.valid()) { FC_ASSERT(!delegate_entry.delegate_info->signing_key_history.empty()); const map<uint32_t, PublicKeyType>& signing_key_history = delegate_entry.delegate_info->signing_key_history; const uint32_t last_signing_key_change_block_num = signing_key_history.crbegin()->first; if (last_produced_block_num > last_signing_key_change_block_num) { header.previous_secret = my->get_secret(last_produced_block_num, private_signing_key); } else { // We need to use the old key to reveal the previous secret FC_ASSERT(signing_key_history.size() >= 2); auto iter = signing_key_history.crbegin(); ++iter; const PublicKeyType& prev_public_signing_key = iter->second; const PrivateKeyType prev_private_signing_key = get_private_key(Address(prev_public_signing_key)); header.previous_secret = my->get_secret(last_produced_block_num, prev_private_signing_key); } FC_ASSERT(fc::ripemd160::hash(header.previous_secret) == *prev_secret_hash); } header.next_secret_hash = fc::ripemd160::hash(my->get_secret(header.block_num, private_signing_key)); header.sign(private_signing_key); FC_ASSERT(header.validate_signee(public_signing_key)); } FC_CAPTURE_AND_RETHROW((header)) } std::shared_ptr<TransactionBuilder> Wallet::create_transaction_builder() { try { return std::make_shared<TransactionBuilder>(my.get()); } FC_CAPTURE_AND_RETHROW() } std::shared_ptr<TransactionBuilder> Wallet::create_transaction_builder(const TransactionBuilder& old_builder) { try { auto builder = std::make_shared<TransactionBuilder>(old_builder, my.get()); return builder; } FC_CAPTURE_AND_RETHROW() } std::shared_ptr<TransactionBuilder> Wallet::create_transaction_builder_from_file(const string& old_builder_path) { try { auto path = old_builder_path; if (path == "") { path = (get_data_directory() / "trx").string() + "/latest.trx"; } auto old_builder = fc::json::from_file(path).as<TransactionBuilder>(); auto builder = std::make_shared<TransactionBuilder>(old_builder, my.get()); return builder; } FC_CAPTURE_AND_RETHROW() } // TODO: Refactor publish_{slate|version} are exactly the same WalletTransactionEntry Wallet::publish_slate( const string& account_to_publish_under, const string& account_to_pay_with, bool sign) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); string paying_account = account_to_pay_with; if (paying_account.empty()) paying_account = account_to_publish_under; if (!my->is_receive_account(paying_account)) FC_THROW_EXCEPTION(unknown_receive_account, "Unknown paying account!", ("paying_account", paying_account)); auto current_account = my->_blockchain->get_account_entry(account_to_publish_under); if 
(!current_account.valid()) FC_THROW_EXCEPTION(unknown_account, "Unknown publishing account!", ("account_to_publish_under", account_to_publish_under)); SignedTransaction trx; unordered_set<Address> required_signatures; trx.expiration = blockchain::now() + get_transaction_expiration(); const auto payer_public_key = get_owner_public_key(paying_account); const auto slate_id = my->set_delegate_slate(trx, vote_all); if (slate_id == 0) FC_THROW_EXCEPTION(invalid_slate, "Cannot publish the null slate!"); fc::mutable_variant_object public_data; if (current_account->public_data.is_object()) public_data = current_account->public_data.get_object(); public_data["slate_id"] = slate_id; trx.update_account(current_account->id, current_account->delegate_pay_rate(), fc::variant_object(public_data), optional<PublicKeyType>()); my->authorize_update(required_signatures, current_account); const auto required_fees = get_transaction_fee(); if (current_account->is_delegate() && required_fees.amount < current_account->delegate_pay_balance()) { // withdraw delegate pay... trx.withdraw_pay(current_account->id, required_fees.amount); required_signatures.insert(current_account->active_key()); } else { my->withdraw_to_transaction(required_fees, paying_account, trx, required_signatures); } auto entry = LedgerEntry(); entry.from_account = payer_public_key; entry.to_account = payer_public_key; entry.memo = "publish slate " + fc::variant(slate_id).as_string(); auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees; if (sign) my->sign_transaction(trx, required_signatures); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW((account_to_publish_under)(account_to_pay_with)(sign)) } // TODO: Refactor publish_{slate|version} are exactly the same WalletTransactionEntry Wallet::publish_version( const string& account_to_publish_under, const string& account_to_pay_with, bool sign) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); string paying_account = account_to_pay_with; if (paying_account.empty()) paying_account = account_to_publish_under; if (!my->is_receive_account(paying_account)) FC_THROW_EXCEPTION(unknown_receive_account, "Unknown paying account!", ("paying_account", paying_account)); auto current_account = my->_blockchain->get_account_entry(account_to_publish_under); if (!current_account.valid()) FC_THROW_EXCEPTION(unknown_account, "Unknown publishing account!", ("account_to_publish_under", account_to_publish_under)); SignedTransaction trx; unordered_set<Address> required_signatures; trx.expiration = blockchain::now() + get_transaction_expiration(); const auto payer_public_key = get_owner_public_key(paying_account); fc::mutable_variant_object public_data; if (current_account->public_data.is_object()) public_data = current_account->public_data.get_object(); const auto version = thinkyoung::client::version_info()["client_version"].as_string(); public_data["version"] = version; trx.update_account(current_account->id, current_account->delegate_pay_rate(), fc::variant_object(public_data), optional<PublicKeyType>()); my->authorize_update(required_signatures, current_account); const auto required_fees = get_transaction_fee(); if (current_account->is_delegate() && required_fees.amount < current_account->delegate_pay_balance()) { // withdraw delegate pay... 
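// The delegate's accrued pay balance covers the fee for this publish transaction,
// so no regular account balance needs to be withdrawn.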
trx.withdraw_pay(current_account->id, required_fees.amount); required_signatures.insert(current_account->active_key()); } else { my->withdraw_to_transaction(required_fees, paying_account, trx, required_signatures); } auto entry = LedgerEntry(); entry.from_account = payer_public_key; entry.to_account = payer_public_key; entry.memo = "publish version " + version; auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees; if (sign) my->sign_transaction(trx, required_signatures); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW((account_to_publish_under)(account_to_pay_with)(sign)) } WalletTransactionEntry Wallet::collect_account_balances(const string& account_name, const function<bool(const BalanceEntry&)> filter, const string& memo_message, bool sign) { try { if (NOT is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (NOT is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); const oWalletAccountEntry account_entry = my->_wallet_db.lookup_account(account_name); if (!account_entry.valid() || !account_entry->is_my_account) FC_CAPTURE_AND_THROW(unknown_receive_account, (account_name)); AccountBalanceEntrySummaryType balance_entrys; const time_point_sec now = my->_blockchain->get_pending_state()->now(); const auto scan_balance = [&](const BalanceIdType& id, const BalanceEntry& entry) { if (!filter(entry)) return; const Asset balance = entry.get_spendable_balance(now); if (balance.amount == 0) return; const optional<Address> owner = entry.owner(); if (!owner.valid()) return; const oWalletKeyEntry key_entry = my->_wallet_db.lookup_key(*owner); if (!key_entry.valid() || !key_entry->has_private_key()) return; const oWalletAccountEntry account_entry = my->_wallet_db.lookup_account(key_entry->account_address); if (!account_entry.valid() || account_entry->name != account_name) return; balance_entrys[account_name].push_back(entry); }; scan_balances(scan_balance); if (balance_entrys.find(account_name) == balance_entrys.end()) FC_CAPTURE_AND_THROW(insufficient_funds, (account_name)); SignedTransaction trx; trx.expiration = blockchain::now() + get_transaction_expiration(); unordered_set<Address> required_signatures; Asset total_balance; for (const BalanceEntry& entry : balance_entrys.at(account_name)) { const Asset balance = entry.get_spendable_balance(my->_blockchain->get_pending_state()->now()); trx.withdraw(entry.id(), balance.amount); const auto owner = entry.owner(); if (!owner.valid()) continue; required_signatures.insert(*owner); total_balance += balance; } trx.deposit_to_account(account_entry->active_key(), total_balance - get_transaction_fee(), get_private_key(account_entry->active_key()), memo_message, account_entry->active_key(), my->get_new_private_key(account_name), from_memo, account_entry->is_titan_account() ); auto entry = LedgerEntry(); entry.from_account = account_entry->owner_key; entry.to_account = account_entry->owner_key; entry.amount = total_balance - get_transaction_fee(); entry.memo = memo_message; auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = get_transaction_fee(); if (sign) my->sign_transaction(trx, required_signatures); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW((account_name)(sign)) } TransactionBuilder Wallet::set_vote_info( const BalanceIdType& balance_id, const Address& voter_address, VoteStrategy strategy) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto 
builder = create_transaction_builder(); const auto required_fees = get_transaction_fee(); auto balance = my->_blockchain->get_balance_entry(balance_id); FC_ASSERT(balance.valid(), "No such balance!"); SignedTransaction trx; trx.expiration = blockchain::now() + get_transaction_expiration(); trx.update_balance_vote(balance_id, voter_address); my->set_delegate_slate(trx, strategy); if (balance->restricted_owner == voter_address) { // not an owner update builder->required_signatures.insert(voter_address); } else { const auto owner = balance->owner(); FC_ASSERT(owner.valid()); builder->required_signatures.insert(*owner); } auto entry = LedgerEntry(); entry.memo = "Set balance vote info"; auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees; trans_entry.trx = trx; builder->transaction_entry = trans_entry; return *builder; } FC_CAPTURE_AND_RETHROW((balance_id)(voter_address)(strategy)) } WalletTransactionEntry Wallet::update_signing_key( const string& authorizing_account_name, const string& delegate_name, const PublicKeyType& signing_key, bool sign ) { try { if (NOT is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (NOT is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); TransactionBuilderPtr builder = create_transaction_builder(); builder->update_signing_key(authorizing_account_name, delegate_name, signing_key); builder->finalize(); if (sign) { my->_dirty_accounts = true; return builder->sign(); } return builder->transaction_entry; } FC_CAPTURE_AND_RETHROW((authorizing_account_name)(delegate_name)(signing_key)(sign)) } void Wallet::repair_entrys(const optional<string>& collecting_account_name) { try { if (NOT is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (NOT is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); ulog("Repairing wallet entrys. 
This may take a while..."); my->_wallet_db.repair_entrys(my->_wallet_password); if (!collecting_account_name.valid()) return; const oWalletAccountEntry account_entry = my->_wallet_db.lookup_account(*collecting_account_name); FC_ASSERT(account_entry.valid(), "Cannot find a local account with that name!", ("collecting_account_name", *collecting_account_name)); map<string, vector<BalanceEntry>> items = get_spendable_account_balance_entrys(); for (const auto& item : items) { const auto& name = item.first; const auto& entrys = item.second; if (name.find(ALP_ADDRESS_PREFIX) != 0) continue; for (const auto& entry : entrys) { const auto owner = entry.owner(); if (!owner.valid()) continue; oWalletKeyEntry key_entry = my->_wallet_db.lookup_key(*owner); if (key_entry.valid()) { key_entry->account_address = account_entry->owner_address(); my->_wallet_db.store_key(*key_entry); } } } start_scan(0, -1); } FC_CAPTURE_AND_RETHROW((collecting_account_name)) } uint32_t Wallet::regenerate_keys(const string& account_name, uint32_t num_keys_to_regenerate) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(num_keys_to_regenerate > 0); oWalletAccountEntry account_entry = my->_wallet_db.lookup_account(account_name); FC_ASSERT(account_entry.valid()); // Update local account entrys with latest global state my->scan_accounts(); ulog("This may take a while..."); uint32_t total_regenerated_key_count = 0; // Regenerate wallet child keys ulog("Regenerating wallet child keys and importing into account: ${name}", ("name", account_name)); uint32_t key_index = 0; for (; key_index < num_keys_to_regenerate; ++key_index) { fc::oexception regenerate_key_error; try { const PrivateKeyType private_key = my->_wallet_db.get_wallet_child_key(my->_wallet_password, key_index); import_private_key(private_key, account_name); ++total_regenerated_key_count; } catch (const fc::exception& e) { regenerate_key_error = e; } if (regenerate_key_error.valid()) ulog("${e}", ("e", regenerate_key_error->to_detail_string())); } // Update wallet last used child key index my->_wallet_db.set_last_wallet_child_key_index(std::max(my->_wallet_db.get_last_wallet_child_key_index(), key_index - 1)); // Regenerate v1 account child keys ulog("Regenerating type 1 account child keys for account: ${name}", ("name", account_name)); uint32_t seq_num = 0; for (; seq_num < num_keys_to_regenerate; ++seq_num) { fc::oexception regenerate_key_error; try { const PrivateKeyType private_key = my->_wallet_db.get_account_child_key_v1(my->_wallet_password, account_entry->owner_address(), seq_num); import_private_key(private_key, account_name); ++total_regenerated_key_count; } catch (const fc::exception& e) { regenerate_key_error = e; } if (regenerate_key_error.valid()) ulog("${e}", ("e", regenerate_key_error->to_detail_string())); } // Regenerate v2 account child keys const oWalletKeyEntry key_entry = my->_wallet_db.lookup_key(Address(account_entry->active_key())); if (key_entry.valid() && key_entry->has_private_key()) { ulog("Regenerating type 2 account child keys for account: ${name}", ("name", account_name)); const PrivateKeyType active_private_key = key_entry->decrypt_private_key(my->_wallet_password); seq_num = 0; for (; seq_num < num_keys_to_regenerate; ++seq_num) { fc::oexception regenerate_key_error; try { const PrivateKeyType private_key = my->_wallet_db.get_account_child_key(active_private_key, seq_num); import_private_key(private_key, account_name); ++total_regenerated_key_count; } catch (const fc::exception& e) 
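// Remember the failure; it is logged below and regeneration continues with the next sequence number.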
{ regenerate_key_error = e; } if (regenerate_key_error.valid()) ulog("${e}", ("e", regenerate_key_error->to_detail_string())); } } // Update account last used key sequence number account_entry->last_used_gen_sequence = std::max(account_entry->last_used_gen_sequence, seq_num - 1); my->_wallet_db.store_account(*account_entry); ulog("Successfully generated ${n} keys.", ("n", total_regenerated_key_count)); my->_dirty_balances = true; my->_dirty_accounts = true; if (total_regenerated_key_count > 0) start_scan(0, -1); ulog("Key regeneration may leave the wallet in an inconsistent state."); ulog("It is recommended to create a new wallet and transfer all funds."); return total_regenerated_key_count; } FC_CAPTURE_AND_RETHROW() } int32_t Wallet::recover_accounts(int32_t number_of_accounts, int32_t max_number_of_attempts) { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); int attempts = 0; int recoveries = 0; uint32_t key_index = my->_wallet_db.get_last_wallet_child_key_index() + 1; while (recoveries < number_of_accounts && attempts++ < max_number_of_attempts) { const PrivateKeyType new_priv_key = my->_wallet_db.get_wallet_child_key(my->_wallet_password, key_index); fc::ecc::public_key new_pub_key = new_priv_key.get_public_key(); auto recovered_account = my->_blockchain->get_account_entry(new_pub_key); if (recovered_account.valid()) { my->_wallet_db.set_last_wallet_child_key_index(key_index); import_private_key(new_priv_key, recovered_account->name, true); ++recoveries; } ++key_index; } my->_dirty_accounts = true; if (recoveries) start_scan(0, -1); return recoveries; } // TODO: Rename to recover_titan_transaction_info WalletTransactionEntry Wallet::recover_transaction(const string& transaction_id_prefix, const string& recipient_account) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto transaction_entry = get_transaction(transaction_id_prefix); /* Only support standard transfers for now */ FC_ASSERT(transaction_entry.ledger_entries.size() == 1); auto ledger_entry = transaction_entry.ledger_entries.front(); /* In case the transaction was not saved in the entry */ if (transaction_entry.trx.operations.empty()) { const auto blockchain_transaction_entry = my->_blockchain->get_transaction(transaction_entry.entry_id, true); FC_ASSERT(blockchain_transaction_entry.valid()); transaction_entry.trx = blockchain_transaction_entry->trx; } /* Only support a single deposit */ DepositOperation deposit_op; bool has_deposit = false; for (const auto& op : transaction_entry.trx.operations) { switch (OperationTypeEnum(op.type)) { case deposit_op_type: FC_ASSERT(!has_deposit); deposit_op = op.as<DepositOperation>(); has_deposit = true; break; default: break; } } FC_ASSERT(has_deposit); /* Only support standard withdraw by signature condition with memo */ FC_ASSERT(WithdrawConditionTypes(deposit_op.condition.type) == withdraw_signature_type); const auto withdraw_condition = deposit_op.condition.as<WithdrawWithSignature>(); FC_ASSERT(withdraw_condition.memo.valid()); /* We had to have stored the one-time key */ const auto key_entry = my->_wallet_db.lookup_key(withdraw_condition.memo->one_time_key); FC_ASSERT(key_entry.valid() && key_entry->has_private_key()); const auto private_key = key_entry->decrypt_private_key(my->_wallet_password); /* Get shared secret and check memo decryption */ bool found_recipient = false; PublicKeyType recipient_public_key; ExtendedMemoData memo; if (!recipient_account.empty()) { recipient_public_key = 
get_owner_public_key(recipient_account); const auto shared_secret = private_key.get_shared_secret(recipient_public_key); memo = withdraw_condition.decrypt_memo_data(shared_secret); found_recipient = true; } else { const auto check_account = [&](const AccountEntry& entry) { try { recipient_public_key = entry.owner_key; // TODO: Need to check active keys as well as owner key const auto shared_secret = private_key.get_shared_secret(recipient_public_key); memo = withdraw_condition.decrypt_memo_data(shared_secret); } catch (...) { return; } found_recipient = true; FC_ASSERT(false); /* Kill scanning since we found it */ }; try { my->_blockchain->scan_unordered_accounts(check_account); } catch (...) { } } FC_ASSERT(found_recipient); /* Update ledger entry with recipient and memo info */ ledger_entry.to_account = recipient_public_key; ledger_entry.memo = memo.get_message(); transaction_entry.ledger_entries[0] = ledger_entry; my->_wallet_db.store_transaction(transaction_entry); return transaction_entry; } FC_CAPTURE_AND_RETHROW() } optional<variant_object> Wallet::verify_titan_deposit(const string& transaction_id_prefix) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); // TODO: Separate this finding logic if (transaction_id_prefix.size() < 8 || transaction_id_prefix.size() > string(TransactionIdType()).size()) FC_THROW_EXCEPTION(invalid_transaction_id, "Invalid transaction id!", ("transaction_id_prefix", transaction_id_prefix)); const TransactionIdType transaction_id = variant(transaction_id_prefix).as<TransactionIdType>(); const oTransactionEntry transaction_entry = my->_blockchain->get_transaction(transaction_id, false); if (!transaction_entry.valid()) FC_THROW_EXCEPTION(transaction_not_found, "Transaction not found!", ("transaction_id_prefix", transaction_id_prefix)); /* Only support a single deposit */ DepositOperation deposit_op; bool has_deposit = false; for (const auto& op : transaction_entry->trx.operations) { switch (OperationTypeEnum(op.type)) { case deposit_op_type: FC_ASSERT(!has_deposit); deposit_op = op.as<DepositOperation>(); has_deposit = true; break; default: break; } } FC_ASSERT(has_deposit); /* Only support standard withdraw by signature condition with memo */ FC_ASSERT(WithdrawConditionTypes(deposit_op.condition.type) == withdraw_signature_type); const WithdrawWithSignature withdraw_condition = deposit_op.condition.as<WithdrawWithSignature>(); FC_ASSERT(withdraw_condition.memo.valid()); oMessageStatus status; const map<PrivateKeyType, string> account_keys = my->_wallet_db.get_account_private_keys(my->_wallet_password); for (const auto& key_item : account_keys) { const PrivateKeyType& key = key_item.first; const string& account_name = key_item.second; status = withdraw_condition.decrypt_memo_data(key); if (status.valid()) { my->_wallet_db.cache_memo(*status, key, my->_wallet_password); mutable_variant_object info; info["from"] = variant(); info["to"] = account_name; info["amount"] = Asset(deposit_op.amount, deposit_op.condition.asset_id); info["memo"] = variant(); if (status->has_valid_signature) { const Address from_address(status->from); const oAccountEntry chain_account_entry = my->_blockchain->get_account_entry(from_address); const oWalletAccountEntry local_account_entry = my->_wallet_db.lookup_account(from_address); if (chain_account_entry.valid()) info["from"] = chain_account_entry->name; else if (local_account_entry.valid()) info["from"] = local_account_entry->name; } const string memo = status->get_message(); if 
(!memo.empty()) info["memo"] = memo; return variant_object(info); } } return optional<variant_object>(); } FC_CAPTURE_AND_RETHROW() } DelegatePaySalary Wallet::query_delegate_salary(const string& delegate_name) { FC_ASSERT(is_open()); FC_ASSERT(is_unlocked()); //FC_ASSERT(my->is_receive_account(delegate_name)); auto asset_rec = my->_blockchain->get_asset_entry(AssetIdType(0)); auto pending_state = my->_blockchain->get_pending_state(); auto delegate_account_entry = pending_state->get_account_entry(delegate_name); FC_ASSERT(delegate_account_entry.valid()); FC_ASSERT(delegate_account_entry->is_delegate()); //auto required_fees = get_transaction_fee(); DelegatePaySalary delepatbal; delepatbal.total_balance = delegate_account_entry->delegate_info->total_paid; delepatbal.pay_balance = delegate_account_entry->delegate_info->pay_balance; return delepatbal; } std::map<std::string, thinkyoung::blockchain::DelegatePaySalary> Wallet::query_delegate_salarys() { FC_ASSERT(is_open()); FC_ASSERT(is_unlocked()); std::map<std::string, thinkyoung::blockchain::DelegatePaySalary> Salary_Map; auto all_delegate_ids = my->_blockchain->get_delegates_by_vote(0, 99); auto asset_rec = my->_blockchain->get_asset_entry(AssetIdType(0)); auto pending_state = my->_blockchain->get_pending_state(); for (auto id : all_delegate_ids) { auto delegate_account_entry = pending_state->get_account_entry(id); FC_ASSERT(delegate_account_entry.valid()); FC_ASSERT(delegate_account_entry->is_delegate()); DelegatePaySalary delepatbal; delepatbal.total_balance = delegate_account_entry->delegate_info->total_paid; delepatbal.pay_balance = delegate_account_entry->delegate_info->pay_balance; Salary_Map[delegate_account_entry->name] = delepatbal; } return Salary_Map; } WalletTransactionEntry Wallet::withdraw_delegate_pay( const string& delegate_name, const string& real_amount_to_withdraw, const string& withdraw_to_account_name, bool sign) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(my->is_receive_account(delegate_name)); auto asset_rec = my->_blockchain->get_asset_entry(AssetIdType(0)); auto ipos = real_amount_to_withdraw.find("."); if (ipos != string::npos) { string str = real_amount_to_withdraw.substr(ipos + 1); int64_t precision_input = static_cast<int64_t>(pow(10, str.size())); FC_ASSERT((static_cast<uint64_t>(precision_input) <= asset_rec->precision), "Precision is not correct"); } double dAmountToWithdraw = std::stod(real_amount_to_withdraw); ShareType amount_to_withdraw((ShareType)(floor(dAmountToWithdraw * asset_rec->precision + 0.5))); auto delegate_account_entry = my->_blockchain->get_account_entry(delegate_name); FC_ASSERT(delegate_account_entry.valid()); FC_ASSERT(delegate_account_entry->is_delegate()); auto required_fees = get_transaction_fee(); FC_ASSERT(delegate_account_entry->delegate_info->pay_balance >= (amount_to_withdraw + required_fees.amount), "", ("delegate_account_entry", delegate_account_entry)); SignedTransaction trx; unordered_set<Address> required_signatures; trx.expiration = blockchain::now() + get_transaction_expiration(); oWalletKeyEntry delegate_key = my->_wallet_db.lookup_key(delegate_account_entry->active_key()); FC_ASSERT(delegate_key && delegate_key->has_private_key()); const auto delegate_private_key = delegate_key->decrypt_private_key(my->_wallet_password); const auto delegate_public_key = delegate_private_key.get_public_key(); required_signatures.insert(delegate_public_key); const WalletAccountEntry receiver_account = 
get_account(withdraw_to_account_name); const string memo_message = "withdraw pay"; trx.withdraw_pay(delegate_account_entry->id, amount_to_withdraw + required_fees.amount); trx.deposit(receiver_account.owner_address(), Asset(amount_to_withdraw, 0)); //changed by CCW for deleting Titan Transfer // trx.deposit_to_account( receiver_account.active_key(), // asset( amount_to_withdraw, 0 ), // delegate_private_key, // memo_message, // delegate_public_key, // my->get_new_private_key( delegate_name ), // from_memo, // receiver_account.is_titan_account() // ); //my->set_delegate_slate(trx, strategy); if (sign) my->sign_transaction(trx, required_signatures); auto entry = LedgerEntry(); entry.from_account = delegate_public_key; entry.amount = Asset(amount_to_withdraw, 0); entry.memo = memo_message; entry.to_account = receiver_account.owner_key; auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees; trans_entry.extra_addresses.push_back(receiver_account.owner_address()); trans_entry.trx = trx; //changed by CCW for deleting Titan Transfer //if (sign) my->sign_transaction(trx, required_signatures); // // auto entry = ledger_entry(); // entry.from_account = delegate_public_key; // entry.to_account = receiver_account.active_key(), // entry.amount = asset( amount_to_withdraw ); // entry.memo = memo_message; // // auto entry = wallet_transaction_entry(); // entry.ledger_entries.push_back( entry ); // entry.fee = required_fees; // // if( sign ) // my->sign_transaction( trx, required_signatures ); // // entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW((delegate_name)(real_amount_to_withdraw)) } PublicKeyType Wallet::get_new_public_key(const string& account_name) { return my->get_new_public_key(account_name); } Address Wallet::create_new_address(const string& account_name, const string& label) { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(my->is_receive_account(account_name)); auto addr = my->get_new_address(account_name, label); return addr; } void Wallet::set_address_label(const Address& addr, const string& label) { FC_ASSERT(false, "This doesn't do anything right now."); FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto okey = my->_wallet_db.lookup_key(addr); FC_ASSERT(okey.valid(), "No such address."); //FC_ASSERT( okey->btc_data.valid(), "Trying to set a label for a TITAN address." ); //okey->btc_data->label = label; my->_wallet_db.store_key(*okey); } string Wallet::get_address_label(const Address& addr) { FC_ASSERT(false, "This doesn't do anything right now."); FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto okey = my->_wallet_db.lookup_key(addr); //FC_ASSERT( okey.valid(), "No such address." ); //FC_ASSERT( okey->btc_data.valid(), "This address has no label (it is a TITAN address)!" 
); //return okey->btc_data->label; return ""; } void Wallet::set_address_group_label(const Address& addr, const string& group_label) { FC_ASSERT(false, "This doesn't do anything right now."); FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto okey = my->_wallet_db.lookup_key(addr); FC_ASSERT(okey.valid(), "No such address."); //FC_ASSERT( okey->btc_data.valid(), "Trying to set a group label for a TITAN address" ); //okey->btc_data->group_label = group_label; my->_wallet_db.store_key(*okey); } string Wallet::get_address_group_label(const Address& addr) { FC_ASSERT(false, "This doesn't do anything right now."); FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto okey = my->_wallet_db.lookup_key(addr); FC_ASSERT(okey.valid(), "No such address."); //FC_ASSERT( okey->btc_data.valid(), "This address has no group label (it is a TITAN address)!" ); return ""; //return okey->btc_data->group_label; } vector<Address> Wallet::get_addresses_for_group_label(const string& group_label) { FC_ASSERT(false, "This doesn't do anything right now."); vector<Address> addrs; for (auto item : my->_wallet_db.get_keys()) { auto key = item.second; //if( key.btc_data.valid() && key.btc_data->group_label == group_label ) addrs.push_back(item.first); } return addrs; } ChainInterfacePtr Wallet::get_correct_state_ptr() const { if (my->_blockchain->get_is_in_sandbox()) return my->_blockchain->get_sandbox_pending_state(); return my->_blockchain; } WalletTransactionEntry Wallet::register_contract(const string& owner, const fc::path codefile, const string& asset_symbol, double init_limit, bool is_testing) { ChainInterfacePtr data_ptr = get_correct_state_ptr(); string codefile_str = codefile.string(); size_t pos; pos = codefile_str.find_last_of('.'); if ((pos == string::npos) || codefile_str.substr(pos) != ".gpc") { FC_THROW_EXCEPTION(thinkyoung::blockchain::invalid_contract_filename, "contract bytecode file name should end with .gpc"); } FC_ASSERT(init_limit > 0, "init_limit should greater than 0"); FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(data_ptr->is_valid_symbol(asset_symbol), "Invalid asset symbol"); FC_ASSERT(my->is_receive_account(owner), "Invalid account name"); FC_ASSERT(asset_symbol == ALP_BLOCKCHAIN_SYMBOL, "asset_symbol must be ACT"); const auto asset_rec = data_ptr->get_asset_entry(asset_symbol); FC_ASSERT(asset_rec.valid(), "Asset not exist!"); FC_ASSERT(asset_rec->id == 0, "asset_symbol must be ACT"); const auto asset_id = asset_rec->id; const int64_t precision = asset_rec->precision ? 
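// Fall back to a precision of 1 if the asset entry reports zero precision.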
asset_rec->precision : 1; //ShareType amount_limit = init_limit * precision; //Asset asset_limit(amount_limit, asset_id); Asset asset_limit = to_asset(asset_rec->id, precision, init_limit); SignedTransaction trx; unordered_set<Address> required_signatures; Code contract_code(codefile); PublicKeyType owner_public_key = get_owner_public_key(owner); Address owner_address = Address(owner_public_key); Asset register_fee = data_ptr->get_contract_register_fee(contract_code); Asset margin = data_ptr->get_default_margin(asset_id); Asset fee = get_transaction_fee(asset_limit.asset_id); map<BalanceIdType, ShareType> balances; if (!is_testing) { if (my->_blockchain->get_is_in_sandbox()) sandbox_get_enough_balances(owner, asset_limit + register_fee + fee + margin, balances, required_signatures); else get_enough_balances(owner, asset_limit + register_fee + fee + margin, balances, required_signatures); } else required_signatures.insert(owner_address); ContractIdType contract_id = trx.register_contract(contract_code, owner_public_key, asset_limit, fee, balances); // insert the contract-register operation into the transaction FC_ASSERT(register_fee.asset_id == 0, "register fee must be ACT"); FC_ASSERT(margin.asset_id == 0, "margin must be ACT"); FC_ASSERT(fee.asset_id == 0, "fee must be ACT"); trx.expiration = blockchain::now() + get_transaction_expiration(); my->sign_transaction(trx, required_signatures); // mark contracts dirty so they are rescanned (the contract may have been upgraded) my->_dirty_contracts = true; auto trans_entry = WalletTransactionEntry(); trans_entry.fee = register_fee + fee; trans_entry.trx = trx; return trans_entry; } std::vector<thinkyoung::blockchain::Asset> Wallet::register_contract_testing(const string& owner, const fc::path codefile) { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(my->is_receive_account(owner), "Invalid account name"); Code contract_code(codefile); if (!contract_code.valid()) { FC_CAPTURE_AND_THROW(contract_code_invalid, (codefile)); } Asset fee = get_transaction_fee(0); Asset margin = my->_blockchain->get_default_margin(0); Asset register_fee = my->_blockchain->get_contract_register_fee(contract_code); Asset asset_for_exec = my->_blockchain->get_amount(CONTRACT_TESTING_LIMIT_MAX); auto trans_entry = register_contract(owner, codefile, ALP_BLOCKCHAIN_SYMBOL, (double)asset_for_exec.amount / ALP_BLOCKCHAIN_PRECISION, true ); SignedTransaction trx; trx = trans_entry.trx; ChainInterfacePtr state_ptr = get_correct_state_ptr(); PendingChainStatePtr pend_state = std::make_shared<PendingChainState>(state_ptr); TransactionEvaluationStatePtr trx_eval_state = std::make_shared<TransactionEvaluationState>(pend_state.get()); trx_eval_state->skipexec = false; trx_eval_state->evaluate_contract_testing = true; trx_eval_state->evaluate(trx); std::vector<thinkyoung::blockchain::Asset> asset_vec; asset_vec.emplace_back(fee); asset_vec.emplace_back(margin); asset_vec.emplace_back(register_fee); asset_vec.emplace_back(trx_eval_state->exec_cost); return asset_vec; } thinkyoung::wallet::WalletTransactionEntry Wallet::call_contract(const string caller, const ContractIdType contract, const string method, const string& arguments, const string& asset_symbol, double cost_limit, bool is_testing) { ChainInterfacePtr data_ptr = get_correct_state_ptr(); if (arguments.length() > CONTRACT_PARAM_MAX_LEN) FC_CAPTURE_AND_THROW(contract_parameter_length_over_limit, ("the parameter length of contract function is over limit")); if (CallContractOperation::is_function_not_allow_call(method)) {
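// Certain contract entry points are reserved by the chain and may not be invoked
// explicitly through a caller transaction.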
            FC_CAPTURE_AND_THROW(method_can_not_be_called_explicitly, (method)("method can't be called explicitly!"));
        }

        FC_ASSERT(cost_limit < 20000, "cost_limit should be less than 20000");
        FC_ASSERT(cost_limit > 0, "cost_limit should be greater than 0");
        FC_ASSERT(is_open(), "Wallet not open!");
        FC_ASSERT(is_unlocked(), "Wallet not unlock!");
        FC_ASSERT(data_ptr->is_valid_symbol(asset_symbol), "Invalid asset symbol");
        FC_ASSERT(my->is_receive_account(caller), "Invalid account name");
        FC_ASSERT(asset_symbol == ALP_BLOCKCHAIN_SYMBOL, "asset_symbol must be ACT");

        const auto asset_rec = data_ptr->get_asset_entry(asset_symbol);
        FC_ASSERT(asset_rec.valid(), "Asset not exist!");
        FC_ASSERT(asset_rec->id == 0, "asset_symbol must be ACT");

        const auto asset_id = asset_rec->id;
        const int64_t precision = asset_rec->precision ? asset_rec->precision : 1;
        //ShareType amount_limit = cost_limit * precision;
        //Asset asset_limit(amount_limit, asset_id);
        Asset asset_limit = to_asset(asset_rec->id, precision, cost_limit);

        SignedTransaction trx;
        unordered_set<Address> required_signatures;

        PublicKeyType caller_public_key = get_owner_public_key(caller);
        Address caller_address = Address(caller_public_key);

        Asset fee = get_transaction_fee(asset_limit.asset_id);

        map<BalanceIdType, ShareType> balances;

        if (!is_testing) {
            if (my->_blockchain->get_is_in_sandbox())
                sandbox_get_enough_balances(caller, asset_limit + fee, balances, required_signatures);
            else
                get_enough_balances(caller, asset_limit + fee, balances, required_signatures);
        } else
            required_signatures.insert(caller_address);

        trx.call_contract(contract, method, arguments, caller_public_key, asset_limit, fee, balances);   // insert the contract-call op

        FC_ASSERT(fee.asset_id == 0, "fee must be ACT");

        trx.expiration = blockchain::now() + get_transaction_expiration();
        my->sign_transaction(trx, required_signatures);

        auto trans_entry = WalletTransactionEntry();
        trans_entry.fee = fee;
        trans_entry.trx = trx;
        trans_entry.entry_id = trx.id();
        return trans_entry;
    }

    std::vector<thinkyoung::blockchain::Asset> Wallet::call_contract_testing(const string caller, const ContractIdType contract, const string method, const string& arguments) {
        FC_ASSERT(is_open(), "Wallet not open!");
        FC_ASSERT(is_unlocked(), "Wallet not unlock!");
        FC_ASSERT(my->is_receive_account(caller), "Invalid account name");

        Asset fee = get_transaction_fee(0);
        Asset asset_for_exec = my->_blockchain->get_amount(CONTRACT_TESTING_LIMIT_MAX);

        auto trans_entry = call_contract(caller,
                                         contract,
                                         method,
                                         arguments,
                                         ALP_BLOCKCHAIN_SYMBOL,
                                         (double)asset_for_exec.amount / ALP_BLOCKCHAIN_PRECISION,
                                         true);

        SignedTransaction trx;
        trx = trans_entry.trx;

        ChainInterfacePtr state_ptr = get_correct_state_ptr();
        PendingChainStatePtr pend_state = std::make_shared<PendingChainState>(state_ptr);
        TransactionEvaluationStatePtr trx_eval_state = std::make_shared<TransactionEvaluationState>(pend_state.get());
        trx_eval_state->skipexec = false;
        trx_eval_state->evaluate_contract_testing = true;
        trx_eval_state->evaluate(trx);

        std::vector<thinkyoung::blockchain::Asset> asset_vec;
        asset_vec.emplace_back(fee);
        asset_vec.emplace_back(trx_eval_state->exec_cost);
        return asset_vec;
    }

    std::vector<thinkyoung::blockchain::EventOperation> Wallet::call_contract_local_emit(const string caller, const ContractIdType contract, const string method, const string& arguments) {
        FC_ASSERT(is_open(), "Wallet not open!");
        FC_ASSERT(is_unlocked(), "Wallet not unlock!");
        FC_ASSERT(my->is_receive_account(caller), "Invalid account name");

        Asset fee = get_transaction_fee(0);
        Asset asset_for_exec
= my->_blockchain->get_amount(CONTRACT_TESTING_LIMIT_MAX); auto trans_entry = call_contract(caller, contract, method, arguments, ALP_BLOCKCHAIN_SYMBOL, (double)asset_for_exec.amount / ALP_BLOCKCHAIN_PRECISION, true); SignedTransaction trx; trx = trans_entry.trx; ChainInterfacePtr state_ptr = get_correct_state_ptr(); PendingChainStatePtr pend_state = std::make_shared<PendingChainState>(state_ptr); TransactionEvaluationStatePtr trx_eval_state = std::make_shared<TransactionEvaluationState>(pend_state.get()); trx_eval_state->skipexec = false; trx_eval_state->evaluate_contract_testing = true; trx_eval_state->evaluate(trx); vector<EventOperation> ops; for (const auto& op : trx_eval_state->p_result_trx.operations) { if (op.type == OperationTypeEnum::event_op_type) { ops.push_back(op.as<EventOperation>()); } } return ops; } std::string Wallet::call_contract_offline(const string caller, const ContractIdType contract, const string method, const string& arguments) { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(my->is_receive_account(caller), "Invalid account name"); oContractEntry entry = my->_blockchain->get_contract_entry(contract); if (!entry.valid()) { FC_CAPTURE_AND_THROW(contract_not_exist, (contract)); } set<string>::iterator vsit = entry->code.offline_abi.begin(); while (vsit != entry->code.offline_abi.end()) { if (method == *vsit) break; vsit++; } if (vsit == entry->code.offline_abi.end()) FC_CAPTURE_AND_THROW(method_not_exist, (method)); PublicKeyType caller_public_key = get_owner_public_key(caller); Address caller_address = Address(caller_public_key); PendingChainStatePtr pend_state = std::make_shared<PendingChainState>(my->_blockchain); TransactionEvaluationStatePtr trx_eval_state = std::make_shared<TransactionEvaluationState>(pend_state.get()); GluaStateValue statevalue; statevalue.pointer_value = trx_eval_state.get(); lua::lib::GluaStateScope scope; lua::lib::add_global_string_variable(scope.L(), "caller", (((string)(caller_public_key)).c_str())); lua::lib::add_global_string_variable(scope.L(), "caller_address", ((string)(Address(caller_address))).c_str()); lua::lib::set_lua_state_value(scope.L(), "evaluate_state", statevalue, GluaStateValueType::LUA_STATE_VALUE_POINTER); lua::api::global_glua_chain_api->clear_exceptions(scope.L()); std::string result; scope.set_instructions_limit(CONTRACT_OFFLINE_LIMIT_MAX); scope.execute_contract_api_by_address(contract.AddressToString(AddressType::contract_address).c_str(), method.c_str(), arguments.c_str(), &result); int exception_code = lua::lib::get_lua_state_value(scope.L(), "exception_code").int_value; char* exception_msg = (char*)lua::lib::get_lua_state_value(scope.L(), "exception_msg").string_value; if (exception_code > 0) { thinkyoung::blockchain::contract_error con_err(32000, "exception", exception_msg); throw con_err; } return result; } void Wallet::get_enough_balances(const string& account_name, const Asset target, std::map<BalanceIdType, ShareType>& balances, unordered_set<Address>& required_signatures) { try { FC_ASSERT(!account_name.empty()); auto amount_remaining = target; const AccountBalanceEntrySummaryType balance_entrys = get_spendable_account_balance_entrys(account_name); if (balance_entrys.find(account_name) == balance_entrys.end()) FC_CAPTURE_AND_THROW(insufficient_funds, (account_name)(target)(balance_entrys)); for (const auto& entry : balance_entrys.at(account_name)) { const Asset balance = entry.get_spendable_balance(my->_blockchain->get_pending_state()->now()); if 
(balance.amount <= 0 || balance.asset_id != amount_remaining.asset_id) continue; const auto owner = entry.owner(); if (!owner.valid()) continue; if (amount_remaining.amount > balance.amount) { balances.insert(std::make_pair(entry.id(), balance.amount)); required_signatures.insert(*owner); amount_remaining -= balance; } else { balances.insert(std::make_pair(entry.id(), amount_remaining.amount)); required_signatures.insert(*owner); return; } } const string required = my->_blockchain->to_pretty_asset(target); const string available = my->_blockchain->to_pretty_asset(target - amount_remaining); FC_CAPTURE_AND_THROW(insufficient_funds, (required)(available)(balance_entrys)); } FC_CAPTURE_AND_RETHROW((account_name)(target)(balances)(required_signatures)) } AccountBalanceEntrySummaryType Wallet::sandbox_get_spendable_account_balance_entries(const string& account_name) { try { // set the balances state for sandbox PendingChainStatePtr sandbox_state = my->_blockchain->get_sandbox_pending_state(); if (!account_name.empty() && !sandbox_state->is_valid_account_name(account_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid account name!", ("account_name", account_name)); map<string, vector<BalanceEntry>> account_balances; const time_point_sec now = sandbox_state->now(); for (auto iter = sandbox_state->_balance_id_to_entry.begin(); iter != sandbox_state->_balance_id_to_entry.end(); ++iter) { auto entry = iter->second; if (entry.condition.type != withdraw_signature_type) continue; const Asset balance = entry.get_spendable_balance(now); if (balance.amount == 0) continue; const optional<Address> owner = entry.owner(); if (!owner.valid()) continue; const oWalletKeyEntry key_entry = my->_wallet_db.lookup_key(*owner); if (!key_entry.valid() || !key_entry->has_private_key()) continue; const oWalletAccountEntry account_entry = my->_wallet_db.lookup_account(key_entry->account_address); const string name = account_entry.valid() ? 
account_entry->name : string(key_entry->public_key); if (!account_name.empty() && name != account_name) continue; account_balances[account_name].push_back(entry); } return account_balances; } FC_CAPTURE_AND_RETHROW() } void Wallet::sandbox_get_enough_balances(const string& account_name, const Asset target, std::map<BalanceIdType, ShareType>& balances, unordered_set<Address>& required_signatures) { try { // set the balances state for sandbox PendingChainStatePtr sandbox_state = my->_blockchain->get_sandbox_pending_state(); const AccountBalanceEntrySummaryType balance_entrys = sandbox_get_spendable_account_balance_entries(account_name); if (balance_entrys.find(account_name) == balance_entrys.end()) FC_CAPTURE_AND_THROW(insufficient_funds, (account_name)(target)(balance_entrys)); auto amount_remaining = target; for (const auto& entry : balance_entrys.at(account_name)) { const Asset balance = entry.get_spendable_balance(sandbox_state->now()); if (balance.amount <= 0 || balance.asset_id != amount_remaining.asset_id) continue; const auto owner = entry.owner(); if (!owner.valid()) continue; if (amount_remaining.amount > balance.amount) { balances.insert(std::make_pair(entry.id(), balance.amount)); required_signatures.insert(*owner); amount_remaining -= balance; } else { balances.insert(std::make_pair(entry.id(), amount_remaining.amount)); required_signatures.insert(*owner); return; } } const string required = my->_blockchain->to_pretty_asset(target); const string available = my->_blockchain->to_pretty_asset(target - amount_remaining); FC_CAPTURE_AND_THROW(insufficient_funds, (required)(available)(balance_entrys)); } FC_CAPTURE_AND_RETHROW((account_name)(target)(balances)(required_signatures)) } vector<ScriptEntry> Wallet::list_scripts() { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); vector<ScriptEntry> res; auto it = my->script_id_to_script_entry_db.unordered_begin(); auto end = my->script_id_to_script_entry_db.unordered_end(); while (it != end) { res.push_back(it->second); it++; } std::sort(res.begin(), res.end()); return res; } FC_CAPTURE_AND_RETHROW() } thinkyoung::wallet::oScriptEntry Wallet::get_script_entry(const ScriptIdType& script_id) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto it = my->script_id_to_script_entry_db.unordered_find(script_id); if (it != my->script_id_to_script_entry_db.unordered_end()) { return it->second; } return oScriptEntry(); } FC_CAPTURE_AND_RETHROW((script_id)) } thinkyoung::blockchain::ScriptIdType Wallet::add_script(const fc::path& filename, const string& description/*=string("")*/) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); ScriptEntry script_entry(filename, description); my->script_id_to_script_entry_db.store(script_entry.id, script_entry); return script_entry.id; } FC_CAPTURE_AND_RETHROW((filename)(description)) } void Wallet::delete_script(const ScriptIdType& script_id) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); if (my->script_id_to_script_entry_db.unordered_find(script_id) == my->script_id_to_script_entry_db.unordered_end()) FC_CAPTURE_AND_THROW(script_not_found_in_db, (script_id)); my->script_id_to_script_entry_db.remove(script_id); for (auto it = my->contract_id_event_to_script_id_vector_db.unordered_begin(); it != my->contract_id_event_to_script_id_vector_db.unordered_end(); it++) { ScriptRelationKey key = it->first; vector<ScriptIdType> 
                vec = it->second;
                bool modified = false;
                for (auto vecit = vec.begin(); vecit != vec.end();) {
                    if (*vecit == script_id) {
                        vecit = vec.erase(vecit);
                        modified = true;
                    } else
                        vecit++;
                }
                if (modified)
                    my->contract_id_event_to_script_id_vector_db.store(key, vec);
            }
        }
        FC_CAPTURE_AND_RETHROW((script_id))
    }

    void Wallet::disable_script(const ScriptIdType& script_id) {
        try {
            FC_ASSERT(is_open(), "Wallet not open!");
            FC_ASSERT(is_unlocked(), "Wallet not unlock!");

            auto it = my->script_id_to_script_entry_db.unordered_find(script_id);
            if (it != my->script_id_to_script_entry_db.unordered_end()) {
                ScriptEntry entry = it->second;
                if (!entry.enable)
                    FC_CAPTURE_AND_THROW(script_has_been_disabled, (script_id));
                entry.enable = false;
                my->script_id_to_script_entry_db.store(script_id, entry);
            } else {
                FC_THROW_EXCEPTION(script_not_found_in_db, "No such script exists");
            }
        }
        FC_CAPTURE_AND_RETHROW((script_id))
    }

    void Wallet::enable_script(const ScriptIdType& script_id) {
        try {
            FC_ASSERT(is_open(), "Wallet not open!");
            FC_ASSERT(is_unlocked(), "Wallet not unlock!");

            auto it = my->script_id_to_script_entry_db.unordered_find(script_id);
            if (it != my->script_id_to_script_entry_db.unordered_end()) {
                ScriptEntry entry = it->second;
                entry.enable = true;
                my->script_id_to_script_entry_db.store(script_id, entry);
            } else {
                FC_THROW_EXCEPTION(script_not_found_in_db, "No such script exists");
            }
        }
        FC_CAPTURE_AND_RETHROW((script_id))
    }

    vector<ScriptIdType> Wallet::list_event_handler(const ContractIdType& contract_id, const std::string& event_type) {
        try {
            FC_ASSERT(is_open(), "Wallet not open!");
            FC_ASSERT(is_unlocked(), "Wallet not unlock!");

            auto contract_entry = my->_blockchain->get_contract_entry(contract_id);
            if (NOT contract_entry.valid())
                FC_THROW_EXCEPTION(contract_not_exist, "contract id does not exist");

            auto it = my->contract_id_event_to_script_id_vector_db.unordered_find(ScriptRelationKey(contract_id, event_type));
            if (it != my->contract_id_event_to_script_id_vector_db.unordered_end()) {
                return it->second;
            }
            return vector<ScriptIdType>();
        }
        FC_CAPTURE_AND_RETHROW((contract_id)(event_type))
    }

    void Wallet::add_event_handler(const ContractIdType& contract_id, const std::string& event_type, const ScriptIdType& script_id, uint32_t index) {
        try {
            FC_ASSERT(is_open(), "Wallet not open!");
            FC_ASSERT(is_unlocked(), "Wallet not unlock!");

            auto contract_entry = my->_blockchain->get_contract_entry(contract_id);
            if (NOT contract_entry.valid())
                FC_THROW_EXCEPTION(contract_not_exist, "contract id does not exist");
            if (std::find(contract_entry->code.events.begin(), contract_entry->code.events.end(), event_type) == contract_entry->code.events.end())
                FC_CAPTURE_AND_THROW(EventType_not_found, (event_type));

            auto script_entry = get_script_entry(script_id);
            if (NOT script_entry.valid())
                FC_THROW_EXCEPTION(script_not_found_in_db, "script id does not exist");

            vector<ScriptIdType> script_id_vec;
            auto it = my->contract_id_event_to_script_id_vector_db.unordered_find(ScriptRelationKey(contract_id, event_type));
            if (it != my->contract_id_event_to_script_id_vector_db.unordered_end()) {
                script_id_vec = it->second;
                for (const auto& id : script_id_vec) {
                    if (id == script_id)
                        FC_THROW_EXCEPTION(event_handler_existed, "Event handler already exists");
                }
                if (index > script_id_vec.size()) {
                    script_id_vec.push_back(script_id);
                } else {
                    script_id_vec.insert(script_id_vec.begin() + index, script_id);
                }
            } else {
                script_id_vec.push_back(script_id);
            }
            my->contract_id_event_to_script_id_vector_db.store(ScriptRelationKey(contract_id, event_type), script_id_vec);
        }
FC_CAPTURE_AND_RETHROW((contract_id)(event_type)(script_id)(index)) } void Wallet::delete_event_handler(const ContractIdType& contract_id, const std::string& event_type, const ScriptIdType& script_id) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto contract_entry = my->_blockchain->get_contract_entry(contract_id); if (NOT contract_entry.valid()) FC_THROW_EXCEPTION(contract_not_exist, "contract id is not existed"); auto script_entry = get_script_entry(script_id); if (NOT script_entry.valid()) FC_THROW_EXCEPTION(script_not_found_in_db, "script id is not existed"); bool get_delete = false; vector<ScriptIdType> new_script_id_vec; auto it = my->contract_id_event_to_script_id_vector_db.unordered_find(ScriptRelationKey(contract_id, event_type)); if (it != my->contract_id_event_to_script_id_vector_db.unordered_end()) { vector<ScriptIdType> script_id_vec(it->second); for (auto iter = script_id_vec.begin(); iter != script_id_vec.end(); ++iter) { if (*iter != script_id) { new_script_id_vec.push_back(*iter); } else { get_delete = true; } } } if (get_delete == true) { my->contract_id_event_to_script_id_vector_db.store(ScriptRelationKey(contract_id, event_type), new_script_id_vec); } else { FC_CAPTURE_AND_THROW(EventHandler_not_found, (contract_id)(event_type)(script_id)); } } FC_CAPTURE_AND_RETHROW((contract_id)(event_type)(script_id)) } std::vector<std::string> Wallet::get_events_bound(const std::string& script_id) { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); ScriptIdType id(script_id, AddressType::script_id); auto it = my->contract_id_event_to_script_id_vector_db.unordered_begin(); std::vector<std::string> res; while(it!= my->contract_id_event_to_script_id_vector_db.unordered_end()) { for (auto script_id_it : it->second) { if (script_id_it == id) { res.push_back(it->first.contract_id.AddressToString(AddressType::contract_address) + "," + it->first.event_type); } } it++; } return res; } WalletTransactionEntry Wallet::transfer_asset_to_address( const string& real_amount_to_transfer, const string& amount_to_transfer_symbol, const string& from_account_name, const Address& to_address, const string& memo_message, VoteStrategy strategy, bool sign, const string& alp_account) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(my->_blockchain->is_valid_symbol(amount_to_transfer_symbol), "Invalid asset symbol"); FC_ASSERT(my->is_receive_account(from_account_name), "Invalid account name"); const auto asset_rec = my->_blockchain->get_asset_entry(amount_to_transfer_symbol); FC_ASSERT(asset_rec.valid(), "Asset not exist!"); const auto asset_id = asset_rec->id; const int64_t precision = asset_rec->precision ? 
asset_rec->precision : 1; FC_ASSERT(utilities::isNumber(real_amount_to_transfer), "inputed amount is not a number"); auto ipos = real_amount_to_transfer.find("."); if (ipos != string::npos) { string str = real_amount_to_transfer.substr(ipos + 1); int64_t precision_input = static_cast<int64_t>(pow(10, str.size())); FC_ASSERT((precision_input <= precision), "Precision is not correct"); } double dAmountToTransfer = std::stod(real_amount_to_transfer); ShareType amount_to_transfer = static_cast<ShareType>(floor(dAmountToTransfer * precision + 0.5)); Asset asset_to_transfer(amount_to_transfer, asset_id); PrivateKeyType sender_private_key = get_active_private_key(from_account_name); PublicKeyType sender_public_key = sender_private_key.get_public_key(); Address sender_account_address(sender_private_key.get_public_key()); SignedTransaction trx; unordered_set<Address> required_signatures; if (alp_account != "") { //trx.from_account = from_account_name; trx.alp_account = alp_account; trx.alp_inport_asset = asset_to_transfer; } // if (memo_message != "") // { // trx.AddtionImessage(memo_message); // } const auto required_fees = get_transaction_fee(asset_to_transfer.asset_id); const auto required_imessage_fee = get_transaction_imessage_fee(memo_message); if (required_fees.asset_id == asset_to_transfer.asset_id) { my->withdraw_to_transaction(required_fees + asset_to_transfer + required_imessage_fee, from_account_name, trx, required_signatures); } else { my->withdraw_to_transaction(asset_to_transfer, from_account_name, trx, required_signatures); my->withdraw_to_transaction(required_fees + required_imessage_fee, from_account_name, trx, required_signatures); } trx.deposit(to_address, asset_to_transfer); trx.expiration = blockchain::now() + get_transaction_expiration(); my->set_delegate_slate(trx, strategy); // if( sign ) // my->sign_transaction( trx, required_signatures ); auto entry = LedgerEntry(); entry.from_account = sender_public_key; entry.amount = asset_to_transfer; if (memo_message != "") { entry.memo = memo_message; trx.AddtionImessage(memo_message); //AddtionImessage(memo_message); } else entry.memo = "To: " + string(to_address).substr(0, 8) + "..."; if (sign) my->sign_transaction(trx, required_signatures); try { auto account_rec = my->_blockchain->get_account_entry(to_address); if (account_rec.valid()) { entry.to_account = account_rec->owner_key; } else { auto acc_rec = get_account_for_address(to_address); if (acc_rec.valid()) { entry.to_account = acc_rec->owner_key; } } } catch (...) 
{ } auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees + required_imessage_fee; trans_entry.extra_addresses.push_back(to_address); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW((real_amount_to_transfer)(amount_to_transfer_symbol)(from_account_name)(to_address)(memo_message)) } // common account -> contract account (contract balance) thinkyoung::wallet::WalletTransactionEntry Wallet::transfer_asset_to_contract( double real_amount_to_transfer, const string& amount_to_transfer_symbol, const string& from_account_name, const Address& to_contract_address, double exec_cost, bool sign, bool is_testing) { try { ChainInterfacePtr data_ptr = get_correct_state_ptr(); FC_ASSERT(exec_cost >= 0, "exec_cost should greater or equal than 0"); FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(amount_to_transfer_symbol == ALP_BLOCKCHAIN_SYMBOL, "Asset symbol should be ACT"); FC_ASSERT(data_ptr->is_valid_symbol(amount_to_transfer_symbol), "Invalid asset symbol"); FC_ASSERT(my->is_receive_account(from_account_name), "Invalid account name"); const auto asset_rec = data_ptr->get_asset_entry(amount_to_transfer_symbol); FC_ASSERT(asset_rec.valid(), "Asset not exist!"); const auto asset_id = asset_rec->id; FC_ASSERT(asset_id == 0, "Asset symbol should be ACT"); const int64_t precision = asset_rec->precision ? asset_rec->precision : 1; //ShareType amount_to_transfer = real_amount_to_transfer * precision; //Asset asset_to_transfer(amount_to_transfer, asset_id); FC_ASSERT(real_amount_to_transfer>=1.0/precision, "transfer amount must bigger than 0"); Asset asset_to_transfer = to_asset(asset_rec->id, precision, real_amount_to_transfer); //ShareType amount_for_exec = exec_cost * precision; //Asset asset_for_exec(amount_for_exec, asset_id); Asset asset_for_exec = to_asset(asset_rec->id, precision, exec_cost); PublicKeyType sender_public_key = get_owner_public_key(from_account_name); Address sender_account_address = Address(sender_public_key); SignedTransaction trx; unordered_set<Address> required_signatures; const auto required_fees = get_transaction_fee(asset_to_transfer.asset_id); map<BalanceIdType, ShareType> balances; if (!is_testing) { if (my->_blockchain->get_is_in_sandbox()) sandbox_get_enough_balances(from_account_name, required_fees + asset_to_transfer + asset_for_exec, balances, required_signatures); else get_enough_balances(from_account_name, required_fees + asset_to_transfer + asset_for_exec, balances, required_signatures); } else required_signatures.insert(sender_account_address); my->transfer_to_contract_trx(trx, to_contract_address, asset_to_transfer, asset_for_exec, required_fees, sender_public_key, balances); //trx.deposit_to_contract(to_contract_address, asset_to_transfer); trx.expiration = blockchain::now() + get_transaction_expiration(); if (sign) my->sign_transaction(trx, required_signatures); auto entry = LedgerEntry(); entry.from_account = sender_public_key; entry.amount = asset_to_transfer; auto trans_entry = WalletTransactionEntry(); trans_entry.entry_id = trx.id(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees; trans_entry.extra_addresses.push_back(to_contract_address); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW((real_amount_to_transfer)(amount_to_transfer_symbol)(from_account_name)(to_contract_address)) } std::vector<thinkyoung::blockchain::Asset> Wallet::transfer_asset_to_contract_testing( double 
real_amount_to_transfer, const string& amount_to_transfer_symbol, const string& from_account_name, const Address& to_contract_address, bool sign) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(amount_to_transfer_symbol == ALP_BLOCKCHAIN_SYMBOL, "Asset symbol should be ACT"); FC_ASSERT(my->_blockchain->is_valid_symbol(amount_to_transfer_symbol), "Invalid asset symbol"); FC_ASSERT(my->is_receive_account(from_account_name), "Invalid account name"); const auto asset_rec = my->_blockchain->get_asset_entry(amount_to_transfer_symbol); FC_ASSERT(asset_rec.valid(), "Asset not exist!"); const auto asset_id = asset_rec->id; FC_ASSERT(asset_id == 0, "Asset symbol should be ACT"); const int64_t precision = asset_rec->precision ? asset_rec->precision : 1; ShareType amount_to_transfer = real_amount_to_transfer * precision; Asset asset_to_transfer(amount_to_transfer, asset_id); Asset asset_for_exec = my->_blockchain->get_amount(CONTRACT_TESTING_LIMIT_MAX); Asset required_fees = get_transaction_fee(asset_to_transfer.asset_id); SignedTransaction trx; auto trans_entry = transfer_asset_to_contract(real_amount_to_transfer, amount_to_transfer_symbol, from_account_name, to_contract_address, (double)asset_for_exec.amount / ALP_BLOCKCHAIN_PRECISION, true, true); trx = trans_entry.trx; ChainInterfacePtr state_ptr = get_correct_state_ptr(); PendingChainStatePtr pend_state = std::make_shared<PendingChainState>(state_ptr); TransactionEvaluationStatePtr trx_eval_state = std::make_shared<TransactionEvaluationState>(pend_state.get()); trx_eval_state->skipexec = false; trx_eval_state->evaluate_contract_testing = true; trx_eval_state->evaluate(trx); std::vector<thinkyoung::blockchain::Asset> asset_vec; asset_vec.emplace_back(required_fees); asset_vec.emplace_back(asset_to_transfer); asset_vec.emplace_back(trx_eval_state->exec_cost); return asset_vec; } FC_CAPTURE_AND_RETHROW((real_amount_to_transfer)(amount_to_transfer_symbol)(from_account_name)(to_contract_address)) } std::vector<thinkyoung::blockchain::Asset> Wallet::upgrade_contract_testing( const Address& contract_id, const string& upgrader_name, const string& new_contract_name, const string& new_contract_desc, bool sign ) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); ChainInterfacePtr data_ptr = get_correct_state_ptr(); oContractEntry contract_entry = data_ptr->get_contract_entry(contract_id); PublicKeyType upgrader_owner_key = get_owner_public_key(upgrader_name); if (!contract_entry.valid()) FC_CAPTURE_AND_THROW(contract_not_exist, (contract_id)); if (NOT data_ptr->is_temporary_contract(contract_entry->level)) FC_CAPTURE_AND_THROW(contract_upgraded, (contract_id)); if (data_ptr->is_destroyed_contract(contract_entry->state)) FC_CAPTURE_AND_THROW(contract_destroyed, (contract_id)); if (upgrader_owner_key != contract_entry->owner) FC_CAPTURE_AND_THROW(contract_no_permission, (upgrader_name)); if (NOT data_ptr->is_valid_contract_name(new_contract_name)) FC_CAPTURE_AND_THROW(contract_name_illegal, (new_contract_name)); if (NOT data_ptr->is_valid_contract_description(new_contract_desc)) FC_CAPTURE_AND_THROW(contract_desc_illegal, (new_contract_desc)); if (data_ptr->get_contract_entry(new_contract_name).valid()) FC_CAPTURE_AND_THROW(contract_name_in_use, (new_contract_name)); // check contract margin BalanceIdType margin_balance_id = data_ptr->get_balanceid(contract_entry->id, WithdrawBalanceTypes::withdraw_margin_type); oBalanceEntry margin_balance_entry = 
data_ptr->get_balance_entry(margin_balance_id); FC_ASSERT(margin_balance_entry.valid(), "invalid margin balance id"); FC_ASSERT(margin_balance_entry->asset_id() == 0, "invalid margin balance asset type"); if (margin_balance_entry->balance != ALP_DEFAULT_CONTRACT_MARGIN) FC_CAPTURE_AND_THROW(invalid_margin_amount, (margin_balance_entry->balance)); Asset margin = Asset(ALP_DEFAULT_CONTRACT_MARGIN); Asset fee = get_transaction_fee(0); Asset asset_for_exec = my->_blockchain->get_amount(CONTRACT_TESTING_LIMIT_MAX); auto trans_entry = upgrade_contract(contract_id, upgrader_name, new_contract_name, new_contract_desc, ALP_BLOCKCHAIN_SYMBOL, (double)asset_for_exec.amount / ALP_BLOCKCHAIN_PRECISION, sign, true ); SignedTransaction trx = trans_entry.trx; ChainInterfacePtr state_ptr = get_correct_state_ptr(); PendingChainStatePtr pend_state = std::make_shared<PendingChainState>(state_ptr); TransactionEvaluationStatePtr trx_eval_state = std::make_shared<TransactionEvaluationState>(pend_state.get()); trx_eval_state->skipexec = false; trx_eval_state->evaluate_contract_testing = true; trx_eval_state->evaluate(trx); std::vector<thinkyoung::blockchain::Asset> asset_vec; asset_vec.emplace_back(fee); asset_vec.emplace_back(trx_eval_state->exec_cost); asset_vec.emplace_back(margin); return asset_vec; } FC_CAPTURE_AND_RETHROW((contract_id)(upgrader_name)(new_contract_name)(new_contract_desc)) } WalletTransactionEntry Wallet::upgrade_contract( const Address& contract_id, const string& upgrader_name, const string& new_contract_name, const string& new_contract_desc, const std::string& asset_symbol, const double exec_limit, bool sign, bool is_testing) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); PublicKeyType upgrader_owner_key = get_owner_public_key(upgrader_name); Address upgrader_owner_address = Address(upgrader_owner_key); SignedTransaction trx; unordered_set<Address> required_signatures; ChainInterfacePtr data_ptr = get_correct_state_ptr(); const auto required_fees = get_transaction_fee(); oContractEntry contract_entry = data_ptr->get_contract_entry(contract_id); if (!contract_entry.valid()) FC_CAPTURE_AND_THROW(contract_not_exist, (contract_id)); if (NOT data_ptr->is_temporary_contract(contract_entry->level)) FC_CAPTURE_AND_THROW(contract_upgraded, (contract_id)); if (data_ptr->is_destroyed_contract(contract_entry->state)) FC_CAPTURE_AND_THROW(contract_destroyed, (contract_id)); if (upgrader_owner_key != contract_entry->owner) FC_CAPTURE_AND_THROW(contract_no_permission, (upgrader_name)); if (NOT data_ptr->is_valid_contract_name(new_contract_name)) FC_CAPTURE_AND_THROW(contract_name_illegal, (new_contract_name)); if (NOT data_ptr->is_valid_contract_description(new_contract_desc)) FC_CAPTURE_AND_THROW(contract_desc_illegal, (new_contract_desc)); if (data_ptr->get_contract_entry(new_contract_name).valid()) FC_CAPTURE_AND_THROW(contract_name_in_use, (new_contract_name)); const auto asset_rec = data_ptr->get_asset_entry(asset_symbol); FC_ASSERT(asset_rec.valid(), "Asset not exist!"); FC_ASSERT(asset_rec->id == 0, "asset_symbol must be ACT"); const auto asset_id = asset_rec->id; const int64_t precision = asset_rec->precision ? 
asset_rec->precision : 1; //ShareType amount_limit = cost_limit * precision; //Asset asset_limit(amount_limit, asset_id); Asset asset_limit = to_asset(asset_rec->id, precision, exec_limit); map<BalanceIdType, ShareType> balances; if (!is_testing) { if (my->_blockchain->get_is_in_sandbox()) sandbox_get_enough_balances(upgrader_name, required_fees + asset_limit, balances, required_signatures); else get_enough_balances(upgrader_name, required_fees + asset_limit, balances, required_signatures); } else required_signatures.insert(upgrader_owner_address); trx.upgrade_contract(contract_id, new_contract_name, new_contract_desc, asset_limit, required_fees, balances); BalanceIdType margin_balance_id = data_ptr->get_balanceid(contract_id, WithdrawBalanceTypes::withdraw_margin_type); oBalanceEntry balance_entry = data_ptr->get_balance_entry(margin_balance_id); FC_ASSERT(balance_entry.valid(), "Can not get margin balance!"); FC_ASSERT(balance_entry->asset_id() == 0, "margin balance asset is not ACT!"); Asset margin_balance(balance_entry->balance, balance_entry->asset_id()); trx.expiration = blockchain::now() + get_transaction_expiration(); if (sign) my->sign_transaction(trx, required_signatures); //for scanning contract because upgrading contract my->_dirty_contracts = true; auto entry = LedgerEntry(); auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees + margin_balance; trans_entry.extra_addresses.push_back(contract_id); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW((contract_id)(upgrader_name)(new_contract_name)(new_contract_desc)) } std::vector<thinkyoung::blockchain::Asset> Wallet::destroy_contract_testing( const Address& contract_id, const string& destroyer_name, bool sign ) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); ChainInterfacePtr data_ptr = get_correct_state_ptr(); const auto required_fees = get_transaction_fee(); PublicKeyType destroyer_owner_key = get_owner_public_key(destroyer_name); oContractEntry contract_entry = data_ptr->get_contract_entry(contract_id); if (!contract_entry.valid()) FC_CAPTURE_AND_THROW(contract_not_exist, (contract_id)); if (data_ptr->is_destroyed_contract(contract_entry->state)) FC_CAPTURE_AND_THROW(contract_destroyed, (contract_id)); if (NOT data_ptr->is_temporary_contract(contract_entry->level)) FC_CAPTURE_AND_THROW(permanent_contract, (contract_id)); if (destroyer_owner_key != contract_entry->owner) FC_CAPTURE_AND_THROW(contract_no_permission, (destroyer_name)); Asset fee = get_transaction_fee(0); Asset asset_for_exec = my->_blockchain->get_amount(CONTRACT_TESTING_LIMIT_MAX); auto trans_entry = destroy_contract(contract_id, destroyer_name, ALP_BLOCKCHAIN_SYMBOL, (double)asset_for_exec.amount / ALP_BLOCKCHAIN_PRECISION, sign, true ); SignedTransaction trx = trans_entry.trx; ChainInterfacePtr state_ptr = get_correct_state_ptr(); PendingChainStatePtr pend_state = std::make_shared<PendingChainState>(state_ptr); TransactionEvaluationStatePtr trx_eval_state = std::make_shared<TransactionEvaluationState>(pend_state.get()); trx_eval_state->skipexec = false; trx_eval_state->evaluate_contract_testing = true; trx_eval_state->evaluate(trx); std::vector<thinkyoung::blockchain::Asset> asset_vec; asset_vec.emplace_back(fee); asset_vec.emplace_back(trx_eval_state->exec_cost); return asset_vec; } FC_CAPTURE_AND_RETHROW((contract_id)(destroyer_name)) } WalletTransactionEntry Wallet::destroy_contract( const Address& contract_id, const 
string& destroyer_name, const std::string& asset_symbol, double exec_limit, bool sign, bool is_testing) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); PublicKeyType destroyer_owner_key = get_owner_public_key(destroyer_name); Address destroyer_owner_address = Address(destroyer_owner_key); SignedTransaction trx; unordered_set<Address> required_signatures; ChainInterfacePtr data_ptr = get_correct_state_ptr(); const auto required_fees = get_transaction_fee(); oContractEntry contract_entry = data_ptr->get_contract_entry(contract_id); if (!contract_entry.valid()) FC_CAPTURE_AND_THROW(contract_not_exist, (contract_id)); if (data_ptr->is_destroyed_contract(contract_entry->state)) FC_CAPTURE_AND_THROW(contract_destroyed, (contract_id)); if (NOT data_ptr->is_temporary_contract(contract_entry->level)) FC_CAPTURE_AND_THROW(permanent_contract, (contract_id)); if (destroyer_owner_key != contract_entry->owner) FC_CAPTURE_AND_THROW(contract_no_permission, (destroyer_name)); const auto asset_rec = data_ptr->get_asset_entry(asset_symbol); FC_ASSERT(asset_rec.valid(), "Asset not exist!"); FC_ASSERT(asset_rec->id == 0, "asset_symbol must be ACT"); const auto asset_id = asset_rec->id; const int64_t precision = asset_rec->precision ? asset_rec->precision : 1; //ShareType amount_limit = cost_limit * precision; //Asset asset_limit(amount_limit, asset_id); Asset asset_limit = to_asset(asset_rec->id, precision, exec_limit); map<BalanceIdType, ShareType> balances; if (!is_testing) { if (my->_blockchain->get_is_in_sandbox()) sandbox_get_enough_balances(destroyer_name, required_fees + asset_limit, balances, required_signatures); else get_enough_balances(destroyer_name, required_fees + asset_limit, balances, required_signatures); } else required_signatures.insert(destroyer_owner_address); trx.destroy_contract(contract_id, asset_limit, required_fees, balances); BalanceIdType margin_balance_id = data_ptr->get_balanceid(contract_id, WithdrawBalanceTypes::withdraw_margin_type); oBalanceEntry balance_entry = data_ptr->get_balance_entry(margin_balance_id); FC_ASSERT(balance_entry.valid(), "Can not get margin balance!"); FC_ASSERT(balance_entry->asset_id() == 0, "margin balance asset is not ACT!"); FC_ASSERT(balance_entry->balance == ALP_DEFAULT_CONTRACT_MARGIN, "invalid margin balance amount"); BalanceIdType contract_balance_id = data_ptr->get_balanceid(contract_id, WithdrawBalanceTypes::withdraw_contract_type); balance_entry = data_ptr->get_balance_entry(contract_balance_id); if (balance_entry.valid()) { FC_ASSERT(balance_entry->asset_id() == 0, "contract balance asset is not ACT!"); } trx.expiration = blockchain::now() + get_transaction_expiration(); if (sign) my->sign_transaction(trx, required_signatures); //for scanning contract because destroying contract my->_dirty_contracts = true; auto entry = LedgerEntry(); auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees; trans_entry.extra_addresses.push_back(contract_id); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW((contract_id)(destroyer_name)) } WalletTransactionEntry Wallet::transfer_asset_to_many_address( const string& amount_to_transfer_symbol, const string& from_account_name, const std::unordered_map<Address, double>& to_address_amounts, const string& memo_message, bool sign) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); 
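        // Sanity checks: the asset symbol must exist on-chain, the sender must be a local
        // receive account, and at least one destination address must be supplied.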
FC_ASSERT(my->_blockchain->is_valid_symbol(amount_to_transfer_symbol)); FC_ASSERT(my->is_receive_account(from_account_name)); FC_ASSERT(to_address_amounts.size() > 0); auto asset_rec = my->_blockchain->get_asset_entry(amount_to_transfer_symbol); FC_ASSERT(asset_rec.valid()); auto asset_id = asset_rec->id; PrivateKeyType sender_private_key = get_active_private_key(from_account_name); PublicKeyType sender_public_key = sender_private_key.get_public_key(); Address sender_account_address(sender_private_key.get_public_key()); SignedTransaction trx; unordered_set<Address> required_signatures; trx.expiration = blockchain::now() + get_transaction_expiration(); Asset total_asset_to_transfer(0, asset_id); auto required_fees = get_transaction_fee(); vector<Address> to_addresses; for (const auto& address_amount : to_address_amounts) { auto real_amount_to_transfer = address_amount.second; ShareType amount_to_transfer((ShareType)(real_amount_to_transfer * asset_rec->precision)); Asset asset_to_transfer(amount_to_transfer, asset_id); my->withdraw_to_transaction(asset_to_transfer, from_account_name, trx, required_signatures); total_asset_to_transfer += asset_to_transfer; trx.deposit(address_amount.first, asset_to_transfer); to_addresses.push_back(address_amount.first); } my->withdraw_to_transaction(required_fees, from_account_name, trx, required_signatures); auto entry = LedgerEntry(); entry.from_account = sender_public_key; entry.amount = total_asset_to_transfer; if (memo_message != "") entry.memo = memo_message; else entry.memo = "Transfer to many addresses"; auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees; trans_entry.extra_addresses = to_addresses; if (sign) my->sign_transaction(trx, required_signatures); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW((amount_to_transfer_symbol)(from_account_name)(to_address_amounts)(memo_message)) } WalletTransactionEntry Wallet::register_account( const string& account_to_register, const variant& public_data, uint8_t delegate_pay_rate, const string& pay_with_account_name, AccountType new_account_type, bool sign) { try { if (!my->_blockchain->is_valid_account_name(account_to_register)) FC_THROW_EXCEPTION(invalid_name, "Invalid account name!", ("account_to_register", account_to_register)); if (delegate_pay_rate < 100) { FC_THROW_EXCEPTION(invalid_delegate_pay_rate, "invalid_delegate_pay_rate", ("delegate_pay_rate", delegate_pay_rate)); } FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); const auto registered_account = my->_blockchain->get_account_entry(account_to_register); if (registered_account.valid()) FC_THROW_EXCEPTION(duplicate_account_name, "This account name has already been registered!"); const auto payer_public_key = get_owner_public_key(pay_with_account_name); Address from_account_address(payer_public_key); const auto account_public_key = get_owner_public_key(account_to_register); SignedTransaction trx; unordered_set<Address> required_signatures; trx.expiration = blockchain::now() + get_transaction_expiration(); optional<AccountMetaInfo> meta_info = AccountMetaInfo(new_account_type); // TODO: This is a hack to register with different owner and active keys until the API is fixed try { const WalletAccountEntry local_account = get_account(account_to_register); trx.register_account(account_to_register, public_data, local_account.owner_key, local_account.active_key(), delegate_pay_rate <= 100 ? 
delegate_pay_rate : -1, meta_info); } catch (...) { trx.register_account(account_to_register, public_data, account_public_key, // master account_public_key, // active delegate_pay_rate <= 100 ? delegate_pay_rate : -1, meta_info); } auto required_fees = get_transaction_fee(); bool as_delegate = false; if (delegate_pay_rate <= 100) { required_fees += Asset(my->_blockchain->get_delegate_registration_fee(delegate_pay_rate), 0); as_delegate = true; } else { #if 0 required_fees += Asset(ALP_BLOCKCHAIN_REGISTER_ACCOUNT_FEE, 0); #endif } my->withdraw_to_transaction(required_fees, pay_with_account_name, trx, required_signatures); auto entry = LedgerEntry(); entry.from_account = payer_public_key; entry.to_account = account_public_key; entry.memo = "register " + account_to_register + (as_delegate ? " as a delegate" : ""); auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees; if (sign) my->sign_transaction(trx, required_signatures); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW((account_to_register)(public_data)(pay_with_account_name)(delegate_pay_rate)) } WalletTransactionEntry Wallet::create_asset( const string& symbol, const string& asset_name, const string& description, const variant& data, const string& issuer_account_name, const string& max_share_supply, uint64_t precision, bool is_market_issued, bool sign) { try { FC_ASSERT(blockchain::is_power_of_ten(precision)); FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(my->_blockchain->is_valid_symbol_name(symbol)); // valid length and characters FC_ASSERT(asset_name.size() <= ALP_BLOCKCHAIN_MAX_SYMBOL_NAME_SIZE, "Asset name too big"); FC_ASSERT(description.size() <= ALP_BLOCKCHAIN_MAX_SYMBOL_DES_SIZE, "Asset description too big"); FC_ASSERT(!my->_blockchain->is_valid_symbol(symbol)); // not yet registered SignedTransaction trx; unordered_set<Address> required_signatures; trx.expiration = blockchain::now() + get_transaction_expiration(); auto required_fees = get_transaction_fee(); required_fees += Asset(my->_blockchain->get_asset_registration_fee(symbol.size()), 0); if (!my->_blockchain->is_valid_account_name(issuer_account_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid account name!", ("issuer_account_name", issuer_account_name)); auto from_account_address = get_owner_public_key(issuer_account_name); auto oname_rec = my->_blockchain->get_account_entry(issuer_account_name); if (!oname_rec.valid()) FC_THROW_EXCEPTION(account_not_registered, "Assets can only be created by registered accounts", ("issuer_account_name", issuer_account_name)); my->withdraw_to_transaction(required_fees, issuer_account_name, trx, required_signatures); required_signatures.insert(oname_rec->active_key()); //check this way to avoid overflow auto ipos = max_share_supply.find("."); if (ipos != string::npos) { FC_ASSERT(false, "Asset supply must be integer"); #if 0 string str = max_share_supply.substr(ipos + 1); int64_t precision_input = static_cast<int64_t>(pow(10, str.size())); FC_ASSERT((static_cast<uint64_t>(precision_input) <= precision), "Precision is not correct"); #endif } double dAmountToCreate = std::stod(max_share_supply); ShareType max_share_supply_in_internal_units = floor(dAmountToCreate * precision + 0.5); FC_ASSERT(ALP_BLOCKCHAIN_MAX_SHARES > max_share_supply_in_internal_units); if (NOT is_market_issued) { trx.create_asset(symbol, asset_name, description, data, oname_rec->id, max_share_supply_in_internal_units, precision); } 
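            // Market-issued assets are registered under the reserved AssetEntry::market_issuer_id
            // rather than the issuer account's id.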
            else {
                trx.create_asset(symbol, asset_name, description, data, AssetEntry::market_issuer_id, max_share_supply_in_internal_units, precision);
            }

            auto entry = LedgerEntry();
            entry.from_account = from_account_address;
            entry.to_account = from_account_address;
            entry.memo = "create " + symbol + " (" + asset_name + ")";

            auto trans_entry = WalletTransactionEntry();
            trans_entry.ledger_entries.push_back(entry);
            trans_entry.fee = required_fees;

            if (sign)
                my->sign_transaction(trx, required_signatures);
            trans_entry.trx = trx;
            return trans_entry;
        }
        FC_CAPTURE_AND_RETHROW((symbol)(asset_name)(description)(issuer_account_name))
    }

    WalletTransactionEntry Wallet::update_asset(
        const string& symbol,
        const optional<string>& name,
        const optional<string>& description,
        const optional<variant>& public_data,
        const optional<double>& maximum_share_supply,
        const optional<uint64_t>& precision,
        const ShareType issuer_fee,
        double market_fee,
        uint32_t flags,
        uint32_t issuer_perms,
        const string& issuer_account_name,
        uint32_t required_sigs,
        const vector<Address>& authority,
        bool sign
    ) {
        try {
            if (NOT is_open()) FC_CAPTURE_AND_THROW(wallet_closed);
            if (NOT is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked);

            optional<AccountIdType> issuer_account_id;
            if (issuer_account_name != "") {
                auto issuer_account = my->_blockchain->get_account_entry(issuer_account_name);
                FC_ASSERT(issuer_account.valid());
                issuer_account_id = issuer_account->id;
            }

            TransactionBuilderPtr builder = create_transaction_builder();
            builder->update_asset(symbol, name, description, public_data, maximum_share_supply, precision,
                                  issuer_fee, market_fee, flags, issuer_perms, issuer_account_id, required_sigs, authority);
            builder->finalize();

            if (sign)
                return builder->sign();
            return builder->transaction_entry;
        }
        FC_CAPTURE_AND_RETHROW((symbol)(name)(description)(public_data)(precision)(issuer_fee)(flags)(issuer_perms)(required_sigs)(authority)(sign))
    }

    WalletTransactionEntry Wallet::issue_asset(
        const string& amount_to_issue,
        const string& symbol,
        const string& to_account_name,
        const string& memo_message,
        bool sign) {
        try {
            FC_ASSERT(is_open(), "Wallet not open!");
            FC_ASSERT(is_unlocked(), "Wallet not unlock!");
            FC_ASSERT(my->_blockchain->is_valid_symbol(symbol));

            SignedTransaction trx;
            unordered_set<Address> required_signatures;
            trx.expiration = blockchain::now() + get_transaction_expiration();

            auto required_fees = get_transaction_fee();
            auto asset_entry = my->_blockchain->get_asset_entry(symbol);
            FC_ASSERT(asset_entry.valid(), "no such asset entry");
            auto issuer_account = my->_blockchain->get_account_entry(asset_entry->issuer_account_id);
            FC_ASSERT(issuer_account, "uh oh! 
no account for valid asset"); auto authority = asset_entry->authority; auto ipos = amount_to_issue.find("."); if (ipos != string::npos) { string str = amount_to_issue.substr(ipos + 1); int64_t precision_input = static_cast<int64_t>(pow(10, str.size())); FC_ASSERT((static_cast<uint32_t>(precision_input) <= asset_entry->precision), "Precision is not correct"); } double dAmountToIssue = std::stod(amount_to_issue); Asset shares_to_issue(static_cast<ShareType>(floor(dAmountToIssue * asset_entry->precision + 0.5)), asset_entry->id); my->withdraw_to_transaction(required_fees, issuer_account->name, trx, required_signatures); trx.issue(shares_to_issue); for (auto owner : authority.owners) required_signatures.insert(owner); oWalletAccountEntry issuer = my->_wallet_db.lookup_account(asset_entry->issuer_account_id); FC_ASSERT(issuer.valid()); oWalletKeyEntry issuer_key = my->_wallet_db.lookup_key(issuer->active_address()); FC_ASSERT(issuer_key && issuer_key->has_private_key()); auto sender_private_key = issuer_key->decrypt_private_key(my->_wallet_password); const WalletAccountEntry receiver_account = get_account(to_account_name); trx.deposit_to_account(receiver_account.active_key(), shares_to_issue, sender_private_key, memo_message, issuer->active_key(), my->get_new_private_key(issuer_account->name), from_memo, receiver_account.is_titan_account() ); auto entry = LedgerEntry(); entry.from_account = issuer->active_key(); entry.to_account = receiver_account.active_key(); entry.amount = shares_to_issue; entry.memo = "issue " + my->_blockchain->to_pretty_asset(shares_to_issue); auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees; if (sign) my->sign_transaction(trx, required_signatures); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW() } WalletTransactionEntry Wallet::issue_asset_to_addresses( const string& symbol, const map<string, ShareType>& addresses) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); FC_ASSERT(my->_blockchain->is_valid_symbol(symbol)); SignedTransaction trx; unordered_set<Address> required_signatures; trx.expiration = blockchain::now() + get_transaction_expiration(); auto required_fees = get_transaction_fee(); auto asset_entry = my->_blockchain->get_asset_entry(symbol); FC_ASSERT(asset_entry.valid(), "no such asset entry"); auto issuer_account = my->_blockchain->get_account_entry(asset_entry->issuer_account_id); FC_ASSERT(issuer_account, "uh oh! 
no account for valid asset"); auto authority = asset_entry->authority; Asset shares_to_issue(0, asset_entry->id); for (auto pair : addresses) { auto addr = Address(pair.first); auto amount = Asset(pair.second, asset_entry->id); trx.deposit(addr, amount); shares_to_issue += amount; } my->withdraw_to_transaction(required_fees, issuer_account->name, trx, required_signatures); trx.issue(shares_to_issue); for (auto owner : authority.owners) required_signatures.insert(owner); // required_signatures.insert( issuer_account->active_key() ); oWalletAccountEntry issuer = my->_wallet_db.lookup_account(asset_entry->issuer_account_id); FC_ASSERT(issuer.valid()); oWalletKeyEntry issuer_key = my->_wallet_db.lookup_key(issuer->owner_address()); FC_ASSERT(issuer_key && issuer_key->has_private_key()); auto entry = LedgerEntry(); entry.from_account = issuer->active_key(); entry.amount = shares_to_issue; entry.memo = "issue to many addresses"; auto trans_entry = WalletTransactionEntry(); trans_entry.ledger_entries.push_back(entry); trans_entry.fee = required_fees; // if( sign ) my->sign_transaction(trx, required_signatures); trans_entry.trx = trx; return trans_entry; } FC_CAPTURE_AND_RETHROW() } void Wallet::update_account_private_data(const string& account_to_update, const variant& private_data) { get_account(account_to_update); /* Just to check input */ auto oacct = my->_wallet_db.lookup_account(account_to_update); FC_ASSERT(oacct.valid()); oacct->private_data = private_data; my->_wallet_db.store_account(*oacct); } WalletTransactionEntry Wallet::update_registered_account( const string& account_to_update, const string& pay_from_account, optional<variant> public_data, uint8_t delegate_pay_rate, bool sign) { try { FC_ASSERT(is_unlocked(), "Wallet not unlock!"); if (delegate_pay_rate < 100) { FC_THROW_EXCEPTION(invalid_delegate_pay_rate, "invalid_delegate_pay_rate", ("delegate_pay_rate", delegate_pay_rate)); } auto account = get_account(account_to_update); oWalletAccountEntry payer; if (!pay_from_account.empty()) payer = get_account(pay_from_account); optional<uint8_t> pay; if (delegate_pay_rate <= 100) pay = delegate_pay_rate; TransactionBuilderPtr builder = create_transaction_builder(); builder->update_account_registration(account, public_data, optional<PublicKeyType>(), pay, payer). finalize(); if (sign) return builder->sign(); return builder->transaction_entry; } FC_CAPTURE_AND_RETHROW((account_to_update)(pay_from_account)(public_data)(sign)) } WalletTransactionEntry Wallet::update_active_key( const std::string& account_to_update, const std::string& pay_from_account, const std::string& new_active_key, bool sign) { try { FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto account = get_account(account_to_update); oWalletAccountEntry payer; if (!pay_from_account.empty()) payer = get_account(pay_from_account); PublicKeyType new_public_key; if (new_active_key.empty()) { new_public_key = my->get_new_public_key(account_to_update); } else { const optional<PrivateKeyType> new_private_key = utilities::wif_to_key(new_active_key); FC_ASSERT(new_private_key.valid(), "Unable to parse new active key."); new_public_key = import_private_key(*new_private_key, account_to_update, false); } TransactionBuilderPtr builder = create_transaction_builder(); builder->update_account_registration(account, optional<variant>(), new_public_key, optional<ShareType>(), payer). 
finalize(); if (sign) { my->_dirty_accounts = true; return builder->sign(); } return builder->transaction_entry; } FC_CAPTURE_AND_RETHROW((account_to_update)(pay_from_account)(sign)) } WalletTransactionEntry Wallet::retract_account( const std::string& account_to_retract, const std::string& pay_from_account, bool sign) { try { FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto account = get_account(account_to_retract); oWalletAccountEntry payer; if (!pay_from_account.empty()) payer = get_account(pay_from_account); fc::ecc::public_key empty_pk; PublicKeyType new_public_key(empty_pk); TransactionBuilderPtr builder = create_transaction_builder(); builder->update_account_registration(account, optional<variant>(), new_public_key, optional<ShareType>(), payer). finalize(); if (sign) return builder->sign(); return builder->transaction_entry; } FC_CAPTURE_AND_RETHROW((account_to_retract)(pay_from_account)(sign)) } void Wallet::set_transaction_fee(const Asset& fee) { try { FC_ASSERT(is_open(), "Wallet not open!"); if (fee.amount < ALP_DEFAULT_TRANSACTION_FEE || fee.asset_id != 0) FC_THROW_EXCEPTION(invalid_fee, "Invalid transaction fee!", ("fee", fee)); my->_wallet_db.set_property(default_transaction_priority_fee, variant(fee)); } FC_CAPTURE_AND_RETHROW((fee)) } Asset Wallet::get_transaction_imessage_fee(const std::string & imessage)const { try { FC_ASSERT(is_open(), "Wallet not open!"); Asset require_fee; auto max_soft_length = get_transaction_imessage_soft_max_length(); if (imessage.size() > max_soft_length) { FC_THROW_EXCEPTION(imessage_size_bigger_than_soft_max_lenth, "Invalid transaction imessage fee coefficient!", ("imessage_size", imessage.size())); } if (ALP_BLOCKCHAIN_MAX_FREE_MESSAGE_SIZE >= imessage.size()) { return require_fee; } auto min_fee_coe = get_transaction_imessage_fee_coe(); require_fee.amount = min_fee_coe * (imessage.size() - ALP_BLOCKCHAIN_MAX_FREE_MESSAGE_SIZE); // require_fee.asset_id = 2; return require_fee; } FC_CAPTURE_AND_RETHROW((imessage.size())) } Asset Wallet::get_transaction_fee(const AssetIdType desired_fee_asset_id)const { try { FC_ASSERT(is_open(), "Wallet not open!"); // TODO: support price conversion using price from blockchain Asset xts_fee(ALP_WALLET_DEFAULT_TRANSACTION_FEE, 0); try { xts_fee = my->_wallet_db.get_property(default_transaction_priority_fee).as<Asset>(); } catch (...) { } /* //delete??? 
if( desired_fee_asset_id != 0 ) { const auto asset_rec = my->_blockchain->get_asset_entry( desired_fee_asset_id ); FC_ASSERT( asset_rec.valid() ); if( asset_rec->is_market_issued() ) { auto median_price = my->_blockchain->get_median_delegate_price( desired_fee_asset_id, asset_id_type( 0 ) ); if( median_price ) { xts_fee += xts_fee + xts_fee; // fees paid in something other than XTS are discounted 50% auto alt_fees_paid = xts_fee * *median_price; return alt_fees_paid; } } } */ return xts_fee; } FC_CAPTURE_AND_RETHROW((desired_fee_asset_id)) } bool Wallet::asset_can_pay_fee(const AssetIdType desired_fee_asset_id) const { return get_transaction_fee(desired_fee_asset_id).asset_id == desired_fee_asset_id; } void Wallet::set_last_scanned_block_number(uint32_t block_num) { try { FC_ASSERT(is_open(), "Wallet not open!"); my->_wallet_db.set_property(last_unlocked_scanned_block_number, fc::variant(block_num)); } FC_CAPTURE_AND_RETHROW() } void Wallet::set_last_scanned_block_number_for_alp(uint32_t block_num) { try { FC_ASSERT(is_open(), "Wallet not open!"); my->_wallet_db.set_property(last_scanned_block_number_for_thinkyoung, fc::variant(block_num)); } FC_CAPTURE_AND_RETHROW() } uint32_t Wallet::get_last_scanned_block_number()const { try { FC_ASSERT(is_open(), "Wallet not open!"); try { return my->_wallet_db.get_property(last_unlocked_scanned_block_number).as<uint32_t>(); } catch (...) { } return my->_blockchain->get_head_block_num(); } FC_CAPTURE_AND_RETHROW() } uint32_t Wallet::get_last_scanned_block_number_for_alp()const { try { FC_ASSERT(is_open(), "Wallet not open!"); try { return my->_wallet_db.get_property(last_scanned_block_number_for_thinkyoung).as<uint32_t>(); } catch (...) { } return my->_blockchain->get_head_block_num(); } FC_CAPTURE_AND_RETHROW() } void Wallet::set_transaction_imessage_fee_coe(const ImessageIdType& coe) { try { FC_ASSERT(is_open(), "Wallet not open!"); if (coe < ALP_BLOCKCHAIN_MIN_MESSAGE_FEE_COE) { FC_THROW_EXCEPTION(invalid_transaction_imessage_fee_coe, "Invalid transaction imessage fee coefficient!", ("fee_coe", coe)); } my->_wallet_db.set_property(transaction_min_imessage_fee_coe, fc::variant(coe)); } FC_CAPTURE_AND_RETHROW() } ImessageIdType Wallet::get_transaction_imessage_fee_coe()const { try { FC_ASSERT(is_open(), "Wallet not open!"); try { return my->_wallet_db.get_property(transaction_min_imessage_fee_coe).as<ImessageIdType>(); } catch (...) { } return 0; } FC_CAPTURE_AND_RETHROW() } void Wallet::set_transaction_imessage_soft_max_length(const ImessageLengthIdType& length) { try { FC_ASSERT(is_open(), "Wallet not open!"); if (length < 0 || length > ALP_BLOCKCHAIN_MAX_MESSAGE_SIZE) { FC_THROW_EXCEPTION(invalid_transaction_imessage_soft_length, "invalid imessage soft max length!", ("length", length)); } my->_wallet_db.set_property(transaction_min_imessage_soft_length, fc::variant(length)); } FC_CAPTURE_AND_RETHROW() } ImessageLengthIdType Wallet::get_transaction_imessage_soft_max_length()const { try { FC_ASSERT(is_open(), "Wallet not open!"); try { return my->_wallet_db.get_property(transaction_min_imessage_soft_length).as<ImessageIdType>(); } catch (...) 
{ } return 0; } FC_CAPTURE_AND_RETHROW() } void Wallet::set_transaction_expiration(uint32_t secs) { try { FC_ASSERT(is_open(), "Wallet not open!"); if (secs > ALP_BLOCKCHAIN_MAX_TRANSACTION_EXPIRATION_SEC) FC_THROW_EXCEPTION(invalid_expiration_time, "Invalid expiration time!", ("secs", secs)); my->_wallet_db.set_property(transaction_expiration_sec, fc::variant(secs)); } FC_CAPTURE_AND_RETHROW() } uint32_t Wallet::get_transaction_expiration()const { try { FC_ASSERT(is_open(), "Wallet not open!"); return my->_wallet_db.get_property(transaction_expiration_sec).as<uint32_t>(); } FC_CAPTURE_AND_RETHROW() } float Wallet::get_scan_progress()const { try { FC_ASSERT(is_open(), "Wallet not open!"); return my->_scan_progress; } FC_CAPTURE_AND_RETHROW() } string Wallet::get_key_label(const PublicKeyType& key)const { try { if (key == PublicKeyType()) return "ANONYMOUS"; auto account_entry = my->_wallet_db.lookup_account(key); if (account_entry.valid()) return account_entry->name; const auto blockchain_account_entry = my->_blockchain->get_account_entry(key); if (blockchain_account_entry.valid()) return blockchain_account_entry->name; const auto key_entry = my->_wallet_db.lookup_key(key); if (key_entry.valid()) { if (key_entry->memo.valid()) return *key_entry->memo; account_entry = my->_wallet_db.lookup_account(key_entry->account_address); if (account_entry.valid()) return account_entry->name; } return string(key); } FC_CAPTURE_AND_RETHROW((key)) } vector<string> Wallet::list() const { FC_ASSERT(is_enabled(), "Wallet is not enabled in this client!"); vector<string> wallets; if (!fc::is_directory(get_data_directory())) return wallets; auto path = get_data_directory(); fc::directory_iterator end_itr; // constructs terminator for (fc::directory_iterator itr(path); itr != end_itr; ++itr) { if (!itr->stem().string().empty() && fc::is_directory(*itr)) { wallets.push_back((*itr).stem().string()); } } std::sort(wallets.begin(), wallets.end()); return wallets; } bool Wallet::is_sending_address(const Address& addr)const { try { return !is_receive_address(addr); } FC_CAPTURE_AND_RETHROW() } bool Wallet::is_receive_address(const Address& addr)const { try { auto key_rec = my->_wallet_db.lookup_key(addr); if (key_rec.valid()) return key_rec->has_private_key(); return false; } FC_CAPTURE_AND_RETHROW() } vector<WalletAccountEntry> Wallet::list_accounts() const { try { const auto& accs = my->_wallet_db.get_accounts(); vector<WalletAccountEntry> accounts; accounts.reserve(accs.size()); for (const auto& item : accs) accounts.push_back(item.second); std::sort(accounts.begin(), accounts.end(), [](const WalletAccountEntry& a, const WalletAccountEntry& b) -> bool { return a.name.compare(b.name) < 0; }); return accounts; } FC_CAPTURE_AND_RETHROW() } vector<AccountAddressData> Wallet::list_addresses() const { try { const auto& accs = my->_wallet_db.get_accounts(); vector<AccountAddressData> receive_accounts; receive_accounts.reserve(accs.size()); for (const auto& item : accs) if (item.second.is_my_account) { receive_accounts.push_back((item.second)); } std::sort(receive_accounts.begin(), receive_accounts.end(), [](const AccountAddressData& a, const AccountAddressData& b) -> bool { return a.name.compare(b.name) < 0; }); return receive_accounts; } FC_CAPTURE_AND_RETHROW() } vector<WalletAccountEntry> Wallet::list_my_accounts() const { try { const auto& accs = my->_wallet_db.get_accounts(); vector<WalletAccountEntry> receive_accounts; receive_accounts.reserve(accs.size()); for (const auto& item : accs) if 
(item.second.is_my_account) receive_accounts.push_back(item.second); std::sort(receive_accounts.begin(), receive_accounts.end(), [](const WalletAccountEntry& a, const WalletAccountEntry& b) -> bool { return a.name.compare(b.name) < 0; }); return receive_accounts; } FC_CAPTURE_AND_RETHROW() } vector<WalletAccountEntry> Wallet::list_favorite_accounts() const { try { const auto& accs = my->_wallet_db.get_accounts(); vector<WalletAccountEntry> receive_accounts; receive_accounts.reserve(accs.size()); for (const auto& item : accs) { if (item.second.is_favorite) { receive_accounts.push_back(item.second); } } std::sort(receive_accounts.begin(), receive_accounts.end(), [](const WalletAccountEntry& a, const WalletAccountEntry& b) -> bool { return a.name.compare(b.name) < 0; }); return receive_accounts; } FC_CAPTURE_AND_RETHROW() } vector<WalletAccountEntry> Wallet::list_unregistered_accounts() const { try { const auto& accs = my->_wallet_db.get_accounts(); vector<WalletAccountEntry> receive_accounts; receive_accounts.reserve(accs.size()); for (const auto& item : accs) { if (item.second.id == 0) { receive_accounts.push_back(item.second); } } std::sort(receive_accounts.begin(), receive_accounts.end(), [](const WalletAccountEntry& a, const WalletAccountEntry& b) -> bool { return a.name.compare(b.name) < 0; }); return receive_accounts; } FC_CAPTURE_AND_RETHROW() } vector<WalletTransactionEntry> Wallet::get_pending_transactions()const { return my->get_pending_transactions(); } map<TransactionIdType, fc::exception> Wallet::get_pending_transaction_errors()const { try { map<TransactionIdType, fc::exception> transaction_errors; const auto& transaction_entrys = get_pending_transactions(); const auto relay_fee = my->_blockchain->get_relay_fee(); for (const auto& transaction_entry : transaction_entrys) { FC_ASSERT(!transaction_entry.is_virtual && !transaction_entry.is_confirmed); const auto error = my->_blockchain->get_transaction_error(transaction_entry.trx, relay_fee); if (!error.valid()) continue; transaction_errors[transaction_entry.trx.id()] = *error; } return transaction_errors; } FC_CAPTURE_AND_RETHROW() } PrivateKeyType Wallet::get_active_private_key(const string& account_name)const { try { if (!my->_blockchain->is_valid_account_name(account_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid account name!", ("account_name", account_name)); FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); auto opt_account = my->_wallet_db.lookup_account(account_name); FC_ASSERT(opt_account.valid(), "Unable to find account '${name}'", ("name", account_name)); auto opt_key = my->_wallet_db.lookup_key(opt_account->active_address()); FC_ASSERT(opt_key.valid(), "Unable to find key for account '${name}", ("name", account_name)); FC_ASSERT(opt_key->has_private_key()); return opt_key->decrypt_private_key(my->_wallet_password); } FC_CAPTURE_AND_RETHROW((account_name)) } PublicKeyType Wallet::get_active_public_key(const string& account_name)const { try { if (!my->_blockchain->is_valid_account_name(account_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid account name!", ("account_name", account_name)); FC_ASSERT(my->is_unique_account(account_name)); const auto registered_account = my->_blockchain->get_account_entry(account_name); if (registered_account.valid()) { if (registered_account->is_retracted()) FC_CAPTURE_AND_THROW(account_retracted, (registered_account)); return registered_account->active_key(); } FC_ASSERT(is_open(), "Wallet not open!"); const auto opt_account = 
my->_wallet_db.lookup_account(account_name); FC_ASSERT(opt_account.valid(), "Unable to find account '${name}'", ("name", account_name)); return opt_account->active_key(); } FC_CAPTURE_AND_RETHROW((account_name)) } PublicKeyType Wallet::get_owner_public_key(const string& account_name)const { try { if (!my->_blockchain->is_valid_account_name(account_name)) FC_THROW_EXCEPTION(invalid_name, "Invalid account name!", ("account_name", account_name)); FC_ASSERT(my->is_unique_account(account_name)); const auto registered_account = my->_blockchain->get_account_entry(account_name); if (registered_account.valid()) { if (registered_account->is_retracted()) FC_CAPTURE_AND_THROW(account_retracted, (registered_account)); return registered_account->owner_key; } FC_ASSERT(is_open(), "Wallet not open!"); const auto opt_account = my->_wallet_db.lookup_account(account_name); FC_ASSERT(opt_account.valid(), "Unable to find account '${name}'", ("name", account_name)); return opt_account->owner_key; } FC_CAPTURE_AND_RETHROW((account_name)) } vector<AccountEntry> Wallet::get_all_approved_accounts(const int8_t approval) { vector<AccountEntry> all_accounts; std::vector<AccountIdType> delegate_ids = my->_blockchain->get_all_delegates_by_vote(); for (const auto& item : delegate_ids) { auto delegate_account = my->_blockchain->get_account_entry(item); int8_t account_approval = this->get_account_approval(delegate_account->name); if (approval == account_approval) { all_accounts.push_back(*delegate_account); } } return all_accounts; } void Wallet::clear_account_approval(const string& account_name) { try { std::vector<AccountIdType> delegate_ids = my->_blockchain->get_all_delegates_by_vote(); for (const auto& item : delegate_ids) { auto delegate_account = my->_blockchain->get_account_entry(item); int8_t approval = this->get_account_approval(delegate_account->name); if (approval > 0) set_account_approval(delegate_account->name, 0); } // vector<account_id_type> delegate_ids = _chain_db->get_delegates_by_vote(first, count); // for (const auto& item : raw_votes) // { // auto delegate_account = pending_state->get_account_entry(item.first); // int8_t approval = this->get_account_approval(delegate_account->name); // if (approval>0) // set_account_approval(delegate_account->name, 0); // } } FC_CAPTURE_AND_RETHROW() } void Wallet::set_account_approval(const string& account_name, int8_t approval) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT((!account_name.empty()), "approve account name empty"); const auto account_entry = my->_blockchain->get_account_entry(account_name); auto war = my->_wallet_db.lookup_account(account_name); if (!account_entry.valid() && !war.valid()) FC_THROW_EXCEPTION(unknown_account, "Unknown account name!", ("account_name", account_name)); if (war.valid()) { war->approved = approval; my->_wallet_db.store_account(*war); return; } add_contact_account(account_name, account_entry->owner_key); set_account_approval(account_name, approval); } FC_CAPTURE_AND_RETHROW((account_name)(approval)) } int8_t Wallet::get_account_approval(const string& account_name)const { try { FC_ASSERT(is_open(), "Wallet not open!"); const auto account_entry = my->_blockchain->get_account_entry(account_name); auto war = my->_wallet_db.lookup_account(account_name); if (!account_entry.valid() && !war.valid()) FC_THROW_EXCEPTION(unknown_account, "Unknown account name!", ("account_name", account_name)); if (!war.valid()) return 0; return war->approved; } FC_CAPTURE_AND_RETHROW((account_name)) } oWalletAccountEntry 
Wallet::get_account_for_address(Address addr_in_account)const { try { FC_ASSERT(is_open(), "Wallet not open!"); const auto okey = my->_wallet_db.lookup_key(addr_in_account); if (!okey.valid()) return oWalletAccountEntry(); return my->_wallet_db.lookup_account(okey->account_address); } FC_CAPTURE_AND_RETHROW() } vector<EscrowSummary> Wallet::get_escrow_balances(const string& account_name) { vector<EscrowSummary> result; FC_ASSERT(is_open(), "Wallet not open!"); if (!account_name.empty()) get_account(account_name); /* Just to check input */ map<string, vector<BalanceEntry>> balance_entrys; const auto pending_state = my->_blockchain->get_pending_state(); const auto scan_balance = [&](const BalanceEntry& entry) { // check to see if it is a withdraw by escrow entry if (entry.condition.type == withdraw_escrow_type) { auto escrow_cond = entry.condition.as<withdraw_with_escrow>(); // lookup account for each key if known // lookup transaction that created the balance entry in the local wallet // if the sender or receiver is one of our accounts and isn't filtered by account_name // then add the escrow balance to the output. const auto sender_key_entry = my->_wallet_db.lookup_key(escrow_cond.sender); const auto receiver_key_entry = my->_wallet_db.lookup_key(escrow_cond.receiver); /* if( !((sender_key_entry && sender_key_entry->has_private_key()) || (receiver_key_entry && receiver_key_entry->has_private_key())) ) { ilog( "no private key for sender nor receiver" ); return; // no private key for the sender nor receiver } */ EscrowSummary sum; sum.balance_id = entry.id(); sum.balance = entry.get_spendable_balance(time_point_sec()); if (sender_key_entry) { const auto account_address = sender_key_entry->account_address; const auto account_entry = my->_wallet_db.lookup_account(account_address); if (account_entry) { const auto name = account_entry->name; sum.sender_account_name = name; } else { auto registered_account = my->_blockchain->get_account_entry(account_address); if (registered_account) sum.sender_account_name = registered_account->name; else sum.sender_account_name = string(escrow_cond.sender); } } else { sum.sender_account_name = string(escrow_cond.sender); } if (receiver_key_entry) { const auto account_address = receiver_key_entry->account_address; const auto account_entry = my->_wallet_db.lookup_account(account_address); const auto name = account_entry.valid() ? 
account_entry->name : string(account_address); sum.receiver_account_name = name; } else { sum.receiver_account_name = "UNKNOWN"; } auto agent_account = my->_blockchain->get_account_entry(escrow_cond.escrow); if (agent_account) { if (agent_account->is_retracted()) FC_CAPTURE_AND_THROW(account_retracted, (agent_account)); sum.escrow_agent_account_name = agent_account->name; } else { sum.escrow_agent_account_name = string(escrow_cond.escrow); } sum.agreement_digest = escrow_cond.agreement_digest; if (account_name.size()) { if (sum.sender_account_name == account_name || sum.receiver_account_name == account_name) { result.emplace_back(sum); } else { wlog("skip ${s}", ("s", sum)); } } else result.emplace_back(sum); } }; my->_blockchain->scan_balances(scan_balance); return result; } void Wallet::scan_balances(const function<void(const BalanceIdType&, const BalanceEntry&)> callback)const { for (const auto& item : my->_balance_entrys) callback(item.first, item.second); } AccountBalanceEntrySummaryType Wallet::get_spendable_account_balance_entrys(const string& account_name)const { try { map<string, vector<BalanceEntry>> balances; const time_point_sec now = my->_blockchain->get_pending_state()->now(); const auto scan_balance = [&](const BalanceIdType& id, const BalanceEntry& entry) { if (entry.condition.type != withdraw_signature_type) return; const Asset balance = entry.get_spendable_balance(now); if (balance.amount == 0) return; const optional<Address> owner = entry.owner(); if (!owner.valid()) return; const oWalletKeyEntry key_entry = my->_wallet_db.lookup_key(*owner); if (!key_entry.valid() || !key_entry->has_private_key()) return; const oWalletAccountEntry account_entry = my->_wallet_db.lookup_account(key_entry->account_address); const string name = account_entry.valid() ? account_entry->name : string(key_entry->public_key); if (!account_name.empty() && name != account_name) return; balances[name].push_back(entry); }; scan_balances(scan_balance); return balances; } FC_CAPTURE_AND_RETHROW((account_name)) } AccountBalanceSummaryType Wallet::get_spendable_account_balances(const string& account_name)const { try { map<string, map<AssetIdType, ShareType>> balances; const map<string, vector<BalanceEntry>> entrys = get_spendable_account_balance_entrys(account_name); const time_point_sec now = my->_blockchain->get_pending_state()->now(); for (const auto& item : entrys) { const string& name = item.first; for (const BalanceEntry& entry : item.second) { const Asset balance = entry.get_spendable_balance(now); balances[name][balance.asset_id] += balance.amount; } } return balances; } FC_CAPTURE_AND_RETHROW((account_name)) } AccountBalanceIdSummaryType Wallet::get_account_balance_ids(const string& account_name)const { try { map<string, unordered_set<BalanceIdType>> balances; const auto scan_balance = [&](const BalanceIdType& id, const BalanceEntry& entry) { const set<Address>& owners = entry.owners(); for (const Address& owner : owners) { const oWalletKeyEntry key_entry = my->_wallet_db.lookup_key(owner); if (!key_entry.valid() || !key_entry->has_private_key()) continue; const oWalletAccountEntry account_entry = my->_wallet_db.lookup_account(key_entry->account_address); const string name = account_entry.valid() ? 
account_entry->name : string(key_entry->public_key); if (!account_name.empty() && name != account_name) continue; balances[name].insert(id); } }; scan_balances(scan_balance); return balances; } FC_CAPTURE_AND_RETHROW((account_name)) } AccountVoteSummaryType Wallet::get_account_vote_summary(const string& account_name)const { try { const auto pending_state = my->_blockchain->get_pending_state(); auto raw_votes = map<AccountIdType, int64_t>(); auto result = AccountVoteSummaryType(); const AccountBalanceEntrySummaryType items = get_spendable_account_balance_entrys(account_name); for (const auto& item : items) { const auto& entrys = item.second; for (const auto& entry : entrys) { const auto owner = entry.owner(); if (!owner.valid()) continue; const auto okey_rec = my->_wallet_db.lookup_key(*owner); if (!okey_rec.valid() || !okey_rec->has_private_key()) continue; const auto oaccount_rec = my->_wallet_db.lookup_account(okey_rec->account_address); if (!oaccount_rec.valid()) FC_THROW_EXCEPTION(unknown_account, "Unknown account name!"); if (!account_name.empty() && oaccount_rec->name != account_name) continue; const auto obalance = pending_state->get_balance_entry(entry.id()); if (!obalance.valid()) continue; const auto balance = obalance->get_spendable_balance(pending_state->now()); if (balance.amount <= 0 || balance.asset_id != 0) continue; const auto slate_id = obalance->slate_id(); if (slate_id == 0) continue; const auto slate_entry = pending_state->get_slate_entry(slate_id); if (!slate_entry.valid()) FC_THROW_EXCEPTION(unknown_slate, "Unknown slate!"); for (const AccountIdType delegate_id : slate_entry->slate) { if (raw_votes.count(delegate_id) <= 0) raw_votes[delegate_id] = balance.amount; else raw_votes[delegate_id] += balance.amount; } } } for (const auto& item : raw_votes) { auto delegate_account = pending_state->get_account_entry(item.first); result[delegate_account->name] = item.second; } return result; } FC_CAPTURE_AND_RETHROW() } variant Wallet::get_info()const { const time_point_sec now = blockchain::now(); mutable_variant_object info; info["data_dir"] = fc::absolute(my->_data_directory); info["num_scanning_threads"] = my->_num_scanner_threads; const auto is_open = this->is_open(); info["open"] = is_open; info["name"] = variant(); info["automatic_backups"] = variant(); info["transaction_scanning_enabled"] = variant(); info["last_scanned_block_num"] = variant(); info["last_scanned_block_timestamp"] = variant(); info["transaction_fee"] = variant(); info["transaction_expiration_secs"] = variant(); info["unlocked"] = variant(); info["unlocked_until"] = variant(); info["unlocked_until_timestamp"] = variant(); info["scan_progress"] = variant(); info["version"] = variant(); if (is_open) { info["name"] = my->_current_wallet_path.filename().string(); info["automatic_backups"] = get_automatic_backups(); info["transaction_scanning_enabled"] = get_transaction_scanning(); const auto last_scanned_block_num = get_last_scanned_block_number(); if (last_scanned_block_num > 0) { info["last_scanned_block_num"] = last_scanned_block_num; try { info["last_scanned_block_timestamp"] = my->_blockchain->get_block_header(last_scanned_block_num).timestamp; } catch (...) 
{ } } info["transaction_fee"] = get_transaction_fee(); info["transaction_expiration_secs"] = get_transaction_expiration(); info["unlocked"] = is_unlocked(); const auto unlocked_until = this->unlocked_until(); if (unlocked_until.valid()) { info["unlocked_until"] = (*unlocked_until - now).to_seconds(); info["unlocked_until_timestamp"] = *unlocked_until; info["scan_progress"] = get_scan_progress(); } info["version"] = get_version(); } return info; } PublicKeySummary Wallet::get_public_key_summary(const PublicKeyType& pubkey) const { PublicKeySummary summary; summary.hex = variant(fc::ecc::public_key_data(pubkey)).as_string(); summary.native_pubkey = string(pubkey); summary.native_address = (string(Address(pubkey)) + INVALIDE_SUB_ADDRESS); summary.pts_normal_address = string(PtsAddress(pubkey, false, 56)); summary.pts_compressed_address = string(PtsAddress(pubkey, true, 56)); summary.btc_normal_address = string(PtsAddress(pubkey, false, 0)); summary.btc_compressed_address = string(PtsAddress(pubkey, true, 0)); return summary; } vector<PublicKeyType> Wallet::get_public_keys_in_account(const string& account_name)const { const auto account_rec = my->_wallet_db.lookup_account(account_name); if (!account_rec.valid()) FC_THROW_EXCEPTION(unknown_account, "Unknown account name!"); const auto account_address = Address(get_owner_public_key(account_name)); vector<PublicKeyType> account_keys; const auto keys = my->_wallet_db.get_keys(); for (const auto& key : keys) { if (key.second.account_address == account_address || key.first == account_address) account_keys.push_back(key.second.public_key); } return account_keys; } void Wallet::write_latest_builder(const TransactionBuilder& builder, const string& alternate_path) { std::ofstream fs; if (alternate_path == "") { auto dir = (get_data_directory() / "trx").string(); auto default_path = dir + "/latest.trx"; if (!fc::exists(default_path)) fc::create_directories(dir); fs.open(default_path); } else { if (fc::exists(alternate_path)) FC_THROW_EXCEPTION(file_already_exists, "That filename already exists!", ("filename", alternate_path)); fs.open(alternate_path); } fs << fc::json::to_pretty_string(builder); fs.close(); } /*wallet_transaction_entry Wallet::asset_authorize_key( const string& paying_account_name, //check???? 
const string& symbol, const address& key, const object_id_type meta, bool sign ) { if( NOT is_open() ) FC_CAPTURE_AND_THROW( wallet_closed ); if( NOT is_unlocked() ) FC_CAPTURE_AND_THROW( wallet_locked ); auto payer_key = get_owner_public_key( paying_account_name ); transaction_builder_ptr builder = create_transaction_builder(); builder->asset_authorize_key( symbol, key, meta ); builder->deduct_balance( payer_key, asset() ); builder->finalize(); if( sign ) return builder->sign(); return builder->transaction_entry; }*/ void Wallet::initialize_transaction_creator(TransactionCreationState& c, const string& account_name) { c.pending_state._balance_id_to_entry = my->_balance_entrys; vector<PublicKeyType> keys = get_public_keys_in_account(account_name); for (auto key : keys) c.add_known_key(key); } void Wallet::sign_transaction_creator(TransactionCreationState& c) { } vector<WalletContactEntry> Wallet::list_contacts()const { try { if (!is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (!is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); vector<WalletContactEntry> contacts; const auto& entrys = my->_wallet_db.get_contacts(); contacts.reserve(entrys.size()); for (const auto& item : entrys) contacts.push_back(item.second); std::sort(contacts.begin(), contacts.end(), [](const WalletContactEntry& a, const WalletContactEntry& b) -> bool { return a.label.compare(b.label) < 0; }); return contacts; } FC_CAPTURE_AND_RETHROW() } oWalletContactEntry Wallet::get_contact(const variant& data)const { try { if (!is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (!is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); return my->_wallet_db.lookup_contact(data); } FC_CAPTURE_AND_RETHROW((data)) } oWalletContactEntry Wallet::get_contact(const string& label)const { try { if (!is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (!is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); return my->_wallet_db.lookup_contact(label); } FC_CAPTURE_AND_RETHROW((label)) } WalletContactEntry Wallet::add_contact(const ContactData& contact) { try { if (!is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (!is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); return my->_wallet_db.store_contact(contact); } FC_CAPTURE_AND_RETHROW((contact)) } oWalletContactEntry Wallet::remove_contact(const variant& data) { try { if (!is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (!is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); return my->_wallet_db.remove_contact(data); } FC_CAPTURE_AND_RETHROW((data)) } oWalletContactEntry Wallet::remove_contact(const string& label) { try { if (!is_open()) FC_CAPTURE_AND_THROW(wallet_closed); if (!is_unlocked()) FC_CAPTURE_AND_THROW(wallet_locked); return my->_wallet_db.remove_contact(label); } FC_CAPTURE_AND_RETHROW((label)) } void Wallet::import_script_db(const fc::path& src_path) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); fc::path db_path = get_data_directory() / get_wallet_name(); if (db_path == src_path) return; if (!fc::exists(src_path)) FC_CAPTURE_AND_THROW(file_not_found, ("src_path is illegal")); if (fc::exists(src_path / "script_id_to_script_entry_db")) { db::fast_level_map <ScriptIdType, ScriptEntry> src_db; src_db.open(src_path / "script_id_to_script_entry_db"); auto it = src_db.unordered_begin(); auto end = src_db.unordered_end(); while (it != end) { my->script_id_to_script_entry_db.store(it->first, it->second); it++; } src_db.close(); } if (fc::exists(src_path / "contract_id_event_to_script_id_vector_db")) { db::fast_level_map 
<ScriptRelationKey, vector<ScriptIdType>> src_db_relation; src_db_relation.open(src_path / "contract_id_event_to_script_id_vector_db"); auto it_relation = src_db_relation.unordered_begin(); auto end_relation = src_db_relation.unordered_end(); while (it_relation != end_relation) { my->contract_id_event_to_script_id_vector_db.store(it_relation->first, it_relation->second); it_relation++; } src_db_relation.close(); } } FC_CAPTURE_AND_RETHROW((src_path)) } void Wallet::export_script_db(const fc::path & des_path) { try { FC_ASSERT(is_open(), "Wallet not open!"); FC_ASSERT(is_unlocked(), "Wallet not unlock!"); fc::path db_path = get_data_directory() / get_wallet_name(); if (db_path == des_path) return; fc::remove_all(des_path / "script_id_to_script_entry_db"); fc::remove_all(des_path / "contract_id_event_to_script_id_vector_db"); db::fast_level_map <ScriptIdType, ScriptEntry> des_db; des_db.open(des_path / "script_id_to_script_entry_db"); auto script_db_it = my->script_id_to_script_entry_db.unordered_begin(); auto script_db_end = my->script_id_to_script_entry_db.unordered_end(); while (script_db_it != script_db_end) { des_db.store(script_db_it->first, script_db_it->second); script_db_it++; } des_db.close(); db::fast_level_map <ScriptRelationKey, vector<ScriptIdType>> des_db_relation; des_db_relation.open(des_path / "contract_id_event_to_script_id_vector_db"); auto it_relation = my->contract_id_event_to_script_id_vector_db.unordered_begin(); auto end_relation = my->contract_id_event_to_script_id_vector_db.unordered_end(); while (it_relation != end_relation) { des_db_relation.store(it_relation->first, it_relation->second); it_relation++; } des_db_relation.close(); } FC_CAPTURE_AND_RETHROW((des_path)) } } } // thinkyoung::wallet
288,039
75,688
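The message-fee logic in get_transaction_imessage_fee above reduces to a simple rule: bytes up to the free limit cost nothing, every byte beyond it costs the configured coefficient, and anything over the soft maximum is rejected. A standalone sketch of that rule, with placeholder constants and a plain integer standing in for the wallet's Asset type:

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <string>

// Placeholder limits; the real values come from the ALP blockchain configuration macros.
constexpr std::size_t kFreeMessageSize = 40;   // stand-in for ALP_BLOCKCHAIN_MAX_FREE_MESSAGE_SIZE
constexpr std::size_t kSoftMaxLength   = 256;  // stand-in for the wallet's configured soft maximum

// Fee for an attached message: zero up to the free size, then linear in the
// extra bytes; messages over the soft maximum are rejected, as in the wallet code.
std::int64_t imessage_fee(const std::string& imessage, std::int64_t fee_coe)
{
    if (imessage.size() > kSoftMaxLength)
        throw std::length_error("imessage longer than the soft maximum");
    if (imessage.size() <= kFreeMessageSize)
        return 0;
    return fee_coe * static_cast<std::int64_t>(imessage.size() - kFreeMessageSize);
}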
#include <Windows.h> #include <iostream> #include <fstream> #include <vector> #include <string> //const unsigned int SCREEN_WIDTH = 256; //const unsigned int SCREEN_HEIGHT = 224; const unsigned int SCREEN_WIDTH = 500; const unsigned int SCREEN_HEIGHT = 500; const unsigned int CELL_WIDTH = SCREEN_WIDTH; const unsigned int CELL_HEIGHT = SCREEN_HEIGHT; //const unsigned int CELL_WIDTH = static_cast<unsigned int>(SCREEN_WIDTH / 2); //const unsigned int CELL_HEIGHT = static_cast<unsigned int>(SCREEN_HEIGHT / 2); const unsigned int MAX_FILE_LINE = 5000; bool wroteTagOnce = false; unsigned int xCells = 0, yCells = 0; enum class GridFileSection { GRIDFILE_SECTION_UNKNOWN, GRIDFILE_SECTION_SCENESIZE, GRIDFILE_SECTION_ENTITYDATA }; std::vector<std::string> SplitStr(std::string line, std::string delimeter = "\t") { std::vector<std::string> tokens; size_t last = 0, next = 0; while ((next = line.find(delimeter, last)) != std::string::npos) { tokens.push_back(line.substr(last, next - last)); last = next + 1; } tokens.push_back(line.substr(last)); return tokens; } void ParseSceneSize(std::ofstream& outFile, std::string line) { std::vector<std::string> tokens = SplitStr(line); if (tokens.size() < 2) { return; } unsigned int sceneWidth = std::stoul(tokens.at(0)); unsigned int sceneHeight = std::stoul(tokens.at(1)); //Do float division and then ceil the value to get an extra row and column offscreen //Then cast it to unsigned int to truncate the decimals xCells = static_cast<unsigned int>(ceil((static_cast<float>(sceneWidth) / CELL_WIDTH))); yCells = static_cast<unsigned int>(ceil((static_cast<float>(sceneHeight) / CELL_HEIGHT))); //Be sure to use std::flush or std::endl to flush the buffer outFile << "[GRIDCELLS]\n"; outFile << xCells << '\t' << yCells << '\n'; outFile << "[/]\n\n"; } void ParseEntityData(std::ofstream& outFile, std::string line) { std::vector<std::string> tokens = SplitStr(line); if (tokens.size() < 5) { return; } float posX = std::stof(tokens.at(3)); float posY = std::stof(tokens.at(4)); unsigned int objectID = std::stoul(tokens.at(0)); //Convert world space to cell space unsigned int cellPosX = static_cast<unsigned int>(posX / CELL_WIDTH); unsigned int cellPosY = static_cast<unsigned int>(posY / CELL_HEIGHT); if (cellPosX < 0) { cellPosX = 0; } else if (cellPosX >= xCells) { cellPosX = xCells - 1; } if (cellPosY < 0) { cellPosY = 0; } else if (cellPosY >= yCells) { cellPosY = yCells - 1; } if (!wroteTagOnce) { wroteTagOnce = true; outFile << "#objID" << '\t' << "Cell_X" << '\t' << "Cell_Y\n"; outFile << "[POSITIONS]\n"; } outFile << objectID << '\t' << cellPosX << '\t' << cellPosY << '\n'; } int main() { std::string file = "stage_fortress.txt"; std::ofstream outputFile("grid_stage_fortress.txt"); outputFile.clear(); std::ifstream readFile; readFile.open(file, std::ios::in); if (!readFile.is_open()) { std::cout << "[GRID GENERATOR] Failed to read file\n"; return -1; } GridFileSection gridFileSection = GridFileSection::GRIDFILE_SECTION_UNKNOWN; char str[MAX_FILE_LINE]; while (readFile.getline(str, MAX_FILE_LINE)) { std::string line(str); if (line.empty() || line.front() == '#') { continue; } if (line == "[/]") { gridFileSection = GridFileSection::GRIDFILE_SECTION_UNKNOWN; continue; } if (line == "[SCENESIZE]") { gridFileSection = GridFileSection::GRIDFILE_SECTION_SCENESIZE; continue; } if (line == "[ENTITYDATA]") { gridFileSection = GridFileSection::GRIDFILE_SECTION_ENTITYDATA; continue; } switch (gridFileSection) { case GridFileSection::GRIDFILE_SECTION_SCENESIZE: 
ParseSceneSize(outputFile, line); break; case GridFileSection::GRIDFILE_SECTION_ENTITYDATA: ParseEntityData(outputFile, line); break; } } //Add closing tag outputFile << "[/]" << std::flush; readFile.close(); std::cout << xCells << '\t' << yCells << std::endl; /*unsigned int width = 240; unsigned int height = 32; unsigned int offset = 16; unsigned int starting_x = 1856; unsigned int starting_y = 752; char format[] = "231\t34\t247\t50\t%u\t%u\n"; char debug[100]; for (unsigned int i = 0; i < width; i += offset) { for (unsigned int j = 0; j < height; j += offset) { sprintf_s(debug, format, starting_x + i, starting_y + j); OutputDebugStringA(debug); } }*/ return 0; }
4,922
1,708
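ParseEntityData converts a world-space position to a grid cell by dividing by the cell pitch and clamping to the grid derived from [SCENESIZE]. A small illustrative helper doing the same conversion (the name and signature are hypothetical); clamping in floating point before the unsigned cast keeps negative positions in cell 0:

#include <algorithm>
#include <cmath>

// Map a world-space coordinate to a cell index on a grid with `cells` cells
// along this axis (assumes cells >= 1, which ParseSceneSize guarantees by ceiling).
unsigned int world_to_cell(float pos, unsigned int cell_size, unsigned int cells)
{
    float cell = std::floor(pos / static_cast<float>(cell_size));
    cell = std::clamp(cell, 0.0f, static_cast<float>(cells - 1));
    return static_cast<unsigned int>(cell);
}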
// license:BSD-3-Clause // copyright-holders:Ryan Holtz, David Haywood #include "emu.h" #include "includes/spg2xx.h" class shredmjr_game_state : public spg2xx_game_state { public: shredmjr_game_state(const machine_config &mconfig, device_type type, const char *tag) : spg2xx_game_state(mconfig, type, tag), m_porta_data(0x0000), m_shiftamount(0) { } void shredmjr(machine_config &config); void taikeegr(machine_config &config); void taikeegrp(machine_config &config); void init_taikeegr(); protected: uint16_t porta_r(); virtual void porta_w(offs_t offset, uint16_t data, uint16_t mem_mask = ~0) override; private: uint16_t m_porta_data; int m_shiftamount; }; // Shredmaster Jr uses the same input order as the regular Taikee Guitar, but reads all inputs through a single multiplexed bit void shredmjr_game_state::porta_w(offs_t offset, uint16_t data, uint16_t mem_mask) { if (data != m_porta_data) { if ((data & 0x0800) != (m_porta_data & 0x0800)) { if (data & 0x0800) { //logerror("0x0800 low -> high\n"); } else { //logerror("0x0800 high -> low\n"); } } if ((data & 0x0200) != (m_porta_data & 0x0200)) { if (data & 0x0200) { //logerror("0x0200 low -> high\n"); m_shiftamount++; } else { //logerror("0x0200 high -> low\n"); } } if ((data & 0x0100) != (m_porta_data & 0x0100)) { if (data & 0x0100) { //logerror("0x0100 low -> high\n"); m_shiftamount = 0; } else { //logerror("0x0100 high -> low\n"); } } } m_porta_data = data; } uint16_t shredmjr_game_state::porta_r() { //logerror("porta_r with shift amount %d \n", m_shiftamount); uint16_t ret = 0x0000; uint16_t portdata = m_io_p1->read(); portdata = (portdata >> m_shiftamount) & 0x1; if (portdata) ret |= 0x0400; return ret; } static INPUT_PORTS_START( taikeegr ) PORT_START("P1") PORT_BIT( 0x0001, IP_ACTIVE_LOW, IPT_JOYSTICK_DOWN ) PORT_NAME("Strum Bar Down") PORT_BIT( 0x0002, IP_ACTIVE_LOW, IPT_JOYSTICK_UP ) PORT_NAME("Strum Bar Up") PORT_BIT( 0x0004, IP_ACTIVE_LOW, IPT_BUTTON6 ) PORT_NAME("Whamming Bar") PORT_BIT( 0x0008, IP_ACTIVE_LOW, IPT_BUTTON3 ) PORT_NAME("Yellow") PORT_BIT( 0x0010, IP_ACTIVE_LOW, IPT_BUTTON4 ) PORT_NAME("Green") PORT_BIT( 0x0020, IP_ACTIVE_LOW, IPT_BUTTON2 ) PORT_NAME("Red") PORT_BIT( 0x0040, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Blue") PORT_BIT( 0x0080, IP_ACTIVE_LOW, IPT_BUTTON5 ) PORT_NAME("Pink") PORT_BIT( 0xff00, IP_ACTIVE_LOW, IPT_UNKNOWN ) PORT_START("P2") PORT_BIT( 0xffff, IP_ACTIVE_LOW, IPT_UNKNOWN ) PORT_START("P3") PORT_BIT( 0xffff, IP_ACTIVE_LOW, IPT_UNKNOWN ) INPUT_PORTS_END static INPUT_PORTS_START( guitarstp ) PORT_START("P1") PORT_BIT( 0x0001, IP_ACTIVE_LOW, IPT_JOYSTICK_DOWN ) PORT_NAME("Strum Bar Down") PORT_BIT( 0x0002, IP_ACTIVE_LOW, IPT_JOYSTICK_UP ) PORT_NAME("Strum Bar Up") PORT_BIT( 0x0004, IP_ACTIVE_LOW, IPT_BUTTON6 ) PORT_NAME("Whamming Bar") PORT_BIT( 0x0008, IP_ACTIVE_LOW, IPT_BUTTON3 ) PORT_NAME("Yellow") PORT_BIT( 0x0010, IP_ACTIVE_LOW, IPT_BUTTON4 ) PORT_NAME("Blue") PORT_BIT( 0x0020, IP_ACTIVE_LOW, IPT_BUTTON2 ) PORT_NAME("Red") PORT_BIT( 0x0040, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Green") PORT_BIT( 0x0080, IP_ACTIVE_LOW, IPT_BUTTON5 ) PORT_NAME("Orange") PORT_BIT( 0xff00, IP_ACTIVE_LOW, IPT_UNKNOWN ) PORT_START("P2") PORT_BIT( 0xffff, IP_ACTIVE_LOW, IPT_UNKNOWN ) PORT_START("P3") PORT_BIT( 0xffff, IP_ACTIVE_LOW, IPT_UNKNOWN ) INPUT_PORTS_END void shredmjr_game_state::shredmjr(machine_config &config) { SPG24X(config, m_maincpu, XTAL(27'000'000), m_screen); m_maincpu->set_addrmap(AS_PROGRAM, &shredmjr_game_state::mem_map_4m); spg2xx_base(config); 
m_maincpu->porta_in().set(FUNC(shredmjr_game_state::porta_r)); m_maincpu->porta_out().set(FUNC(shredmjr_game_state::porta_w)); } void shredmjr_game_state::taikeegr(machine_config &config) { SPG24X(config, m_maincpu, XTAL(27'000'000), m_screen); m_maincpu->set_addrmap(AS_PROGRAM, &shredmjr_game_state::mem_map_4m); spg2xx_base(config); m_maincpu->porta_in().set_ioport("P1"); } void shredmjr_game_state::taikeegrp(machine_config &config) { taikeegr(config); m_maincpu->set_pal(true); m_screen->set_refresh_hz(50); } void shredmjr_game_state::init_taikeegr() { u16 *src = (u16*)memregion("maincpu")->base(); for (int i = 0x00000; i < 0x800000/2; i++) { u16 dat = src[i]; dat = bitswap<16>(dat, 15,14,13,12, 11,10,9,8, 7,6,5,4, 0,1,2,3 ); src[i] = dat; } std::vector<u16> buffer(0x800000/2); for (int i = 0; i < 0x800000/2; i++) { int j = 0; switch (i & 0x00e00) { case 0x00000: j = (i & 0xfff1ff) | 0x000; break; case 0x00200: j = (i & 0xfff1ff) | 0x800; break; case 0x00400: j = (i & 0xfff1ff) | 0x400; break; case 0x00600: j = (i & 0xfff1ff) | 0xc00; break; case 0x00800: j = (i & 0xfff1ff) | 0x200; break; case 0x00a00: j = (i & 0xfff1ff) | 0xa00; break; case 0x00c00: j = (i & 0xfff1ff) | 0x600; break; case 0x00e00: j = (i & 0xfff1ff) | 0xe00; break; } buffer[j] = src[i]; } std::copy(buffer.begin(), buffer.end(), &src[0]); } ROM_START( taikeegr ) ROM_REGION( 0x800000, "maincpu", ROMREGION_ERASE00 ) ROM_LOAD16_WORD_SWAP( "taikee_guitar.bin", 0x000000, 0x800000, CRC(8cbe2feb) SHA1(d72e816f259ba6a6260d6bbaf20c5e9b2cf7140b) ) ROM_END ROM_START( rockstar ) ROM_REGION( 0x800000, "maincpu", ROMREGION_ERASE00 ) ROM_LOAD16_WORD_SWAP( "29gl064.bin", 0x000000, 0x800000, CRC(40de50ff) SHA1(b33ae7a3d32911addf833998d7419f4830be5a07) ) ROM_END ROM_START( shredmjr ) ROM_REGION( 0x800000, "maincpu", ROMREGION_ERASE00 ) ROM_LOAD16_WORD_SWAP( "shredmasterjr.bin", 0x000000, 0x800000, CRC(95a6dcf1) SHA1(44893cd6ebe6b7f33a73817b72ae7be70c3126dc) ) ROM_END ROM_START( guitarst ) ROM_REGION( 0x800000, "maincpu", ROMREGION_ERASE00 ) ROM_LOAD16_WORD_SWAP( "guitarstar_s29gl064m11tfir4_0001227e.bin", 0x000000, 0x800000, CRC(feaace47) SHA1(dd426bb4f03a16b1b96b63b4e0d79ea75097bf72) ) ROM_END ROM_START( guitarstp ) ROM_REGION( 0x800000, "maincpu", ROMREGION_ERASE00 ) ROM_LOAD16_WORD_SWAP( "29gl064.u2", 0x000000, 0x800000, CRC(1dbcff73) SHA1(b179e4da6f38e7d5ec796bf846a63492d30eb0f5) ) ROM_END // These were all sold as different products, use a different sets of songs / presentation styles (2D or perspective gameplay, modified titlescreens etc.) // and sometimes even slightly different hardware, so aren't set as clones of each other // box title not confirmed, Guitar Rock on title screen, has Bon Jovi etc. CONS( 2007, taikeegr, 0, 0, taikeegrp, taikeegr, shredmjr_game_state, init_taikeegr, "TaiKee", "Guitar Rock (PAL)", MACHINE_IMPERFECT_TIMING | MACHINE_IMPERFECT_SOUND | MACHINE_IMPERFECT_GRAPHICS ) // timing not quite correct yet // Plug 'N' Play Rockstar Guitar on box, Guitar Rock on title screen, has Manic Street Preachers etc. 
CONS( 2007, rockstar, 0, 0, taikeegrp, taikeegr, shredmjr_game_state, init_taikeegr, "Ultimate Products / TaiKee", "Plug 'N' Play Rockstar Guitar / Guitar Rock (PAL)", MACHINE_IMPERFECT_TIMING | MACHINE_IMPERFECT_SOUND | MACHINE_IMPERFECT_GRAPHICS ) // timing not quite correct yet // dreamGEAR branded presentation, modified hardware (buttons read in a different way) same song seletion as taikeegr CONS( 2007, shredmjr, 0, 0, shredmjr, taikeegr, shredmjr_game_state, init_taikeegr, "dreamGEAR", "Shredmaster Jr (NTSC)", MACHINE_IMPERFECT_TIMING | MACHINE_IMPERFECT_SOUND | MACHINE_IMPERFECT_GRAPHICS ) // ^ // doesn't have a Senario logo ingame, but does on box. unique song selection CONS( 200?, guitarst, 0, 0, taikeegr, taikeegr, shredmjr_game_state, init_taikeegr, "Senario", "Guitar Star (US, Senario, NTSC)", MACHINE_IMPERFECT_TIMING | MACHINE_IMPERFECT_SOUND | MACHINE_IMPERFECT_GRAPHICS ) // ^ // This one has the same songs as 'rockstar' but different game style / presentation. // Unit found in Ireland "imported by Cathay Product Sourcing Ltd." on the box, with address in Ireland // ITEM #01109 on instruction sheet, no manufacturer named on either box or instructions CONS( 200?, guitarstp, 0, 0, taikeegrp, guitarstp,shredmjr_game_state, init_taikeegr, "<unknown>", "Guitar Star (Europe, PAL)", MACHINE_IMPERFECT_TIMING | MACHINE_IMPERFECT_SOUND | MACHINE_IMPERFECT_GRAPHICS ) // ^
8,330
4,294
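The driver comment above describes the Shredmaster Jr reading all of its buttons through one multiplexed bit; the handshake is just a shift counter driven by two port bits. A stripped-down, hypothetical model of that protocol (the bit assignments follow porta_w/porta_r above, everything else is illustrative):

#include <cstdint>

// Minimal model of the Shredmaster Jr-style serial button interface:
// one output bit resets the shift counter, another advances it on a
// rising edge, and each read returns a single selected button bit.
struct serial_buttons
{
    uint16_t last_out = 0;   // previously written port value
    int      shift    = 0;   // which button is currently selected

    void write(uint16_t data)
    {
        // rising edge on bit 9 advances to the next button
        if ((data & 0x0200) && !(last_out & 0x0200))
            ++shift;
        // rising edge on bit 8 restarts from the first button
        if ((data & 0x0100) && !(last_out & 0x0100))
            shift = 0;
        last_out = data;
    }

    uint16_t read(uint16_t buttons) const
    {
        // present the selected button on bit 10, as the driver above does
        return ((buttons >> shift) & 1) ? 0x0400 : 0x0000;
    }
};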
// Copyright (c) 2016-2021 Daniel Frey and Dr. Colin Hirsch // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt) #include "../getenv.hpp" #include "../macros.hpp" #include <tao/pq/connection.hpp> #if defined( _WIN32 ) #include <winsock.h> #else #include <unistd.h> #endif std::size_t counter = 0; void handle_notification( const tao::pq::notification& n ) { std::cout << "channel '" << n.channel() << "' received '" << n.payload() << "'\n"; ++counter; } std::size_t foo_counter = 0; void handle_foo_notification( const char* payload ) { std::cout << "foo handler received '" << payload << "'\n"; ++foo_counter; } void run() { // overwrite the default with an environment variable if needed const auto connection_string = tao::pq::internal::getenv( "TAOPQ_TEST_DATABASE", "dbname=template1" ); const auto connection = tao::pq::connection::create( connection_string ); TEST_EXECUTE( connection->set_notification_handler( handle_notification ) ); TEST_EXECUTE( connection->listen( "FOO", handle_foo_notification ) ); TEST_ASSERT( counter == 0 ); TEST_ASSERT( foo_counter == 0 ); TEST_EXECUTE( connection->notify( "FOO" ) ); TEST_ASSERT( counter == 1 ); TEST_ASSERT( foo_counter == 1 ); TEST_ASSERT( connection->notification_handler( "FOO" ) ); TEST_ASSERT( !connection->notification_handler( "BAR" ) ); TEST_EXECUTE( connection->reset_notification_handler( "FOO" ) ); TEST_ASSERT( !connection->notification_handler( "FOO" ) ); TEST_EXECUTE( connection->notify( "FOO", "with payload" ) ); TEST_ASSERT( counter == 2 ); TEST_ASSERT( foo_counter == 1 ); TEST_EXECUTE( connection->unlisten( "FOO" ) ); TEST_EXECUTE( connection->notify( "FOO" ) ); TEST_EXECUTE( connection->get_notifications() ); TEST_ASSERT( counter == 2 ); TEST_ASSERT( connection->notification_handler() ); TEST_EXECUTE( connection->reset_notification_handler() ); TEST_ASSERT( !connection->notification_handler() ); #if defined( _WIN32 ) closesocket( PQsocket( connection->underlying_raw_ptr() ) ); #else close( PQsocket( connection->underlying_raw_ptr() ) ); #endif TEST_THROWS( connection->get_notifications() ); } auto main() -> int // NOLINT(bugprone-exception-escape) { try { run(); } // LCOV_EXCL_START catch( const std::exception& e ) { std::cerr << "exception: " << e.what() << std::endl; throw; } catch( ... ) { std::cerr << "unknown exception" << std::endl; throw; } // LCOV_EXCL_STOP }
2,625
943
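The test exercises taopq's LISTEN/NOTIFY surface; a minimal usage sketch built only from the calls that appear above (the channel name, payload, and connection string are placeholders):

#include <tao/pq/connection.hpp>
#include <iostream>

// handler with the same signature the test registers per channel
void on_jobs(const char* payload)
{
    std::cout << "job notification: " << payload << '\n';
}

int main()
{
    // connection string is an assumption; point it at your own database
    const auto conn = tao::pq::connection::create("dbname=template1");

    conn->listen("jobs", on_jobs);        // LISTEN + register a per-channel handler
    conn->notify("jobs", "payload text"); // NOTIFY with an explicit payload
    conn->get_notifications();            // dispatch anything still pending
    conn->unlisten("jobs");
}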
/******************************************************************************* * Copyright 2020 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #ifndef CPU_X64_LRN_JIT_AVX512_COMMON_LRN_BWD_NHWC_HPP #define CPU_X64_LRN_JIT_AVX512_COMMON_LRN_BWD_NHWC_HPP #include "cpu/x64/lrn/jit_avx512_common_lrn_bwd_base.hpp" #include "cpu/x64/lrn/jit_avx512_common_lrn_utils.hpp" namespace dnnl { namespace impl { namespace cpu { namespace x64 { namespace lrn { using namespace dnnl::impl::status; using namespace dnnl::impl::utils; using namespace data_type; using namespace Xbyak; using namespace Xbyak::util; template <data_type_t d_type> class jit_avx512_common_lrn_kernel_bwd_nhwc_t : public jit_avx512_common_lrn_kernel_bwd_t<d_type> { public: jit_avx512_common_lrn_kernel_bwd_nhwc_t(unsigned C, float alpha, float beta, int local_size, void *code_ptr = nullptr, size_t code_size = 1 * Xbyak::DEFAULT_MAX_CODE_SIZE); DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_avx512_common_lrn_kernel_bwd_nhwc_t) private: void set_up_ker_params(); void execute_compute_loop(unsigned num_full_16c_blocks, unsigned C_tail); void compute_loop(across_version version, tail_mode tail_proc, unsigned C_tail = 0, int loop_size_param = 1); void compute(int loop_size_param, tail_mode tail_proc); void increment_loop_params(std::size_t offset); void load_compute_data( across_version version, tail_mode tail_proc, int loop_size_param); void store_compute_data( int loop_size_param, tail_mode tail_m, unsigned C_tail); void reserve_stack_space(std::size_t space); void unreserve_stack_space(std::size_t space); void load_data_to_stack( unsigned C_tail, across_version version, tail_mode tail_proc); int get_stack_offset(const Reg64 reg, tail_mode tail_proc); const std::vector<int> tmp_mask_prev_; const std::vector<int> tmp_mask_next_; static constexpr int zmm_size_ = 64; static constexpr int tmp_load_to_stack_idx_prev_ = 12; static constexpr int tmp_load_to_stack_idx_tail_ = 13; static constexpr int tmp_store_from_stack_idx_tail_ = 14; const Reg64 mask_ = r11; const Reg64 blockC_ = r12; const int half_ls_; }; } // namespace lrn } // namespace x64 } // namespace cpu } // namespace impl } // namespace dnnl #endif
2,955
1,046
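For context, the quantity this backward kernel differentiates is the usual across-channel local response normalization; writing n for local_size and using the alpha and beta passed to the constructor above, the forward response is commonly defined as

\[ y_{c} = x_{c}\left(k + \frac{\alpha}{n}\sum_{c' \in \mathcal{N}(c)} x_{c'}^{2}\right)^{-\beta} \]

where \(\mathcal{N}(c)\) is the window of n channels centred on c and k is the usual bias constant. This is the textbook form, not a restatement of this implementation's exact conventions.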
#include <iostream>
#include <algorithm>

// Rotate an n x n matrix 90 degrees counter-clockwise in place:
// reverse every row, then take the transpose.
void rotate_image(int a[][4], int n)
{
    for (int i = 0; i < n; i++) {
        std::reverse(a[i], a[i] + n);
    }
    // take transpose
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            if (i < j)
                std::swap(a[i][j], a[j][i]);
        }
    }
    // print the rotated matrix
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            std::cout << a[i][j] << " ";
        }
        std::cout << std::endl;
    }
}

int main()
{
    int a[4][4] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}};
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            std::cout << a[i][j] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << "Rotated matrix-" << std::endl;
    rotate_image(a, 4);
    return 0;
}
875
396
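For contrast, the clockwise rotation is the same two steps in the opposite order: transpose first, then reverse each row. A small sketch with the same fixed 4-column signature as above:

#include <algorithm>
#include <iostream>

// Clockwise counterpart of rotate_image: transpose, then reverse each row.
void rotate_image_cw(int a[][4], int n)
{
    for (int i = 0; i < n; i++)
        for (int j = i + 1; j < n; j++)
            std::swap(a[i][j], a[j][i]);
    for (int i = 0; i < n; i++)
        std::reverse(a[i], a[i] + n);
}

int main()
{
    int a[4][4] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}, {13, 14, 15, 16}};
    rotate_image_cw(a, 4);
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++)
            std::cout << a[i][j] << " ";
        std::cout << "\n";
    }
    return 0;
}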
/////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2018, The Regents of the University of California // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. /////////////////////////////////////////////////////////////////////////////// #include "stt/pdrev.h" #include "stt/LinesRenderer.h" #include "graph.h" #include "utl/Logger.h" namespace pdr { class Graph; using stt::Tree; using stt::Branch; using utl::PDR; class PdRev { public: PdRev(std::vector<int>& x, std::vector<int>& y, int root_index, Logger* logger); ~PdRev(); void runPD(float alpha); void runPDII(float alpha); Tree translateTree(); void graphLines(std::vector<std::pair<std::pair<int, int>, std::pair<int, int>>> &lines); void highlightGraph(); private: void replaceNode(Graph* graph, int originalNode); void transferChildren(int originalNode); void printTree(Tree fluteTree); Graph* graph_; Logger* logger_; }; Tree primDijkstra(std::vector<int>& x, std::vector<int>& y, int drvr_index, float alpha, Logger* logger) { pdr::PdRev pd(x, y, drvr_index, logger); pd.runPD(alpha); Tree tree = pd.translateTree(); //pd.highlightSteinerTree(tree); return tree; } Tree primDijkstraRevII(std::vector<int>& x, std::vector<int>& y, int drvr_index, float alpha, Logger* logger) { // pdrev fails with non-zero root index despite showing signs of supporting it. std::vector<int> x1(x); std::vector<int> y1(y); // Move driver to pole position until drvr_index arg works. 
std::swap(x1[0], x1[drvr_index]); std::swap(y1[0], y1[drvr_index]); drvr_index = 0; pdr::PdRev pd(x1, y1, drvr_index, logger); pd.runPDII(alpha); Tree tree = pd.translateTree(); return tree; } PdRev::PdRev(std::vector<int>& x, std::vector<int>& y, int root_index, Logger* logger) : logger_(logger) { graph_ = new Graph(x, y, root_index, logger_); } PdRev::~PdRev() { delete graph_; } void PdRev::runPD(float alpha) { graph_->buildNearestNeighborsForSPT(); graph_->run_PD_brute_force(alpha); graph_->doSteiner_HoVW(); // The following slightly improves wire length but the cost is the use // of absolutely horrid unreliable code. //graph_->fix_max_dc(); } void PdRev::runPDII(float alpha) { graph_->buildNearestNeighborsForSPT(); graph_->PDBU_new_NN(alpha); graph_->doSteiner_HoVW(); graph_->fix_max_dc(); } void PdRev::replaceNode(Graph* tree, int originalNode) { std::vector<Node>& nodes = tree->nodes; Node& node = nodes[originalNode]; int nodeParent = node.parent; std::vector<int>& nodeChildren = node.children; int newNode = tree->nodes.size(); Node newSP(newNode, node.x, node.y); // Replace parent in old node children // Add children to new node for (int child : nodeChildren) { tree->replaceParent(tree->nodes[child], originalNode, newNode); tree->addChild(newSP, child); } // Delete children from old node nodeChildren.clear(); // Set new node as old node's parent node.parent = newNode; // Set new node parent if (nodeParent != originalNode) { newSP.parent = nodeParent; // Replace child in parent tree->replaceChild(tree->nodes[nodeParent], originalNode, newNode); } else newSP.parent = newNode; // Add old node as new node's child tree->addChild(newSP, originalNode); nodes.push_back(newSP); } void PdRev::transferChildren(int originalNode) { std::vector<Node>& nodes = graph_->nodes; Node& node = nodes[originalNode]; std::vector<int> nodeChildren = node.children; int newNode = nodes.size(); Node newSP(newNode, node.x, node.y); // Replace parent in old node children // Add children to new node int count = 0; node.children.clear(); for (int child : nodeChildren) { if (count < 2) { graph_->replaceParent(nodes[child], originalNode, newNode); graph_->addChild(newSP, child); } else { graph_->addChild(node, child); } count++; } newSP.parent = originalNode; graph_->addChild(node, newNode); nodes.push_back(newSP); } Tree PdRev::translateTree() { if (graph_->num_terminals > 2) { for (int i = 0; i < graph_->num_terminals; ++i) { Node& node = graph_->nodes[i]; if (!(node.children.empty() || (node.parent == i // is root node && node.children.size() == 1 && node.children[0] >= graph_->num_terminals))) { replaceNode(graph_, i); } } int nNodes = graph_->nodes.size(); for (int i = graph_->num_terminals; i < nNodes; ++i) { while (graph_->nodes[i].children.size() > 3 || (graph_->nodes[i].parent != i && graph_->nodes[i].children.size() == 3)) { transferChildren(i); } } graph_->RemoveSTNodes(); } Tree tree; int num_terminals = graph_->num_terminals; tree.deg = num_terminals; if (num_terminals < 2) { tree.branch.resize(0); tree.length = 0; } else { int branch_count = tree.branchCount(); tree.branch.resize(branch_count); tree.length = graph_->calc_tree_wl_pd(); if (graph_->nodes.size() != branch_count) logger_->error(PDR, 666, "steiner branch count inconsistent"); for (int i = 0; i < graph_->nodes.size(); ++i) { Node& child = graph_->nodes[i]; int parent = child.parent; if (parent >= graph_->nodes.size()) logger_->error(PDR, 667, "steiner branch node out of bounds"); Branch& newBranch = tree.branch[i]; newBranch.x = child.x; 
newBranch.y = child.y; newBranch.n = parent; } } return tree; } void PdRev::graphLines(std::vector<std::pair<std::pair<int, int>, std::pair<int, int>>> &lines) { vector<Node> &nodes = graph_->nodes; for (Node &node : nodes) { Node &parent = nodes[node.parent]; std::pair<int, int> node_xy(node.x, node.y); std::pair<int, int> parent_xy(parent.x, parent.y); std::pair<std::pair<int, int>, std::pair<int, int>> line(node_xy, parent_xy); lines.push_back(line); } } // Useful for generating regression data. void reportXY(std::vector<int> x, std::vector<int> y, Logger* logger) { for (int i = 0; i < x.size(); i++) logger->report("\\{p{} {} {}\\}", i, x[i], y[i]); } void PdRev::highlightGraph() { gui::Gui *gui = gui::Gui::get(); if (gui) { if (stt::LinesRenderer::lines_renderer == nullptr) { stt::LinesRenderer::lines_renderer = new stt::LinesRenderer(); gui->registerRenderer(stt::LinesRenderer::lines_renderer); } std::vector<std::pair<std::pair<int, int>, std::pair<int, int>>> xy_lines; graphLines(xy_lines); std::vector<std::pair<odb::Point, odb::Point>> lines; for (int i = 0; i < xy_lines.size(); i++) { std::pair<int, int> xy1 = xy_lines[i].first; std::pair<int, int> xy2 = xy_lines[i].second; lines.push_back(std::pair(odb::Point(xy1.first, xy1.second), odb::Point(xy2.first, xy2.second))); } stt::LinesRenderer::lines_renderer->highlight(lines, gui::Painter::red); } } } // namespace
8,748
3,083
#include <cstdio>

int flag, n;

// Depth-first search over decimal numbers whose digits are only 0 and 1.
// Prints a multiple of n as soon as one is found; the depth is capped at
// 19 digits so the value still fits in an unsigned long long.
void dfs(unsigned long long x, int k)
{
    if (flag) return;
    if (x % n == 0) {
        printf("%llu\n", x);
        flag = 1;
        return;
    }
    if (k == 19) return;
    dfs(x * 10, k + 1);
    dfs(x * 10 + 1, k + 1);
}

int main()
{
    while (~scanf("%d", &n) && n) {
        flag = 0;
        dfs(1, 0);
    }
    return 0;
}
392
204
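The depth-limited DFS above works for the usual contest constraints; a common alternative, sketched here rather than taken from the submission, searches breadth-first over remainders mod n, so at most n states are explored and the first answer found is the smallest 0/1-digit multiple:

#include <cstdio>
#include <queue>
#include <string>
#include <utility>
#include <vector>

// BFS over remainders: each remainder mod n is visited once, with its smallest
// 0/1-digit representative, so the first remainder equal to 0 gives the answer.
std::string smallest_binary_digit_multiple(int n)
{
    std::vector<char> seen(n, 0);
    std::queue<std::pair<int, std::string>> q;
    q.push({1 % n, "1"});
    seen[1 % n] = 1;
    while (!q.empty()) {
        auto [r, digits] = q.front();
        q.pop();
        if (r == 0)
            return digits;
        for (int d = 0; d <= 1; ++d) {
            int nr = (r * 10 + d) % n;
            if (!seen[nr]) {
                seen[nr] = 1;
                q.push({nr, digits + char('0' + d)});
            }
        }
    }
    return "";  // unreachable for n >= 1
}

int main()
{
    int n;
    while (scanf("%d", &n) == 1 && n)
        printf("%s\n", smallest_binary_digit_multiple(n).c_str());
    return 0;
}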
#ifdef CH_LANG_CC /* * _______ __ * / ___/ / ___ __ _ / / ___ * / /__/ _ \/ _ \/ V \/ _ \/ _ \ * \___/_//_/\___/_/_/_/_.__/\___/ * Please refer to Copyright.txt, in Chombo's root directory. */ #endif #include "QuadCFInterp.H" #include "BoxIterator.H" #include "QuadCFInterpF_F.H" #include "LayoutIterator.H" #include "DataIterator.H" #include "CH_Timer.H" #include "CFIVS.H" #include "TensorCFInterp.H" #include "NamespaceHeader.H" using std::endl; /***********************/ // default constructor /***********************/ bool QuadCFInterp::newCFInterMode = true; /**/ void QuadCFInterp:: interpPhiOnIVS(LevelData<FArrayBox>& a_phif, const FArrayBox& a_phistar, const DataIndex& a_datInd, const int a_idir, const Side::LoHiSide a_hiorlo, const IntVectSet& a_interpIVS, Real a_dxLevel, Real a_dxCrse, int a_ncomp) { IVSIterator fine_ivsit(a_interpIVS); FArrayBox& a_phi = a_phif[a_datInd]; Real x1 = a_dxLevel; Real x2 = 0.5*(3.*a_dxLevel+a_dxCrse); Real denom = 1.0-((x1+x2)/x1); Real idenom = 1.0/(denom); // divide is more expensive usually Real x = 2.*a_dxLevel; Real xsquared = x*x; Real m1 = 1/(x1*x1); Real m2 = 1/(x1*(x1-x2)); Real q1 = 1/(x1-x2); Real q2 = x1+x2; int ihilo = sign(a_hiorlo); IntVect ai = -2*ihilo*BASISV(a_idir); IntVect bi = - ihilo*BASISV(a_idir); IntVect ci = ihilo*BASISV(a_idir); for (fine_ivsit.begin(); fine_ivsit.ok(); ++fine_ivsit) { IntVect ivf = fine_ivsit(); // quadratic interpolation for (int ivar = 0; ivar < a_ncomp; ivar++) { Real pa = a_phi(ivf + ai, ivar); Real pb = a_phi(ivf + bi, ivar); Real pc = a_phistar(ivf + ci, ivar); //phi = ax**2 + bx + c, x = 0 at pa Real a = (pb-pa)*m1 - (pb-pc)*m2; a *= idenom; Real b = (pb-pc)*q1 - a*q2; Real c = pa; a_phi(ivf,ivar) = a*xsquared + b*x + c; } //end loop over components } //end loop over fine intvects } /**/ void QuadCFInterp:: homogeneousCFInterpPhi(LevelData<FArrayBox>& a_phif, const DataIndex& a_datInd, int a_idir, Side::LoHiSide a_hiorlo, LayoutData<CFIVS> a_loCFIVS[SpaceDim], LayoutData<CFIVS> a_hiCFIVS[SpaceDim], Real a_dxLevel, Real a_dxCrse, int a_ncomp) { const CFIVS* cfivs_ptr = NULL; if (a_hiorlo == Side::Lo) cfivs_ptr = &a_loCFIVS[a_idir][a_datInd]; else cfivs_ptr = &a_hiCFIVS[a_idir][a_datInd]; const IntVectSet& interp_ivs = cfivs_ptr->getFineIVS(); if (!interp_ivs.isEmpty()) { int ihilo = sign(a_hiorlo); Box phistarbox = interp_ivs.minBox(); phistarbox.shift(a_idir, ihilo); FArrayBox phistar(phistarbox, a_ncomp); //hence the homogeneous... 
phistar.setVal(0.); //given phistar, interpolate on fine ivs to fill ghost cells for phi interpPhiOnIVS(a_phif, phistar, a_datInd, a_idir, a_hiorlo, interp_ivs, a_dxLevel, a_dxCrse, a_ncomp); } } /**/ void QuadCFInterp:: homogeneousCFInterpTanGrad(LevelData<FArrayBox>& a_tanGrad, const LevelData<FArrayBox>& a_phi, const DataIndex& a_DatInd, int a_idir, Side::LoHiSide a_hiorlo, Real a_dxLevel, Real a_dxCrse, int a_ncomp, LayoutData<TensorFineStencilSet> a_loTanStencilSets[SpaceDim], LayoutData<TensorFineStencilSet> a_hiTanStencilSets[SpaceDim]) { const TensorFineStencilSet* cfstencil_ptr = NULL; if (a_hiorlo == Side::Lo) cfstencil_ptr = &a_loTanStencilSets[a_idir][a_DatInd]; else cfstencil_ptr = &a_hiTanStencilSets[a_idir][a_DatInd]; Real x1 = a_dxLevel; Real x2 = 0.5*(3.*a_dxLevel+a_dxCrse); Real denom = 1.0-((x1+x2)/x1); Real idenom = 1.0/(denom); // divide is more expensive usually Real x = 2.*a_dxLevel; Real xsquared = x*x; Real m1 = 1/(x1*x1); Real m2 = 1/(x1*(x1-x2)); Real q1 = 1/(x1-x2); Real q2 = x1+x2; const FArrayBox& phi = a_phi[a_DatInd]; FArrayBox& tanGrad = a_tanGrad[a_DatInd]; // loop over gradient directions for (int gradDir = 0; gradDir<SpaceDim; gradDir++) { if (gradDir != a_idir) { // first do centered stencil const IntVectSet& centeredIVS = cfstencil_ptr->getCenteredStencilSet(gradDir); int ihilo = sign(a_hiorlo); if (!centeredIVS.isEmpty()) { // do centered computation IVSIterator cntrd_ivs(centeredIVS); // want to average fine-grid gradient with coarse // grid gradient, which is 0 (which is where the // extra factor of one-half comes from) Real gradMult = (0.5/a_dxLevel); for (cntrd_ivs.begin(); cntrd_ivs.ok(); ++cntrd_ivs) { IntVect ivf = cntrd_ivs(); IntVect finePhiLoc = ivf - ihilo*BASISV(a_idir); IntVect finePhiLoc2 = finePhiLoc - ihilo*BASISV(a_idir); // loop over variables for (int ivar = 0; ivar<a_phi.nComp(); ivar++) { Real fineHi = phi(finePhiLoc2+BASISV(gradDir),ivar); Real fineLo = phi(finePhiLoc2-BASISV(gradDir),ivar); Real fineGrada = gradMult*(fineHi-fineLo); fineHi = phi(finePhiLoc+BASISV(gradDir),ivar); fineLo = phi(finePhiLoc-BASISV(gradDir),ivar); Real fineGradb = gradMult*(fineHi-fineLo); // homogeneous interp implies that gradc is 0 Real gradc = 0; int gradComp = TensorCFInterp::gradIndex(ivar,gradDir); Real a = (fineGradb-fineGrada)*m1 - (fineGradb-gradc)*m2; a *= idenom; Real b = (fineGradb-gradc)*q1 - a*q2; Real c = fineGrada; tanGrad(ivf,gradComp) = a*xsquared + b*x + c; } } // end loop over centered difference cells } // end if there are centered cells // now do forward-difference cells const IntVectSet& forwardIVS = cfstencil_ptr->getForwardStencilSet(gradDir); if (!forwardIVS.isEmpty()) { // do forward-difference computations IVSIterator fwd_ivs(forwardIVS); // set up multipliers for gradient; since we want to average // fine-grid gradient with coarse-grid gradient (which is 0), // include an extra factor of one-half here. 
Real mult0 = -1.5/a_dxLevel; Real mult1 = 2.0/a_dxLevel; Real mult2 = -0.5/a_dxLevel; for (fwd_ivs.begin(); fwd_ivs.ok(); ++fwd_ivs) { IntVect ivf = fwd_ivs(); IntVect finePhiLoc = ivf - ihilo*BASISV(a_idir); IntVect finePhiLoc2 = finePhiLoc - ihilo*BASISV(a_idir); //now loop overvariables for (int var= 0; var<a_phi.nComp(); var++) { Real fine0 = phi(finePhiLoc2,var); Real fine1 = phi(finePhiLoc2+BASISV(gradDir),var); Real fine2 = phi(finePhiLoc2+2*BASISV(gradDir),var); Real fineGrada = mult0*fine0 +mult1*fine1 +mult2*fine2; fine0 = phi(finePhiLoc,var); fine1 = phi(finePhiLoc+BASISV(gradDir),var); fine2 = phi(finePhiLoc+2*BASISV(gradDir),var); Real fineGradb = mult0*fine0 +mult1*fine1 +mult2*fine2; Real gradc = 0.0; int gradComp = TensorCFInterp::gradIndex(var,gradDir); // now compute gradient Real a = (fineGradb-fineGrada)*m1 - (fineGradb-gradc)*m2; a *= idenom; Real b = (fineGradb-gradc)*q1 - a*q2; Real c = fineGrada; tanGrad(ivf,gradComp) = a*xsquared + b*x + c; } // end loop over variables } // end loop over forward-difference locations } // end if there are forward-difference cells // now do backward-difference cells const IntVectSet& backwardIVS = cfstencil_ptr->getBackwardStencilSet(gradDir); if (!backwardIVS.isEmpty()) { IVSIterator back_ivs(backwardIVS); // set up multipliers for gradient -- since we want to average // fine-grid gradient with coarse-grid gradient (which is 0), // include an extra factor of one-half here. Real mult0 = -1.5/a_dxLevel; Real mult1 = 2.0/a_dxLevel; Real mult2 = -0.5/a_dxLevel; for (back_ivs.begin(); back_ivs.ok(); ++back_ivs) { IntVect ivf = back_ivs(); IntVect finePhiLoc = ivf - ihilo*BASISV(a_idir); IntVect finePhiLoc2 = finePhiLoc - ihilo*BASISV(a_idir); // now loop over variables for (int var=0; var<a_phi.nComp(); var++) { Real fine0 = phi(finePhiLoc2,var); Real fine1 = phi(finePhiLoc2-BASISV(gradDir),var); Real fine2 = phi(finePhiLoc2-2*BASISV(gradDir),var); Real fineGrada = mult0*fine0 +mult1*fine1 +mult2*fine2; fine0 = phi(finePhiLoc,var); fine1 = phi(finePhiLoc-BASISV(gradDir),var); fine2 = phi(finePhiLoc-2*BASISV(gradDir),var); Real fineGradb = mult0*fine0 +mult1*fine1 +mult2*fine2; Real gradc = 0.0; int gradComp = TensorCFInterp::gradIndex(var,gradDir); Real a = (fineGradb-fineGrada)*m1 - (fineGradb-gradc)*m2; a *= idenom; Real b = (fineGradb-gradc)*q1 - a*q2; Real c = fineGrada; tanGrad(ivf,gradComp) = a*xsquared + b*x + c; } // end loop over variables } // end loop over backward-difference cells } // end if there are backward-difference cells } // end if gradDir is a tangential direction } // end loop over gradient directions } /***********************/ // does homogeneous coarse/fine interpolation /***********************/ void QuadCFInterp:: homogeneousCFInterp(LevelData<FArrayBox>& a_phif, LevelData<FArrayBox>& a_tanGrad, LayoutData<CFIVS> a_loCFIVS[SpaceDim], LayoutData<CFIVS> a_hiCFIVS[SpaceDim], Real a_dxLevel, Real a_dxCrse, int a_ncomp, LayoutData<TensorFineStencilSet> a_loTanStencilSets[SpaceDim], LayoutData<TensorFineStencilSet> a_hiTanStencilSets[SpaceDim]) { // need to do this to be sure that tangential derivatives are computed // correctly a_phif.exchange(a_phif.interval()); DataIterator dit = a_phif.dataIterator(); for (dit.begin(); dit.ok(); ++dit) { const DataIndex& datInd = dit(); // first fill in cells for phi for (int idir = 0; idir < SpaceDim; idir++) { SideIterator sit; for (sit.begin(); sit.ok(); sit.next()) { homogeneousCFInterpPhi(a_phif,datInd,idir,sit(), a_loCFIVS, a_hiCFIVS, a_dxLevel, a_dxCrse, a_ncomp); } } // 
now fill in tangential gradient cells for (int idir = 0; idir<SpaceDim; idir++) { SideIterator sit; for (sit.begin(); sit.ok(); sit.next()) { homogeneousCFInterpTanGrad(a_tanGrad, a_phif, datInd,idir,sit(), a_dxLevel, a_dxCrse, a_ncomp, a_loTanStencilSets, a_hiTanStencilSets); } } } } void QuadCFInterp::clear() { m_isDefined = false; m_level = -1; m_dxFine = -1; } QuadCFInterp::QuadCFInterp() { clear(); } /***********************/ /***********************/ QuadCFInterp::QuadCFInterp( const DisjointBoxLayout& a_fineBoxes, const DisjointBoxLayout* a_coarBoxes, Real a_dxFine, int a_refRatio, int a_nComp, const Box& a_domf) { ProblemDomain fineProbDomain(a_domf); define(a_fineBoxes,a_coarBoxes, a_dxFine,a_refRatio,a_nComp, fineProbDomain); } /***********************/ /***********************/ QuadCFInterp::QuadCFInterp( const DisjointBoxLayout& a_fineBoxes, const DisjointBoxLayout* a_coarBoxes, Real a_dxFine, int a_refRatio, int a_nComp, const ProblemDomain& a_domf) { define(a_fineBoxes,a_coarBoxes, a_dxFine,a_refRatio,a_nComp, a_domf); } /***********************/ /***********************/ void QuadCFInterp::define( const DisjointBoxLayout& a_fineBoxes, const DisjointBoxLayout* a_coarBoxesPtr, Real a_dxLevel, int a_refRatio, int a_nComp, const ProblemDomain& a_domf) { CH_TIME("QuadCFInterp::define"); clear(); m_isDefined = true; CH_assert(a_nComp > 0); CH_assert (!a_domf.isEmpty()); // consistency check CH_assert (a_fineBoxes.checkPeriodic(a_domf)); m_domainFine = a_domf; m_dxFine = a_dxLevel; m_refRatio = a_refRatio; m_nComp = a_nComp; m_inputFineLayout = a_fineBoxes; bool fineCoversCoarse = false; if (a_coarBoxesPtr != NULL) { int factor = D_TERM6(a_refRatio, *a_refRatio, *a_refRatio, *a_refRatio, *a_refRatio, *a_refRatio); long long numPts = a_fineBoxes.numCells()/factor; numPts -= a_coarBoxesPtr->numCells(); if (numPts == 0) fineCoversCoarse = true; } m_fineCoversCoarse = fineCoversCoarse; if (a_coarBoxesPtr == NULL || fineCoversCoarse) m_level = 0; else m_level = 1; if (m_level > 0) { // (DFM) only check for valid refRatio if a coarser level exists CH_assert(a_refRatio >= 1); const DisjointBoxLayout& coarBoxes = *a_coarBoxesPtr; m_inputCoarLayout = coarBoxes; CH_assert (coarBoxes.checkPeriodic(coarsen(a_domf,a_refRatio))); for (int idir = 0; idir < SpaceDim; idir++) { m_loQCFS[idir].define(a_fineBoxes); m_hiQCFS[idir].define(a_fineBoxes); } //locoarboxes and hicoarboxes are now open //and have same processor mapping as a_fineboxes //make boxes for coarse buffers m_coarBoxes.deepCopy(a_fineBoxes); m_coarBoxes.coarsen(m_refRatio); m_coarBoxes.grow(2); m_coarBoxes.close(); m_coarBuffer.define(m_coarBoxes, m_nComp); m_copier.define(coarBoxes, m_coarBoxes); if (!newCFInterMode) //old n^2 algorithm (bvs) { //make cfstencils and boxes for coarse buffers DataIterator dit = a_fineBoxes.dataIterator(); for (dit.begin(); dit.ok(); ++dit) { const Box& fineBox = a_fineBoxes[dit()]; for (int idir = 0; idir < SpaceDim; idir++) { //low side cfstencil m_loQCFS[idir][dit()].define(a_domf, fineBox, a_fineBoxes, coarBoxes, a_refRatio, idir, Side::Lo); //high side cfstencil m_hiQCFS[idir][dit()].define(a_domf, fineBox, a_fineBoxes, coarBoxes, a_refRatio, idir, Side::Hi); } } } else { //new "moving window" version of CF stencil building Vector<Box> periodicFine; CFStencil::buildPeriodicVector(periodicFine, a_domf, a_fineBoxes); Vector<Box> coarsenedFine(periodicFine); for (int i=0; i<coarsenedFine.size(); ++i) { coarsenedFine[i].coarsen(a_refRatio); } DataIterator dit = a_fineBoxes.dataIterator(); for 
(dit.begin(); dit.ok(); ++dit) { const Box& fineBox = a_fineBoxes[dit()]; for (int idir = 0; idir < SpaceDim; idir++) { //low side cfstencil m_loQCFS[idir][dit()].define(a_domf, fineBox, periodicFine, coarsenedFine, coarBoxes, a_refRatio, idir, Side::Lo); //high side cfstencil m_hiQCFS[idir][dit()].define(a_domf, fineBox, periodicFine, coarsenedFine, coarBoxes, a_refRatio, idir, Side::Hi); } } } } } /***********************/ // apply coarse-fine boundary conditions -- assume that phi grids // are grown by one /***********************/ void QuadCFInterp::coarseFineInterp(BaseFab<Real> & a_phif, const BaseFab<Real> & a_phic, const QuadCFStencil& a_qcfs, const Side::LoHiSide a_hiorlo, const int a_idir, const Interval& a_variables) const { CH_TIME("QuadCFInterp::coarseFineInterp(BaseFab<Real> & a_phif,...)"); CH_assert(isDefined()); //nothing happens if m_level == 0 if (m_level > 0) { if (!a_qcfs.isEmpty()) { BaseFab<Real> phistar; //first find extended value phistar //includes finding slopes of coarse solution bar getPhiStar(phistar, a_phic, a_qcfs, a_hiorlo, a_idir, a_variables); //given phistar, interpolate on fine ivs interpOnIVS(a_phif, phistar, a_qcfs, a_hiorlo, a_idir, a_variables); } } } /***********************/ //get extended phi (lives next to interpivs) /***********************/ void QuadCFInterp::getPhiStar(BaseFab<Real> & a_phistar, const BaseFab<Real> & a_phic, const QuadCFStencil& a_qcfs, const Side::LoHiSide a_hiorlo, const int a_idir, const Interval& a_variables) const { CH_TIMERS("QuadCFInterp::getPhiStar"); //CH_TIMER("QuadCFInterp::computeFirstDerivative", t1st); //CH_TIMER("QuadCFInterp::computesecondDerivative", t2nd); //CH_TIMER("QuadCFInterp::computemixedDerivative", tmixed); CH_TIMER("QuadCFInterp::slopes", tslopes); CH_TIMER("QuadCFInterp::notPacked", tnp); CH_TIMER("QuadCFInterp::preamble", tpreamble); CH_assert(isDefined()); CH_assert(a_qcfs.isDefined()); #if (CH_SPACEDIM > 1) Real dxf = m_dxFine; Real dxc = m_refRatio*dxf; #endif // if we think of a_idir as the "me" direction, then // the other directions can be "you1" and "you2" #if (CH_SPACEDIM == 3) int you1, you2; if (a_idir == 0) { you1 = 1; you2 = 2; } else if (a_idir == 1) { you1 = 0; you2 = 2; } else { you1 = 0; you2 = 1; } #else // (CH_SPACEDIM == 2) int you1; if (a_idir == 0) { you1 = 1; } else { you1 = 0; } #endif //if cfsten is empty, nothing to interpolate. if (!a_qcfs.isEmpty()) { CH_START(tpreamble); CH_assert(m_level > 0); const IntVectSet& interp_ivs = a_qcfs.getFineIVS(); const IntVectSet& coarsl_ivs = a_qcfs.getCoarIVS(); if (!coarsl_ivs.isDense()) { MayDay::Error("What the hell?? TreeIntVectSet ???"); } if (!interp_ivs.isDense()) { MayDay::Error("What the hell?? TreeIntVectSet ???"); } Box coarinterpbox = coarsl_ivs.minBox(); int ncomp = a_phic.nComp(); CH_assert(ncomp == m_nComp); CH_assert(a_phic.box().contains((coarinterpbox))); // allocate phistar here int ihilo = sign(a_hiorlo); Box phistarbox = interp_ivs.minBox(); phistarbox.shift(a_idir, ihilo); a_phistar.define(phistarbox, ncomp); CH_STOP(tpreamble); for (int ivar = a_variables.begin(); ivar <= a_variables.end(); ivar++) { CH_START(tslopes); //phi = phino + slope*x + half*x*x*curvature BaseFab<Real> coarslope(coarinterpbox, SpaceDim); BaseFab<Real> coarcurva(coarinterpbox, SpaceDim); #if (CH_SPACEDIM == 3) BaseFab<Real> coarmixed(coarinterpbox, 1); #endif // coarslope.setVal(0.); //first find extended value phistar. 
get slopes of coarse solution IVSIterator coar_ivsit(coarsl_ivs); for (coar_ivsit.begin(); coar_ivsit.ok(); ++coar_ivsit) { // this isn't relevant for 1D #if (CH_SPACEDIM > 1) const IntVect& coariv = coar_ivsit(); // coarslope(coariv, a_idir) = 0.0; // coarcurva(coariv, a_idir) = 0.0; coarslope(coariv, you1) = a_qcfs.computeFirstDerivative (a_phic, you1, ivar, coariv, dxc); coarcurva(coariv, you1) = a_qcfs.computeSecondDerivative(a_phic, you1, ivar, coariv, dxc); #endif #if (CH_SPACEDIM == 3) coarslope(coariv, you2) = a_qcfs.computeFirstDerivative (a_phic, you2, ivar, coariv, dxc); coarcurva(coariv, you2) = a_qcfs.computeSecondDerivative(a_phic, you2, ivar, coariv, dxc); coarmixed(coariv) = a_qcfs.computeMixedDerivative(a_phic, ivar, coariv, dxc); #endif } //end loop over coarse intvects CH_STOP(tslopes); if (a_qcfs.finePacked() && CH_SPACEDIM==3) { const IntVect& iv = phistarbox.smallEnd(); IntVect civ(iv); civ.coarsen(m_refRatio); Box region = a_qcfs.packedBox(); #if (CH_SPACEDIM == 3) FORT_PHISTAR(CHF_FRA_SHIFT(a_phistar, iv), CHF_BOX_SHIFT(region, iv), CHF_CONST_FRA_SHIFT(a_phic, civ), CHF_FRA_SHIFT(coarslope, civ), CHF_FRA_SHIFT(coarcurva, civ), CHF_FRA_SHIFT(coarmixed, civ), CHF_CONST_REAL(dxf), CHF_CONST_INT(ivar), CHF_CONST_INT(a_idir), CHF_CONST_INT(ihilo), CHF_CONST_INT(m_refRatio)); #endif } else { CH_START(tnp); IntVect ivf, ivc, ivstar; // ifdef is here to prevent unused variable wasrnings in 1D #if (CH_SPACEDIM > 1) int jf, jc; Real xf, xc, x1; #endif Real pc, update1=0, update2=0, update3=0; IVSIterator fine_ivsit(interp_ivs); for (fine_ivsit.begin(); fine_ivsit.ok(); ++fine_ivsit) { ivf = fine_ivsit(); ivc = coarsen(ivf, m_refRatio); ivstar = ivf; ivstar.shift(a_idir, ihilo); pc = a_phic(ivc,ivar); // for 1D, none of this is necessary -- just copy // coarse value into phiStar #if (CH_SPACEDIM > 1) jf = ivf[you1]; jc = ivc[you1]; xf = (jf+0.5)*dxf; xc = (jc+0.5)*dxc; x1 = xf-xc; update1= x1*coarslope(ivc, you1) + 0.5*x1*x1*coarcurva(ivc, you1); #endif #if (CH_SPACEDIM==3) Real x2; jf = ivf[you2]; jc = ivc[you2]; xf = (jf+0.5)*dxf; xc = (jc+0.5)*dxc; x2 = xf-xc; update2 = x2*coarslope(ivc, you2) + 0.5*x2*x2*coarcurva(ivc, you2); //add in mixed derivative component update3 = x1*x2*coarmixed(ivc); #endif a_phistar(ivstar, ivar) = pc+update1+update2+update3; } //end loop over fine intvects CH_STOP(tnp); } // end if for not packed optimization }//end loop over variables } //end if (level>0 && !hocfs.isempty()) } //end function getphistar /***********************/ /***********************/ bool QuadCFInterp::isDefined() const { return m_isDefined; } void QuadCFInterp::interpOnIVS(BaseFab<Real> & a_phif, const BaseFab<Real> & a_phistar, const QuadCFStencil& a_qcfs, const Side::LoHiSide a_hiorlo, const int a_idir, const Interval& a_variables) const { CH_TIME("QuadCFInterp::interpOnIVS"); CH_assert(isDefined()); CH_assert(a_qcfs.isDefined()); //if cfsten is empty, nothing to interpolate. if (!a_qcfs.isEmpty()) { //if there IS something to interpolate, the level ident //had better be greater than zero. 
Otherwise a null //was sent in as coarse grids on construction CH_assert(m_level > 0); const IntVectSet& interp_ivs = a_qcfs.getFineIVS(); int ihilo = sign(a_hiorlo); int nref = m_refRatio; if (!a_qcfs.finePacked()) { IVSIterator fine_ivsit(interp_ivs); CH_assert(a_phistar.nComp() == a_phif.nComp()); CH_assert(a_phistar.nComp() == m_nComp); for (fine_ivsit.begin(); fine_ivsit.ok(); ++fine_ivsit) { IntVect ivf = fine_ivsit(); // quadratic interpolation for (int ivar = a_variables.begin(); ivar <= a_variables.end(); ivar++) { Real pa = a_phif (ivf -2*ihilo*BASISV(a_idir), ivar); Real pb = a_phif (ivf - ihilo*BASISV(a_idir), ivar); Real ps = a_phistar(ivf + ihilo*BASISV(a_idir), ivar); //phi = ax**2 + bx + c, x = 0 at pa Real h = m_dxFine; Real a = (2./h/h)*(2.*ps + pa*(nref+1.0) -pb*(nref+3.0))/ (nref*nref + 4*nref + 3.0); Real b = (pb-pa)/h - a*h; Real c = pa; Real x = 2.*h; a_phif (ivf,ivar) = a*x*x + b*x + c; } //end loop over components } //end loop over fine intvects } else { // data is packed, just call Fortran int b=a_variables.begin(); int e=a_variables.end(); FORT_QUADINTERP(CHF_FRA(a_phif), CHF_CONST_FRA(a_phistar), CHF_BOX(a_qcfs.packedBox()), CHF_CONST_INT(ihilo), CHF_CONST_REAL(m_dxFine), CHF_CONST_INT(a_idir), CHF_CONST_INT(b), CHF_CONST_INT(e), CHF_CONST_INT(nref)); } } //end if (level>0 && !oscfs.isempty()) } //end function interponivs /***********************/ // apply coarse-fine boundary conditions -- assume that phi grids // are grown by one /***********************/ void QuadCFInterp::coarseFineInterp(LevelData<FArrayBox>& a_phif, const LevelData<FArrayBox>& a_phic) { CH_TIME("QuadCFInterp::coarseFineInterp"); CH_assert(isDefined()); Interval variables = a_phic.interval(); if (m_level > 0) { CH_assert(a_phif.nComp() == m_nComp); CH_assert(a_phic.nComp() == m_nComp); CH_assert(a_phif.ghostVect() >= IntVect::Unit); CH_assert(a_phic.boxLayout() == m_inputCoarLayout); CH_assert(a_phif.boxLayout() == m_inputFineLayout); a_phic.copyTo(a_phic.interval(), m_coarBuffer, m_coarBuffer.interval(), m_copier); for (int idir = 0; idir < SpaceDim; idir++) { DataIterator ditFine = a_phif.dataIterator(); for (ditFine.begin(); ditFine.ok(); ++ditFine) { DataIndex datIndGlo =ditFine(); BaseFab<Real> & phif = a_phif[datIndGlo]; const BaseFab<Real> & phiC = m_coarBuffer[datIndGlo]; //lo side cfinterp //recall that buffers have fine processor mapping { const QuadCFStencil& loQCFS = m_loQCFS[idir][datIndGlo]; coarseFineInterp(phif, phiC ,loQCFS, Side::Lo, idir, variables); } //hi side cfinterp { const QuadCFStencil& hiQCFS = m_hiQCFS[idir][datIndGlo]; coarseFineInterp(phif, phiC, hiQCFS, Side::Hi, idir, variables); } }//end iteration over boxes in fine grid } //end iteration over directions } } /***********************/ /***********************/ QuadCFInterp::~QuadCFInterp() { clear(); } #include "NamespaceFooter.H"
31,004
10,732
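// --- Illustrative sketch (not part of the Chombo QuadCFInterp sources above) ----
// A standalone, scalar re-derivation of the 1-D quadratic stencil used in
// QuadCFInterp::interpPhiOnIVS: fit phi = a*x^2 + b*x + c through the fine values
// pa (at x = 0) and pb (at x = x1 = dxLevel) and the coarse-side value pc taken
// from phistar (at x = x2 = 0.5*(3*dxLevel + dxCrse)), then evaluate at the fine
// ghost cell x = 2*dxLevel. Variable names mirror the file above; the
// exact-quadratic check in main() is an assumption added purely for illustration.
#include <cassert>
#include <cmath>
#include <cstdio>

static double quadCFGhost(double pa, double pb, double pc, double dxLevel, double dxCrse)
{
  const double x1 = dxLevel;
  const double x2 = 0.5 * (3.0 * dxLevel + dxCrse);
  const double idenom = 1.0 / (1.0 - (x1 + x2) / x1);   // equals -x1/x2
  const double m1 = 1.0 / (x1 * x1);
  const double m2 = 1.0 / (x1 * (x1 - x2));
  const double q1 = 1.0 / (x1 - x2);
  const double q2 = x1 + x2;

  double a = ((pb - pa) * m1 - (pb - pc) * m2) * idenom;
  double b = (pb - pc) * q1 - a * q2;
  double c = pa;

  const double x = 2.0 * dxLevel;
  return a * x * x + b * x + c;
}

int main()
{
  // The three-point fit reproduces any quadratic exactly; sample
  // f(x) = 3*x*x - 2*x + 1 at x = 0, x1 and x2 with refRatio = 2 spacing.
  const double dxLevel = 0.5, dxCrse = 1.0;
  const double x1 = dxLevel, x2 = 0.5 * (3.0 * dxLevel + dxCrse);
  auto f = [](double x) { return 3.0 * x * x - 2.0 * x + 1.0; };

  const double ghost = quadCFGhost(f(0.0), f(x1), f(x2), dxLevel, dxCrse);
  assert(std::fabs(ghost - f(2.0 * dxLevel)) < 1e-12);
  std::printf("ghost value %.12f matches f(2*dx) = %.12f\n", ghost, f(2.0 * dxLevel));
  return 0;
}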
// Copyright Oliver Kowalke 2014. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #ifndef MEMORIA_CONTEXT_FIXEDSIZE_H #define MEMORIA_CONTEXT_FIXEDSIZE_H #include <cstddef> #include <cstdlib> #include <new> #include <boost/assert.hpp> #include <boost/config.hpp> #include <memoria/context/detail/config.hpp> #include <memoria/context/stack_context.hpp> #include <memoria/context/stack_traits.hpp> #if defined(MEMORIA_CONTEXT_USE_MAP_STACK) extern "C" { #include <sys/mman.h> } #endif #if defined(BOOST_USE_VALGRIND) #include <valgrind/valgrind.h> #endif namespace memoria { namespace context { template< typename traitsT > class basic_fixedsize_stack { private: std::size_t size_; public: typedef traitsT traits_type; basic_fixedsize_stack( std::size_t size = traits_type::default_size() ) noexcept : size_( size) { } stack_context allocate() { #if defined(MEMORIA_CONTEXT_USE_MAP_STACK) void * vp = ::mmap( 0, size_, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_STACK, -1, 0); if ( vp == MAP_FAILED) { throw std::bad_alloc(); } #else void * vp = std::malloc( size_); if ( ! vp) { throw std::bad_alloc(); } #endif stack_context sctx; sctx.size = size_; sctx.sp = static_cast< char * >( vp) + sctx.size; #if defined(BOOST_USE_VALGRIND) sctx.valgrind_stack_id = VALGRIND_STACK_REGISTER( sctx.sp, vp); #endif return sctx; } void deallocate( stack_context & sctx) noexcept { BOOST_ASSERT( sctx.sp); #if defined(BOOST_USE_VALGRIND) VALGRIND_STACK_DEREGISTER( sctx.valgrind_stack_id); #endif void * vp = static_cast< char * >( sctx.sp) - sctx.size; #if defined(MEMORIA_CONTEXT_USE_MAP_STACK) ::munmap( vp, sctx.size); #else std::free( vp); #endif } }; typedef basic_fixedsize_stack< stack_traits > fixedsize_stack; # if ! defined(MEMORIA_USE_SEGMENTED_STACKS) typedef fixedsize_stack default_stack; # endif }} #endif // MEMORIA_CONTEXT_FIXEDSIZE_H
2,195
863
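// --- Illustrative sketch (not part of the memoria context sources above) --------
// The malloc-backed path of basic_fixedsize_stack boils down to: allocate size_
// bytes, point sp at the *top* of the block (the stack grows downwards on the
// supported targets), and on deallocation recover the block base as sp - size.
// This standalone version omits the mmap/valgrind branches and the traits type.
#include <cstddef>
#include <cstdlib>
#include <new>

struct stack_context_sketch {
    std::size_t size = 0;
    void*       sp   = nullptr;
};

stack_context_sketch allocate_fixed(std::size_t size) {
    void* vp = std::malloc(size);
    if (!vp) throw std::bad_alloc();
    stack_context_sketch sctx;
    sctx.size = size;
    sctx.sp   = static_cast<char*>(vp) + size;   // top of the block
    return sctx;
}

void deallocate_fixed(stack_context_sketch& sctx) {
    void* vp = static_cast<char*>(sctx.sp) - sctx.size;  // recover the base pointer
    std::free(vp);
    sctx = {};
}

int main() {
    stack_context_sketch sctx = allocate_fixed(64 * 1024);
    // ... hand sctx.sp / sctx.size to a context-switching facility here ...
    deallocate_fixed(sctx);
    return 0;
}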
#pragma once #ifndef UITSL_CHRONO_TIMEGUARD_HPP_INCLUDE #define UITSL_CHRONO_TIMEGUARD_HPP_INCLUDE #include <chrono> namespace uitsl { template<typename DurationType, typename Clock=std::chrono::steady_clock> class TimeGuard { DurationType &dest; const std::chrono::time_point<Clock> start; public: explicit TimeGuard(DurationType &dest_) : dest{dest_} , start{Clock::now()} { ; } ~TimeGuard() { dest = std::chrono::duration_cast<DurationType>( Clock::now() - start ); } }; } // namespace uitsl #endif // #ifndef UITSL_CHRONO_TIMEGUARD_HPP_INCLUDE
579
245
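// --- Illustrative sketch (not part of the uitsl sources above) ------------------
// Typical use of the RAII TimeGuard shown above: the destructor writes the scope's
// elapsed time into the referenced duration. The include path is inferred from the
// header guard (UITSL_CHRONO_TIMEGUARD_HPP_INCLUDE) and is an assumption here.
#include <chrono>
#include <cstdio>
#include <thread>
#include "uitsl/chrono/TimeGuard.hpp"   // assumed path

int main() {
    std::chrono::milliseconds elapsed{};
    {
        uitsl::TimeGuard<std::chrono::milliseconds> guard{ elapsed };
        std::this_thread::sleep_for(std::chrono::milliseconds(5));
    }   // guard's destructor stores the elapsed wall time into `elapsed`
    std::printf("measured %lld ms\n", static_cast<long long>(elapsed.count()));
    return 0;
}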
#include "Utils.hpp" #define NOMINMAX #include <Windows.h> #include <stdio.h> #include <string> #include <vector> #include "../valve_sdk/csgostructs.hpp" #include "Math.hpp" HANDLE _out = NULL, _old_out = NULL; HANDLE _err = NULL, _old_err = NULL; HANDLE _in = NULL, _old_in = NULL; namespace Utils { std::vector<char> HexToBytes(const std::string& hex) { std::vector<char> res; for (auto i = 0u; i < hex.length(); i += 2) { std::string byteString = hex.substr(i, 2); char byte = (char)strtol(byteString.c_str(), NULL, 16); res.push_back(byte); } return res; } std::string BytesToString(unsigned char* data, int len) { constexpr char hexmap[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; std::string res(len * 2, ' '); for (int i = 0; i < len; ++i) { res[2 * i] = hexmap[(data[i] & 0xF0) >> 4]; res[2 * i + 1] = hexmap[data[i] & 0x0F]; } return res; } std::vector<std::string> Split(const std::string& str, const char* delim) { std::vector<std::string> res; char* pTempStr = _strdup(str.c_str()); char* context = NULL; char* pWord = strtok_s(pTempStr, delim, &context); while (pWord != NULL) { res.push_back(pWord); pWord = strtok_s(NULL, delim, &context); } free(pTempStr); return res; } unsigned int FindInDataMap(datamap_t *pMap, const char *name) { while (pMap) { for (int i = 0; i<pMap->dataNumFields; i++) { if (pMap->dataDesc[i].fieldName == NULL) continue; if (strcmp(name, pMap->dataDesc[i].fieldName) == 0) return pMap->dataDesc[i].fieldOffset[TD_OFFSET_NORMAL]; if (pMap->dataDesc[i].fieldType == FIELD_EMBEDDED) { if (pMap->dataDesc[i].td) { unsigned int offset; if ((offset = FindInDataMap(pMap->dataDesc[i].td, name)) != 0) return offset; } } } pMap = pMap->baseMap; } return 0; } /* * @brief Create console * * Create and attach a console window to the current process */ void AttachConsole() { _old_out = GetStdHandle(STD_OUTPUT_HANDLE); _old_err = GetStdHandle(STD_ERROR_HANDLE); _old_in = GetStdHandle(STD_INPUT_HANDLE); ::AllocConsole() && ::AttachConsole(GetCurrentProcessId()); _out = GetStdHandle(STD_OUTPUT_HANDLE); _err = GetStdHandle(STD_ERROR_HANDLE); _in = GetStdHandle(STD_INPUT_HANDLE); SetConsoleMode(_out, ENABLE_PROCESSED_OUTPUT | ENABLE_WRAP_AT_EOL_OUTPUT); SetConsoleMode(_in, ENABLE_INSERT_MODE | ENABLE_EXTENDED_FLAGS | ENABLE_PROCESSED_INPUT | ENABLE_QUICK_EDIT_MODE); } /* * @brief Detach console * * Detach and destroy the attached console */ void DetachConsole() { if(_out && _err && _in) { FreeConsole(); if(_old_out) SetStdHandle(STD_OUTPUT_HANDLE, _old_out); if(_old_err) SetStdHandle(STD_ERROR_HANDLE, _old_err); if(_old_in) SetStdHandle(STD_INPUT_HANDLE, _old_in); } } /* * @brief Print to console * * Replacement to printf that works with the newly created console */ bool ConsolePrint(const char* fmt, ...) { if(!_out) return false; char buf[1024]; va_list va; va_start(va, fmt); _vsnprintf_s(buf, 1024, fmt, va); va_end(va); return !!WriteConsoleA(_out, buf, static_cast<DWORD>(strlen(buf)), nullptr, nullptr); } /* * @brief Blocks execution until a key is pressed on the console window * */ char ConsoleReadKey() { if(!_in) return false; auto key = char{ 0 }; auto keysread = DWORD{ 0 }; ReadConsoleA(_in, &key, 1, &keysread, nullptr); return key; } /* * @brief Wait for all the given modules to be loaded * * @param timeout How long to wait * @param modules List of modules to wait for * * @returns See WaitForSingleObject return values. 
*/ int WaitForModules(std::int32_t timeout, const std::initializer_list<std::wstring>& modules) { bool signaled[32] = { 0 }; bool success = false; std::uint32_t totalSlept = 0; if(timeout == 0) { for(auto& mod : modules) { if(GetModuleHandleW(std::data(mod)) == NULL) return WAIT_TIMEOUT; } return WAIT_OBJECT_0; } if(timeout < 0) timeout = INT32_MAX; while(true) { for(auto i = 0u; i < modules.size(); ++i) { auto& module = *(modules.begin() + i); if(!signaled[i] && GetModuleHandleW(std::data(module)) != NULL) { signaled[i] = true; // // Checks if all modules are signaled // bool done = true; for(auto j = 0u; j < modules.size(); ++j) { if(!signaled[j]) { done = false; break; } } if(done) { success = true; goto exit; } } } if(totalSlept > std::uint32_t(timeout)) { break; } Sleep(10); totalSlept += 10; } exit: return success ? WAIT_OBJECT_0 : WAIT_TIMEOUT; } /* * @brief Scan for a given byte pattern on a module * * @param module Base of the module to search * @param signature IDA-style byte array pattern * * @returns Address of the first occurence */ std::uint8_t* PatternScan(void* module, const char* signature) { static auto pattern_to_byte = [](const char* pattern) { auto bytes = std::vector<int>{}; auto start = const_cast<char*>(pattern); auto end = const_cast<char*>(pattern) + strlen(pattern); for(auto current = start; current < end; ++current) { if(*current == '?') { ++current; if(*current == '?') ++current; bytes.push_back(-1); } else { bytes.push_back(strtoul(current, &current, 16)); } } return bytes; }; auto dosHeader = (PIMAGE_DOS_HEADER)module; auto ntHeaders = (PIMAGE_NT_HEADERS)((std::uint8_t*)module + dosHeader->e_lfanew); auto sizeOfImage = ntHeaders->OptionalHeader.SizeOfImage; auto patternBytes = pattern_to_byte(signature); auto scanBytes = reinterpret_cast<std::uint8_t*>(module); auto s = patternBytes.size(); auto d = patternBytes.data(); for(auto i = 0ul; i < sizeOfImage - s; ++i) { bool found = true; for(auto j = 0ul; j < s; ++j) { if(scanBytes[i + j] != d[j] && d[j] != -1) { found = false; break; } } if(found) { return &scanBytes[i]; } } return nullptr; } /* * @brief Set player clantag * * @param tag New clantag */ void SetClantag(const char* tag) { static auto fnClantagChanged = (int(__fastcall*)(const char*, const char*))PatternScan(GetModuleHandleW(L"engine.dll"), "53 56 57 8B DA 8B F9 FF 15"); fnClantagChanged(tag, tag); } /* * @brief Set player name * * @param name New name */ void SetName(const char* name) { static auto nameConvar = g_CVar->FindVar("name"); nameConvar->m_fnChangeCallbacks.m_Size = 0; // Fix so we can change names how many times we want // This code will only run once because of `static` static auto do_once = (nameConvar->SetValue("\n���"), true); nameConvar->SetValue(name); } }
8,241
2,866
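// --- Illustrative sketch (not part of the Utils.cpp above) ----------------------
// The core of Utils::PatternScan, reduced to a portable form: parse an IDA-style
// signature ("55 8B ?? 83") into byte values with -1 as a wildcard, then scan a
// plain buffer for the first match. The PE-header walk and all Windows-specific
// calls from the file above are deliberately left out.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <vector>

static std::vector<int> pattern_to_bytes(const char* pattern) {
    std::vector<int> bytes;
    for (const char* cur = pattern; *cur; ++cur) {
        if (*cur == '?') {
            if (*(cur + 1) == '?') ++cur;       // accept both "?" and "??"
            bytes.push_back(-1);
        } else if (*cur != ' ') {
            char* parsed_end = nullptr;
            bytes.push_back(static_cast<int>(std::strtoul(cur, &parsed_end, 16)));
            cur = parsed_end - 1;               // loop increment moves to the next char
        }
    }
    return bytes;
}

static const std::uint8_t* pattern_scan(const std::uint8_t* data, std::size_t size,
                                        const char* signature) {
    const std::vector<int> pat = pattern_to_bytes(signature);
    for (std::size_t i = 0; i + pat.size() <= size; ++i) {
        bool found = true;
        for (std::size_t j = 0; j < pat.size(); ++j) {
            if (pat[j] != -1 && data[i + j] != pat[j]) { found = false; break; }
        }
        if (found) return data + i;
    }
    return nullptr;
}

int main() {
    const std::uint8_t buffer[] = { 0x90, 0x55, 0x8B, 0xEC, 0x83, 0xC4 };
    const std::uint8_t* hit = pattern_scan(buffer, sizeof(buffer), "8B ?? 83");
    std::printf("match at offset %ld\n", static_cast<long>(hit ? hit - buffer : -1));
    return 0;
}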
#include <vector> #include <gtest/gtest.h> #include "../../main/recommender_data/ShuffleIterator.h" namespace { class TestShuffleIterator : public ::testing::Test { public: RecommenderData rd; TestShuffleIterator() { EXPECT_TRUE(rd.initialize()); } virtual ~TestShuffleIterator() { // You can do clean-up work that doesn't throw exceptions here. } void SetUp() override { } void TearDown() override { } RecDat createRecDat(int user, int item, double time, double score){ RecDat recDat; recDat.user=user; recDat.item=item; recDat.time=time; recDat.score=score; return recDat; } bool in(int element, vector<int> list){ //element is in list for(uint i=0; i<list.size(); i++){ if(list[i]==element) return true; } return false; } }; } TEST_F(TestShuffleIterator, size) { RecDats recData; recData.push_back(createRecDat(2,3,10.0,1.0)); recData.push_back(createRecDat(1,6,10.0,1.0)); recData.push_back(createRecDat(2,8,10.0,1.0)); rd.set_rec_data(recData); ShuffleIterator it(&rd, 123124); ASSERT_TRUE(it.initialize()); EXPECT_EQ(3,it.size()); } TEST_F(TestShuffleIterator, hasNext) { RecDats recData; recData.push_back(createRecDat(2,3,10.1,1.0)); recData.push_back(createRecDat(1,6,10.2,1.0)); recData.push_back(createRecDat(2,8,10.3,1.0)); rd.set_rec_data(recData); ShuffleIterator it(&rd, 1231212); ASSERT_TRUE(it.initialize()); EXPECT_EQ(3,it.size()); ASSERT_TRUE(it.has_next()); it.next(); ASSERT_TRUE(it.has_next()); it.next(); ASSERT_TRUE(it.has_next()); it.next(); EXPECT_FALSE(it.has_next()); } TEST_F(TestShuffleIterator, noshuffle_it) { RecDats recData; recData.push_back(createRecDat(2,3,10.1,1.0)); recData.push_back(createRecDat(1,6,10.2,1.0)); recData.push_back(createRecDat(2,8,10.3,1.0)); rd.set_rec_data(recData); ShuffleIterator it(&rd, 1231212); ASSERT_TRUE(it.initialize()); EXPECT_EQ(3,it.size()); ASSERT_TRUE(it.has_next()); RecDat* recDat = it.next(); EXPECT_EQ(3, recDat->item); ASSERT_TRUE(it.has_next()); recDat = it.next(); EXPECT_EQ(6, recDat->item); ASSERT_TRUE(it.has_next()); recDat = it.next(); EXPECT_EQ(8, recDat->item); EXPECT_FALSE(it.has_next()); } TEST_F(TestShuffleIterator, shuffle_it) { RecDats recData; recData.push_back(createRecDat(2,1,10.0,1.0)); recData.push_back(createRecDat(1,2,10.0,1.0)); recData.push_back(createRecDat(2,3,10.0,1.0)); recData.push_back(createRecDat(4,4,10.1,1.0)); recData.push_back(createRecDat(2,5,10.2,1.0)); recData.push_back(createRecDat(3,6,10.5,1.0)); recData.push_back(createRecDat(1,7,10.5,1.0)); recData.push_back(createRecDat(2,8,10.5,1.0)); recData.push_back(createRecDat(3,9,10.5,1.0)); rd.set_rec_data(recData); ShuffleIterator it(&rd, 1239); ASSERT_TRUE(it.initialize()); EXPECT_EQ(9,it.size()); vector<int> items; items.push_back(1); items.push_back(2); items.push_back(3); ASSERT_TRUE(it.has_next()); RecDat* recDat = it.next(); EXPECT_EQ(10.0, recDat->time); EXPECT_TRUE(in(recDat->item, items)); ASSERT_TRUE(it.has_next()); recDat = it.next(); EXPECT_EQ(10.0, recDat->time); EXPECT_TRUE(in(recDat->item, items)); EXPECT_NE(2, recDat->item); ASSERT_TRUE(it.has_next()); recDat = it.next(); EXPECT_EQ(10.0, recDat->time); EXPECT_TRUE(in(recDat->item, items)); EXPECT_NE(3, recDat->item); ASSERT_TRUE(it.has_next()); recDat = it.next(); EXPECT_EQ(4, recDat->item); ASSERT_TRUE(it.has_next()); recDat = it.next(); EXPECT_EQ(5, recDat->item); items.clear(); items.push_back(6);items.push_back(7);items.push_back(8);items.push_back(9); ASSERT_TRUE(it.has_next()); recDat = it.next(); EXPECT_EQ(10.5, recDat->time); EXPECT_TRUE(in(recDat->item, items)); EXPECT_NE(6, 
recDat->item); ASSERT_TRUE(it.has_next()); recDat = it.next(); EXPECT_EQ(10.5, recDat->time); EXPECT_TRUE(in(recDat->item, items)); EXPECT_NE(7, recDat->item); ASSERT_TRUE(it.has_next()); recDat = it.next(); EXPECT_EQ(10.5, recDat->time); EXPECT_TRUE(in(recDat->item, items)); EXPECT_NE(8, recDat->item); ASSERT_TRUE(it.has_next()); recDat = it.next(); EXPECT_EQ(10.5, recDat->time); EXPECT_TRUE(in(recDat->item, items)); EXPECT_NE(9, recDat->item); } TEST_F(TestShuffleIterator, noshuffle_get) { RecDats recData; recData.push_back(createRecDat(2,3,10.1,1.0)); recData.push_back(createRecDat(1,6,10.2,1.0)); recData.push_back(createRecDat(2,8,10.3,1.0)); rd.set_rec_data(recData); ShuffleIterator it(&rd, 12361887); ASSERT_TRUE(it.initialize()); EXPECT_EQ(3,it.size()); EXPECT_EQ(3, (it.get_future(0))->item); EXPECT_EQ(6, (it.get_future(1))->item); EXPECT_EQ(8, (it.get_future(2))->item); EXPECT_EQ(6, (it.get_future(1))->item); EXPECT_EQ(3, (it.get_future(0))->item); EXPECT_EQ(8, (it.get_future(2))->item); } TEST_F(TestShuffleIterator, reproducable) { RecDats recData; recData.push_back(createRecDat(2,1,10.0,1.0)); recData.push_back(createRecDat(1,2,10.0,1.0)); recData.push_back(createRecDat(2,3,10.0,1.0)); recData.push_back(createRecDat(4,4,10.0,1.0)); recData.push_back(createRecDat(2,5,10.0,1.0)); recData.push_back(createRecDat(3,6,10.0,1.0)); recData.push_back(createRecDat(1,7,10.5,1.0)); recData.push_back(createRecDat(2,8,10.5,1.0)); recData.push_back(createRecDat(3,9,10.5,1.0)); rd.set_rec_data(recData); ShuffleIterator it(&rd, 123124); ASSERT_TRUE(it.initialize()); ShuffleIterator it2(&rd, 123124); ASSERT_TRUE(it2.initialize()); EXPECT_EQ(it.get_future(0)->item,it2.get_future(0)->item); EXPECT_EQ(it.get_future(1)->item,it2.get_future(1)->item); EXPECT_EQ(it.get_future(2)->item,it2.get_future(2)->item); EXPECT_EQ(it.get_future(3)->item,it2.get_future(3)->item); EXPECT_EQ(it.get_future(4)->item,it2.get_future(4)->item); EXPECT_EQ(it.get_future(5)->item,it2.get_future(5)->item); EXPECT_EQ(it.get_future(6)->item,it2.get_future(6)->item); EXPECT_EQ(it.get_future(7)->item,it2.get_future(7)->item); EXPECT_EQ(it.get_future(8)->item,it2.get_future(8)->item); } //matrix function disabled, online recommender should not use it //TEST_F(TestShuffleIterator, matrix) { // RecDats recData; // recData.push_back(createRecDat(2,3,10.0,1.0)); // recData.push_back(createRecDat(1,6,10.0,1.0)); // recData.push_back(createRecDat(2,8,10.0,1.0)); // rd.set_rec_data(recData); // ShuffleIterator it(&rd, 1283761287); // EXPECT_EQ(rd.matrix(),it.matrix()); //} int main (int argc, char **argv) { testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
6,873
3,070
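// --- Illustrative sketch (not part of the ShuffleIterator tests above) ----------
// The tests above pin down two properties of ShuffleIterator: the permutation is
// reproducible for a fixed seed, and records are only reordered within groups that
// share the same timestamp. A generic way to get both properties (not necessarily
// the library's actual implementation) is a seeded std::shuffle applied per
// equal-time block of an already time-sorted dataset:
#include <algorithm>
#include <cstdio>
#include <random>
#include <vector>

struct RecDatSketch { int user; int item; double time; double score; };

void shuffle_within_time_blocks(std::vector<RecDatSketch>& data, unsigned seed) {
    std::mt19937 gen(seed);                       // fixed seed -> reproducible order
    std::size_t begin = 0;
    while (begin < data.size()) {
        std::size_t end = begin + 1;
        while (end < data.size() && data[end].time == data[begin].time) ++end;
        std::shuffle(data.begin() + begin, data.begin() + end, gen);  // same-time block only
        begin = end;
    }
}

int main() {
    std::vector<RecDatSketch> data = {
        {2, 1, 10.0, 1.0}, {1, 2, 10.0, 1.0}, {2, 3, 10.0, 1.0},
        {4, 4, 10.1, 1.0}, {2, 5, 10.2, 1.0},
    };
    shuffle_within_time_blocks(data, 1239);
    for (const RecDatSketch& r : data) std::printf("item %d at time %.1f\n", r.item, r.time);
    return 0;   // items 1..3 may be permuted, items 4 and 5 keep their positions
}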
/* * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "Read.h" #include <sstream> #include <string> namespace tflread { // This will provide v3/v3a format neutral BuiltinOperator tflite::BuiltinOperator builtin_code_neutral(const tflite::OperatorCode *opcode) { assert(opcode != nullptr); int8_t dp_code = opcode->deprecated_builtin_code(); if (dp_code < 127 && dp_code >= 0) return tflite::BuiltinOperator(dp_code); return opcode->builtin_code(); } bool is_valid(const tflite::OperatorCode *opcode) { tflite::BuiltinOperator code = builtin_code_neutral(opcode); return (tflite::BuiltinOperator_MIN <= code && code <= tflite::BuiltinOperator_MAX); } bool is_custom(const tflite::OperatorCode *opcode) { tflite::BuiltinOperator code = builtin_code_neutral(opcode); return (code == tflite::BuiltinOperator_CUSTOM); } std::string opcode_name(const tflite::OperatorCode *opcode) { assert(opcode); if (!is_valid(opcode)) { std::ostringstream oss; oss << "(invalid)"; return oss.str(); } if (is_custom(opcode)) { if (!opcode->custom_code()) return "(invalid custom)"; std::string custom_op = "CUSTOM("; custom_op += opcode->custom_code()->c_str(); custom_op += ")"; return custom_op; } tflite::BuiltinOperator code = builtin_code_neutral(opcode); return tflite::EnumNameBuiltinOperator(code); } const char *tensor_type(const tflite::Tensor *tensor) { return tflite::EnumNameTensorType(tensor->type()); } const char *tensor_name(const tflite::Tensor *tensor) { static const char *kEmptyTensorName = "(noname)"; auto name = tensor->name(); if (name) return name->c_str(); return kEmptyTensorName; } Reader::Reader(const tflite::Model *model) { _version = model->version(); _subgraphs = model->subgraphs(); _buffers = model->buffers(); _metadata = model->metadata(); _signaturedefs = model->signature_defs(); auto opcodes = model->operator_codes(); for (const ::tflite::OperatorCode *opcode : *opcodes) { _op_codes.push_back(opcode); } } size_t Reader::buffer_info(uint32_t buf_idx, const uint8_t **buff_data) { *buff_data = nullptr; if (buf_idx == 0) return 0; if (auto *buffer = (*_buffers)[buf_idx]) { if (auto *array = buffer->data()) { if (size_t size = array->size()) { *buff_data = reinterpret_cast<const uint8_t *>(array->data()); return size; } } } return 0; } tflite::BuiltinOperator Reader::builtin_code(const tflite::Operator *op) const { uint32_t index = op->opcode_index(); assert(index < _op_codes.size()); const tflite::OperatorCode *opcode = _op_codes.at(index); return tflread::builtin_code_neutral(opcode); } std::string Reader::opcode_name(const tflite::Operator *op) const { uint32_t index = op->opcode_index(); assert(index < _op_codes.size()); const tflite::OperatorCode *opcode = _op_codes.at(index); if (!is_valid(opcode)) { std::ostringstream oss; oss << "(invalid: " << index << ")"; return oss.str(); } return tflread::opcode_name(opcode); } bool Reader::select_subgraph(uint32_t sgindex) { _subgraph_index = sgindex; _tensors = nullptr; _operators = nullptr; 
_inputs.clear(); _outputs.clear(); if (_subgraphs->Length() <= sgindex) { assert(false); return false; } const tflite::SubGraph *subgraph = (*_subgraphs)[sgindex]; auto name = subgraph->name(); _subgraph_name = name ? name->c_str() : "(noname)"; _tensors = subgraph->tensors(); _operators = subgraph->operators(); _inputs = as_index_vector(subgraph->inputs()); _outputs = as_index_vector(subgraph->outputs()); return true; } } // namespace tflread
4,301
1,577
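// --- Illustrative sketch (not part of the tflread sources above) ----------------
// builtin_code_neutral() above bridges the two TFLite schema layouts: when the old
// int8 deprecated_builtin_code field holds a usable value (the code checks
// 0 <= value < 127) that value wins, otherwise the wider builtin_code field is
// consulted. A flatbuffers-free mock of just that decision, with made-up values:
#include <cstdint>
#include <cstdio>

struct OperatorCodeMock {
    std::int8_t  deprecated_builtin_code;   // old schema field
    std::int32_t builtin_code;              // new schema field
};

std::int32_t builtin_code_neutral_sketch(const OperatorCodeMock& opcode) {
    if (opcode.deprecated_builtin_code < 127 && opcode.deprecated_builtin_code >= 0)
        return opcode.deprecated_builtin_code;
    return opcode.builtin_code;
}

int main() {
    OperatorCodeMock old_style{ 3, 0 };       // value stored only in the old field
    OperatorCodeMock new_style{ 127, 150 };   // old field maxed out -> read the new field
    std::printf("%d %d\n",
                builtin_code_neutral_sketch(old_style),
                builtin_code_neutral_sketch(new_style));
    return 0;
}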
// MIT License // // Copyright (c) 2021-2022. Seungwoo Kang // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // // project home: https://github.com/perfkitpp #include "ftxui/component/captured_mouse.hpp" // for ftxui #include "ftxui/component/component.hpp" // for Checkbox, Vertical #include "ftxui/component/screen_interactive.hpp" // for ScreenInteractive #include "perfkit/configs.h" #include "perfkit/ftxui-extension.hpp" #include "perfkit/traces.h" #include "spdlog/fmt/fmt.h" using namespace std::literals; std::map<std::string, std::map<std::string, std::string>> ved{ {"asd", {{"asd", "weqw"}, {"vafe, ewqew", "dwrew"}}}, {"vadsfew", {{"dav ,ea w", "Ewqsad"}, {"scxz ss", "dwqewqew"}}}}; PERFKIT_CATEGORY(cfg) { PERFKIT_CONFIGURE(active, true).confirm(); PERFKIT_CONFIGURE(active_async, true).confirm(); PERFKIT_SUBCATEGORY(labels) { PERFKIT_CONFIGURE(foo, 1).confirm(); PERFKIT_CONFIGURE(bar, false).confirm(); PERFKIT_CONFIGURE(ce, "ola ollalala").confirm(); PERFKIT_CONFIGURE(ced, std::vector({1, 2, 3, 4, 5, 6})).confirm(); PERFKIT_CONFIGURE(cedr, (std::map<std::string, int>{ {"fdf", 2}, {"erwe", 4}})) .confirm(); PERFKIT_CONFIGURE(bb, (std::map<std::string, bool>{ {"fdf", false}, {"erwe", true}})) .confirm(); PERFKIT_CONFIGURE(cedrs, 3.141592).confirm(); PERFKIT_CONFIGURE(cedrstt, std::move(ved)).confirm(); } PERFKIT_SUBCATEGORY(lomo) { PERFKIT_SUBCATEGORY(movdo) { PERFKIT_CONFIGURE(ce, 1).confirm(); PERFKIT_CONFIGURE(ced, 1).confirm(); PERFKIT_CONFIGURE(cedr, 1).confirm(); PERFKIT_SUBCATEGORY(cef) { } PERFKIT_SUBCATEGORY(ccra) { PERFKIT_CONFIGURE(foo, 1).confirm(); PERFKIT_CONFIGURE(bar, 1).confirm(); PERFKIT_CONFIGURE(ce, 1).confirm(); PERFKIT_CONFIGURE(ced, 1).confirm(); PERFKIT_CONFIGURE(cedr, 1).confirm(); PERFKIT_CONFIGURE(cedrs, 1).confirm(); PERFKIT_CONFIGURE(a_foo, 1).confirm(); PERFKIT_CONFIGURE(a_bar, 1).confirm(); PERFKIT_CONFIGURE(a_ce, 1).confirm(); PERFKIT_CONFIGURE(a_ced, 1).confirm(); PERFKIT_CONFIGURE(a_cedr, 1).confirm(); PERFKIT_CONFIGURE(a_cedrs, 1).confirm(); PERFKIT_CONFIGURE(b_foo, 1).confirm(); PERFKIT_CONFIGURE(b_bar, 1).confirm(); PERFKIT_CONFIGURE(b_ce, 1).confirm(); PERFKIT_CONFIGURE(b_ced, 1).confirm(); PERFKIT_CONFIGURE(b_cedr, 1).confirm(); PERFKIT_CONFIGURE(b_cedrs, 1).confirm(); PERFKIT_CONFIGURE(c_foo, 1).confirm(); PERFKIT_CONFIGURE(c_bar, 1).confirm(); PERFKIT_CONFIGURE(c_ce, 1).confirm(); PERFKIT_CONFIGURE(c_ced, 1).confirm(); PERFKIT_CONFIGURE(c_cedr, 1).confirm(); PERFKIT_CONFIGURE(c_cedrs, 1).confirm(); PERFKIT_CONFIGURE(d_foo, 1).confirm(); 
PERFKIT_CONFIGURE(d_bar, 1).confirm(); PERFKIT_CONFIGURE(d_ce, 1).confirm(); PERFKIT_CONFIGURE(d_ced, 1).confirm(); PERFKIT_CONFIGURE(d_cedr, 1).confirm(); PERFKIT_CONFIGURE(d_cedrs, 1).confirm(); } } } PERFKIT_CONFIGURE(foo, 1).confirm(); PERFKIT_CONFIGURE(bar, 1).confirm(); PERFKIT_CONFIGURE(ce, 1).confirm(); PERFKIT_CONFIGURE(ced, 1).confirm(); PERFKIT_CONFIGURE(cedr, 1).confirm(); PERFKIT_CONFIGURE(cedrs, 1).confirm(); PERFKIT_CONFIGURE(a_foo, 1).confirm(); PERFKIT_CONFIGURE(a_bar, 1).confirm(); PERFKIT_CONFIGURE(a_ce, 1).confirm(); PERFKIT_CONFIGURE(a_ced, 1).confirm(); PERFKIT_CONFIGURE(a_cedr, 1).confirm(); PERFKIT_CONFIGURE(a_cedrs, 1).confirm(); PERFKIT_CONFIGURE(b_foo, 1).confirm(); PERFKIT_CONFIGURE(b_bar, 1).confirm(); PERFKIT_CONFIGURE(b_ce, 1).confirm(); PERFKIT_CONFIGURE(b_ced, 1).confirm(); PERFKIT_CONFIGURE(b_cedr, 1).confirm(); PERFKIT_CONFIGURE(b_cedrs, 1).confirm(); PERFKIT_CONFIGURE(c_foo, 1).confirm(); PERFKIT_CONFIGURE(c_bar, 1).confirm(); PERFKIT_CONFIGURE(c_ce, 1).confirm(); PERFKIT_CONFIGURE(c_ced, 1).confirm(); PERFKIT_CONFIGURE(c_cedr, 1).confirm(); PERFKIT_CONFIGURE(c_cedrs, 1).confirm(); PERFKIT_CONFIGURE(d_foo, 1).confirm(); PERFKIT_CONFIGURE(d_bar, 1).confirm(); PERFKIT_CONFIGURE(d_ce, 1).confirm(); PERFKIT_CONFIGURE(d_ced, 1).confirm(); PERFKIT_CONFIGURE(d_cedr, 1).confirm(); PERFKIT_CONFIGURE(d_cedrs, 1).confirm(); PERFKIT_CONFIGURE(e_foo, 1).confirm(); PERFKIT_CONFIGURE(e_bar, 1).confirm(); PERFKIT_CONFIGURE(e_ce, 1).confirm(); PERFKIT_CONFIGURE(e_ced, 1).confirm(); PERFKIT_CONFIGURE(e_cedr, 1).confirm(); PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); PERFKIT_CONFIGURE(e_cedrsd, "").confirm(); } PERFKIT_CATEGORY(vlao1) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao2) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao3) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao4) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao5) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao6) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao7) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao8) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao9) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao22) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao33) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao44) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } PERFKIT_CATEGORY(vlao55) { PERFKIT_CONFIGURE(e_cedrs, 1).confirm(); } using namespace ftxui; perfkit::tracer traces[] = { {0, "root (1)"}, {1, "A (2)"}, {31, "B (4)"}, {-51, "C (0)"}, {14, "D (3)"}, }; class my_subscriber : public perfkit_ftxui::if_subscriber { public: bool on_update(update_param_type const& param_type, perfkit::trace_variant_type const& value) override { traces[1].fork("Value update A")["NAME"] = param_type.name; return true; } void on_end(update_param_type const& param_type) override { vlao::e_cedrs.async_modify(vlao::e_cedrs.get() + 1); vlao::e_cedrsd.async_modify(std::string(param_type.name)); vlao::registry().apply_update_and_check_if_dirty(); } }; int main(int argc, char const* argv[]) { auto screen = ScreenInteractive::Fullscreen(); std::shared_ptr<perfkit_ftxui::string_queue> commands; auto preset = perfkit_ftxui::PRESET(&commands, {}, std::make_shared<my_subscriber>()); auto kill_switch = perfkit_ftxui::launch_async_loop(&screen, preset); for (int ic = 0; 
perfkit_ftxui::is_alive(kill_switch.get()); ++ic) { std::this_thread::sleep_for(10ms); cfg::registry().apply_update_and_check_if_dirty(); auto trc_root = traces[0].fork("Root Trace"); auto timer = trc_root.timer("Some Timer"); trc_root["Value 0"] = 3; trc_root["Value 1"] = *cfg::labels::foo; trc_root["Value 2"] = fmt::format("Hell, world! {}", *cfg::labels::foo); trc_root["Value 3"] = false; trc_root["Value 3"]["Subvalue 0"] = ic; trc_root["Value 3"]["Subvalue GR"] = std::vector<int>{3, 4, 5}; trc_root["Value 3"]["Subvalue 1"] = double(ic); trc_root["Value 3"]["Subvalue 2"] = !!(ic & 1); trc_root["Value 4"]["Subvalue 3"] = fmt::format("Hell, world! {}", ic); auto r = trc_root["Value 5"]; trc_root["Value 5"]["Subvalue 0"] = ic; if (r) { trc_root["Value 5"]["Subvalue 1 Cond"] = double(ic); } trc_root["Value 5"]["Subvalue 2"] = !!(ic & 1); std::string to_get; if (commands->try_getline(to_get)) { trc_root["TEXT"] = to_get; } cfg::labels::foo.async_modify(cfg::labels::foo.get() + 1); if (cfg::active_async.get() == false) { kill_switch.reset(); break; } } return 0; } // Copyright 2020 Arthur Sonzogni. All rights reserved. // Use of this source code is governed by the MIT license that can be found in // the LICENSE file.
9,916
3,925
/** * Created by G-Canvas Open Source Team. * Copyright (c) 2017, Alibaba, Inc. All rights reserved. * * This source code is licensed under the Apache Licence 2.0. * For the full copyright and license information, please view * the LICENSE file in the root directory of this source tree. */ #include "WebGLTexture.h" namespace NodeBinding { Napi::FunctionReference WebGLTexture::constructor; WebGLTexture::WebGLTexture(const Napi::CallbackInfo &info) : Napi::ObjectWrap<WebGLTexture>(info) { mId = info[0].As<Napi::Number>().Uint32Value(); } void WebGLTexture::Init(Napi::Env env) { Napi::HandleScope scope(env); Napi::Function func = DefineClass(env, "WebGLTexture", {}); constructor = Napi::Persistent(func); constructor.SuppressDestruct(); } Napi::Object WebGLTexture::NewInstance(Napi::Env env, const Napi::Value arg) { Napi::Object obj = constructor.New({arg}); return obj; } } // namespace NodeBinding
945
303
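// --- Illustrative sketch (not part of the WebGLTexture sources above) -----------
// How the Napi::ObjectWrap wrapper above would typically be used from the rest of
// the binding: register the class once at module init, then wrap a GL texture id
// in a JS object via NewInstance. The init/create function names and the omitted
// glGenTextures call are assumptions for illustration only.
#include <napi.h>
#include "WebGLTexture.h"

Napi::Object InitModule(Napi::Env env, Napi::Object exports) {
    NodeBinding::WebGLTexture::Init(env);     // register the wrapper class once
    return exports;
}

Napi::Value CreateTextureObject(const Napi::CallbackInfo& info) {
    Napi::Env env = info.Env();
    unsigned int textureId = 0;               // would come from glGenTextures in a real binding
    return NodeBinding::WebGLTexture::NewInstance(env, Napi::Number::New(env, textureId));
}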
//================================================================================================== /** EVE - Expressive Vector Engine Copyright : EVE Contributors & Maintainers SPDX-License-Identifier: MIT **/ //================================================================================================== #include "test.hpp" #include <eve/concept/value.hpp> #include <eve/constant/valmin.hpp> #include <eve/constant/valmax.hpp> #include <eve/constant/invpi.hpp> #include <eve/function/cosd.hpp> #include <eve/function/diff/cosd.hpp> #include <eve/function/deginrad.hpp> #include <cmath> #include <eve/module/math/detail/constant/rempio2_limits.hpp> #include <eve/detail/function/tmp/boost_math_cospi.hpp> #include <eve/detail/function/tmp/boost_math_sinpi.hpp> //================================================================================================== // Types tests //================================================================================================== EVE_TEST_TYPES( "Check return types of cosd" , eve::test::simd::ieee_reals ) <typename T>(eve::as<T>) { using v_t = eve::element_type_t<T>; TTS_EXPR_IS( eve::cosd(T()) , T); TTS_EXPR_IS( eve::cosd(v_t()), v_t); }; //================================================================================================== // cosd tests //================================================================================================== auto mquarter_c = []<typename T>(eve::as<T> const & ){ return T(-45); }; auto quarter_c = []<typename T>(eve::as<T> const & ){ return T( 45); }; auto mhalf_c = []<typename T>(eve::as<T> const & ){ return T(-90 ); }; auto half_c = []<typename T>(eve::as<T> const & ){ return T( 90 ); }; auto mmed = []<typename T>(eve::as<T> const & ){ return -5000; }; auto med = []<typename T>(eve::as<T> const & ){ return 5000; }; EVE_TEST( "Check behavior of cosd on wide" , eve::test::simd::ieee_reals , eve::test::generate( eve::test::randoms(mquarter_c, quarter_c) , eve::test::randoms(mhalf_c, half_c) , eve::test::randoms(mmed, med)) ) <typename T>(T const& a0, T const& a1, T const& a2) { using eve::detail::map; using eve::cosd; using eve::diff; using eve::deginrad; using v_t = eve::element_type_t<T>; auto ref = [](auto e) -> v_t { return boost::math::cos_pi(e/180.0l); }; TTS_ULP_EQUAL(eve::quarter_circle(cosd)(a0) , map(ref, a0), 2); TTS_ULP_EQUAL(eve::half_circle(cosd)(a0) , map(ref, a0), 2); TTS_ULP_EQUAL(eve::half_circle(cosd)(a1) , map(ref, a1), 30); TTS_ULP_EQUAL(cosd(a0) , map(ref, a0), 2); TTS_ULP_EQUAL(cosd(a1) , map(ref, a1), 30); TTS_ULP_EQUAL(cosd(a2) , map(ref, a2), 420); auto dinr = 1.7453292519943295769236907684886127134428718885417e-2l; TTS_ULP_EQUAL(diff(cosd)(a0), map([dinr](auto e) -> v_t { return -dinr*boost::math::sin_pi(e/180.0l); }, a0), 2); }; EVE_TEST_TYPES( "Check return types of cosd" , eve::test::simd::ieee_reals ) <typename T>(eve::as<T>) { TTS_ULP_EQUAL(eve::cosd(T(1)) , T(0.9998476951563912391570115588139148516927403105832) , 0.5 ); TTS_ULP_EQUAL(eve::cosd(T(-1)) , T(0.9998476951563912391570115588139148516927403105832) , 0.5 ); TTS_ULP_EQUAL(eve::cosd(T(45.0)) , T(0.70710678118654752440084436210484903928483593768847) , 0.5 ); TTS_ULP_EQUAL(eve::cosd(-T(45.0)) , T(0.70710678118654752440084436210484903928483593768847) , 0.5 ); TTS_ULP_EQUAL(eve::cosd(T(-500.0)) , T(-0.7660444431189780352023926505554166739358324570804) , 3.5 ); TTS_ULP_EQUAL(eve::cosd(T(500.0)) , T(-0.7660444431189780352023926505554166739358324570804) , 3.5 ); };
3,819
1,728
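// --- Illustrative sketch (not part of the EVE test above) -----------------------
// The reference the test compares eve::cosd against is cosine of the angle in
// degrees, computed as cos_pi(x/180) in extended precision. A scalar stand-in using
// only <cmath> reproduces the spot values asserted at the end of the test:
#include <cmath>
#include <cstdio>

static double cosd_ref(double degrees) {
    static const double pi = 3.14159265358979323846;
    return std::cos(degrees * pi / 180.0);
}

int main() {
    std::printf("cosd(45)   = %.17f (expect ~0.70710678118654752)\n", cosd_ref(45.0));
    std::printf("cosd(-500) = %.17f (expect ~-0.76604444311897804)\n", cosd_ref(-500.0));
    return 0;
}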
// Autogenerated from CppHeaderCreator // Created by Sc2ad // ========================================================================= #pragma once // Begin includes #include "beatsaber-hook/shared/utils/typedefs.h" #include "beatsaber-hook/shared/utils/byref.hpp" // Including type: Zenject.SubContainerCreatorByNewGameObjectDynamicContext #include "Zenject/SubContainerCreatorByNewGameObjectDynamicContext.hpp" #include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp" #include "beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp" #include "beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp" #include "beatsaber-hook/shared/utils/utils.h" // Completed includes // Begin forward declares // Forward declaring namespace: Zenject namespace Zenject { // Skipping declaration: <>c__DisplayClass2_0 because it is already included! // Forward declaring type: DiContainer class DiContainer; // Forward declaring type: GameObjectCreationParameters class GameObjectCreationParameters; // Forward declaring type: GameObjectContext class GameObjectContext; // Forward declaring type: InjectTypeInfo class InjectTypeInfo; } // Forward declaring namespace: System namespace System { // Forward declaring type: Action`2<T1, T2> template<typename T1, typename T2> class Action_2; } // Forward declaring namespace: System::Collections::Generic namespace System::Collections::Generic { // Forward declaring type: List`1<T> template<typename T> class List_1; } // Completed forward declares // Type namespace: Zenject namespace Zenject { // Forward declaring type: SubContainerCreatorByNewGameObjectMethod`1<TParam1> template<typename TParam1> class SubContainerCreatorByNewGameObjectMethod_1; } #include "beatsaber-hook/shared/utils/il2cpp-type-check.hpp" DEFINE_IL2CPP_ARG_TYPE_GENERIC_CLASS(::Zenject::SubContainerCreatorByNewGameObjectMethod_1, "Zenject", "SubContainerCreatorByNewGameObjectMethod`1"); // Type namespace: Zenject namespace Zenject { // WARNING Size may be invalid! // Autogenerated type: Zenject.SubContainerCreatorByNewGameObjectMethod`1 // [TokenAttribute] Offset: FFFFFFFF // [NoReflectionBakingAttribute] Offset: FFFFFFFF template<typename TParam1> class SubContainerCreatorByNewGameObjectMethod_1 : public ::Zenject::SubContainerCreatorByNewGameObjectDynamicContext { public: // Nested type: ::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0<TParam1> class $$c__DisplayClass2_0; // WARNING Size may be invalid! 
// Autogenerated type: Zenject.SubContainerCreatorByNewGameObjectMethod`1/Zenject.<>c__DisplayClass2_0 // [TokenAttribute] Offset: FFFFFFFF // [CompilerGeneratedAttribute] Offset: FFFFFFFF class $$c__DisplayClass2_0 : public ::il2cpp_utils::il2cpp_type_check::NestedType, public ::Il2CppObject { public: using declaring_type = SubContainerCreatorByNewGameObjectMethod_1<TParam1>*; static constexpr std::string_view NESTED_NAME = "<>c__DisplayClass2_0"; static constexpr bool IS_VALUE_TYPE = false; #ifdef USE_CODEGEN_FIELDS public: #else #ifdef CODEGEN_FIELD_ACCESSIBILITY CODEGEN_FIELD_ACCESSIBILITY: #else protected: #endif #endif // public Zenject.SubContainerCreatorByNewGameObjectMethod`1<TParam1> <>4__this // Size: 0x8 // Offset: 0x0 ::Zenject::SubContainerCreatorByNewGameObjectMethod_1<TParam1>* $$4__this; // Field size check static_assert(sizeof(::Zenject::SubContainerCreatorByNewGameObjectMethod_1<TParam1>*) == 0x8); // public System.Collections.Generic.List`1<Zenject.TypeValuePair> args // Size: 0x8 // Offset: 0x0 ::System::Collections::Generic::List_1<::Zenject::TypeValuePair>* args; // Field size check static_assert(sizeof(::System::Collections::Generic::List_1<::Zenject::TypeValuePair>*) == 0x8); public: // Autogenerated instance field getter // Get instance field: public Zenject.SubContainerCreatorByNewGameObjectMethod`1<TParam1> <>4__this ::Zenject::SubContainerCreatorByNewGameObjectMethod_1<TParam1>*& dyn_$$4__this() { static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::dyn_$$4__this"); auto ___internal__instance = this; static auto ___internal__field__offset = THROW_UNLESS(il2cpp_utils::FindField(___internal__instance, "<>4__this"))->offset; return *reinterpret_cast<::Zenject::SubContainerCreatorByNewGameObjectMethod_1<TParam1>**>(reinterpret_cast<char*>(this) + ___internal__field__offset); } // Autogenerated instance field getter // Get instance field: public System.Collections.Generic.List`1<Zenject.TypeValuePair> args ::System::Collections::Generic::List_1<::Zenject::TypeValuePair>*& dyn_args() { static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::dyn_args"); auto ___internal__instance = this; static auto ___internal__field__offset = THROW_UNLESS(il2cpp_utils::FindField(___internal__instance, "args"))->offset; return *reinterpret_cast<::System::Collections::Generic::List_1<::Zenject::TypeValuePair>**>(reinterpret_cast<char*>(this) + ___internal__field__offset); } // System.Void <AddInstallers>b__0(Zenject.DiContainer subContainer) // Offset: 0xFFFFFFFFFFFFFFFF void $AddInstallers$b__0(::Zenject::DiContainer* subContainer) { static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::<AddInstallers>b__0"); static auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod(this, "<AddInstallers>b__0", std::vector<Il2CppClass*>{}, ::std::vector<const Il2CppType*>{::il2cpp_utils::ExtractType(subContainer)}))); ::il2cpp_utils::RunMethodRethrow<void, false>(this, ___internal__method, subContainer); } // static private System.Object __zenCreate(System.Object[] P_0) // Offset: 0xFFFFFFFFFFFFFFFF static ::Il2CppObject* __zenCreate(::ArrayW<::Il2CppObject*> P_0) { static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::__zenCreate"); static auto* 
___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod(::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<typename SubContainerCreatorByNewGameObjectMethod_1<TParam1>::$$c__DisplayClass2_0*>::get(), "__zenCreate", std::vector<Il2CppClass*>{}, ::std::vector<const Il2CppType*>{::il2cpp_utils::ExtractType(P_0)}))); return ::il2cpp_utils::RunMethodRethrow<::Il2CppObject*, false>(static_cast<Il2CppObject*>(nullptr), ___internal__method, P_0); } // static private Zenject.InjectTypeInfo __zenCreateInjectTypeInfo() // Offset: 0xFFFFFFFFFFFFFFFF static ::Zenject::InjectTypeInfo* __zenCreateInjectTypeInfo() { static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::__zenCreateInjectTypeInfo"); static auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod(::il2cpp_utils::il2cpp_type_check::il2cpp_no_arg_class<typename SubContainerCreatorByNewGameObjectMethod_1<TParam1>::$$c__DisplayClass2_0*>::get(), "__zenCreateInjectTypeInfo", std::vector<Il2CppClass*>{}, ::std::vector<const Il2CppType*>{}))); return ::il2cpp_utils::RunMethodRethrow<::Zenject::InjectTypeInfo*, false>(static_cast<Il2CppObject*>(nullptr), ___internal__method); } // public System.Void .ctor() // Offset: 0xFFFFFFFFFFFFFFFF // Implemented from: System.Object // Base method: System.Void Object::.ctor() template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary> static typename SubContainerCreatorByNewGameObjectMethod_1<TParam1>::$$c__DisplayClass2_0* New_ctor() { static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::$$c__DisplayClass2_0::.ctor"); return THROW_UNLESS((::il2cpp_utils::New<typename SubContainerCreatorByNewGameObjectMethod_1<TParam1>::$$c__DisplayClass2_0*, creationType>())); } }; // Zenject.SubContainerCreatorByNewGameObjectMethod`1/Zenject.<>c__DisplayClass2_0 // Could not write size check! Type: Zenject.SubContainerCreatorByNewGameObjectMethod`1/Zenject.<>c__DisplayClass2_0 is generic, or has no fields that are valid for size checks! 
#ifdef USE_CODEGEN_FIELDS public: #else #ifdef CODEGEN_FIELD_ACCESSIBILITY CODEGEN_FIELD_ACCESSIBILITY: #else protected: #endif #endif // private readonly System.Action`2<Zenject.DiContainer,TParam1> _installerMethod // Size: 0x8 // Offset: 0x0 ::System::Action_2<::Zenject::DiContainer*, TParam1>* installerMethod; // Field size check static_assert(sizeof(::System::Action_2<::Zenject::DiContainer*, TParam1>*) == 0x8); public: // Autogenerated instance field getter // Get instance field: private readonly System.Action`2<Zenject.DiContainer,TParam1> _installerMethod ::System::Action_2<::Zenject::DiContainer*, TParam1>*& dyn__installerMethod() { static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::dyn__installerMethod"); auto ___internal__instance = this; static auto ___internal__field__offset = THROW_UNLESS(il2cpp_utils::FindField(___internal__instance, "_installerMethod"))->offset; return *reinterpret_cast<::System::Action_2<::Zenject::DiContainer*, TParam1>**>(reinterpret_cast<char*>(this) + ___internal__field__offset); } // public System.Void .ctor(Zenject.DiContainer container, Zenject.GameObjectCreationParameters gameObjectBindInfo, System.Action`2<Zenject.DiContainer,TParam1> installerMethod) // Offset: 0xFFFFFFFFFFFFFFFF template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary> static SubContainerCreatorByNewGameObjectMethod_1<TParam1>* New_ctor(::Zenject::DiContainer* container, ::Zenject::GameObjectCreationParameters* gameObjectBindInfo, ::System::Action_2<::Zenject::DiContainer*, TParam1>* installerMethod) { static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::.ctor"); return THROW_UNLESS((::il2cpp_utils::New<SubContainerCreatorByNewGameObjectMethod_1<TParam1>*, creationType>(container, gameObjectBindInfo, installerMethod))); } // protected override System.Void AddInstallers(System.Collections.Generic.List`1<Zenject.TypeValuePair> args, Zenject.GameObjectContext context) // Offset: 0xFFFFFFFFFFFFFFFF // Implemented from: Zenject.SubContainerCreatorDynamicContext // Base method: System.Void SubContainerCreatorDynamicContext::AddInstallers(System.Collections.Generic.List`1<Zenject.TypeValuePair> args, Zenject.GameObjectContext context) void AddInstallers(::System::Collections::Generic::List_1<::Zenject::TypeValuePair>* args, ::Zenject::GameObjectContext* context) { static auto ___internal__logger = ::Logger::get().WithContext("::Zenject::SubContainerCreatorByNewGameObjectMethod_1::AddInstallers"); auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod(this, "AddInstallers", std::vector<Il2CppClass*>{}, ::std::vector<const Il2CppType*>{::il2cpp_utils::ExtractType(args), ::il2cpp_utils::ExtractType(context)}))); ::il2cpp_utils::RunMethodRethrow<void, false>(this, ___internal__method, args, context); } }; // Zenject.SubContainerCreatorByNewGameObjectMethod`1 // Could not write size check! Type: Zenject.SubContainerCreatorByNewGameObjectMethod`1 is generic, or has no fields that are valid for size checks! } #include "beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
12,285
3,965
/* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "BakedOpDispatcher.h" #include "BakedOpRenderer.h" #include "Caches.h" #include "Glop.h" #include "GlopBuilder.h" #include "Patch.h" #include "PathTessellator.h" #include "renderstate/OffscreenBufferPool.h" #include "renderstate/RenderState.h" #include "utils/GLUtils.h" #include "VertexBuffer.h" #include <algorithm> #include <math.h> #include <SkPaintDefaults.h> #include <SkPathOps.h> namespace android { namespace uirenderer { static void storeTexturedRect(TextureVertex* vertices, const Rect& bounds, const Rect& texCoord) { vertices[0] = { bounds.left, bounds.top, texCoord.left, texCoord.top }; vertices[1] = { bounds.right, bounds.top, texCoord.right, texCoord.top }; vertices[2] = { bounds.left, bounds.bottom, texCoord.left, texCoord.bottom }; vertices[3] = { bounds.right, bounds.bottom, texCoord.right, texCoord.bottom }; } void BakedOpDispatcher::onMergedBitmapOps(BakedOpRenderer& renderer, const MergedBakedOpList& opList) { const BakedOpState& firstState = *(opList.states[0]); const SkBitmap* bitmap = (static_cast<const BitmapOp*>(opList.states[0]->op))->bitmap; AssetAtlas::Entry* entry = renderer.renderState().assetAtlas().getEntry(bitmap->pixelRef()); Texture* texture = entry ? entry->texture : renderer.caches().textureCache.get(bitmap); if (!texture) return; const AutoTexture autoCleanup(texture); TextureVertex vertices[opList.count * 4]; Rect texCoords(0, 0, 1, 1); if (entry) { entry->uvMapper.map(texCoords); } for (size_t i = 0; i < opList.count; i++) { const BakedOpState& state = *(opList.states[i]); TextureVertex* rectVerts = &vertices[i * 4]; // calculate unclipped bounds, since they'll determine texture coordinates Rect opBounds = state.op->unmappedBounds; state.computedState.transform.mapRect(opBounds); if (CC_LIKELY(state.computedState.transform.isPureTranslate())) { // pure translate, so snap (same behavior as onBitmapOp) opBounds.snapToPixelBoundaries(); } storeTexturedRect(rectVerts, opBounds, texCoords); renderer.dirtyRenderTarget(opBounds); } const int textureFillFlags = (bitmap->colorType() == kAlpha_8_SkColorType) ? TextureFillFlags::IsAlphaMaskTexture : TextureFillFlags::None; Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(firstState.roundRectClipState) .setMeshTexturedIndexedQuads(vertices, opList.count * 6) .setFillTexturePaint(*texture, textureFillFlags, firstState.op->paint, firstState.alpha) .setTransform(Matrix4::identity(), TransformFlags::None) .setModelViewIdentityEmptyBounds() .build(); ClipRect renderTargetClip(opList.clip); const ClipBase* clip = opList.clipSideFlags ? 
&renderTargetClip : nullptr; renderer.renderGlop(nullptr, clip, glop); } void BakedOpDispatcher::onMergedPatchOps(BakedOpRenderer& renderer, const MergedBakedOpList& opList) { const PatchOp& firstOp = *(static_cast<const PatchOp*>(opList.states[0]->op)); const BakedOpState& firstState = *(opList.states[0]); AssetAtlas::Entry* entry = renderer.renderState().assetAtlas().getEntry( firstOp.bitmap->pixelRef()); // Batches will usually contain a small number of items so it's // worth performing a first iteration to count the exact number // of vertices we need in the new mesh uint32_t totalVertices = 0; for (size_t i = 0; i < opList.count; i++) { const PatchOp& op = *(static_cast<const PatchOp*>(opList.states[i]->op)); // TODO: cache mesh lookups const Patch* opMesh = renderer.caches().patchCache.get( entry, op.bitmap->width(), op.bitmap->height(), op.unmappedBounds.getWidth(), op.unmappedBounds.getHeight(), op.patch); totalVertices += opMesh->verticesCount; } const bool dirtyRenderTarget = renderer.offscreenRenderTarget(); uint32_t indexCount = 0; TextureVertex vertices[totalVertices]; TextureVertex* vertex = &vertices[0]; // Create a mesh that contains the transformed vertices for all the // 9-patch objects that are part of the batch. Note that onDefer() // enforces ops drawn by this function to have a pure translate or // identity matrix for (size_t i = 0; i < opList.count; i++) { const PatchOp& op = *(static_cast<const PatchOp*>(opList.states[i]->op)); const BakedOpState& state = *opList.states[i]; // TODO: cache mesh lookups const Patch* opMesh = renderer.caches().patchCache.get( entry, op.bitmap->width(), op.bitmap->height(), op.unmappedBounds.getWidth(), op.unmappedBounds.getHeight(), op.patch); uint32_t vertexCount = opMesh->verticesCount; if (vertexCount == 0) continue; // We use the bounds to know where to translate our vertices // Using patchOp->state.mBounds wouldn't work because these // bounds are clipped const float tx = floorf(state.computedState.transform.getTranslateX() + op.unmappedBounds.left + 0.5f); const float ty = floorf(state.computedState.transform.getTranslateY() + op.unmappedBounds.top + 0.5f); // Copy & transform all the vertices for the current operation TextureVertex* opVertices = opMesh->vertices.get(); for (uint32_t j = 0; j < vertexCount; j++, opVertices++) { TextureVertex::set(vertex++, opVertices->x + tx, opVertices->y + ty, opVertices->u, opVertices->v); } // Dirty the current layer if possible. When the 9-patch does not // contain empty quads we can take a shortcut and simply set the // dirty rect to the object's bounds. if (dirtyRenderTarget) { if (!opMesh->hasEmptyQuads) { renderer.dirtyRenderTarget(Rect(tx, ty, tx + op.unmappedBounds.getWidth(), ty + op.unmappedBounds.getHeight())); } else { const size_t count = opMesh->quads.size(); for (size_t i = 0; i < count; i++) { const Rect& quadBounds = opMesh->quads[i]; const float x = tx + quadBounds.left; const float y = ty + quadBounds.top; renderer.dirtyRenderTarget(Rect(x, y, x + quadBounds.getWidth(), y + quadBounds.getHeight())); } } } indexCount += opMesh->indexCount; } Texture* texture = entry ? 
entry->texture : renderer.caches().textureCache.get(firstOp.bitmap); if (!texture) return; const AutoTexture autoCleanup(texture); // 9 patches are built for stretching - always filter int textureFillFlags = TextureFillFlags::ForceFilter; if (firstOp.bitmap->colorType() == kAlpha_8_SkColorType) { textureFillFlags |= TextureFillFlags::IsAlphaMaskTexture; } Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(firstState.roundRectClipState) .setMeshTexturedIndexedQuads(vertices, indexCount) .setFillTexturePaint(*texture, textureFillFlags, firstOp.paint, firstState.alpha) .setTransform(Matrix4::identity(), TransformFlags::None) .setModelViewIdentityEmptyBounds() .build(); ClipRect renderTargetClip(opList.clip); const ClipBase* clip = opList.clipSideFlags ? &renderTargetClip : nullptr; renderer.renderGlop(nullptr, clip, glop); } static void renderTextShadow(BakedOpRenderer& renderer, const TextOp& op, const BakedOpState& textOpState) { if (CC_LIKELY(!PaintUtils::hasTextShadow(op.paint))) return; FontRenderer& fontRenderer = renderer.caches().fontRenderer.getFontRenderer(); fontRenderer.setFont(op.paint, SkMatrix::I()); renderer.caches().textureState().activateTexture(0); PaintUtils::TextShadow textShadow; if (!PaintUtils::getTextShadow(op.paint, &textShadow)) { LOG_ALWAYS_FATAL("failed to query shadow attributes"); } renderer.caches().dropShadowCache.setFontRenderer(fontRenderer); ShadowTexture* texture = renderer.caches().dropShadowCache.get( op.paint, op.glyphs, op.glyphCount, textShadow.radius, op.positions); // If the drop shadow exceeds the max texture size or couldn't be // allocated, skip drawing if (!texture) return; const AutoTexture autoCleanup(texture); const float sx = op.x - texture->left + textShadow.dx; const float sy = op.y - texture->top + textShadow.dy; Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(textOpState.roundRectClipState) .setMeshTexturedUnitQuad(nullptr) .setFillShadowTexturePaint(*texture, textShadow.color, *op.paint, textOpState.alpha) .setTransform(textOpState.computedState.transform, TransformFlags::None) .setModelViewMapUnitToRect(Rect(sx, sy, sx + texture->width(), sy + texture->height())) .build(); // Compute damage bounds and clip (since may differ from those in textOpState). // Bounds should be same as text op, but with dx/dy offset and radius outset // applied in local space. 
auto& transform = textOpState.computedState.transform; Rect shadowBounds = op.unmappedBounds; // STROKE const bool expandForStroke = op.paint->getStyle() != SkPaint::kFill_Style; if (expandForStroke) { shadowBounds.outset(op.paint->getStrokeWidth() * 0.5f); } shadowBounds.translate(textShadow.dx, textShadow.dy); shadowBounds.outset(textShadow.radius, textShadow.radius); transform.mapRect(shadowBounds); if (CC_UNLIKELY(expandForStroke && (!transform.isPureTranslate() || op.paint->getStrokeWidth() < 1.0f))) { shadowBounds.outset(0.5f); } auto clipState = textOpState.computedState.clipState; if (clipState->mode != ClipMode::Rectangle || !clipState->rect.contains(shadowBounds)) { // need clip, so pass it and clip bounds shadowBounds.doIntersect(clipState->rect); } else { // don't need clip, ignore clipState = nullptr; } renderer.renderGlop(&shadowBounds, clipState, glop); } enum class TextRenderType { Defer, Flush }; static void renderText(BakedOpRenderer& renderer, const TextOp& op, const BakedOpState& state, const ClipBase* renderClip, TextRenderType renderType) { FontRenderer& fontRenderer = renderer.caches().fontRenderer.getFontRenderer(); float x = op.x; float y = op.y; const Matrix4& transform = state.computedState.transform; const bool pureTranslate = transform.isPureTranslate(); if (CC_LIKELY(pureTranslate)) { x = floorf(x + transform.getTranslateX() + 0.5f); y = floorf(y + transform.getTranslateY() + 0.5f); fontRenderer.setFont(op.paint, SkMatrix::I()); fontRenderer.setTextureFiltering(false); } else if (CC_UNLIKELY(transform.isPerspective())) { fontRenderer.setFont(op.paint, SkMatrix::I()); fontRenderer.setTextureFiltering(true); } else { // We only pass a partial transform to the font renderer. That partial // matrix defines how glyphs are rasterized. Typically we want glyphs // to be rasterized at their final size on screen, which means the partial // matrix needs to take the scale factor into account. // When a partial matrix is used to transform glyphs during rasterization, // the mesh is generated with the inverse transform (in the case of scale, // the mesh is generated at 1.0 / scale for instance.) This allows us to // apply the full transform matrix at draw time in the vertex shader. // Applying the full matrix in the shader is the easiest way to handle // rotation and perspective and allows us to always generated quads in the // font renderer which greatly simplifies the code, clipping in particular. float sx, sy; transform.decomposeScale(sx, sy); fontRenderer.setFont(op.paint, SkMatrix::MakeScale( roundf(std::max(1.0f, sx)), roundf(std::max(1.0f, sy)))); fontRenderer.setTextureFiltering(true); } Rect layerBounds(FLT_MAX / 2.0f, FLT_MAX / 2.0f, FLT_MIN / 2.0f, FLT_MIN / 2.0f); int alpha = PaintUtils::getAlphaDirect(op.paint) * state.alpha; SkXfermode::Mode mode = PaintUtils::getXfermodeDirect(op.paint); TextDrawFunctor functor(&renderer, &state, renderClip, x, y, pureTranslate, alpha, mode, op.paint); bool forceFinish = (renderType == TextRenderType::Flush); bool mustDirtyRenderTarget = renderer.offscreenRenderTarget(); const Rect* localOpClip = pureTranslate ? &state.computedState.clipRect() : nullptr; fontRenderer.renderPosText(op.paint, localOpClip, op.glyphs, op.glyphCount, x, y, op.positions, mustDirtyRenderTarget ? 
&layerBounds : nullptr, &functor, forceFinish); if (mustDirtyRenderTarget) { if (!pureTranslate) { transform.mapRect(layerBounds); } renderer.dirtyRenderTarget(layerBounds); } } void BakedOpDispatcher::onMergedTextOps(BakedOpRenderer& renderer, const MergedBakedOpList& opList) { for (size_t i = 0; i < opList.count; i++) { const BakedOpState& state = *(opList.states[i]); const TextOp& op = *(static_cast<const TextOp*>(state.op)); renderTextShadow(renderer, op, state); } ClipRect renderTargetClip(opList.clip); const ClipBase* clip = opList.clipSideFlags ? &renderTargetClip : nullptr; for (size_t i = 0; i < opList.count; i++) { const BakedOpState& state = *(opList.states[i]); const TextOp& op = *(static_cast<const TextOp*>(state.op)); TextRenderType renderType = (i + 1 == opList.count) ? TextRenderType::Flush : TextRenderType::Defer; renderText(renderer, op, state, clip, renderType); } } namespace VertexBufferRenderFlags { enum { Offset = 0x1, ShadowInterp = 0x2, }; } static void renderVertexBuffer(BakedOpRenderer& renderer, const BakedOpState& state, const VertexBuffer& vertexBuffer, float translateX, float translateY, const SkPaint& paint, int vertexBufferRenderFlags) { if (CC_LIKELY(vertexBuffer.getVertexCount())) { bool shadowInterp = vertexBufferRenderFlags & VertexBufferRenderFlags::ShadowInterp; const int transformFlags = vertexBufferRenderFlags & VertexBufferRenderFlags::Offset ? TransformFlags::OffsetByFudgeFactor : 0; Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshVertexBuffer(vertexBuffer) .setFillPaint(paint, state.alpha, shadowInterp) .setTransform(state.computedState.transform, transformFlags) .setModelViewOffsetRect(translateX, translateY, vertexBuffer.getBounds()) .build(); renderer.renderGlop(state, glop); } } static void renderConvexPath(BakedOpRenderer& renderer, const BakedOpState& state, const SkPath& path, const SkPaint& paint) { VertexBuffer vertexBuffer; // TODO: try clipping large paths to viewport PathTessellator::tessellatePath(path, &paint, state.computedState.transform, vertexBuffer); renderVertexBuffer(renderer, state, vertexBuffer, 0.0f, 0.0f, paint, 0); } static void renderPathTexture(BakedOpRenderer& renderer, const BakedOpState& state, float xOffset, float yOffset, PathTexture& texture, const SkPaint& paint) { Rect dest(texture.width(), texture.height()); dest.translate(xOffset + texture.left - texture.offset, yOffset + texture.top - texture.offset); Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshTexturedUnitQuad(nullptr) .setFillPathTexturePaint(texture, paint, state.alpha) .setTransform(state.computedState.transform, TransformFlags::None) .setModelViewMapUnitToRect(dest) .build(); renderer.renderGlop(state, glop); } SkRect getBoundsOfFill(const RecordedOp& op) { SkRect bounds = op.unmappedBounds.toSkRect(); if (op.paint->getStyle() == SkPaint::kStrokeAndFill_Style) { float outsetDistance = op.paint->getStrokeWidth() / 2; bounds.outset(outsetDistance, outsetDistance); } return bounds; } void BakedOpDispatcher::onArcOp(BakedOpRenderer& renderer, const ArcOp& op, const BakedOpState& state) { // TODO: support fills (accounting for concavity if useCenter && sweepAngle > 180) if (op.paint->getStyle() != SkPaint::kStroke_Style || op.paint->getPathEffect() != nullptr || op.useCenter) { PathTexture* texture = renderer.caches().pathCache.getArc( op.unmappedBounds.getWidth(), 
op.unmappedBounds.getHeight(), op.startAngle, op.sweepAngle, op.useCenter, op.paint); const AutoTexture holder(texture); if (CC_LIKELY(holder.texture)) { renderPathTexture(renderer, state, op.unmappedBounds.left, op.unmappedBounds.top, *texture, *(op.paint)); } } else { SkRect rect = getBoundsOfFill(op); SkPath path; if (op.useCenter) { path.moveTo(rect.centerX(), rect.centerY()); } path.arcTo(rect, op.startAngle, op.sweepAngle, !op.useCenter); if (op.useCenter) { path.close(); } renderConvexPath(renderer, state, path, *(op.paint)); } } void BakedOpDispatcher::onBitmapOp(BakedOpRenderer& renderer, const BitmapOp& op, const BakedOpState& state) { Texture* texture = renderer.getTexture(op.bitmap); if (!texture) return; const AutoTexture autoCleanup(texture); const int textureFillFlags = (op.bitmap->colorType() == kAlpha_8_SkColorType) ? TextureFillFlags::IsAlphaMaskTexture : TextureFillFlags::None; Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshTexturedUnitQuad(texture->uvMapper) .setFillTexturePaint(*texture, textureFillFlags, op.paint, state.alpha) .setTransform(state.computedState.transform, TransformFlags::None) .setModelViewMapUnitToRectSnap(Rect(texture->width(), texture->height())) .build(); renderer.renderGlop(state, glop); } void BakedOpDispatcher::onBitmapMeshOp(BakedOpRenderer& renderer, const BitmapMeshOp& op, const BakedOpState& state) { const static UvMapper defaultUvMapper; const uint32_t elementCount = op.meshWidth * op.meshHeight * 6; std::unique_ptr<ColorTextureVertex[]> mesh(new ColorTextureVertex[elementCount]); ColorTextureVertex* vertex = &mesh[0]; const int* colors = op.colors; std::unique_ptr<int[]> tempColors; if (!colors) { uint32_t colorsCount = (op.meshWidth + 1) * (op.meshHeight + 1); tempColors.reset(new int[colorsCount]); memset(tempColors.get(), 0xff, colorsCount * sizeof(int)); colors = tempColors.get(); } Texture* texture = renderer.renderState().assetAtlas().getEntryTexture(op.bitmap->pixelRef()); const UvMapper& mapper(texture && texture->uvMapper ? *texture->uvMapper : defaultUvMapper); for (int32_t y = 0; y < op.meshHeight; y++) { for (int32_t x = 0; x < op.meshWidth; x++) { uint32_t i = (y * (op.meshWidth + 1) + x) * 2; float u1 = float(x) / op.meshWidth; float u2 = float(x + 1) / op.meshWidth; float v1 = float(y) / op.meshHeight; float v2 = float(y + 1) / op.meshHeight; mapper.map(u1, v1, u2, v2); int ax = i + (op.meshWidth + 1) * 2; int ay = ax + 1; int bx = i; int by = bx + 1; int cx = i + 2; int cy = cx + 1; int dx = i + (op.meshWidth + 1) * 2 + 2; int dy = dx + 1; const float* vertices = op.vertices; ColorTextureVertex::set(vertex++, vertices[dx], vertices[dy], u2, v2, colors[dx / 2]); ColorTextureVertex::set(vertex++, vertices[ax], vertices[ay], u1, v2, colors[ax / 2]); ColorTextureVertex::set(vertex++, vertices[bx], vertices[by], u1, v1, colors[bx / 2]); ColorTextureVertex::set(vertex++, vertices[dx], vertices[dy], u2, v2, colors[dx / 2]); ColorTextureVertex::set(vertex++, vertices[bx], vertices[by], u1, v1, colors[bx / 2]); ColorTextureVertex::set(vertex++, vertices[cx], vertices[cy], u2, v1, colors[cx / 2]); } } if (!texture) { texture = renderer.caches().textureCache.get(op.bitmap); if (!texture) { return; } } const AutoTexture autoCleanup(texture); /* * TODO: handle alpha_8 textures correctly by applying paint color, but *not* * shader in that case to mimic the behavior in SkiaCanvas::drawBitmapMesh. 
*/ const int textureFillFlags = TextureFillFlags::None; Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshColoredTexturedMesh(mesh.get(), elementCount) .setFillTexturePaint(*texture, textureFillFlags, op.paint, state.alpha) .setTransform(state.computedState.transform, TransformFlags::None) .setModelViewOffsetRect(0, 0, op.unmappedBounds) .build(); renderer.renderGlop(state, glop); } void BakedOpDispatcher::onBitmapRectOp(BakedOpRenderer& renderer, const BitmapRectOp& op, const BakedOpState& state) { Texture* texture = renderer.getTexture(op.bitmap); if (!texture) return; const AutoTexture autoCleanup(texture); Rect uv(std::max(0.0f, op.src.left / texture->width()), std::max(0.0f, op.src.top / texture->height()), std::min(1.0f, op.src.right / texture->width()), std::min(1.0f, op.src.bottom / texture->height())); const int textureFillFlags = (op.bitmap->colorType() == kAlpha_8_SkColorType) ? TextureFillFlags::IsAlphaMaskTexture : TextureFillFlags::None; const bool tryToSnap = MathUtils::areEqual(op.src.getWidth(), op.unmappedBounds.getWidth()) && MathUtils::areEqual(op.src.getHeight(), op.unmappedBounds.getHeight()); Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshTexturedUvQuad(texture->uvMapper, uv) .setFillTexturePaint(*texture, textureFillFlags, op.paint, state.alpha) .setTransform(state.computedState.transform, TransformFlags::None) .setModelViewMapUnitToRectOptionalSnap(tryToSnap, op.unmappedBounds) .build(); renderer.renderGlop(state, glop); } void BakedOpDispatcher::onColorOp(BakedOpRenderer& renderer, const ColorOp& op, const BakedOpState& state) { SkPaint paint; paint.setColor(op.color); paint.setXfermodeMode(op.mode); Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshUnitQuad() .setFillPaint(paint, state.alpha) .setTransform(Matrix4::identity(), TransformFlags::None) .setModelViewMapUnitToRect(state.computedState.clipState->rect) .build(); renderer.renderGlop(state, glop); } void BakedOpDispatcher::onFunctorOp(BakedOpRenderer& renderer, const FunctorOp& op, const BakedOpState& state) { renderer.renderFunctor(op, state); } void BakedOpDispatcher::onLinesOp(BakedOpRenderer& renderer, const LinesOp& op, const BakedOpState& state) { VertexBuffer buffer; PathTessellator::tessellateLines(op.points, op.floatCount, op.paint, state.computedState.transform, buffer); int displayFlags = op.paint->isAntiAlias() ? 0 : VertexBufferRenderFlags::Offset; renderVertexBuffer(renderer, state, buffer, 0, 0, *(op.paint), displayFlags); } void BakedOpDispatcher::onOvalOp(BakedOpRenderer& renderer, const OvalOp& op, const BakedOpState& state) { if (op.paint->getPathEffect() != nullptr) { PathTexture* texture = renderer.caches().pathCache.getOval( op.unmappedBounds.getWidth(), op.unmappedBounds.getHeight(), op.paint); const AutoTexture holder(texture); if (CC_LIKELY(holder.texture)) { renderPathTexture(renderer, state, op.unmappedBounds.left, op.unmappedBounds.top, *texture, *(op.paint)); } } else { SkPath path; SkRect rect = getBoundsOfFill(op); path.addOval(rect); if (state.computedState.localProjectionPathMask != nullptr) { // Mask the ripple path by the local space projection mask in local space. // Note that this can create CCW paths. 
Op(path, *state.computedState.localProjectionPathMask, kIntersect_SkPathOp, &path); } renderConvexPath(renderer, state, path, *(op.paint)); } } void BakedOpDispatcher::onPatchOp(BakedOpRenderer& renderer, const PatchOp& op, const BakedOpState& state) { // 9 patches are built for stretching - always filter int textureFillFlags = TextureFillFlags::ForceFilter; if (op.bitmap->colorType() == kAlpha_8_SkColorType) { textureFillFlags |= TextureFillFlags::IsAlphaMaskTexture; } // TODO: avoid redoing the below work each frame: AssetAtlas::Entry* entry = renderer.renderState().assetAtlas().getEntry(op.bitmap->pixelRef()); const Patch* mesh = renderer.caches().patchCache.get( entry, op.bitmap->width(), op.bitmap->height(), op.unmappedBounds.getWidth(), op.unmappedBounds.getHeight(), op.patch); Texture* texture = entry ? entry->texture : renderer.caches().textureCache.get(op.bitmap); if (CC_LIKELY(texture)) { const AutoTexture autoCleanup(texture); Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshPatchQuads(*mesh) .setFillTexturePaint(*texture, textureFillFlags, op.paint, state.alpha) .setTransform(state.computedState.transform, TransformFlags::None) .setModelViewOffsetRectSnap(op.unmappedBounds.left, op.unmappedBounds.top, Rect(op.unmappedBounds.getWidth(), op.unmappedBounds.getHeight())) .build(); renderer.renderGlop(state, glop); } } void BakedOpDispatcher::onPathOp(BakedOpRenderer& renderer, const PathOp& op, const BakedOpState& state) { PathTexture* texture = renderer.caches().pathCache.get(op.path, op.paint); const AutoTexture holder(texture); if (CC_LIKELY(holder.texture)) { // Unlike other callers to renderPathTexture, no offsets are used because PathOp doesn't // have any translate built in, other than what's in the SkPath itself renderPathTexture(renderer, state, 0, 0, *texture, *(op.paint)); } } void BakedOpDispatcher::onPointsOp(BakedOpRenderer& renderer, const PointsOp& op, const BakedOpState& state) { VertexBuffer buffer; PathTessellator::tessellatePoints(op.points, op.floatCount, op.paint, state.computedState.transform, buffer); int displayFlags = op.paint->isAntiAlias() ? 
0 : VertexBufferRenderFlags::Offset; renderVertexBuffer(renderer, state, buffer, 0, 0, *(op.paint), displayFlags); } // See SkPaintDefaults.h #define SkPaintDefaults_MiterLimit SkIntToScalar(4) void BakedOpDispatcher::onRectOp(BakedOpRenderer& renderer, const RectOp& op, const BakedOpState& state) { if (op.paint->getStyle() != SkPaint::kFill_Style) { // only fill + default miter is supported by drawConvexPath, since others must handle joins static_assert(SkPaintDefaults_MiterLimit == 4.0f, "Miter limit has changed"); if (CC_UNLIKELY(op.paint->getPathEffect() != nullptr || op.paint->getStrokeJoin() != SkPaint::kMiter_Join || op.paint->getStrokeMiter() != SkPaintDefaults_MiterLimit)) { PathTexture* texture = renderer.caches().pathCache.getRect( op.unmappedBounds.getWidth(), op.unmappedBounds.getHeight(), op.paint); const AutoTexture holder(texture); if (CC_LIKELY(holder.texture)) { renderPathTexture(renderer, state, op.unmappedBounds.left, op.unmappedBounds.top, *texture, *(op.paint)); } } else { SkPath path; path.addRect(getBoundsOfFill(op)); renderConvexPath(renderer, state, path, *(op.paint)); } } else { if (op.paint->isAntiAlias() && !state.computedState.transform.isSimple()) { SkPath path; path.addRect(op.unmappedBounds.toSkRect()); renderConvexPath(renderer, state, path, *(op.paint)); } else { // render simple unit quad, no tessellation required Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshUnitQuad() .setFillPaint(*op.paint, state.alpha) .setTransform(state.computedState.transform, TransformFlags::None) .setModelViewMapUnitToRect(op.unmappedBounds) .build(); renderer.renderGlop(state, glop); } } } void BakedOpDispatcher::onRoundRectOp(BakedOpRenderer& renderer, const RoundRectOp& op, const BakedOpState& state) { if (op.paint->getPathEffect() != nullptr) { PathTexture* texture = renderer.caches().pathCache.getRoundRect( op.unmappedBounds.getWidth(), op.unmappedBounds.getHeight(), op.rx, op.ry, op.paint); const AutoTexture holder(texture); if (CC_LIKELY(holder.texture)) { renderPathTexture(renderer, state, op.unmappedBounds.left, op.unmappedBounds.top, *texture, *(op.paint)); } } else { const VertexBuffer* buffer = renderer.caches().tessellationCache.getRoundRect( state.computedState.transform, *(op.paint), op.unmappedBounds.getWidth(), op.unmappedBounds.getHeight(), op.rx, op.ry); renderVertexBuffer(renderer, state, *buffer, op.unmappedBounds.left, op.unmappedBounds.top, *(op.paint), 0); } } static void renderShadow(BakedOpRenderer& renderer, const BakedOpState& state, float casterAlpha, const VertexBuffer* ambientShadowVertexBuffer, const VertexBuffer* spotShadowVertexBuffer) { SkPaint paint; paint.setAntiAlias(true); // want to use AlphaVertex // The caller has made sure casterAlpha > 0. 
uint8_t ambientShadowAlpha = renderer.getLightInfo().ambientShadowAlpha; if (CC_UNLIKELY(Properties::overrideAmbientShadowStrength >= 0)) { ambientShadowAlpha = Properties::overrideAmbientShadowStrength; } if (ambientShadowVertexBuffer && ambientShadowAlpha > 0) { paint.setAlpha((uint8_t)(casterAlpha * ambientShadowAlpha)); renderVertexBuffer(renderer, state, *ambientShadowVertexBuffer, 0, 0, paint, VertexBufferRenderFlags::ShadowInterp); } uint8_t spotShadowAlpha = renderer.getLightInfo().spotShadowAlpha; if (CC_UNLIKELY(Properties::overrideSpotShadowStrength >= 0)) { spotShadowAlpha = Properties::overrideSpotShadowStrength; } if (spotShadowVertexBuffer && spotShadowAlpha > 0) { paint.setAlpha((uint8_t)(casterAlpha * spotShadowAlpha)); renderVertexBuffer(renderer, state, *spotShadowVertexBuffer, 0, 0, paint, VertexBufferRenderFlags::ShadowInterp); } } void BakedOpDispatcher::onShadowOp(BakedOpRenderer& renderer, const ShadowOp& op, const BakedOpState& state) { TessellationCache::vertexBuffer_pair_t buffers = op.shadowTask->getResult(); renderShadow(renderer, state, op.casterAlpha, buffers.first, buffers.second); } void BakedOpDispatcher::onSimpleRectsOp(BakedOpRenderer& renderer, const SimpleRectsOp& op, const BakedOpState& state) { Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshIndexedQuads(&op.vertices[0], op.vertexCount / 4) .setFillPaint(*op.paint, state.alpha) .setTransform(state.computedState.transform, TransformFlags::None) .setModelViewOffsetRect(0, 0, op.unmappedBounds) .build(); renderer.renderGlop(state, glop); } void BakedOpDispatcher::onTextOp(BakedOpRenderer& renderer, const TextOp& op, const BakedOpState& state) { renderTextShadow(renderer, op, state); renderText(renderer, op, state, state.computedState.getClipIfNeeded(), TextRenderType::Flush); } void BakedOpDispatcher::onTextOnPathOp(BakedOpRenderer& renderer, const TextOnPathOp& op, const BakedOpState& state) { // Note: can't trust clipSideFlags since we record with unmappedBounds == clip. // TODO: respect clipSideFlags, once we record with bounds auto renderTargetClip = state.computedState.clipState; FontRenderer& fontRenderer = renderer.caches().fontRenderer.getFontRenderer(); fontRenderer.setFont(op.paint, SkMatrix::I()); fontRenderer.setTextureFiltering(true); Rect layerBounds(FLT_MAX / 2.0f, FLT_MAX / 2.0f, FLT_MIN / 2.0f, FLT_MIN / 2.0f); int alpha = PaintUtils::getAlphaDirect(op.paint) * state.alpha; SkXfermode::Mode mode = PaintUtils::getXfermodeDirect(op.paint); TextDrawFunctor functor(&renderer, &state, renderTargetClip, 0.0f, 0.0f, false, alpha, mode, op.paint); bool mustDirtyRenderTarget = renderer.offscreenRenderTarget(); const Rect localSpaceClip = state.computedState.computeLocalSpaceClip(); if (fontRenderer.renderTextOnPath(op.paint, &localSpaceClip, op.glyphs, op.glyphCount, op.path, op.hOffset, op.vOffset, mustDirtyRenderTarget ? 
&layerBounds : nullptr, &functor)) { if (mustDirtyRenderTarget) { // manually dirty render target, since TextDrawFunctor won't state.computedState.transform.mapRect(layerBounds); renderer.dirtyRenderTarget(layerBounds); } } } void BakedOpDispatcher::onTextureLayerOp(BakedOpRenderer& renderer, const TextureLayerOp& op, const BakedOpState& state) { const bool tryToSnap = !op.layer->getForceFilter(); float alpha = (op.layer->getAlpha() / 255.0f) * state.alpha; Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshTexturedUvQuad(nullptr, Rect(0, 1, 1, 0)) // TODO: simplify with VBO .setFillTextureLayer(*(op.layer), alpha) .setTransform(state.computedState.transform, TransformFlags::None) .setModelViewMapUnitToRectOptionalSnap(tryToSnap, Rect(op.layer->getWidth(), op.layer->getHeight())) .build(); renderer.renderGlop(state, glop); } void renderRectForLayer(BakedOpRenderer& renderer, const LayerOp& op, const BakedOpState& state, int color, SkXfermode::Mode mode, SkColorFilter* colorFilter) { SkPaint paint; paint.setColor(color); paint.setXfermodeMode(mode); paint.setColorFilter(colorFilter); RectOp rectOp(op.unmappedBounds, op.localMatrix, op.localClip, &paint); BakedOpDispatcher::onRectOp(renderer, rectOp, state); } void BakedOpDispatcher::onLayerOp(BakedOpRenderer& renderer, const LayerOp& op, const BakedOpState& state) { // Note that we don't use op->paint in this function - it's never set on a LayerOp OffscreenBuffer* buffer = *op.layerHandle; if (CC_UNLIKELY(!buffer)) return; float layerAlpha = op.alpha * state.alpha; Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshTexturedIndexedVbo(buffer->vbo, buffer->elementCount) .setFillLayer(buffer->texture, op.colorFilter, layerAlpha, op.mode, Blend::ModeOrderSwap::NoSwap) .setTransform(state.computedState.transform, TransformFlags::None) .setModelViewOffsetRectSnap(op.unmappedBounds.left, op.unmappedBounds.top, Rect(op.unmappedBounds.getWidth(), op.unmappedBounds.getHeight())) .build(); renderer.renderGlop(state, glop); if (!buffer->hasRenderedSinceRepaint) { buffer->hasRenderedSinceRepaint = true; if (CC_UNLIKELY(Properties::debugLayersUpdates)) { // render debug layer highlight renderRectForLayer(renderer, op, state, 0x7f00ff00, SkXfermode::Mode::kSrcOver_Mode, nullptr); } else if (CC_UNLIKELY(Properties::debugOverdraw)) { // render transparent to increment overdraw for repaint area renderRectForLayer(renderer, op, state, SK_ColorTRANSPARENT, SkXfermode::Mode::kSrcOver_Mode, nullptr); } } } void BakedOpDispatcher::onCopyToLayerOp(BakedOpRenderer& renderer, const CopyToLayerOp& op, const BakedOpState& state) { LOG_ALWAYS_FATAL_IF(*(op.layerHandle) != nullptr, "layer already exists!"); *(op.layerHandle) = renderer.copyToLayer(state.computedState.clippedBounds); LOG_ALWAYS_FATAL_IF(*op.layerHandle == nullptr, "layer copy failed"); } void BakedOpDispatcher::onCopyFromLayerOp(BakedOpRenderer& renderer, const CopyFromLayerOp& op, const BakedOpState& state) { LOG_ALWAYS_FATAL_IF(*op.layerHandle == nullptr, "no layer to draw underneath!"); if (!state.computedState.clippedBounds.isEmpty()) { if (op.paint && op.paint->getAlpha() < 255) { SkPaint layerPaint; layerPaint.setAlpha(op.paint->getAlpha()); layerPaint.setXfermodeMode(SkXfermode::kDstIn_Mode); layerPaint.setColorFilter(op.paint->getColorFilter()); RectOp rectOp(state.computedState.clippedBounds, Matrix4::identity(), nullptr, &layerPaint); 
BakedOpDispatcher::onRectOp(renderer, rectOp, state); } OffscreenBuffer& layer = **(op.layerHandle); auto mode = PaintUtils::getXfermodeDirect(op.paint); Glop glop; GlopBuilder(renderer.renderState(), renderer.caches(), &glop) .setRoundRectClipState(state.roundRectClipState) .setMeshTexturedUvQuad(nullptr, layer.getTextureCoordinates()) .setFillLayer(layer.texture, nullptr, 1.0f, mode, Blend::ModeOrderSwap::Swap) .setTransform(state.computedState.transform, TransformFlags::None) .setModelViewMapUnitToRect(state.computedState.clippedBounds) .build(); renderer.renderGlop(state, glop); } renderer.renderState().layerPool().putOrDelete(*op.layerHandle); } } // namespace uirenderer } // namespace android
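// Standalone sketch of the grid-to-triangle subdivision performed in onBitmapMeshOp above:
// each mesh cell becomes two triangles (six vertices) whose texture coordinates are the
// cell's normalized corners (u = x / meshWidth, v = y / meshHeight). The Vertex type and
// the flat positions array are simplified stand-ins for ColorTextureVertex and op.vertices.
#include <cstdio>
#include <vector>

struct Vertex { float x, y, u, v; };

// positions holds (meshWidth + 1) * (meshHeight + 1) points as interleaved x,y pairs,
// row-major, matching the layout the dispatcher indexes with ax/bx/cx/dx.
std::vector<Vertex> buildMesh(const std::vector<float>& positions, int meshWidth, int meshHeight) {
    std::vector<Vertex> out;
    out.reserve(static_cast<size_t>(meshWidth) * meshHeight * 6);
    for (int y = 0; y < meshHeight; y++) {
        for (int x = 0; x < meshWidth; x++) {
            const float u1 = float(x) / meshWidth;
            const float u2 = float(x + 1) / meshWidth;
            const float v1 = float(y) / meshHeight;
            const float v2 = float(y + 1) / meshHeight;

            // Indices of the cell corners in the flat position array (x,y interleaved).
            const int b = (y * (meshWidth + 1) + x) * 2;   // corner at (x,     y)
            const int c = b + 2;                           // corner at (x + 1, y)
            const int a = b + (meshWidth + 1) * 2;         // corner at (x,     y + 1)
            const int d = a + 2;                           // corner at (x + 1, y + 1)

            // Two triangles per cell, emitted in the same order as the dispatcher: (d, a, b) and (d, b, c).
            out.push_back({positions[d], positions[d + 1], u2, v2});
            out.push_back({positions[a], positions[a + 1], u1, v2});
            out.push_back({positions[b], positions[b + 1], u1, v1});
            out.push_back({positions[d], positions[d + 1], u2, v2});
            out.push_back({positions[b], positions[b + 1], u1, v1});
            out.push_back({positions[c], positions[c + 1], u2, v1});
        }
    }
    return out;
}

int main() {
    // A 1x1 mesh over the unit square: four corner points, expect six vertices (two triangles).
    std::vector<float> pts = {0, 0, 1, 0, 0, 1, 1, 1};
    auto mesh = buildMesh(pts, 1, 1);
    std::printf("%zu vertices\n", mesh.size());  // 6
    return 0;
}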
40,349
12,664
#include <memory> #include <string> #include <vector> #include "envoy/extensions/filters/network/ratelimit/v3/rate_limit.pb.h" #include "envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.h" #include "envoy/stats/stats.h" #include "source/common/buffer/buffer_impl.h" #include "source/common/network/filter_manager_impl.h" #include "source/common/tcp_proxy/tcp_proxy.h" #include "source/extensions/filters/network/ratelimit/ratelimit.h" #include "source/extensions/filters/network/well_known_names.h" #include "test/extensions/filters/common/ratelimit/mocks.h" #include "test/mocks/network/mocks.h" #include "test/mocks/ratelimit/mocks.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/server/factory_context.h" #include "test/mocks/stream_info/mocks.h" #include "test/mocks/tracing/mocks.h" #include "test/test_common/printers.h" #include "gmock/gmock.h" #include "gtest/gtest.h" using testing::_; using testing::InSequence; using testing::Invoke; using testing::NiceMock; using testing::Return; using testing::WithArgs; namespace Envoy { namespace Extensions { namespace NetworkFilters { namespace RateLimitFilter { class RateLimitFilterTest : public testing::Test { public: void SetUpTest(const std::string& yaml) { ON_CALL(runtime_.snapshot_, featureEnabled("ratelimit.tcp_filter_enabled", 100)) .WillByDefault(Return(true)); ON_CALL(runtime_.snapshot_, featureEnabled("ratelimit.tcp_filter_enforcing", 100)) .WillByDefault(Return(true)); envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{}; TestUtility::loadFromYaml(yaml, proto_config); config_ = std::make_shared<Config>(proto_config, stats_store_, runtime_); client_ = new Filters::Common::RateLimit::MockClient(); filter_ = std::make_unique<Filter>(config_, Filters::Common::RateLimit::ClientPtr{client_}); filter_->initializeReadFilterCallbacks(filter_callbacks_); // NOP currently. 
filter_->onAboveWriteBufferHighWatermark(); filter_->onBelowWriteBufferLowWatermark(); } ~RateLimitFilterTest() override { for (const Stats::GaugeSharedPtr& gauge : stats_store_.gauges()) { EXPECT_EQ(0U, gauge->value()); } } const std::string filter_config_ = R"EOF( domain: foo descriptors: - entries: - key: hello value: world - key: foo value: bar - entries: - key: foo2 value: bar2 stat_prefix: name )EOF"; const std::string fail_close_config_ = R"EOF( domain: foo descriptors: - entries: - key: hello value: world - key: foo value: bar - entries: - key: foo2 value: bar2 stat_prefix: name failure_mode_deny: true )EOF"; Stats::TestUtil::TestStore stats_store_; NiceMock<Runtime::MockLoader> runtime_; ConfigSharedPtr config_; Filters::Common::RateLimit::MockClient* client_; std::unique_ptr<Filter> filter_; NiceMock<Network::MockReadFilterCallbacks> filter_callbacks_; Filters::Common::RateLimit::RequestCallbacks* request_callbacks_{}; }; TEST_F(RateLimitFilterTest, OK) { InSequence s; SetUpTest(filter_config_); EXPECT_CALL(*client_, limit(_, "foo", testing::ContainerEq(std::vector<RateLimit::Descriptor>{ {{{"hello", "world"}, {"foo", "bar"}}}, {{{"foo2", "bar2"}}}}), testing::A<Tracing::Span&>(), _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); EXPECT_CALL(filter_callbacks_, continueReading()); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, nullptr, "", nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); EXPECT_CALL(*client_, cancel()).Times(0); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::LocalClose); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.ok").value()); } TEST_F(RateLimitFilterTest, OverLimit) { InSequence s; SetUpTest(filter_config_); EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(*client_, cancel()).Times(0); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr, "", nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.over_limit").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.cx_closed").value()); } TEST_F(RateLimitFilterTest, OverLimitWithDynamicMetadata) { InSequence s; SetUpTest(filter_config_); EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); 
EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); Filters::Common::RateLimit::DynamicMetadataPtr dynamic_metadata = std::make_unique<ProtobufWkt::Struct>(); auto* fields = dynamic_metadata->mutable_fields(); (*fields)["name"] = ValueUtil::stringValue("my-limit"); (*fields)["x"] = ValueUtil::numberValue(3); NiceMock<StreamInfo::MockStreamInfo> stream_info; EXPECT_CALL(filter_callbacks_.connection_, streamInfo()).WillOnce(ReturnRef(stream_info)); EXPECT_CALL(stream_info, setDynamicMetadata(_, _)) .WillOnce(Invoke([&dynamic_metadata](const std::string& ns, const ProtobufWkt::Struct& returned_dynamic_metadata) { EXPECT_EQ(ns, NetworkFilterNames::get().RateLimit); EXPECT_TRUE(TestUtility::protoEqual(returned_dynamic_metadata, *dynamic_metadata)); })); EXPECT_CALL(filter_callbacks_.connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(*client_, cancel()).Times(0); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr, "", std::move(dynamic_metadata)); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.over_limit").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.cx_closed").value()); } TEST_F(RateLimitFilterTest, OverLimitNotEnforcing) { InSequence s; SetUpTest(filter_config_); EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); EXPECT_CALL(runtime_.snapshot_, featureEnabled("ratelimit.tcp_filter_enforcing", 100)) .WillOnce(Return(false)); EXPECT_CALL(filter_callbacks_.connection_, close(_)).Times(0); EXPECT_CALL(*client_, cancel()).Times(0); EXPECT_CALL(filter_callbacks_, continueReading()); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::OverLimit, nullptr, nullptr, nullptr, "", nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.over_limit").value()); EXPECT_EQ(0U, stats_store_.counter("ratelimit.name.cx_closed").value()); } TEST_F(RateLimitFilterTest, Error) { InSequence s; SetUpTest(filter_config_); EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); EXPECT_CALL(filter_callbacks_, continueReading()); request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, nullptr, "", nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); EXPECT_CALL(*client_, cancel()).Times(0); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.error").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.failure_mode_allowed").value()); } 
TEST_F(RateLimitFilterTest, Disconnect) { InSequence s; SetUpTest(filter_config_); EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); EXPECT_CALL(*client_, cancel()); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.total").value()); } TEST_F(RateLimitFilterTest, ImmediateOK) { InSequence s; SetUpTest(filter_config_); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, nullptr, "", nullptr); }))); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); EXPECT_CALL(*client_, cancel()).Times(0); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.ok").value()); } TEST_F(RateLimitFilterTest, ImmediateError) { InSequence s; SetUpTest(filter_config_); EXPECT_CALL(filter_callbacks_, continueReading()).Times(0); EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { callbacks.complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, nullptr, "", nullptr); }))); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); EXPECT_CALL(*client_, cancel()).Times(0); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.error").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.failure_mode_allowed").value()); } TEST_F(RateLimitFilterTest, RuntimeDisable) { InSequence s; SetUpTest(filter_config_); EXPECT_CALL(runtime_.snapshot_, featureEnabled("ratelimit.tcp_filter_enabled", 100)) .WillOnce(Return(false)); EXPECT_CALL(*client_, limit(_, _, _, _, _)).Times(0); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); } TEST_F(RateLimitFilterTest, ErrorResponseWithFailureModeAllowOff) { InSequence s; SetUpTest(fail_close_config_); EXPECT_CALL(*client_, limit(_, "foo", _, _, _)) .WillOnce( WithArgs<0>(Invoke([&](Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks_ = &callbacks; }))); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onNewConnection()); Buffer::OwnedImpl data("hello"); EXPECT_EQ(Network::FilterStatus::StopIteration, filter_->onData(data, false)); 
request_callbacks_->complete(Filters::Common::RateLimit::LimitStatus::Error, nullptr, nullptr, nullptr, "", nullptr); EXPECT_EQ(Network::FilterStatus::Continue, filter_->onData(data, false)); EXPECT_CALL(*client_, cancel()).Times(0); filter_callbacks_.connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.total").value()); EXPECT_EQ(1U, stats_store_.counter("ratelimit.name.error").value()); EXPECT_EQ(0U, stats_store_.counter("ratelimit.name.failure_mode_allowed").value()); } class NetworkFilterManagerRateLimitTest : public testing::Test { public: void SetUp() override { EXPECT_CALL(connection_, getReadBuffer).WillRepeatedly(Invoke([this]() { return Network::StreamBuffer{read_buffer_, read_end_stream_}; })); EXPECT_CALL(connection_, getWriteBuffer).WillRepeatedly(Invoke([this]() { return Network::StreamBuffer{write_buffer_, write_end_stream_}; })); } NiceMock<Network::MockFilterManagerConnection> connection_; NiceMock<Network::MockListenSocket> socket_; Buffer::OwnedImpl read_buffer_; Buffer::OwnedImpl write_buffer_; bool read_end_stream_{}; bool write_end_stream_{}; }; // This is a very important flow so make sure it works correctly in aggregate. TEST_F(NetworkFilterManagerRateLimitTest, RateLimitAndTcpProxy) { InSequence s; NiceMock<Server::Configuration::MockFactoryContext> factory_context; NiceMock<Network::MockClientConnection> upstream_connection; NiceMock<Tcp::ConnectionPool::MockInstance> conn_pool; Network::FilterManagerImpl manager(connection_, socket_); std::string rl_yaml = R"EOF( domain: foo descriptors: - entries: - key: hello value: world stat_prefix: name )EOF"; ON_CALL(factory_context.runtime_loader_.snapshot_, featureEnabled("ratelimit.tcp_filter_enabled", 100)) .WillByDefault(Return(true)); ON_CALL(factory_context.runtime_loader_.snapshot_, featureEnabled("ratelimit.tcp_filter_enforcing", 100)) .WillByDefault(Return(true)); envoy::extensions::filters::network::ratelimit::v3::RateLimit proto_config{}; TestUtility::loadFromYaml(rl_yaml, proto_config); Extensions::NetworkFilters::RateLimitFilter::ConfigSharedPtr rl_config( new Extensions::NetworkFilters::RateLimitFilter::Config(proto_config, factory_context.scope_, factory_context.runtime_loader_)); Extensions::Filters::Common::RateLimit::MockClient* rl_client = new Extensions::Filters::Common::RateLimit::MockClient(); manager.addReadFilter(std::make_shared<Extensions::NetworkFilters::RateLimitFilter::Filter>( rl_config, Extensions::Filters::Common::RateLimit::ClientPtr{rl_client})); factory_context.cluster_manager_.initializeThreadLocalClusters({"fake_cluster"}); envoy::extensions::filters::network::tcp_proxy::v3::TcpProxy tcp_proxy; tcp_proxy.set_stat_prefix("name"); tcp_proxy.set_cluster("fake_cluster"); TcpProxy::ConfigSharedPtr tcp_proxy_config(new TcpProxy::Config(tcp_proxy, factory_context)); manager.addReadFilter( std::make_shared<TcpProxy::Filter>(tcp_proxy_config, factory_context.cluster_manager_)); Extensions::Filters::Common::RateLimit::RequestCallbacks* request_callbacks{}; EXPECT_CALL(*rl_client, limit(_, "foo", testing::ContainerEq( std::vector<RateLimit::Descriptor>{{{{"hello", "world"}}}}), testing::A<Tracing::Span&>(), _)) .WillOnce(WithArgs<0>( Invoke([&](Extensions::Filters::Common::RateLimit::RequestCallbacks& callbacks) -> void { request_callbacks = &callbacks; }))); EXPECT_EQ(manager.initializeReadFilters(), true); EXPECT_CALL(factory_context.cluster_manager_.thread_local_cluster_, tcpConnPool(_, _)) .WillOnce(Return(Upstream::TcpPoolData([]() 
{}, &conn_pool))); request_callbacks->complete(Extensions::Filters::Common::RateLimit::LimitStatus::OK, nullptr, nullptr, nullptr, "", nullptr); conn_pool.poolReady(upstream_connection); Buffer::OwnedImpl buffer("hello"); EXPECT_CALL(upstream_connection, write(BufferEqual(&buffer), _)); read_buffer_.add("hello"); manager.onRead(); connection_.raiseEvent(Network::ConnectionEvent::RemoteClose); } } // namespace RateLimitFilter } // namespace NetworkFilters } // namespace Extensions } // namespace Envoy
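// Self-contained sketch of the callback-capture pattern used throughout the tests above:
// the mock's limit() call is expected once, its first argument (a callbacks reference) is
// captured via WithArgs<0>(Invoke(...)), and the test later drives the outcome by calling
// complete() on the captured pointer. Client/RequestCallbacks here are hypothetical minimal
// interfaces, not Envoy's real Filters::Common::RateLimit types; linking against gtest_main
// is assumed to supply main().
#include <string>

#include "gmock/gmock.h"
#include "gtest/gtest.h"

using testing::_;
using testing::Invoke;
using testing::WithArgs;

class RequestCallbacks {
public:
  virtual ~RequestCallbacks() = default;
  virtual void complete(bool over_limit) = 0;
};

class Client {
public:
  virtual ~Client() = default;
  virtual void limit(RequestCallbacks& callbacks, const std::string& domain) = 0;
};

class MockClient : public Client {
public:
  MOCK_METHOD(void, limit, (RequestCallbacks&, const std::string&), (override));
};

class Caller : public RequestCallbacks {
public:
  explicit Caller(Client& client) : client_(client) {}
  void start() { client_.limit(*this, "foo"); }
  void complete(bool over_limit) override { over_limit_ = over_limit; }
  Client& client_;
  bool over_limit_{false};
};

TEST(CallbackCapture, CompleteLater) {
  MockClient client;
  RequestCallbacks* captured = nullptr;
  EXPECT_CALL(client, limit(_, "foo"))
      .WillOnce(WithArgs<0>(Invoke([&](RequestCallbacks& callbacks) { captured = &callbacks; })));

  Caller caller(client);
  caller.start();             // limit() is invoked and the callbacks are captured; nothing decided yet
  ASSERT_NE(nullptr, captured);
  captured->complete(false);  // drive the asynchronous outcome from the test body
  EXPECT_FALSE(caller.over_limit_);
}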
18,192
5,922
/** *Licensed to the Apache Software Foundation (ASF) under one *or more contributor license agreements. See the NOTICE file *distributed with this work for additional information *regarding copyright ownership. The ASF licenses this file *to you under the Apache License, Version 2.0 (the *"License"); you may not use this file except in compliance *with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * *Unless required by applicable law or agreed to in writing, *software distributed under the License is distributed on an *"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the *specific language governing permissions and limitations *under the License. */ #include <CppUTest/TestHarness.h> #include <float.h> #include <assert.h> #include "CppUTest/CommandLineTestRunner.h" extern "C" { #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <ffi.h> #include "dyn_common.h" #include "dyn_type.h" #include "json_serializer.h" #include "json_rpc.h" static void stdLog(void *handle, int level, const char *file, int line, const char *msg, ...) { va_list ap; const char *levels[5] = {"NIL", "ERROR", "WARNING", "INFO", "DEBUG"}; fprintf(stderr, "%s: FILE:%s, LINE:%i, MSG:",levels[level], file, line); va_start(ap, msg); vfprintf(stderr, msg, ap); fprintf(stderr, "\n"); } void prepareTest(void) { dyn_function_type *dynFunc = NULL; int rc = dynFunction_parseWithStr("add(#am=handle;PDD#am=pre;*D)N", NULL, &dynFunc); CHECK_EQUAL(0, rc); char *result = NULL; void *handle = NULL; double arg1 = 1.0; double arg2 = 2.0; void *args[4]; args[0] = &handle; args[1] = &arg1; args[2] = &arg2; rc = jsonRpc_prepareInvokeRequest(dynFunc, "add", args, &result); CHECK_EQUAL(0, rc); //printf("result is %s\n", result); STRCMP_CONTAINS("\"add\"", result); STRCMP_CONTAINS("1.0", result); STRCMP_CONTAINS("2.0", result); free(result); dynFunction_destroy(dynFunc); } void handleTestPre(void) { dyn_function_type *dynFunc = NULL; int rc = dynFunction_parseWithStr("add(#am=handle;PDD#am=pre;*D)N", NULL, &dynFunc); CHECK_EQUAL(0, rc); const char *reply = "{\"r\":2.2}"; double result = -1.0; double *out = &result; void *args[4]; args[3] = &out; rc = jsonRpc_handleReply(dynFunc, reply, args); CHECK_EQUAL(0, rc); //CHECK_EQUAL(2.2, result); dynFunction_destroy(dynFunc); } int add(void *handle, double a, double b, double *result) { *result = a + b; return 0; } int getName_example4(void *handle, char** result) { *result = strdup("allocatedInFunction"); return 0; } struct tst_seq { uint32_t cap; uint32_t len; double *buf; }; //StatsResult={DDD[D average min max input} struct tst_StatsResult { double average; double min; double max; struct tst_seq input; }; int stats(void *handle, struct tst_seq input, struct tst_StatsResult **out) { assert(out != NULL); assert(*out == NULL); double total = 0.0; unsigned int count = 0; double max = DBL_MIN; double min = DBL_MAX; unsigned int i; for (i = 0; i<input.len; i += 1) { total += input.buf[i]; count += 1; if (input.buf[i] > max) { max = input.buf[i]; } if (input.buf[i] < min) { min = input.buf[i]; } } struct tst_StatsResult *result = (struct tst_StatsResult *) calloc(1, sizeof(*result)); result->average = total / count; result->min = min; result->max = max; double *buf = (double *)calloc(input.len, sizeof(double)); memcpy(buf, input.buf, input.len * sizeof(double)); result->input.len = input.len; result->input.cap = input.len; 
result->input.buf = buf; *out = result; return 0; } struct item { double a; double b; }; struct item_seq { uint32_t cap; uint32_t len; struct item **buf; }; struct tst_serv { void *handle; int (*add)(void *, double, double, double *); int (*sub)(void *, double, double, double *); int (*sqrt)(void *, double, double *); int (*stats)(void *, struct tst_seq, struct tst_StatsResult **); }; struct tst_serv_example4 { void *handle; int (*getName_example4)(void *, char** name); }; void callTestPreAllocated(void) { dyn_interface_type *intf = NULL; FILE *desc = fopen("descriptors/example1.descriptor", "r"); CHECK(desc != NULL); int rc = dynInterface_parse(desc, &intf); CHECK_EQUAL(0, rc); fclose(desc); char *result = NULL; struct tst_serv serv; serv.handle = NULL; serv.add = add; rc = jsonRpc_call(intf, &serv, "{\"m\":\"add(DD)D\", \"a\": [1.0,2.0]}", &result); CHECK_EQUAL(0, rc); STRCMP_CONTAINS("3.0", result); free(result); dynInterface_destroy(intf); } void callTestOutput(void) { dyn_interface_type *intf = NULL; FILE *desc = fopen("descriptors/example1.descriptor", "r"); CHECK(desc != NULL); int rc = dynInterface_parse(desc, &intf); CHECK_EQUAL(0, rc); fclose(desc); char *result = NULL; struct tst_serv serv; serv.handle = NULL; serv.stats = stats; rc = jsonRpc_call(intf, &serv, "{\"m\":\"stats([D)LStatsResult;\", \"a\": [[1.0,2.0]]}", &result); CHECK_EQUAL(0, rc); STRCMP_CONTAINS("1.5", result); //avg free(result); dynInterface_destroy(intf); } void handleTestOut(void) { dyn_interface_type *intf = NULL; FILE *desc = fopen("descriptors/example1.descriptor", "r"); CHECK(desc != NULL); int rc = dynInterface_parse(desc, &intf); CHECK_EQUAL(0, rc); fclose(desc); struct methods_head *head; dynInterface_methods(intf, &head); dyn_function_type *func = NULL; struct method_entry *entry = NULL; TAILQ_FOREACH(entry, head, entries) { if (strcmp(entry->name, "stats") == 0) { func = entry->dynFunc; break; } } CHECK(func != NULL); const char *reply = "{\"r\":{\"input\":[1.0,2.0],\"max\":2.0,\"average\":1.5,\"min\":1.0}}"; void *args[3]; args[0] = NULL; args[1] = NULL; args[2] = NULL; struct tst_StatsResult *result = NULL; void *out = &result; args[2] = &out; rc = jsonRpc_handleReply(func, reply, args); CHECK_EQUAL(0, rc); CHECK_EQUAL(1.5, result->average); free(result->input.buf); free(result); dynInterface_destroy(intf); } static void handleTestOutputSequence(void) { dyn_interface_type *intf = NULL; FILE *desc = fopen("descriptors/example2.descriptor", "r"); CHECK(desc != NULL); int rc = dynInterface_parse(desc, &intf); CHECK_EQUAL(0, rc); fclose(desc); struct methods_head *head; dynInterface_methods(intf, &head); dyn_function_type *func = NULL; struct method_entry *entry = NULL; TAILQ_FOREACH(entry, head, entries) { if (strcmp(entry->name, "example1") == 0) { func = entry->dynFunc; break; } } CHECK(func != NULL); //dyn_type *arg = dynFunction_argumentTypeForIndex(func, 1); //dynType_print(arg, stdout); const char *reply = "{\"r\":[{\"a\":1.0,\"b\":1.5},{\"a\":2.0,\"b\":2.5}]}"; void *args[2]; args[0] = NULL; args[1] = NULL; struct item_seq *result = NULL; void *out = &result; args[1] = &out; rc = jsonRpc_handleReply(func, reply, args); CHECK_EQUAL(0, rc); CHECK_EQUAL(2, result->len); CHECK_EQUAL(1.0, result->buf[0]->a); CHECK_EQUAL(1.5, result->buf[0]->b); CHECK_EQUAL(2.0, result->buf[1]->a); CHECK_EQUAL(2.5, result->buf[1]->b); unsigned int i; for (i = 0; i < result->len; i +=1 ) { free(result->buf[i]); } free(result->buf); free(result); dynInterface_destroy(intf); } void callTestOutChar(void) { 
dyn_interface_type *intf = NULL; FILE *desc = fopen("descriptors/example4.descriptor", "r"); CHECK(desc != NULL); int rc = dynInterface_parse(desc, &intf); CHECK_EQUAL(0, rc); fclose(desc); char *result = NULL; struct tst_serv_example4 serv; serv.handle = NULL; serv.getName_example4 = getName_example4; rc = jsonRpc_call(intf, &serv, "{\"m\":\"getName(V)t\", \"a\": []}", &result); CHECK_EQUAL(0, rc); STRCMP_CONTAINS("allocatedInFunction", result); free(result); dynInterface_destroy(intf); } void handleTestOutChar(void) { dyn_interface_type *intf = NULL; FILE *desc = fopen("descriptors/example4.descriptor", "r"); CHECK(desc != NULL); int rc = dynInterface_parse(desc, &intf); CHECK_EQUAL(0, rc); fclose(desc); struct methods_head *head; dynInterface_methods(intf, &head); dyn_function_type *func = NULL; struct method_entry *entry = NULL; TAILQ_FOREACH(entry, head, entries) { if (strcmp(entry->name, "getName") == 0) { func = entry->dynFunc; break; } } CHECK(func != NULL); const char *reply = "{\"r\": \"this is a test string\" }"; char *result = NULL; void *out = &result; void *args[2]; args[0] = NULL; args[1] = &out; rc = jsonRpc_handleReply(func, reply, args); STRCMP_EQUAL("this is a test string", result); free(result); dynInterface_destroy(intf); } } TEST_GROUP(JsonRpcTests) { void setup() { int lvl = 1; dynCommon_logSetup(stdLog, NULL, lvl); dynType_logSetup(stdLog, NULL,lvl); dynFunction_logSetup(stdLog, NULL,lvl); dynInterface_logSetup(stdLog, NULL,lvl); jsonSerializer_logSetup(stdLog, NULL, lvl); jsonRpc_logSetup(stdLog, NULL, lvl); } }; TEST(JsonRpcTests, prepareTest) { prepareTest(); } TEST(JsonRpcTests, handleTestPre) { handleTestPre(); } TEST(JsonRpcTests, handleTestOut) { handleTestOut(); } TEST(JsonRpcTests, callPre) { callTestPreAllocated(); } TEST(JsonRpcTests, callOut) { callTestOutput(); } TEST(JsonRpcTests, handleOutSeq) { handleTestOutputSequence(); } TEST(JsonRpcTests, callTestOutChar) { callTestOutChar(); } TEST(JsonRpcTests, handleOutChar) { handleTestOutChar(); }
11,553
3,995
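The tests above always populate exactly one slot of struct tst_serv before dispatching through jsonRpc_call. As an illustration of that pattern, the sketch below wires the otherwise unused sub slot; it is hypothetical, mirrors callTestPreAllocated, and assumes descriptors/example1.descriptor also declares a matching sub(DD)D method.

/* Hypothetical sketch (not one of the tests above): wiring the unused `sub`
 * slot of struct tst_serv and dispatching to it through jsonRpc_call.
 * Assumes descriptors/example1.descriptor also declares sub(DD)D. */
static int sub_example(void *handle, double a, double b, double *result) {
    *result = a - b;
    return 0;
}

static void callTestSubSketch(void) {
    dyn_interface_type *intf = NULL;
    FILE *desc = fopen("descriptors/example1.descriptor", "r");
    CHECK(desc != NULL);
    int rc = dynInterface_parse(desc, &intf);
    CHECK_EQUAL(0, rc);
    fclose(desc);

    struct tst_serv serv;
    serv.handle = NULL;
    serv.sub = sub_example;

    char *result = NULL;
    rc = jsonRpc_call(intf, &serv, "{\"m\":\"sub(DD)D\", \"a\": [5.0,2.0]}", &result);
    CHECK_EQUAL(0, rc);
    STRCMP_CONTAINS("3.0", result);

    free(result);
    dynInterface_destroy(intf);
}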
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <utility> #include "base/command_line.h" #include "base/strings/utf_string_conversions.h" #include "mojo/public/cpp/bindings/binding.h" #include "mojo/public/cpp/environment/environment.h" #include "mojo/public/cpp/system/message_pipe.h" #include "mojo/shell/public/cpp/application_impl.h" #include "mojo/shell/public/cpp/application_test_base.h" #include "mojo/shell/public/interfaces/application.mojom.h" namespace mojo { namespace test { namespace { // Share the application URL with multiple application tests. String g_url; // Application request handle passed from the shell in MojoMain, stored in // between SetUp()/TearDown() so we can (re-)intialize new ApplicationImpls. InterfaceRequest<Application> g_application_request; // Shell pointer passed in the initial mojo.Application.Initialize() call, // stored in between initial setup and the first test and between SetUp/TearDown // calls so we can (re-)initialize new ApplicationImpls. ShellPtr g_shell; class ShellGrabber : public Application { public: explicit ShellGrabber(InterfaceRequest<Application> application_request) : binding_(this, std::move(application_request)) {} void WaitForInitialize() { // Initialize is always the first call made on Application. MOJO_CHECK(binding_.WaitForIncomingMethodCall()); } private: // Application implementation. void Initialize(ShellPtr shell, const mojo::String& url) override { g_url = url; g_application_request = binding_.Unbind(); g_shell = std::move(shell); } void AcceptConnection(const String& requestor_url, InterfaceRequest<ServiceProvider> services, ServiceProviderPtr exposed_services, Array<String> allowed_interfaces, const String& url) override { MOJO_CHECK(false); } void OnQuitRequested(const Callback<void(bool)>& callback) override { MOJO_CHECK(false); } Binding<Application> binding_; }; } // namespace MojoResult RunAllTests(MojoHandle application_request_handle) { { // This loop is used for init, and then destroyed before running tests. Environment::InstantiateDefaultRunLoop(); // Grab the shell handle. ShellGrabber grabber( MakeRequest<Application>(MakeScopedHandle( MessagePipeHandle(application_request_handle)))); grabber.WaitForInitialize(); MOJO_CHECK(g_shell); MOJO_CHECK(g_application_request.is_pending()); int argc = 0; base::CommandLine* cmd_line = base::CommandLine::ForCurrentProcess(); const char** argv = new const char* [cmd_line->argv().size() + 1]; #if defined(OS_WIN) std::vector<std::string> local_strings; #endif for (auto& arg : cmd_line->argv()) { #if defined(OS_WIN) local_strings.push_back(base::WideToUTF8(arg)); argv[argc++] = local_strings.back().c_str(); #else argv[argc++] = arg.c_str(); #endif } argv[argc] = nullptr; testing::InitGoogleTest(&argc, const_cast<char**>(&(argv[0]))); Environment::DestroyDefaultRunLoop(); } int result = RUN_ALL_TESTS(); // Shut down our message pipes before exiting. (void)g_application_request.PassMessagePipe(); g_shell.reset(); return (result == 0) ? MOJO_RESULT_OK : MOJO_RESULT_UNKNOWN; } TestHelper::TestHelper(ApplicationDelegate* delegate) : application_impl_(new ApplicationImpl( delegate == nullptr ? &default_application_delegate_ : delegate, std::move(g_application_request))) { // Fake application initialization. 
Application* application = application_impl_.get(); application->Initialize(std::move(g_shell), g_url); } TestHelper::~TestHelper() { // TODO: commented out until http://crbug.com/533107 is solved. // { // ApplicationImpl::TestApi test_api(application_impl_); // test_api.UnbindConnections(&g_application_request, &g_shell); // } // We may have supplied a member as the delegate. Delete |application_impl_| // while still valid. application_impl_.reset(); } ApplicationTestBase::ApplicationTestBase() : test_helper_(nullptr) {} ApplicationTestBase::~ApplicationTestBase() { } ApplicationDelegate* ApplicationTestBase::GetApplicationDelegate() { return nullptr; } void ApplicationTestBase::SetUp() { // A run loop is recommended for ApplicationImpl initialization and // communication. if (ShouldCreateDefaultRunLoop()) Environment::InstantiateDefaultRunLoop(); MOJO_CHECK(g_application_request.is_pending()); MOJO_CHECK(g_shell); // New applications are constructed for each test to avoid persisting state. test_helper_.reset(new TestHelper(GetApplicationDelegate())); } void ApplicationTestBase::TearDown() { MOJO_CHECK(!g_application_request.is_pending()); MOJO_CHECK(!g_shell); test_helper_.reset(); if (ShouldCreateDefaultRunLoop()) Environment::DestroyDefaultRunLoop(); } bool ApplicationTestBase::ShouldCreateDefaultRunLoop() { return true; } } // namespace test } // namespace mojo
5,173
1,582
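ApplicationTestBase above builds a fresh ApplicationImpl in SetUp() and tears it down in TearDown(), so each test starts from a clean application. A minimal fixture sketch follows; it is hypothetical and assumes ApplicationTestBase derives from testing::Test and that GetApplicationDelegate() is virtual, which the base implementation above suggests but does not show.

// Hypothetical usage sketch (not part of this file): a gtest fixture built on
// ApplicationTestBase. Returning nullptr from GetApplicationDelegate() keeps
// the default delegate supplied by TestHelper; a real suite would return a
// pointer to its own mojo::ApplicationDelegate implementation instead.
class ExampleApplicationTest : public mojo::test::ApplicationTestBase {
 protected:
  mojo::ApplicationDelegate* GetApplicationDelegate() override {
    return nullptr;  // fall back to the default delegate
  }
};

TEST_F(ExampleApplicationTest, InitializesApplication) {
  // By the time the test body runs, SetUp() has already constructed a fresh
  // ApplicationImpl from g_application_request and faked Initialize().
  SUCCEED();
}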
// ListViewDialog.cpp #include "StdAfx.h" #include "ListViewDialog.h" #include "RegistryUtils.h" #ifdef LANG #include "LangUtils.h" #endif using namespace NWindows; bool CListViewDialog::OnInit() { #ifdef LANG LangSetDlgItems(*this, NULL, 0); #endif _listView.Attach(GetItem(IDL_LISTVIEW)); // FIXME if (ReadSingleClick()) // FIXME _listView.SetExtendedListViewStyle(LVS_EX_ONECLICKACTIVATE | LVS_EX_TRACKSELECT); SetText(Title); LVCOLUMN columnInfo; columnInfo.mask = LVCF_FMT | LVCF_WIDTH | LVCF_SUBITEM; columnInfo.fmt = LVCFMT_LEFT; columnInfo.iSubItem = 0; columnInfo.cx = 200; _listView.InsertColumn(0, &columnInfo); FOR_VECTOR (i, Strings) _listView.InsertItem(i, Strings[i]); if (Strings.Size() > 0) _listView.SetItemState_FocusedSelected(0); _listView.SetColumnWidthAuto(0); StringsWereChanged = false; NormalizeSize(); return CModalDialog::OnInit(); } bool CListViewDialog::OnSize(WPARAM /* wParam */, int xSize, int ySize) { #ifdef _WIN32 int mx, my; GetMargins(8, mx, my); int bx1, bx2, by; GetItemSizes(IDCANCEL, bx1, by); GetItemSizes(IDOK, bx2, by); int y = ySize - my - by; int x = xSize - mx - bx1; /* RECT rect; GetClientRect(&rect); rect.top = y - my; InvalidateRect(&rect); */ InvalidateRect(NULL); MoveItem(IDCANCEL, x, y, bx1, by); MoveItem(IDOK, x - mx - bx2, y, bx2, by); /* if (wParam == SIZE_MAXSHOW || wParam == SIZE_MAXIMIZED || wParam == SIZE_MAXHIDE) mx = 0; */ _listView.Move(mx, my, xSize - mx * 2, y - my * 2); #endif return false; } extern bool g_LVN_ITEMACTIVATE_Support; bool CListViewDialog::OnNotify(UINT /* controlID */, LPNMHDR header) { #ifdef _WIN32 if (header->hwndFrom != _listView) return false; switch (header->code) { case LVN_ITEMACTIVATE: if (g_LVN_ITEMACTIVATE_Support) { OnOK(); return true; } break; case NM_DBLCLK: case NM_RETURN: // probabably it's unused if (!g_LVN_ITEMACTIVATE_Support) { OnOK(); return true; } break; case LVN_KEYDOWN: { LPNMLVKEYDOWN keyDownInfo = LPNMLVKEYDOWN(header); switch (keyDownInfo->wVKey) { case VK_DELETE: { if (!DeleteIsAllowed) return false; for (;;) { int index = _listView.GetNextSelectedItem(-1); if (index < 0) break; StringsWereChanged = true; _listView.DeleteItem(index); Strings.Delete(index); } int focusedIndex = _listView.GetFocusedItem(); if (focusedIndex >= 0) _listView.SetItemState_FocusedSelected(focusedIndex); _listView.SetColumnWidthAuto(0); return true; } case 'A': { if (IsKeyDown(VK_CONTROL)) { _listView.SelectAll(); return true; } } } } } #endif return false; } void CListViewDialog::OnOK() { FocusedItemIndex = _listView.GetFocusedItem(); CModalDialog::OnOK(); }
3,115
1,218
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/services/app_service/public/cpp/icon_loader.h" #include <utility> #include "base/callback.h" namespace apps { IconLoader::Releaser::Releaser(std::unique_ptr<IconLoader::Releaser> next, base::OnceClosure closure) : next_(std::move(next)), closure_(std::move(closure)) {} IconLoader::Releaser::~Releaser() { std::move(closure_).Run(); } IconLoader::Key::Key(apps::mojom::AppType app_type, const std::string& app_id, const apps::mojom::IconKeyPtr& icon_key, apps::mojom::IconType icon_type, int32_t size_hint_in_dip, bool allow_placeholder_icon) : app_type_(app_type), app_id_(app_id), timeline_(icon_key ? icon_key->timeline : 0), resource_id_(icon_key ? icon_key->resource_id : 0), icon_effects_(icon_key ? icon_key->icon_effects : 0), icon_type_(icon_type), size_hint_in_dip_(size_hint_in_dip), allow_placeholder_icon_(allow_placeholder_icon) {} IconLoader::Key::Key(const Key& other) = default; bool IconLoader::Key::operator<(const Key& that) const { if (this->app_type_ != that.app_type_) { return this->app_type_ < that.app_type_; } if (this->timeline_ != that.timeline_) { return this->timeline_ < that.timeline_; } if (this->resource_id_ != that.resource_id_) { return this->resource_id_ < that.resource_id_; } if (this->icon_effects_ != that.icon_effects_) { return this->icon_effects_ < that.icon_effects_; } if (this->icon_type_ != that.icon_type_) { return this->icon_type_ < that.icon_type_; } if (this->size_hint_in_dip_ != that.size_hint_in_dip_) { return this->size_hint_in_dip_ < that.size_hint_in_dip_; } if (this->allow_placeholder_icon_ != that.allow_placeholder_icon_) { return this->allow_placeholder_icon_ < that.allow_placeholder_icon_; } return this->app_id_ < that.app_id_; } IconLoader::IconLoader() = default; IconLoader::~IconLoader() = default; std::unique_ptr<IconLoader::Releaser> IconLoader::LoadIcon( apps::mojom::AppType app_type, const std::string& app_id, apps::mojom::IconType icon_type, int32_t size_hint_in_dip, bool allow_placeholder_icon, apps::mojom::Publisher::LoadIconCallback callback) { return LoadIconFromIconKey(app_type, app_id, GetIconKey(app_id), icon_type, size_hint_in_dip, allow_placeholder_icon, std::move(callback)); } } // namespace apps
2,724
948
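The hand-written Key::operator< above is a field-by-field lexicographic comparison with app_id_ checked last. For illustration only, the same ordering can be expressed with std::tie; the production code keeps the explicit chain, and this sketch simply shows the equivalence (field order must match exactly).

// Sketch: an equivalent body for IconLoader::Key::operator<, written with
// std::tie. The tuple comparison is lexicographic, so listing the fields in
// the same order as the explicit chain above (app_id_ last) yields the same
// strict weak ordering.
#include <tuple>

bool IconLoader::Key::operator<(const Key& that) const {
  return std::tie(app_type_, timeline_, resource_id_, icon_effects_,
                  icon_type_, size_hint_in_dip_, allow_placeholder_icon_,
                  app_id_) <
         std::tie(that.app_type_, that.timeline_, that.resource_id_,
                  that.icon_effects_, that.icon_type_,
                  that.size_hint_in_dip_, that.allow_placeholder_icon_,
                  that.app_id_);
}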
#include <slice.h>

#include <cstring>  // memcmp

namespace ld {

// Returns true when this slice begins with the bytes of |rhs|.
bool Slice::starts_with(const Slice& rhs) const {
    return (size_ >= rhs.size_) && (memcmp(data_, rhs.data(), rhs.size_) == 0);
}

}  // namespace ld
188
73
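A small usage sketch of starts_with follows. It is illustrative only: the excerpt above does not show Slice's constructors, so the (pointer, length) constructor used here is an assumption.

// Usage sketch; assumes ld::Slice can be built from a pointer and a length,
// which the excerpt above does not show.
#include <slice.h>
#include <cassert>

int main() {
  const char text[] = "libunwind";
  ld::Slice whole(text, 9);   // hypothetical (data, size) constructor
  ld::Slice prefix(text, 3);  // "lib"

  assert(whole.starts_with(prefix));
  assert(!prefix.starts_with(whole));  // a longer slice is never a prefix
  return 0;
}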
#include "event_loop_base.h" #include <sys/event.h> #include <stdlib.h> namespace mevent { int EventLoopBase::Create() { return kqueue(); } int EventLoopBase::Add(int evfd, int fd, int mask, void *data) { struct kevent ev; if (mask & MEVENT_IN) { EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, data); kevent(evfd, &ev, 1, NULL, 0, NULL); } if (mask & MEVENT_OUT) { EV_SET(&ev, fd, EVFILT_WRITE, EV_ADD, 0, 0, data); kevent(evfd, &ev, 1, NULL, 0, NULL); } return 0; } int EventLoopBase::Modify(int evfd, int fd, int mask, void *data) { return Add(evfd, fd, mask, data); } int EventLoopBase::Poll(int evfd, EventLoopBase::Event *events, int size, struct timeval *tv) { struct kevent evs[size]; int nfds; if (tv) { struct timespec timeout; timeout.tv_sec = tv->tv_sec; timeout.tv_nsec = tv->tv_usec * 1000; nfds = kevent(evfd, NULL, 0, evs, size, &timeout); } else { nfds = kevent(evfd, NULL, 0, evs, size, NULL); } if (nfds > 0) { for (int i = 0; i < nfds; i++) { events[i].data.ptr = evs[i].udata; events[i].mask = 0; if (evs[i].filter == EVFILT_READ) { events[i].mask |= MEVENT_IN; } if (evs[i].filter == EVFILT_WRITE) { events[i].mask |= MEVENT_OUT; } } } return nfds; } }//namespace mevent
1,519
608
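A minimal sketch of driving this class follows, built only from the methods defined above (Create, Add, Poll), the MEVENT_IN mask, and the Event fields that Poll() itself fills in (mask and data.ptr). It assumes event_loop_base.h declares those names; error handling is reduced to the bare minimum.

// Sketch: register one readable fd with the kqueue-backed loop and wait for
// it once, with a one-second timeout.
#include "event_loop_base.h"
#include <sys/time.h>
#include <cstdio>

void RunOnce(int fd, void *user_data) {
    mevent::EventLoopBase loop;

    int evfd = loop.Create();
    if (evfd < 0) {
        return;  // kqueue() failed
    }

    loop.Add(evfd, fd, MEVENT_IN, user_data);

    mevent::EventLoopBase::Event events[16];
    struct timeval tv = {1, 0};  // wait at most one second

    int n = loop.Poll(evfd, events, 16, &tv);
    for (int i = 0; i < n; ++i) {
        if (events[i].mask & MEVENT_IN) {
            std::printf("fd ready, user data %p\n", events[i].data.ptr);
        }
    }
}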
// Created on: 1996-12-11 // Created by: Robert COUBLANC // Copyright (c) 1996-1999 Matra Datavision // Copyright (c) 1999-2014 OPEN CASCADE SAS // // This file is part of Open CASCADE Technology software library. // // This library is free software; you can redistribute it and/or modify it under // the terms of the GNU Lesser General Public License version 2.1 as published // by the Free Software Foundation, with special exception defined in the file // OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT // distribution for complete text of the license and disclaimer of any warranty. // // Alternatively, this file may be used under the terms of Open CASCADE // commercial license or contractual agreement. #ifndef _AIS_InteractiveObject_HeaderFile #define _AIS_InteractiveObject_HeaderFile #include <AIS_KindOfInteractive.hxx> #include <AIS_DragAction.hxx> #include <SelectMgr_SelectableObject.hxx> class AIS_InteractiveContext; class Graphic3d_MaterialAspect; class Prs3d_BasicAspect; class Bnd_Box; class V3d_View; //! Defines a class of objects with display and selection services. //! Entities which are visualized and selected are Interactive Objects. //! Specific attributes of entities such as arrow aspect for dimensions must be loaded in a Prs3d_Drawer. //! //! You can make use of classes of standard Interactive Objects for which all necessary methods have already been programmed, //! or you can implement your own classes of Interactive Objects. //! Key interface methods to be implemented by every Interactive Object: //! * Presentable Object (PrsMgr_PresentableObject) //! Consider defining an enumeration of supported Display Mode indexes for particular Interactive Object or class of Interactive Objects. //! - AcceptDisplayMode() accepting display modes implemented by this object; //! - Compute() computing presentation for the given display mode index; //! * Selectable Object (SelectMgr_SelectableObject) //! Consider defining an enumeration of supported Selection Mode indexes for particular Interactive Object or class of Interactive Objects. //! - ComputeSelection() computing selectable entities for the given selection mode index. class AIS_InteractiveObject : public SelectMgr_SelectableObject { friend class AIS_InteractiveContext; DEFINE_STANDARD_RTTIEXT(AIS_InteractiveObject, SelectMgr_SelectableObject) public: //! Returns the kind of Interactive Object; AIS_KOI_None by default. virtual AIS_KindOfInteractive Type() const { return AIS_KOI_None; } //! Specifies additional characteristics of Interactive Object of Type(); -1 by default. //! Among the datums, this signature is attributed to the shape. //! The remaining datums have the following default signatures: //! - Point signature 1 //! - Axis signature 2 //! - Trihedron signature 3 //! - PlaneTrihedron signature 4 //! - Line signature 5 //! - Circle signature 6 //! - Plane signature 7. virtual Standard_Integer Signature() const { return -1; } //! Updates the active presentation; if <AllModes> = Standard_True //! all the presentations inside are recomputed. //! IMPORTANT: It is preferable to call Redisplay method of //! corresponding AIS_InteractiveContext instance for cases when it //! is accessible. This method just redirects call to myCTXPtr, //! so this class field must be up to date for proper result. Standard_EXPORT void Redisplay (const Standard_Boolean AllModes = Standard_False); //! Indicates whether the Interactive Object has a pointer to an interactive context. 
Standard_Boolean HasInteractiveContext() const { return myCTXPtr != NULL; } //! Returns the context pointer to the interactive context. AIS_InteractiveContext* InteractiveContext() const { return myCTXPtr; } //! Sets the interactive context aCtx and provides a link //! to the default drawing tool or "Drawer" if there is none. Standard_EXPORT virtual void SetContext (const Handle(AIS_InteractiveContext)& aCtx); //! Returns true if the object has an owner attributed to it. //! The owner can be a shape for a set of sub-shapes or a sub-shape for sub-shapes which it is composed of, and takes the form of a transient. Standard_Boolean HasOwner() const { return !myOwner.IsNull(); } //! Returns the owner of the Interactive Object. //! The owner can be a shape for a set of sub-shapes or //! a sub-shape for sub-shapes which it is composed of, //! and takes the form of a transient. //! There are two types of owners: //! - Direct owners, decomposition shapes such as //! edges, wires, and faces. //! - Users, presentable objects connecting to sensitive //! primitives, or a shape which has been decomposed. const Handle(Standard_Transient)& GetOwner() const { return myOwner; } //! Allows you to attribute the owner theApplicativeEntity to //! an Interactive Object. This can be a shape for a set of //! sub-shapes or a sub-shape for sub-shapes which it //! is composed of. The owner takes the form of a transient. void SetOwner (const Handle(Standard_Transient)& theApplicativeEntity) { myOwner = theApplicativeEntity; } //! Each Interactive Object has methods which allow us to attribute an Owner to it in the form of a Transient. //! This method removes the owner from the graphic entity. void ClearOwner() { myOwner.Nullify(); } //! Drag object in the viewer. //! @param theCtx [in] interactive context //! @param theView [in] active View //! @param theOwner [in] the owner of detected entity //! @param theDragFrom [in] drag start point //! @param theDragTo [in] drag end point //! @param theAction [in] drag action //! @return FALSE if object rejects dragging action (e.g. AIS_DragAction_Start) Standard_EXPORT virtual Standard_Boolean ProcessDragging (const Handle(AIS_InteractiveContext)& theCtx, const Handle(V3d_View)& theView, const Handle(SelectMgr_EntityOwner)& theOwner, const Graphic3d_Vec2i& theDragFrom, const Graphic3d_Vec2i& theDragTo, const AIS_DragAction theAction); public: //! Returns the context pointer to the interactive context. Standard_EXPORT Handle(AIS_InteractiveContext) GetContext() const; //! Returns TRUE when this object has a presentation in the current DisplayMode() Standard_EXPORT Standard_Boolean HasPresentation() const; //! Returns the current presentation of this object according to the current DisplayMode() Standard_EXPORT Handle(Prs3d_Presentation) Presentation() const; //! Sets the graphic basic aspect to the current presentation. Standard_DEPRECATED("Deprecated method, results might be undefined") Standard_EXPORT void SetAspect (const Handle(Prs3d_BasicAspect)& anAspect); //! Dumps the content of me into the stream Standard_EXPORT virtual void DumpJson (Standard_OStream& theOStream, Standard_Integer theDepth = -1) const Standard_OVERRIDE; protected: //! The TypeOfPresention3d means that the interactive object //! may have a presentation dependant of the view of Display. 
Standard_EXPORT AIS_InteractiveObject(const PrsMgr_TypeOfPresentation3d aTypeOfPresentation3d = PrsMgr_TOP_AllView); protected: AIS_InteractiveContext* myCTXPtr; //!< pointer to Interactive Context, where object is currently displayed; @sa SetContext() Handle(Standard_Transient) myOwner; //!< application-specific owner object }; DEFINE_STANDARD_HANDLE(AIS_InteractiveObject, SelectMgr_SelectableObject) #endif // _AIS_InteractiveObject_HeaderFile
7,863
2,204
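The owner accessors declared above (SetOwner, HasOwner, GetOwner, ClearOwner) form a small round-trip. The sketch below exercises them using only what the header shows; the object itself must be some concrete subclass such as AIS_Shape, since the base constructor is protected and construction is outside this excerpt.

// Sketch: attaching and detaching an applicative owner on an already
// constructed interactive object, using only the accessors declared above.
#include <AIS_InteractiveObject.hxx>
#include <Standard_Transient.hxx>

void TagWithOwner (const Handle(AIS_InteractiveObject)& theObject,
                   const Handle(Standard_Transient)&    theApplicativeEntity)
{
  theObject->SetOwner (theApplicativeEntity);

  if (theObject->HasOwner())
  {
    // GetOwner() hands back exactly the transient stored by SetOwner().
    const Handle(Standard_Transient)& anOwner = theObject->GetOwner();
    (void)anOwner;
  }

  theObject->ClearOwner(); // detach again; HasOwner() is false afterwards
}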
#include "Halide.h" #include <stdio.h> using namespace Halide; int main(int argc, char **argv) { Func f("f"), g("g"); Var x("x"); RDom r(0, 100, "r"); f(x) = x; g(x) = 0; g(x) = f(g(x - 1)) + r; f.compute_at(g, r.x); // Use of f is unbounded in g. g.realize({100}); printf("Success!\n"); return 0; }
352
172
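The region of f required by g above is unbounded because g indexes f with its own previous values. One common way to make that footprint finite is to clamp the recursive index; the sketch below is illustrative only and assumes the goal were a bounded schedule rather than exercising the unbounded case, which is what the original test targets.

// Sketch: a bounded variant of the pipeline above. Clamping the recursive
// index keeps f's required region inside [0, 99], so compute_at(g, r.x) has a
// finite footprint.
#include "Halide.h"
#include <stdio.h>

using namespace Halide;

int main(int argc, char **argv) {
    Func f("f"), g("g");
    Var x("x");
    RDom r(0, 100, "r");

    f(x) = x;
    g(x) = 0;
    g(x) = f(clamp(g(x - 1), 0, 99)) + r;

    f.compute_at(g, r.x);

    g.realize({100});
    printf("Success!\n");
    return 0;
}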
/* * Copyright (c) 2019 Samsung Electronics Co., Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "test-render-surface.h" namespace Dali { TestRenderSurface::TestRenderSurface( Dali::PositionSize positionSize ) : mPositionSize( positionSize ), mBackgroundColor() { } TestRenderSurface::~TestRenderSurface() { } Dali::PositionSize TestRenderSurface::GetPositionSize() const { return mPositionSize; }; void TestRenderSurface::GetDpi( unsigned int& dpiHorizontal, unsigned int& dpiVertical ) { dpiHorizontal = dpiVertical = 96; }; void TestRenderSurface::InitializeGraphics() { } void TestRenderSurface::CreateSurface() { } void TestRenderSurface::DestroySurface() { } bool TestRenderSurface::ReplaceGraphicsSurface() { return false; } void TestRenderSurface::MoveResize( Dali::PositionSize positionSize ) { mPositionSize = positionSize; } void TestRenderSurface::StartRender() { } bool TestRenderSurface::PreRender( bool resizingSurface ) { return true; } void TestRenderSurface::PostRender( bool renderToFbo, bool replacingSurface, bool resizingSurface ) { } void TestRenderSurface::StopRender() { } void TestRenderSurface::ReleaseLock() { } Dali::Integration::RenderSurface::Type TestRenderSurface::GetSurfaceType() { return WINDOW_RENDER_SURFACE; } void TestRenderSurface::MakeContextCurrent() { } Integration::DepthBufferAvailable TestRenderSurface::GetDepthBufferRequired() { return Integration::DepthBufferAvailable::TRUE; } Integration::StencilBufferAvailable TestRenderSurface::GetStencilBufferRequired() { return Integration::StencilBufferAvailable::TRUE; } void TestRenderSurface::SetBackgroundColor( Vector4 color ) { mBackgroundColor = color; } Vector4 TestRenderSurface::GetBackgroundColor() { return mBackgroundColor; } } // Namespace dali
2,324
734
//------------------------------------------------------------------------------ /* This file is part of rippled: https://github.com/ripple/rippled Copyright (c) 2016 Ripple Labs Inc. Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ //============================================================================== #include <ripple/beast/unit_test.h> #include <ripple/protocol/SField.h> #include <ripple/protocol/jss.h> #include <cstdlib> #include <test/jtx.h> #include <ripple/rpc/GRPCHandlers.h> #include <ripple/rpc/impl/RPCHelpers.h> #include <test/rpc/GRPCTestClientBase.h> namespace ripple { class AccountTxPaging_test : public beast::unit_test::suite { bool checkTransaction(Json::Value const& tx, int sequence, int ledger) { return ( tx[jss::tx][jss::Sequence].asInt() == sequence && tx[jss::tx][jss::ledger_index].asInt() == ledger); } auto next( test::jtx::Env& env, test::jtx::Account const& account, int ledger_min, int ledger_max, int limit, bool forward, Json::Value const& marker = Json::nullValue) { Json::Value jvc; jvc[jss::account] = account.human(); jvc[jss::ledger_index_min] = ledger_min; jvc[jss::ledger_index_max] = ledger_max; jvc[jss::forward] = forward; jvc[jss::limit] = limit; if (marker) jvc[jss::marker] = marker; return env.rpc("json", "account_tx", to_string(jvc))[jss::result]; } void testAccountTxPaging() { testcase("Paging for Single Account"); using namespace test::jtx; Env env(*this); Account A1{"A1"}; Account A2{"A2"}; Account A3{"A3"}; env.fund(XRP(10000), A1, A2, A3); env.close(); env.trust(A3["USD"](1000), A1); env.trust(A2["USD"](1000), A1); env.trust(A3["USD"](1000), A2); env.close(); for (auto i = 0; i < 5; ++i) { env(pay(A2, A1, A2["USD"](2))); env(pay(A3, A1, A3["USD"](2))); env(offer(A1, XRP(11), A1["USD"](1))); env(offer(A2, XRP(10), A2["USD"](1))); env(offer(A3, XRP(9), A3["USD"](1))); env.close(); } /* The sequence/ledger for A3 are as follows: * seq ledger_index * 3 ----> 3 * 1 ----> 3 * 2 ----> 4 * 2 ----> 4 * 2 ----> 5 * 3 ----> 5 * 4 ----> 6 * 5 ----> 6 * 6 ----> 7 * 7 ----> 7 * 8 ----> 8 * 9 ----> 8 * 10 ----> 9 * 11 ----> 9 */ // page through the results in several ways. 
{ // limit = 2, 3 batches giving the first 6 txs auto jrr = next(env, A3, 2, 5, 2, true); auto txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); BEAST_EXPECT(checkTransaction(txs[1u], 3, 3)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 2, 5, 2, true, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); BEAST_EXPECT(checkTransaction(txs[1u], 4, 4)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 2, 5, 2, true, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 4, 5)); BEAST_EXPECT(checkTransaction(txs[1u], 5, 5)); BEAST_EXPECT(!jrr[jss::marker]); } { // limit 1, 3 requests giving the first 3 txs auto jrr = next(env, A3, 3, 9, 1, true); auto txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 1)) return; BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 3, 9, 1, true, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 1)) return; BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 3, 9, 1, true, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 1)) return; BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); if (!BEAST_EXPECT(jrr[jss::marker])) return; // continue with limit 3, to end of all txs jrr = next(env, A3, 3, 9, 3, true, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); BEAST_EXPECT(checkTransaction(txs[1u], 4, 5)); BEAST_EXPECT(checkTransaction(txs[2u], 5, 5)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 3, 9, 3, true, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 6, 6)); BEAST_EXPECT(checkTransaction(txs[1u], 7, 6)); BEAST_EXPECT(checkTransaction(txs[2u], 8, 7)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 3, 9, 3, true, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 9, 7)); BEAST_EXPECT(checkTransaction(txs[1u], 10, 8)); BEAST_EXPECT(checkTransaction(txs[2u], 11, 8)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 3, 9, 3, true, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 12, 9)); BEAST_EXPECT(checkTransaction(txs[1u], 13, 9)); BEAST_EXPECT(!jrr[jss::marker]); } { // limit 2, descending, 2 batches giving last 4 txs auto jrr = next(env, A3, 3, 9, 2, false); auto txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 13, 9)); BEAST_EXPECT(checkTransaction(txs[1u], 12, 9)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 3, 9, 2, false, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 11, 8)); BEAST_EXPECT(checkTransaction(txs[1u], 10, 8)); if 
(!BEAST_EXPECT(jrr[jss::marker])) return; // continue with limit 3 until all txs have been seen jrr = next(env, A3, 3, 9, 3, false, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 9, 7)); BEAST_EXPECT(checkTransaction(txs[1u], 8, 7)); BEAST_EXPECT(checkTransaction(txs[2u], 7, 6)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 3, 9, 3, false, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 6, 6)); BEAST_EXPECT(checkTransaction(txs[1u], 5, 5)); BEAST_EXPECT(checkTransaction(txs[2u], 4, 5)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 3, 9, 3, false, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); BEAST_EXPECT(checkTransaction(txs[1u], 4, 4)); BEAST_EXPECT(checkTransaction(txs[2u], 3, 3)); if (!BEAST_EXPECT(jrr[jss::marker])) return; jrr = next(env, A3, 3, 9, 3, false, jrr[jss::marker]); txs = jrr[jss::transactions]; if (!BEAST_EXPECT(txs.isArray() && txs.size() == 1)) return; BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); BEAST_EXPECT(!jrr[jss::marker]); } } class GrpcAccountTxClient : public test::GRPCTestClientBase { public: org::xrpl::rpc::v1::GetAccountTransactionHistoryRequest request; org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse reply; explicit GrpcAccountTxClient(std::string const& port) : GRPCTestClientBase(port) { } void AccountTx() { status = stub_->GetAccountTransactionHistory(&context, request, &reply); } }; bool checkTransaction( org::xrpl::rpc::v1::GetTransactionResponse const& tx, int sequence, int ledger) { return ( tx.transaction().sequence().value() == sequence && tx.ledger_index() == ledger); } std::pair< org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, grpc::Status> nextBinary( std::string grpcPort, test::jtx::Env& env, std::string const& account = "", int ledger_min = -1, int ledger_max = -1, int limit = -1, bool forward = false, org::xrpl::rpc::v1::Marker* marker = nullptr) { GrpcAccountTxClient client{grpcPort}; auto& request = client.request; if (account != "") request.mutable_account()->set_address(account); if (ledger_min != -1) request.mutable_ledger_range()->set_ledger_index_min(ledger_min); if (ledger_max != -1) request.mutable_ledger_range()->set_ledger_index_max(ledger_max); request.set_forward(forward); request.set_binary(true); if (limit != -1) request.set_limit(limit); if (marker) { *request.mutable_marker() = *marker; } client.AccountTx(); return {client.reply, client.status}; } std::pair< org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, grpc::Status> next( std::string grpcPort, test::jtx::Env& env, std::string const& account = "", int ledger_min = -1, int ledger_max = -1, int limit = -1, bool forward = false, org::xrpl::rpc::v1::Marker* marker = nullptr) { GrpcAccountTxClient client{grpcPort}; auto& request = client.request; if (account != "") request.mutable_account()->set_address(account); if (ledger_min != -1) request.mutable_ledger_range()->set_ledger_index_min(ledger_min); if (ledger_max != -1) request.mutable_ledger_range()->set_ledger_index_max(ledger_max); request.set_forward(forward); if (limit != -1) request.set_limit(limit); if (marker) { *request.mutable_marker() = *marker; } client.AccountTx(); return {client.reply, client.status}; } std::pair< 
org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, grpc::Status> nextWithSeq( std::string grpcPort, test::jtx::Env& env, std::string const& account = "", int ledger_seq = -1, int limit = -1, bool forward = false, org::xrpl::rpc::v1::Marker* marker = nullptr) { GrpcAccountTxClient client{grpcPort}; auto& request = client.request; if (account != "") request.mutable_account()->set_address(account); if (ledger_seq != -1) request.mutable_ledger_specifier()->set_sequence(ledger_seq); request.set_forward(forward); if (limit != -1) request.set_limit(limit); if (marker) { *request.mutable_marker() = *marker; } client.AccountTx(); return {client.reply, client.status}; } std::pair< org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, grpc::Status> nextWithHash( std::string grpcPort, test::jtx::Env& env, std::string const& account = "", uint256 const& hash = beast::zero, int limit = -1, bool forward = false, org::xrpl::rpc::v1::Marker* marker = nullptr) { GrpcAccountTxClient client{grpcPort}; auto& request = client.request; if (account != "") request.mutable_account()->set_address(account); if (hash != beast::zero) request.mutable_ledger_specifier()->set_hash( hash.data(), hash.size()); request.set_forward(forward); if (limit != -1) request.set_limit(limit); if (marker) { *request.mutable_marker() = *marker; } client.AccountTx(); return {client.reply, client.status}; } void testAccountTxParametersGrpc() { testcase("Test Account_tx Grpc"); using namespace test::jtx; std::unique_ptr<Config> config = envconfig(addGrpcConfig); std::string grpcPort = *(*config)["port_grpc"].get<std::string>("port"); Env env(*this, std::move(config)); Account A1{"A1"}; env.fund(XRP(10000), A1); env.close(); // Ledger 3 has the two txs associated with funding the account // All other ledgers have no txs auto hasTxs = [](auto res) { return res.second.error_code() == 0 && (res.first.transactions().size() == 2) && //(res.transactions()[0u].transaction().has_account_set()) && (res.first.transactions()[1u].transaction().has_payment()); }; auto noTxs = [](auto res) { return res.second.error_code() == 0 && (res.first.transactions().size() == 0); }; auto isErr = [](auto res, auto expect) { return res.second.error_code() == expect; }; BEAST_EXPECT( isErr(next(grpcPort, env, ""), grpc::StatusCode::INVALID_ARGUMENT)); BEAST_EXPECT(isErr( next(grpcPort, env, "0xDEADBEEF"), grpc::StatusCode::INVALID_ARGUMENT)); BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human()))); // Ledger min/max index { BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human()))); BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human(), 0, 100))); BEAST_EXPECT(noTxs(next(grpcPort, env, A1.human(), 1, 2))); BEAST_EXPECT(isErr( next(grpcPort, env, A1.human(), 2, 1), grpc::StatusCode::INVALID_ARGUMENT)); } // Ledger index min only { BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human(), -1))); BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human(), 1))); BEAST_EXPECT(isErr( next(grpcPort, env, A1.human(), env.current()->info().seq), grpc::StatusCode::INVALID_ARGUMENT)); } // Ledger index max only { BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human(), -1, -1))); BEAST_EXPECT(hasTxs(next( grpcPort, env, A1.human(), -1, env.current()->info().seq))); BEAST_EXPECT(hasTxs( next(grpcPort, env, A1.human(), -1, env.closed()->info().seq))); BEAST_EXPECT(noTxs(next( grpcPort, env, A1.human(), -1, env.closed()->info().seq - 1))); } // Ledger Sequence { BEAST_EXPECT(hasTxs(nextWithSeq( grpcPort, env, A1.human(), env.closed()->info().seq))); BEAST_EXPECT(noTxs(nextWithSeq( grpcPort, env, 
A1.human(), env.closed()->info().seq - 1))); BEAST_EXPECT(isErr( nextWithSeq( grpcPort, env, A1.human(), env.current()->info().seq), grpc::StatusCode::INVALID_ARGUMENT)); BEAST_EXPECT(isErr( nextWithSeq( grpcPort, env, A1.human(), env.current()->info().seq + 1), grpc::StatusCode::NOT_FOUND)); } // Ledger Hash { BEAST_EXPECT(hasTxs(nextWithHash( grpcPort, env, A1.human(), env.closed()->info().hash))); BEAST_EXPECT(noTxs(nextWithHash( grpcPort, env, A1.human(), env.closed()->info().parentHash))); } } struct TxCheck { uint32_t sequence; uint32_t ledgerIndex; std::string hash; std::function<bool(org::xrpl::rpc::v1::Transaction const& res)> checkTxn; }; void testAccountTxContentsGrpc() { testcase("Test AccountTx context grpc"); // Get results for all transaction types that can be associated // with an account. Start by generating all transaction types. using namespace test::jtx; using namespace std::chrono_literals; std::unique_ptr<Config> config = envconfig(addGrpcConfig); std::string grpcPort = *(*config)["port_grpc"].get<std::string>("port"); Env env(*this, std::move(config)); // Set time to this value (or greater) to get delivered_amount in meta env.timeKeeper().set(NetClock::time_point{446000001s}); Account const alice{"alice"}; Account const alie{"alie"}; Account const gw{"gw"}; auto const USD{gw["USD"]}; std::vector<std::shared_ptr<STTx const>> txns; env.fund(XRP(1000000), alice, gw); env.close(); // AccountSet env(noop(alice)); txns.emplace_back(env.tx()); // Payment env(pay(alice, gw, XRP(100)), stag(42), dtag(24), last_ledger_seq(20)); txns.emplace_back(env.tx()); // Regular key set env(regkey(alice, alie)); env.close(); txns.emplace_back(env.tx()); // Trust and Offers env(trust(alice, USD(200)), sig(alie)); txns.emplace_back(env.tx()); std::uint32_t const offerSeq{env.seq(alice)}; env(offer(alice, USD(50), XRP(150)), sig(alie)); txns.emplace_back(env.tx()); env.close(); env(offer_cancel(alice, offerSeq), sig(alie)); env.close(); txns.emplace_back(env.tx()); // SignerListSet env(signers(alice, 1, {{"bogie", 1}, {"demon", 1}, {gw, 1}}), sig(alie)); txns.emplace_back(env.tx()); // Escrow { // Create an escrow. Requires either a CancelAfter or FinishAfter. 
auto escrow = [](Account const& account, Account const& to, STAmount const& amount) { Json::Value escro; escro[jss::TransactionType] = jss::EscrowCreate; escro[jss::Flags] = tfUniversal; escro[jss::Account] = account.human(); escro[jss::Destination] = to.human(); escro[jss::Amount] = amount.getJson(JsonOptions::none); return escro; }; NetClock::time_point const nextTime{env.now() + 2s}; Json::Value escrowWithFinish{escrow(alice, alice, XRP(500))}; escrowWithFinish[sfFinishAfter.jsonName] = nextTime.time_since_epoch().count(); std::uint32_t const escrowFinishSeq{env.seq(alice)}; env(escrowWithFinish, sig(alie)); txns.emplace_back(env.tx()); Json::Value escrowWithCancel{escrow(alice, alice, XRP(500))}; escrowWithCancel[sfFinishAfter.jsonName] = nextTime.time_since_epoch().count(); escrowWithCancel[sfCancelAfter.jsonName] = nextTime.time_since_epoch().count() + 1; std::uint32_t const escrowCancelSeq{env.seq(alice)}; env(escrowWithCancel, sig(alie)); env.close(); txns.emplace_back(env.tx()); { Json::Value escrowFinish; escrowFinish[jss::TransactionType] = jss::EscrowFinish; escrowFinish[jss::Flags] = tfUniversal; escrowFinish[jss::Account] = alice.human(); escrowFinish[sfOwner.jsonName] = alice.human(); escrowFinish[sfOfferSequence.jsonName] = escrowFinishSeq; env(escrowFinish, sig(alie)); txns.emplace_back(env.tx()); } { Json::Value escrowCancel; escrowCancel[jss::TransactionType] = jss::EscrowCancel; escrowCancel[jss::Flags] = tfUniversal; escrowCancel[jss::Account] = alice.human(); escrowCancel[sfOwner.jsonName] = alice.human(); escrowCancel[sfOfferSequence.jsonName] = escrowCancelSeq; env(escrowCancel, sig(alie)); txns.emplace_back(env.tx()); } env.close(); } // PayChan { std::uint32_t payChanSeq{env.seq(alice)}; Json::Value payChanCreate; payChanCreate[jss::TransactionType] = jss::PaymentChannelCreate; payChanCreate[jss::Flags] = tfUniversal; payChanCreate[jss::Account] = alice.human(); payChanCreate[jss::Destination] = gw.human(); payChanCreate[jss::Amount] = XRP(500).value().getJson(JsonOptions::none); payChanCreate[sfSettleDelay.jsonName] = NetClock::duration{100s}.count(); payChanCreate[sfPublicKey.jsonName] = strHex(alice.pk().slice()); env(payChanCreate, sig(alie)); env.close(); txns.emplace_back(env.tx()); std::string const payChanIndex{ strHex(keylet::payChan(alice, gw, payChanSeq).key)}; { Json::Value payChanFund; payChanFund[jss::TransactionType] = jss::PaymentChannelFund; payChanFund[jss::Flags] = tfUniversal; payChanFund[jss::Account] = alice.human(); payChanFund[sfChannel.jsonName] = payChanIndex; payChanFund[jss::Amount] = XRP(200).value().getJson(JsonOptions::none); env(payChanFund, sig(alie)); env.close(); txns.emplace_back(env.tx()); } { Json::Value payChanClaim; payChanClaim[jss::TransactionType] = jss::PaymentChannelClaim; payChanClaim[jss::Flags] = tfClose; payChanClaim[jss::Account] = gw.human(); payChanClaim[sfChannel.jsonName] = payChanIndex; payChanClaim[sfPublicKey.jsonName] = strHex(alice.pk().slice()); env(payChanClaim); env.close(); txns.emplace_back(env.tx()); } } // Check { auto const aliceCheckId = keylet::check(alice, env.seq(alice)).key; env(check::create(alice, gw, XRP(300)), sig(alie)); auto txn = env.tx(); auto const gwCheckId = keylet::check(gw, env.seq(gw)).key; env(check::create(gw, alice, XRP(200))); env.close(); // need to switch the order of the previous 2 txns, since they are // in the same ledger and account_tx returns them in a different // order txns.emplace_back(env.tx()); txns.emplace_back(txn); env(check::cash(alice, gwCheckId, XRP(200)), 
sig(alie)); txns.emplace_back(env.tx()); env(check::cancel(alice, aliceCheckId), sig(alie)); txns.emplace_back(env.tx()); env.close(); } // Deposit preauthorization. env(deposit::auth(alice, gw), sig(alie)); env.close(); txns.emplace_back(env.tx()); // Multi Sig with memo auto const baseFee = env.current()->fees().base; env(noop(alice), msig(gw), fee(2 * baseFee), memo("data", "format", "type")); env.close(); txns.emplace_back(env.tx()); if (!BEAST_EXPECT(txns.size() == 20)) return; // Setup is done. Look at the transactions returned by account_tx. static const TxCheck txCheck[]{ {21, 15, strHex(txns[txns.size() - 1]->getTransactionID()), [this, &txns](auto res) { auto txnJson = txns[txns.size() - 1]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_account_set()) && BEAST_EXPECT(res.has_fee()) && BEAST_EXPECT(res.fee().drops() == 20) && BEAST_EXPECT(res.memos_size() == 1) && BEAST_EXPECT(res.memos(0).has_memo_data()) && BEAST_EXPECT(res.memos(0).memo_data().value() == "data") && BEAST_EXPECT(res.memos(0).has_memo_format()) && BEAST_EXPECT( res.memos(0).memo_format().value() == "format") && BEAST_EXPECT(res.memos(0).has_memo_type()) && BEAST_EXPECT(res.memos(0).memo_type().value() == "type") && BEAST_EXPECT(res.has_signing_public_key()) && BEAST_EXPECT(res.signing_public_key().value() == "") && BEAST_EXPECT(res.signers_size() == 1) && BEAST_EXPECT(res.signers(0).has_account()) && BEAST_EXPECT( res.signers(0).account().value().address() == txnJson["Signers"][0u]["Signer"]["Account"]) && BEAST_EXPECT(res.signers(0).has_transaction_signature()) && BEAST_EXPECT( strHex(res.signers(0) .transaction_signature() .value()) == txnJson["Signers"][0u]["Signer"]["TxnSignature"]) && BEAST_EXPECT(res.signers(0).has_signing_public_key()) && BEAST_EXPECT( strHex( res.signers(0).signing_public_key().value()) == txnJson["Signers"][0u]["Signer"]["SigningPubKey"]); }}, {20, 14, strHex(txns[txns.size() - 2]->getTransactionID()), [&txns, this](auto res) { return BEAST_EXPECT(res.has_deposit_preauth()) && BEAST_EXPECT( res.deposit_preauth() .authorize() .value() .address() == // TODO do them all like this txns[txns.size() - 2]->getJson( JsonOptions::none)["Authorize"]); }}, {19, 13, strHex(txns[txns.size() - 3]->getTransactionID()), [&txns, this](auto res) { return BEAST_EXPECT(res.has_check_cancel()) && BEAST_EXPECT( strHex(res.check_cancel().check_id().value()) == txns[txns.size() - 3]->getJson( JsonOptions::none)["CheckID"]); }}, {18, 13, strHex(txns[txns.size() - 4]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 4]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_check_cash()) && BEAST_EXPECT( strHex(res.check_cash().check_id().value()) == txnJson["CheckID"]) && BEAST_EXPECT(res.check_cash() .amount() .value() .has_xrp_amount()) && BEAST_EXPECT( res.check_cash() .amount() .value() .xrp_amount() .drops() == txnJson["Amount"].asUInt()); }}, {17, 12, strHex(txns[txns.size() - 5]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 5]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_check_create()) && BEAST_EXPECT( res.check_create() .destination() .value() .address() == txnJson["Destination"]) && BEAST_EXPECT(res.check_create() .send_max() .value() .has_xrp_amount()) && BEAST_EXPECT( res.check_create() .send_max() .value() .xrp_amount() .drops() == txnJson["SendMax"].asUInt()); }}, {5, 12, strHex(txns[txns.size() - 6]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 
6]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_check_create()) && BEAST_EXPECT( res.check_create() .destination() .value() .address() == txnJson["Destination"]) && BEAST_EXPECT(res.check_create() .send_max() .value() .has_xrp_amount()) && BEAST_EXPECT( res.check_create() .send_max() .value() .xrp_amount() .drops() == txnJson["SendMax"].asUInt()); }}, {4, 11, strHex(txns[txns.size() - 7]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 7]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_payment_channel_claim()) && BEAST_EXPECT( strHex(res.payment_channel_claim() .channel() .value()) == txnJson["Channel"]) && BEAST_EXPECT( strHex(res.payment_channel_claim() .public_key() .value()) == txnJson["PublicKey"]); }}, {16, 10, strHex(txns[txns.size() - 8]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 8]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_payment_channel_fund()) && BEAST_EXPECT( strHex( res.payment_channel_fund().channel().value()) == txnJson["Channel"]) && BEAST_EXPECT(res.payment_channel_fund() .amount() .value() .has_xrp_amount()) && BEAST_EXPECT( res.payment_channel_fund() .amount() .value() .xrp_amount() .drops() == txnJson["Amount"].asUInt()); }}, {15, 9, strHex(txns[txns.size() - 9]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 9]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_payment_channel_create()) && BEAST_EXPECT(res.payment_channel_create() .amount() .value() .has_xrp_amount()) && BEAST_EXPECT( res.payment_channel_create() .amount() .value() .xrp_amount() .drops() == txnJson["Amount"].asUInt()) && BEAST_EXPECT( res.payment_channel_create() .destination() .value() .address() == txnJson["Destination"]) && BEAST_EXPECT( res.payment_channel_create() .settle_delay() .value() == txnJson["SettleDelay"].asUInt()) && BEAST_EXPECT( strHex(res.payment_channel_create() .public_key() .value()) == txnJson["PublicKey"]); }}, {14, 8, strHex(txns[txns.size() - 10]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 10]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_escrow_cancel()) && BEAST_EXPECT( res.escrow_cancel().owner().value().address() == txnJson["Owner"]) && BEAST_EXPECT( res.escrow_cancel().offer_sequence().value() == txnJson["OfferSequence"].asUInt() ); }}, {13, 8, strHex(txns[txns.size() - 11]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 11]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_escrow_finish()) && BEAST_EXPECT( res.escrow_finish().owner().value().address() == txnJson["Owner"]) && BEAST_EXPECT( res.escrow_finish().offer_sequence().value() == txnJson["OfferSequence"].asUInt() ); }}, {12, 7, strHex(txns[txns.size() - 12]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 12]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_escrow_create()) && BEAST_EXPECT(res.escrow_create() .amount() .value() .has_xrp_amount()) && BEAST_EXPECT( res.escrow_create() .amount() .value() .xrp_amount() .drops() == txnJson["Amount"].asUInt()) && BEAST_EXPECT( res.escrow_create() .destination() .value() .address() == txnJson["Destination"]) && BEAST_EXPECT( res.escrow_create().cancel_after().value() == txnJson["CancelAfter"].asUInt()) && BEAST_EXPECT( res.escrow_create().finish_after().value() == txnJson["FinishAfter"].asUInt()); }}, {11, 7, strHex(txns[txns.size() - 13]->getTransactionID()), [&txns, this](auto res) { auto txnJson 
= txns[txns.size() - 13]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_escrow_create()) && BEAST_EXPECT(res.escrow_create() .amount() .value() .has_xrp_amount()) && BEAST_EXPECT( res.escrow_create() .amount() .value() .xrp_amount() .drops() == txnJson["Amount"].asUInt()) && BEAST_EXPECT( res.escrow_create() .destination() .value() .address() == txnJson["Destination"]) && BEAST_EXPECT( res.escrow_create().finish_after().value() == txnJson["FinishAfter"].asUInt()); }}, {10, 7, strHex(txns[txns.size() - 14]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 14]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_signer_list_set()) && BEAST_EXPECT( res.signer_list_set().signer_quorum().value() == txnJson["SignerQuorum"].asUInt()) && BEAST_EXPECT( res.signer_list_set().signer_entries().size() == 3) && BEAST_EXPECT( res.signer_list_set() .signer_entries()[0] .account() .value() .address() == txnJson["SignerEntries"][0u]["SignerEntry"] ["Account"]) && BEAST_EXPECT( res.signer_list_set() .signer_entries()[0] .signer_weight() .value() == txnJson["SignerEntries"][0u]["SignerEntry"] ["SignerWeight"] .asUInt()) && BEAST_EXPECT( res.signer_list_set() .signer_entries()[1] .account() .value() .address() == txnJson["SignerEntries"][1u]["SignerEntry"] ["Account"]) && BEAST_EXPECT( res.signer_list_set() .signer_entries()[1] .signer_weight() .value() == txnJson["SignerEntries"][1u]["SignerEntry"] ["SignerWeight"] .asUInt()) && BEAST_EXPECT( res.signer_list_set() .signer_entries()[2] .account() .value() .address() == txnJson["SignerEntries"][2u]["SignerEntry"] ["Account"]) && BEAST_EXPECT( res.signer_list_set() .signer_entries()[2] .signer_weight() .value() == txnJson["SignerEntries"][2u]["SignerEntry"] ["SignerWeight"] .asUInt()); }}, {9, 6, strHex(txns[txns.size() - 15]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 15]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_offer_cancel()) && BEAST_EXPECT( res.offer_cancel().offer_sequence().value() == txnJson["OfferSequence"].asUInt()); }}, {8, 5, strHex(txns[txns.size() - 16]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 16]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_offer_create()) && BEAST_EXPECT(res.offer_create() .taker_gets() .value() .has_xrp_amount()) && BEAST_EXPECT( res.offer_create() .taker_gets() .value() .xrp_amount() .drops() == txnJson["TakerGets"].asUInt()) && BEAST_EXPECT(res.offer_create() .taker_pays() .value() .has_issued_currency_amount()) && BEAST_EXPECT( res.offer_create() .taker_pays() .value() .issued_currency_amount() .currency() .name() == txnJson["TakerPays"]["currency"]) && BEAST_EXPECT( res.offer_create() .taker_pays() .value() .issued_currency_amount() .value() == txnJson["TakerPays"]["value"]) && BEAST_EXPECT( res.offer_create() .taker_pays() .value() .issued_currency_amount() .issuer() .address() == txnJson["TakerPays"]["issuer"]); }}, {7, 5, strHex(txns[txns.size() - 17]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 17]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_trust_set()) && BEAST_EXPECT(res.trust_set() .limit_amount() .value() .has_issued_currency_amount()) && BEAST_EXPECT( res.trust_set() .limit_amount() .value() .issued_currency_amount() .currency() .name() == txnJson["LimitAmount"]["currency"]) && BEAST_EXPECT( res.trust_set() .limit_amount() .value() .issued_currency_amount() .value() == txnJson["LimitAmount"]["value"]) && 
BEAST_EXPECT( res.trust_set() .limit_amount() .value() .issued_currency_amount() .issuer() .address() == txnJson["LimitAmount"]["issuer"]); }}, {6, 4, strHex(txns[txns.size() - 18]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 18]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_set_regular_key()) && BEAST_EXPECT( res.set_regular_key() .regular_key() .value() .address() == txnJson["RegularKey"]); }}, {5, 4, strHex(txns[txns.size() - 19]->getTransactionID()), [&txns, this](auto res) { auto txnJson = txns[txns.size() - 19]->getJson(JsonOptions::none); return BEAST_EXPECT(res.has_payment()) && BEAST_EXPECT( res.payment().amount().value().has_xrp_amount()) && BEAST_EXPECT( res.payment() .amount() .value() .xrp_amount() .drops() == txnJson["Amount"].asUInt()) && BEAST_EXPECT( res.payment().destination().value().address() == txnJson["Destination"]) && BEAST_EXPECT(res.has_source_tag()) && BEAST_EXPECT( res.source_tag().value() == txnJson["SourceTag"].asUInt()) && BEAST_EXPECT(res.payment().has_destination_tag()) && BEAST_EXPECT( res.payment().destination_tag().value() == txnJson["DestinationTag"].asUInt()) && BEAST_EXPECT(res.has_last_ledger_sequence()) && BEAST_EXPECT( res.last_ledger_sequence().value() == txnJson["LastLedgerSequence"].asUInt()) && BEAST_EXPECT(res.has_transaction_signature()) && BEAST_EXPECT(res.has_account()) && BEAST_EXPECT( res.account().value().address() == txnJson["Account"]) && BEAST_EXPECT(res.has_flags()) && BEAST_EXPECT( res.flags().value() == txnJson["Flags"].asUInt()); }}, {4, 4, strHex(txns[txns.size() - 20]->getTransactionID()), [this](auto res) { return BEAST_EXPECT(res.has_account_set()); }}, {3, 3, "9CE54C3B934E473A995B477E92EC229F99CED5B62BF4D2ACE4DC42719103AE2F", [this](auto res) { return BEAST_EXPECT(res.has_account_set()) && BEAST_EXPECT(res.account_set().set_flag().value() == 8); }}, {1, 3, "2B5054734FA43C6C7B54F61944FAD6178ACD5D0272B39BA7FCD32A5D3932FBFF", [&alice, this](auto res) { return BEAST_EXPECT(res.has_payment()) && BEAST_EXPECT( res.payment().amount().value().has_xrp_amount()) && BEAST_EXPECT( res.payment() .amount() .value() .xrp_amount() .drops() == 1000000000010) && BEAST_EXPECT( res.payment().destination().value().address() == alice.human()); }}}; using MetaCheck = std::function<bool(org::xrpl::rpc::v1::Meta const& res)>; static const MetaCheck txMetaCheck[]{ {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](org::xrpl::rpc::v1::AffectedNode const& entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 3) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DEPOSIT_PREAUTH; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: 
LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 1) && BEAST_EXPECT(meta.affected_nodes_size() == 5) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_CHECK; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 2); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 5) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_CHECK; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 2); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 1) && BEAST_EXPECT(meta.affected_nodes_size() == 5) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_CHECK; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 2); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 5) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_CHECK; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 2); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 5) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_PAY_CHANNEL; 
}) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 2); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_PAY_CHANNEL; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 5) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_PAY_CHANNEL; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 2); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 1) && BEAST_EXPECT(meta.affected_nodes_size() == 3) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ESCROW; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 3) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ESCROW; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 2) && BEAST_EXPECT(meta.affected_nodes_size() == 3) && 
BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ESCROW; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 1) && BEAST_EXPECT(meta.affected_nodes_size() == 3) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ESCROW; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 3) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_SIGNER_LIST; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 4) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_OFFER; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 1) && BEAST_EXPECT(meta.affected_nodes_size() == 4) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == 
org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_OFFER; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 5) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_DIRECTORY_NODE; }) == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_RIPPLE_STATE; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 2) && BEAST_EXPECT(meta.affected_nodes_size() == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 1) && BEAST_EXPECT(meta.affected_nodes_size() == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 2); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 2) && BEAST_EXPECT(meta.affected_nodes_size() == 1) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 1); }}, {[this](auto meta) { return BEAST_EXPECT(meta.transaction_index() == 0) && BEAST_EXPECT(meta.affected_nodes_size() == 2) && BEAST_EXPECT( std::count_if( meta.affected_nodes().begin(), meta.affected_nodes().end(), [](auto entry) { return entry.ledger_entry_type() == org::xrpl::rpc::v1::LedgerEntryType:: LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; }) == 2); }}}; auto doCheck = [this](auto txn, auto txCheck) { return BEAST_EXPECT(txn.has_transaction()) && BEAST_EXPECT(txn.validated()) && BEAST_EXPECT(strHex(txn.hash()) == txCheck.hash) && BEAST_EXPECT(txn.ledger_index() == txCheck.ledgerIndex) && BEAST_EXPECT( txn.transaction().sequence().value() == txCheck.sequence) && txCheck.checkTxn(txn.transaction()); }; auto doMetaCheck = [this](auto txn, auto txMetaCheck) { return BEAST_EXPECT(txn.has_meta()) && BEAST_EXPECT(txn.meta().has_transaction_result()) && BEAST_EXPECT( txn.meta().transaction_result().result_type() == org::xrpl::rpc::v1::TransactionResult:: RESULT_TYPE_TES) && BEAST_EXPECT( txn.meta().transaction_result().result() == "tesSUCCESS") && 
txMetaCheck(txn.meta()); }; auto [res, status] = next(grpcPort, env, alice.human()); if (!BEAST_EXPECT(status.error_code() == 0)) return; if (!BEAST_EXPECT(res.transactions().size() == std::size(txCheck))) return; for (int i = 0; i < res.transactions().size(); ++i) { BEAST_EXPECT(doCheck(res.transactions()[i], txCheck[i])); BEAST_EXPECT(doMetaCheck(res.transactions()[i], txMetaCheck[i])); } // test binary representation std::tie(res, status) = nextBinary(grpcPort, env, alice.human()); // txns vector does not contain the first two transactions returned by // account_tx if (!BEAST_EXPECT(res.transactions().size() == txns.size() + 2)) return; std::reverse(txns.begin(), txns.end()); for (int i = 0; i < txns.size(); ++i) { auto toByteString = [](auto data) { const char* bytes = reinterpret_cast<const char*>(data.data()); return std::string(bytes, data.size()); }; auto tx = txns[i]; Serializer s = tx->getSerializer(); std::string bin = toByteString(s); BEAST_EXPECT(res.transactions(i).transaction_binary() == bin); } } void testAccountTxPagingGrpc() { testcase("Test Account_tx Grpc"); using namespace test::jtx; std::unique_ptr<Config> config = envconfig(addGrpcConfig); std::string grpcPort = *(*config)["port_grpc"].get<std::string>("port"); Env env(*this, std::move(config)); Account A1{"A1"}; Account A2{"A2"}; Account A3{"A3"}; env.fund(XRP(10000), A1, A2, A3); env.close(); env.trust(A3["USD"](1000), A1); env.trust(A2["USD"](1000), A1); env.trust(A3["USD"](1000), A2); env.close(); for (auto i = 0; i < 5; ++i) { env(pay(A2, A1, A2["USD"](2))); env(pay(A3, A1, A3["USD"](2))); env(offer(A1, XRP(11), A1["USD"](1))); env(offer(A2, XRP(10), A2["USD"](1))); env(offer(A3, XRP(9), A3["USD"](1))); env.close(); } /* The sequence/ledger for A3 are as follows: * seq ledger_index * 3 ----> 3 * 1 ----> 3 * 2 ----> 4 * 2 ----> 4 * 2 ----> 5 * 3 ----> 5 * 4 ----> 6 * 5 ----> 6 * 6 ----> 7 * 7 ----> 7 * 8 ----> 8 * 9 ----> 8 * 10 ----> 9 * 11 ----> 9 */ // page through the results in several ways. 
{ // limit = 2, 3 batches giving the first 6 txs auto [res, status] = next(grpcPort, env, A3.human(), 2, 5, 2, true); auto txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); BEAST_EXPECT(checkTransaction(txs[1u], 3, 3)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 2, 5, 2, true, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); BEAST_EXPECT(checkTransaction(txs[1u], 4, 4)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 2, 5, 2, true, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 4, 5)); BEAST_EXPECT(checkTransaction(txs[1u], 5, 5)); BEAST_EXPECT(!res.has_marker()); return; } { // limit 1, 3 requests giving the first 3 txs auto [res, status] = next(grpcPort, env, A3.human(), 3, 9, 1, true); auto txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 1)) return; BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 1, true, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 1)) return; BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 1, true, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 1)) return; BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); if (!BEAST_EXPECT(res.has_marker())) return; // continue with limit 3, to end of all txs std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 3, true, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); BEAST_EXPECT(checkTransaction(txs[1u], 4, 5)); BEAST_EXPECT(checkTransaction(txs[2u], 5, 5)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 3, true, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 6, 6)); BEAST_EXPECT(checkTransaction(txs[1u], 7, 6)); BEAST_EXPECT(checkTransaction(txs[2u], 8, 7)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 3, true, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 9, 7)); BEAST_EXPECT(checkTransaction(txs[1u], 10, 8)); BEAST_EXPECT(checkTransaction(txs[2u], 11, 8)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 3, true, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 12, 9)); BEAST_EXPECT(checkTransaction(txs[1u], 13, 9)); BEAST_EXPECT(!res.has_marker()); } { // limit 2, descending, 2 batches giving last 4 txs auto [res, status] = next(grpcPort, env, A3.human(), 3, 9, 2, false); auto txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 13, 9)); BEAST_EXPECT(checkTransaction(txs[1u], 12, 9)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 2, false, 
res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 2)) return; BEAST_EXPECT(checkTransaction(txs[0u], 11, 8)); BEAST_EXPECT(checkTransaction(txs[1u], 10, 8)); if (!BEAST_EXPECT(res.has_marker())) return; // continue with limit 3 until all txs have been seen std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 3, false, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 9, 7)); BEAST_EXPECT(checkTransaction(txs[1u], 8, 7)); BEAST_EXPECT(checkTransaction(txs[2u], 7, 6)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 3, false, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 6, 6)); BEAST_EXPECT(checkTransaction(txs[1u], 5, 5)); BEAST_EXPECT(checkTransaction(txs[2u], 4, 5)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 3, false, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 3)) return; BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); BEAST_EXPECT(checkTransaction(txs[1u], 4, 4)); BEAST_EXPECT(checkTransaction(txs[2u], 3, 3)); if (!BEAST_EXPECT(res.has_marker())) return; std::tie(res, status) = next( grpcPort, env, A3.human(), 3, 9, 3, false, res.mutable_marker()); txs = res.transactions(); if (!BEAST_EXPECT(txs.size() == 1)) return; BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); BEAST_EXPECT(!res.has_marker()); } } public: void run() override { testAccountTxPaging(); testAccountTxPagingGrpc(); testAccountTxParametersGrpc(); testAccountTxContentsGrpc(); } }; BEAST_DEFINE_TESTSUITE(AccountTxPaging, app, ripple); } // namespace ripple
94,272
26,660
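// Hedged sketch of the marker-based paging pattern that the gRPC account_tx
// tests above exercise: keep feeding res.mutable_marker() back into the test
// fixture's `next` helper (used above with port, env, account, min/max ledger,
// limit, forward flag, optional marker) until has_marker() is false. The
// ledger bounds and limit below are placeholders; per-page assertions are
// omitted.
template <typename Env>
void
drainAccountTxPages(std::string const& grpcPort, Env& env, std::string const& account)
{
    auto [res, status] = next(grpcPort, env, account, 3, 9, 3, true);
    while (status.error_code() == 0)
    {
        for (auto const& tx : res.transactions())
            (void)tx;  // inspect each returned transaction here
        if (!res.has_marker())
            break;  // final page reached
        std::tie(res, status) =
            next(grpcPort, env, account, 3, 9, 3, true, res.mutable_marker());
    }
}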
/**
 *  @file
 *  @copyright defined in eos/LICENSE.txt
 */
#pragma once

#include <eosio/chain/transaction.hpp>
#include "model.hpp"

namespace fc { class variant; }

namespace eosio {
namespace account {

/// Provides an association of wallet name to wallet and manages the interaction with each wallet.
///
/// The name of the wallet is also used as part of the file name by wallet_api. See account_manager::create.
/// No const methods because timeout may cause lock_all() to be called.
class account_manager {
public:
   account_manager() = default;
   account_manager(const account_manager&) = delete;
   account_manager(account_manager&&) = delete;
   account_manager& operator=(const account_manager&) = delete;
   account_manager& operator=(account_manager&&) = delete;
   ~account_manager() = default;

   /// Create a new wallet.
   /// A new wallet is created in file dir/{name}.wallet see set_dir.
   /// The new wallet is unlocked after creation.
   /// @param name of the wallet and name of the file without ext .wallet.
   /// @return Plaintext password that is needed to unlock wallet. Caller is responsible for saving password otherwise
   ///         they will not be able to unlock their wallet. Note user supplied passwords are not supported.
   /// @throws fc::exception if wallet with name already exists (or filename already exists)
   fc::variant create(const eosio::account::account_create& args);

   fc::mutable_variant_object createkey(const int& num);

   // get account balance
   fc::variant get_account_balance(const currency_balance& args);

   // transfer
   fc::variant transfer(const transfer_info& args);

private:
   std::string eosio_key = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3";
};

} // namespace account
} // namespace eosio
1,791
545
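// Hedged usage sketch for the account_manager declared above; only createkey()
// is shown because its parameter type is visible in the header, while the
// argument structs for create()/get_account_balance()/transfer() live in
// model.hpp. The include path below is a placeholder, not the real project
// layout.
#include <fc/variant_object.hpp>
#include "account_manager.hpp"  // placeholder path for the header above

void createTwoKeyPairs()
{
   eosio::account::account_manager manager;
   // Ask the manager for two freshly generated key pairs.
   fc::mutable_variant_object keys = manager.createkey(2);
   (void)keys;
}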
/**********************************************************************************
 * MIT License
 *
 * Copyright (c) 2018 Antoine Beauchamp
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *********************************************************************************/

#include "TestBufferedConnection.h"
#include "pbop/BufferedConnection.h"

using namespace pbop;

void TestBufferedConnection::SetUp()
{
}

void TestBufferedConnection::TearDown()
{
}

TEST_F(TestBufferedConnection, testReadWrite)
{
  std::string bufferA;
  std::string bufferB;
  BufferedConnection conn1(&bufferA, &bufferB);
  BufferedConnection conn2(&bufferB, &bufferA);

  Status s;

  //write data to connection 1
  const std::string write_data = "hello!";
  s = conn1.Write(write_data);
  ASSERT_TRUE( s.Success() ) << s.GetDescription();

  //read data from connection 2
  std::string read_data;
  s = conn2.Read(read_data);
  ASSERT_TRUE( s.Success() ) << s.GetDescription();

  //expect read and written data to be identical.
  ASSERT_EQ(write_data, read_data);
}

TEST_F(TestBufferedConnection, testInvalidWrite)
{
  std::string buffer;
  BufferedConnection conn(&buffer, NULL);

  //write data to connection
  const std::string data = "hello!";
  Status s = conn.Write(data);
  ASSERT_FALSE( s.Success() ) << s.GetDescription();
}

TEST_F(TestBufferedConnection, testInvalidRead)
{
  std::string buffer;
  BufferedConnection conn(NULL, &buffer);

  //read data from connection
  std::string data;
  Status s = conn.Read(data);
  ASSERT_FALSE( s.Success() ) << s.GetDescription();
}
2,602
825
#include "StdAfx.h" #include "Selection.h" // =========================================================================== class NoCaseCompare { public: bool operator ()(const std::_tstring & l, const std::_tstring & r) const { return _tcsicmp(l.c_str(), r.c_str()) < 0; } }; // =========================================================================== CSelection::CSelection(WTL::CTreeViewCtrlEx * tree) { m_tree = tree; } CSelection::~CSelection(void) { } void CSelection::Clear() { CWaitCursor wait; m_attrs.clear(); WTL::CTreeItem item = m_tree->GetRootItem(); while (item) { InitState((CTreeNode*)item.GetData(), THREESTATE_UNCHECKED); item = item.GetNextVisible(); } } THREESTATE CSelection::CalcWorkspaceState(IWorkspace * workspace) const { bool unchecked = false; bool checked = false; IWorkspaceVector workspaces; workspace->GetChildWorkspaces(workspaces); for(IWorkspaceVector::const_iterator itr = workspaces.begin(); itr != workspaces.end(); ++itr) { switch(CalcWorkspaceState(itr->get())) { case THREESTATE_CHECKED: checked = true; break; case THREESTATE_UNCHECKED: unchecked = true; break; case THREESTATE_PARTIALCHECKED: return THREESTATE_PARTIALCHECKED; } } IWorkspaceItemVector workspaceItems; workspace->GetWindows(workspaceItems); for(IWorkspaceItemVector::const_iterator itr = workspaceItems.begin();itr != workspaceItems.end(); ++itr) { if (itr->get()->GetType() == WORKSPACE_ITEM_ATTRIBUTE) { AttributeStateMap::const_iterator found = m_attrs.find(itr->get()->GetAttribute()); if (found != m_attrs.end() && found->second.m_checked == true) checked = true; else unchecked = true; } if (checked && unchecked) return THREESTATE_PARTIALCHECKED; } if (checked) return THREESTATE_CHECKED; return THREESTATE_UNCHECKED; } THREESTATE CSelection::CalcModuleState(IModule * module) const { bool unchecked = false; bool checked = false; IModuleVector modules; module->GetModules(modules); for(IModuleVector::iterator itr = modules.begin(); itr != modules.end(); ++itr) { switch (CalcModuleState(itr->get())) { case THREESTATE_CHECKED: checked = true; break; case THREESTATE_UNCHECKED: unchecked = true; break; default: return THREESTATE_PARTIALCHECKED; } if (checked && unchecked) return THREESTATE_PARTIALCHECKED; } unsigned int attrCount = 0; for(AttributeStateMap::const_iterator itr = m_attrs.begin(); itr != m_attrs.end(); ++itr) { if (boost::algorithm::iequals(module->GetQualifiedLabel(), itr->first->GetModule()->GetQualifiedLabel())) { attrCount++; if (itr->second.m_checked == true) checked = true; else unchecked = true; } if (checked && unchecked) return THREESTATE_PARTIALCHECKED; } if (checked) { IAttributeVector attributes; return module->GetAttributes(attributes, true) - attrCount == 0 ? THREESTATE_CHECKED : THREESTATE_PARTIALCHECKED; //(Module could contain more attrs than the ones we know about in m_attrs) } return THREESTATE_UNCHECKED; } THREESTATE CSelection::CalcAttributeState(IAttribute * attribute) const { if (!attribute->Exists()) return THREESTATE_BLANK; AttributeStateMap::const_iterator itr = m_attrs.find(attribute); if (itr != m_attrs.end()) { return itr->second.m_checked ? 
THREESTATE_CHECKED : THREESTATE_UNCHECKED; } return THREESTATE_UNCHECKED; } THREESTATE CSelection::CalcAttributeHistoryState(IAttribute * attribute, int version) const { AttributeStateMap::const_iterator itr = m_attrs.find(attribute); if (itr != m_attrs.end() && itr->second.m_version == version && itr->second.m_checked) { return THREESTATE_RADIO_CHECKED; } return THREESTATE_RADIO_UNCHECKED; } void CSelection::InitState(CTreeNode * item, THREESTATE knownState) { if (CComQIPtr<CWorkspacePairNode> node = item) { SetCheckThreeState(*item, knownState != THREESTATE_UNKNOWN ? knownState : CalcWorkspaceState(node->m_lhs)); } else if (CComQIPtr<CModulePairNode> node = item) { SetCheckThreeState(*item, knownState != THREESTATE_UNKNOWN ? knownState : CalcModuleState(node->m_lhs)); } else if (CComQIPtr<CAttributePairNode> node = item) { THREESTATE state = knownState != THREESTATE_UNKNOWN ? knownState : CalcAttributeState(node->m_lhs); SetState(node->m_lhs, state == THREESTATE_CHECKED); SetCheckThreeState(*item, state); } else if (CComQIPtr<CAttributeHistoryPairNode> node = item) { CComQIPtr<CAttributePairNode> parent = item->GetParentNode(); IAttribute * attr = parent->m_lhs; if (m_attrs[attr].m_version == 0 && *parent->GetChildNode() == *item && !attr->IsSandboxed() && m_attrs[attr].m_checked) SetCheckThreeState(*item, THREESTATE_RADIO_CHECKED); else SetCheckThreeState(*item, CalcAttributeHistoryState(parent->m_lhs, node->m_lhs->GetVersion())); } } void CSelection::SetState(IAttribute * attribute, bool checked) { ATLASSERT(attribute); if (!attribute->Exists()) return; AttributeStateMap::const_iterator itr = m_attrs.find(attribute); if (itr == m_attrs.end()) //Does not exist... { m_attrs[attribute].m_version = 0; m_attrs[attribute].m_history = NULL; m_attrs[attribute].m_checked = false; } if (m_attrs[attribute].m_checked != checked) { m_attrs[attribute].m_version = 0; m_attrs[attribute].m_history = NULL; m_attrs[attribute].m_checked = checked; } } void CSelection::ItemClicked(CTreeNode * item, IAttributeVector * attrs, IAttributeVector * dependants) { THREESTATE curState = GetCheckThreeState(*item); THREESTATE newState = curState == THREESTATE_BUSY_CHECKED ? THREESTATE_UNCHECKED : THREESTATE_CHECKED; if (CComQIPtr<CWorkspacePairNode> node = item) { IWorkspace * ws = node->m_lhs; SetSelection(item, *attrs, newState == THREESTATE_CHECKED); } else if (CComQIPtr<CModulePairNode> node = item) { IModule * mod = node->m_lhs; SetSelection(item, *attrs, newState == THREESTATE_CHECKED); } else if (CComQIPtr<CAttributePairNode> node = item) { attrs->push_back(node->m_lhs.p); SetSelection(item, *attrs, newState == THREESTATE_CHECKED); } else if (CComQIPtr<CAttributeHistoryPairNode> node = item) { CComQIPtr<CAttributePairNode> parent = node->GetParentNode(); CComQIPtr<CModulePairNode> gparent = parent->GetParentNode(); IAttribute * attr = parent->m_lhs; m_attrs[attr].m_version = node->m_lhs->GetVersion(); m_attrs[attr].m_history = node->m_lhs; m_attrs[attr].m_checked = true; // TODO: GJS - This doesn't refresh to the top of nested modules... Refresh(parent); } // Dependents could be anywhere... if (!dependants->empty()) SetSelection(*dependants, newState == THREESTATE_CHECKED); } void CSelection::Refresh(CAttributePairNode * node) { SetCheckThreeState(*node, CalcAttributeState(node->m_lhs)); // Recalc Parents. 
if (CComQIPtr<CWorkspacePairNode> parent = node->GetParentNode()) SetCheckThreeState(*parent, CalcWorkspaceState(parent->m_lhs)); else if (CComQIPtr<CModulePairNode> parent = node->GetParentNode()) SetCheckThreeState(*parent, CalcModuleState(parent->m_lhs)); // Recalc Children. for(CComQIPtr<CAttributeHistoryPairNode> child = node->GetChildNode(); child; child = child->GetNextSiblingItem()) { InitState(child); } } bool CSelection::HasSelection() const { for(AttributeStateMap::const_iterator itr = m_attrs.begin(); itr != m_attrs.end(); ++itr) { if (itr->second.m_checked) return true; } return false; } //class IAttributeSelectionCompare //{ //public: // bool operator ()(IAttributeSelection & l, IAttributeSelection & r) // { // CString lhsModule = l.m_moduleLabel.c_str(); // int compare = lhsModule.CompareNoCase(r.m_moduleLabel.c_str()); // if (compare == 0) // { // CString lhs = l.m_attrLabel.c_str(); // return lhs.CompareNoCase(r.m_attrLabel.c_str()) > 0 ? false : true; // } // else // return compare > 0 ? false : true; // } //}; int CSelection::GetSelection(IRepository * rep, IWorkspaceVector & workspaces, IAttributeHistoryVector & attrs) const { WTL::CTreeItem item = m_tree->GetRootItem(); while(item) { CComPtr<CTreeNode> node = (CTreeNode *)item.GetData(); if (CComQIPtr<CWorkspacePairNode> ws_node = node) { switch (GetCheckThreeState(item)) { case THREESTATE_CHECKED: case THREESTATE_PARTIALCHECKED: workspaces.push_back(ws_node->m_lhs.p); break; } } item = item.GetNextVisible(); } for(AttributeStateMap::const_iterator itr = m_attrs.begin(); itr != m_attrs.end(); ++itr) { if (itr->second.m_checked) { if (itr->second.m_version == 0) attrs.push_back(itr->first->GetAsHistory()); else attrs.push_back(itr->second.m_history.p); } } IAttributeHistoryCompare compare; std::sort(attrs.begin(), attrs.end(), compare); return attrs.size(); } void CSelection::SetSelection(IAttributeVector & attrs, bool checked) { if (attrs.empty()) return; CWaitCursor wait; for(IAttributeVector::const_iterator itr = attrs.begin(); itr != attrs.end(); ++itr) { SetState(*itr, checked); } WTL::CTreeItem item = m_tree->GetRootItem(); while(item) { InitState((CTreeNode *)item.GetData()); item = item.GetNextVisible(); } } void RecursiveRefreshChildren(CSelection * self, WTL::CTreeItem * _item, THREESTATE knownState) { WTL::CTreeItem childItem = _item->GetChild(); while(childItem) { self->InitState((CTreeNode *)childItem.GetData(), knownState); RecursiveRefreshChildren(self, &childItem, knownState); childItem = childItem.GetNextSibling(); } } void CSelection::SetSelection(CTreeNode * _item, IAttributeVector & attrs, bool checked) { for(IAttributeVector::const_iterator itr = attrs.begin(); itr != attrs.end(); ++itr) { SetState(*itr, checked); //GJS this does not work for dependent attrs!!!! } SetCheckThreeState(*_item, checked ? THREESTATE_CHECKED : THREESTATE_UNCHECKED); // Refresh Ancestors WTL::CTreeItem item = _item->GetParent(); while(item) { InitState((CTreeNode *)item.GetData()); item = item.GetParent(); } // Refresh Children if (_item->IsExpanded()) RecursiveRefreshChildren(this, _item, checked ? 
THREESTATE_CHECKED : THREESTATE_UNCHECKED); SetSelection(attrs, checked); } int CSelection::GetSelection(std::_tstring & attrs) const { for(AttributeStateMap::const_iterator itr = m_attrs.begin(); itr != m_attrs.end(); ++itr) { if (itr->second.m_checked) { if (attrs.length()) attrs += _T("\r\n"); attrs += itr->first->GetQualifiedLabel(); } } return attrs.size(); } THREESTATE CSelection::GetCheckThreeState(HTREEITEM hItem) const { ATLASSERT(m_tree && m_tree->IsWindow()); UINT uRet = m_tree->GetItemState(hItem, TVIS_STATEIMAGEMASK); return (THREESTATE)((uRet >> 12) - 1); } BOOL CSelection::SetCheckThreeState(HTREEITEM hItem, THREESTATE state) { ATLASSERT(m_tree && m_tree->IsWindow()); int nCheck = (int)state; ATLASSERT(nCheck > THREESTATE_UNKNOWN && nCheck < THREESTATE_LAST); return m_tree->SetItemState(hItem, INDEXTOSTATEIMAGEMASK(nCheck+1), TVIS_STATEIMAGEMASK); }
12,318
4,086
#ifndef SHADER_HPP_
#define SHADER_HPP_

#include <string>
#include <fstream>
#include <sstream>

#include "includesOpengl.hpp"

/**
 * @brief Shader class used to manage shader compilation
 *
 * It also adds some tools to set uniforms and activate the shader more easily.
 * Warning! Before instantiating a Shader object you need to create the OpenGL context with glfwCreateWindow
 */
class Shader {
	public:
		Shader(std::string const vsPath, std::string const fsPath, std::string const gsPath = "");
		Shader(Shader const &src);
		virtual ~Shader();

		Shader &operator=(Shader const &rhs);

		void use();
		void unuse();

		void setBool(const std::string &name, bool value) const;
		void setInt(const std::string &name, int value) const;
		void setFloat(const std::string &name, float value) const;
		void setDouble(const std::string &name, double value) const;
		void setVec2(const std::string &name, float x, float y) const;
		void setVec2(const std::string &name, const glm::vec2 &vec) const;
		void setVec2Double(const std::string &name, double x, double y) const;
		void setVec2Double(const std::string &name, const glm::tvec2<double> &vec) const;
		void setVec3(const std::string &name, float x, float y, float z) const;
		void setVec3(const std::string &name, const glm::vec3 &vec) const;
		void setVec3Double(const std::string &name, double x, double y, double z) const;
		void setVec3Double(const std::string &name, const glm::tvec3<double> &vec) const;
		void setVec4(const std::string &name, float x, float y, float z, float w) const;
		void setVec4(const std::string &name, const glm::vec4 &vec) const;
		void setVec4Double(const std::string &name, double x, double y, double z, double w) const;
		void setVec4Double(const std::string &name, const glm::tvec4<double> &vec) const;
		void setMat2(const std::string &name, const glm::mat2 &mat) const;
		void setMat2Double(const std::string &name, const glm::dmat2 &mat) const;
		void setMat3(const std::string &name, const glm::mat3 &mat) const;
		void setMat3Double(const std::string &name, const glm::dmat3 &mat) const;
		void setMat4(const std::string &name, const glm::mat4 &mat) const;
		void setMat4Double(const std::string &name, const glm::dmat4 &mat) const;

		/**
		 * @brief Shader exception
		 */
		class ShaderError : public std::exception {
			public:
				/**
				 * @brief Function auto called on errors
				 *
				 * @return const char* Error message
				 */
				virtual const char* what() const throw() = 0;
		};
		/**
		 * @brief Shader compilation exception
		 */
		class ShaderCompileException : public ShaderError {
			public:
				/**
				 * @brief Function auto called on errors
				 *
				 * @return const char* Error message
				 */
				virtual const char* what() const throw() {
					return ("Shader failed to compile!");
				}
		};
		/**
		 * @brief Shader linking exception
		 */
		class ShaderLinkingException : public ShaderError {
			public:
				/**
				 * @brief Function auto called on errors
				 *
				 * @return const char* Error message
				 */
				virtual const char* what() const throw() {
					return ("Shader program failed to link!");
				}
		};

		uint32_t id;  /**< shader ID */

	private:
		void checkCompileErrors(uint32_t shader, std::string type);

		std::string _vsPath;
		std::string _gsPath;
		std::string _fsPath;
};

#endif  // SHADER_HPP_
3,347
1,252
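// Minimal usage sketch for the Shader class above. It assumes an OpenGL
// context already exists (the header itself warns that glfwCreateWindow must
// be called first) and that the shader file paths are placeholders.
#include <glm/glm.hpp>
#include "Shader.hpp"

void drawPass(const glm::mat4 &mvp)
{
	// Constructed once; may throw Shader::ShaderCompileException or
	// Shader::ShaderLinkingException if the sources are invalid.
	static Shader shader("shaders/basic.vert", "shaders/basic.frag");

	shader.use();
	shader.setMat4("mvp", mvp);
	shader.setVec3("lightColor", 1.0f, 1.0f, 1.0f);
	// ... issue draw calls with this program bound ...
	shader.unuse();
}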
#include "aws_robomaker_gazebo_bridge/AwsPerfMetrics.h" #include "boost/shared_ptr.hpp" #include "gazebo/gazebo.hh" #include "gazebo/gazebo_client.hh" #include "ros/ros.h" using PerfMetrics = aws_robomaker_gazebo_bridge::AwsPerfMetrics; const std::string kDefaultGazeboPerfMetricsTopic = "/gazebo/aws/perf_metrics"; const std::string kDefaultGazeboWorldStatsTopic = "/gazebo/default/world_stats"; namespace { // TODO: This calculation is wrong, since it doesn't account for times when // the simulation is paused, or changes in RTF over time. Need to // implement a better filter, but for now double real_time_factor(const gazebo::common::Time &simTime, const gazebo::common::Time &realTime) { if (realTime == gazebo::common::Time::Zero) { return 0.0; } return (simTime / realTime).Double(); } } namespace robomaker { class PerfMetricsBridge { public: PerfMetricsBridge() : nh_("~") { std::string world_stats_topic, perf_metrics_topic; nh_.param<std::string>("gazebo_perf_metrics_topic", perf_metrics_topic, kDefaultGazeboPerfMetricsTopic); nh_.param<std::string>("gazebo_world_stats_topic", world_stats_topic, kDefaultGazeboWorldStatsTopic); perf_metrics_pub_ = nh_.advertise<PerfMetrics>(perf_metrics_topic, 1); gazebo_node_ = boost::make_shared<gazebo::transport::Node>(); gazebo_node_->Init(); gazebo_world_stats_sub_ = gazebo_node_->Subscribe(world_stats_topic, &PerfMetricsBridge::publish_perf_metrics, this); } void publish_perf_metrics(ConstWorldStatisticsPtr& msg) { PerfMetrics metrics; metrics.rtf = real_time_factor( gazebo::msgs::Convert(msg->sim_time()), gazebo::msgs::Convert(msg->real_time()) ); perf_metrics_pub_.publish(metrics); } private: ros::NodeHandle nh_; ros::Publisher perf_metrics_pub_; boost::shared_ptr<gazebo::transport::Node> gazebo_node_; gazebo::transport::SubscriberPtr gazebo_world_stats_sub_; }; } int main(int argc, char **argv) { gazebo::client::setup(argc, argv); ros::init(argc, argv, "aws_perf_metrics_bridge"); robomaker::PerfMetricsBridge bridge; ros::spin(); return 0; }
2,348
825
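// The real_time_factor() helper above carries a TODO noting that a lifetime
// sim/real ratio hides pauses and RTF drift. As one possible direction (an
// assumption, not part of the bridge), the factor could be computed over the
// delta between successive world-stats messages:
#include <gazebo/common/Time.hh>

class DeltaRealTimeFactor
{
public:
  // Returns the RTF observed since the previous sample (0.0 on the first
  // call or when real time has not advanced).
  double Update(const gazebo::common::Time &simTime,
                const gazebo::common::Time &realTime)
  {
    double rtf = 0.0;
    if (has_prev_ && realTime > prev_real_) {
      rtf = ((simTime - prev_sim_) / (realTime - prev_real_)).Double();
    }
    prev_sim_ = simTime;
    prev_real_ = realTime;
    has_prev_ = true;
    return rtf;
  }

private:
  gazebo::common::Time prev_sim_;
  gazebo::common::Time prev_real_;
  bool has_prev_ = false;
};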
#pragma once #include "BaseAnimatedWithPhysicsGameObject.hpp" #include "Path.hpp" #include "FunctionFwd.hpp" enum class SwitchOp : s16; enum class FootSwitchTriggerBy : s16 { eOnlyAbe_0 = 0, eAnyone_1 = 1, }; struct Path_FootSwitch final : public Path_TLV { s16 field_10_id; Scale_short field_12_scale; SwitchOp field_14_action; FootSwitchTriggerBy field_16_trigger_by; }; ALIVE_ASSERT_SIZEOF_ALWAYS(Path_FootSwitch, 0x18); struct FootSwitch_Data final { s32 field_0_frameTableOffset; s32 field_4_frameTableOffset; s16 field_8_maxH; s16 field_A_frameTableOffset; }; ALIVE_ASSERT_SIZEOF_ALWAYS(FootSwitch_Data, 0xC); class FootSwitch final : public ::BaseAnimatedWithPhysicsGameObject { public: EXPORT FootSwitch* ctor_4DE090(Path_FootSwitch* pTlv, s32 tlvInfo); virtual BaseGameObject* VDestructor(s32 flags) override; virtual void VUpdate() override; virtual void VScreenChanged() override; private: EXPORT FootSwitch* vdtor_4DE240(s32 flags); EXPORT void dtor_4DE670(); EXPORT void vScreenChanged_4DE650(); EXPORT void vUpdate_4DE270(); EXPORT BaseAliveGameObject* WhoIsStoodOnMe_4DE700(); private: s32 field_F4_tlvInfo; enum class States : s16 { eWaitForStepOnMe_0 = 0, eWaitForGetOffMe_1 = 1, }; States field_F8_state; s16 field_FA_id; SwitchOp field_FC_action; FootSwitchTriggerBy field_FE_trigger_by; s32 field_100_obj_id; s16 field_104_bUnknown; s16 field_106_bFindStander; }; ALIVE_ASSERT_SIZEOF(FootSwitch, 0x108);
1,570
629
// Copyright 2021 The Tint Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or stateied. // See the License for the specific language governing permissions and // limitations under the License. #include "src/tint/reader/spirv/parser_type.h" #include <string> #include <unordered_map> #include <utility> #include "src/tint/program_builder.h" #include "src/tint/utils/hash.h" #include "src/tint/utils/map.h" #include "src/tint/utils/unique_allocator.h" TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Type); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Void); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Bool); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::U32); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::F32); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::I32); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Pointer); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Reference); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Vector); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Matrix); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Array); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Sampler); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Texture); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::DepthTexture); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::DepthMultisampledTexture); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::MultisampledTexture); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::SampledTexture); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::StorageTexture); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Named); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Alias); TINT_INSTANTIATE_TYPEINFO(tint::reader::spirv::Struct); namespace tint::reader::spirv { namespace { struct PointerHasher { size_t operator()(const Pointer& t) const { return utils::Hash(t.type, t.storage_class); } }; struct ReferenceHasher { size_t operator()(const Reference& t) const { return utils::Hash(t.type, t.storage_class); } }; struct VectorHasher { size_t operator()(const Vector& t) const { return utils::Hash(t.type, t.size); } }; struct MatrixHasher { size_t operator()(const Matrix& t) const { return utils::Hash(t.type, t.columns, t.rows); } }; struct ArrayHasher { size_t operator()(const Array& t) const { return utils::Hash(t.type, t.size, t.stride); } }; struct AliasHasher { size_t operator()(const Alias& t) const { return utils::Hash(t.name); } }; struct StructHasher { size_t operator()(const Struct& t) const { return utils::Hash(t.name); } }; struct SamplerHasher { size_t operator()(const Sampler& s) const { return utils::Hash(s.kind); } }; struct DepthTextureHasher { size_t operator()(const DepthTexture& t) const { return utils::Hash(t.dims); } }; struct DepthMultisampledTextureHasher { size_t operator()(const DepthMultisampledTexture& t) const { return utils::Hash(t.dims); } }; struct MultisampledTextureHasher { size_t operator()(const MultisampledTexture& t) const { return utils::Hash(t.dims, t.type); } }; struct SampledTextureHasher { size_t operator()(const SampledTexture& t) const { return utils::Hash(t.dims, t.type); } }; struct StorageTextureHasher { size_t operator()(const StorageTexture& t) const 
{ return utils::Hash(t.dims, t.format, t.access); } }; } // namespace // Equality operators //! @cond Doxygen_Suppress static bool operator==(const Pointer& a, const Pointer& b) { return a.type == b.type && a.storage_class == b.storage_class; } static bool operator==(const Reference& a, const Reference& b) { return a.type == b.type && a.storage_class == b.storage_class; } static bool operator==(const Vector& a, const Vector& b) { return a.type == b.type && a.size == b.size; } static bool operator==(const Matrix& a, const Matrix& b) { return a.type == b.type && a.columns == b.columns && a.rows == b.rows; } static bool operator==(const Array& a, const Array& b) { return a.type == b.type && a.size == b.size && a.stride == b.stride; } static bool operator==(const Named& a, const Named& b) { return a.name == b.name; } static bool operator==(const Sampler& a, const Sampler& b) { return a.kind == b.kind; } static bool operator==(const DepthTexture& a, const DepthTexture& b) { return a.dims == b.dims; } static bool operator==(const DepthMultisampledTexture& a, const DepthMultisampledTexture& b) { return a.dims == b.dims; } static bool operator==(const MultisampledTexture& a, const MultisampledTexture& b) { return a.dims == b.dims && a.type == b.type; } static bool operator==(const SampledTexture& a, const SampledTexture& b) { return a.dims == b.dims && a.type == b.type; } static bool operator==(const StorageTexture& a, const StorageTexture& b) { return a.dims == b.dims && a.format == b.format; } //! @endcond const ast::Type* Void::Build(ProgramBuilder& b) const { return b.ty.void_(); } const ast::Type* Bool::Build(ProgramBuilder& b) const { return b.ty.bool_(); } const ast::Type* U32::Build(ProgramBuilder& b) const { return b.ty.u32(); } const ast::Type* F32::Build(ProgramBuilder& b) const { return b.ty.f32(); } const ast::Type* I32::Build(ProgramBuilder& b) const { return b.ty.i32(); } Pointer::Pointer(const Type* t, ast::StorageClass s) : type(t), storage_class(s) {} Pointer::Pointer(const Pointer&) = default; const ast::Type* Pointer::Build(ProgramBuilder& b) const { return b.ty.pointer(type->Build(b), storage_class); } Reference::Reference(const Type* t, ast::StorageClass s) : type(t), storage_class(s) {} Reference::Reference(const Reference&) = default; const ast::Type* Reference::Build(ProgramBuilder& b) const { return type->Build(b); } Vector::Vector(const Type* t, uint32_t s) : type(t), size(s) {} Vector::Vector(const Vector&) = default; const ast::Type* Vector::Build(ProgramBuilder& b) const { return b.ty.vec(type->Build(b), size); } Matrix::Matrix(const Type* t, uint32_t c, uint32_t r) : type(t), columns(c), rows(r) {} Matrix::Matrix(const Matrix&) = default; const ast::Type* Matrix::Build(ProgramBuilder& b) const { return b.ty.mat(type->Build(b), columns, rows); } Array::Array(const Type* t, uint32_t sz, uint32_t st) : type(t), size(sz), stride(st) {} Array::Array(const Array&) = default; const ast::Type* Array::Build(ProgramBuilder& b) const { if (size > 0) { return b.ty.array(type->Build(b), u32(size), stride); } else { return b.ty.array(type->Build(b), nullptr, stride); } } Sampler::Sampler(ast::SamplerKind k) : kind(k) {} Sampler::Sampler(const Sampler&) = default; const ast::Type* Sampler::Build(ProgramBuilder& b) const { return b.ty.sampler(kind); } Texture::Texture(ast::TextureDimension d) : dims(d) {} Texture::Texture(const Texture&) = default; DepthTexture::DepthTexture(ast::TextureDimension d) : Base(d) {} DepthTexture::DepthTexture(const DepthTexture&) = default; const 
ast::Type* DepthTexture::Build(ProgramBuilder& b) const { return b.ty.depth_texture(dims); } DepthMultisampledTexture::DepthMultisampledTexture(ast::TextureDimension d) : Base(d) {} DepthMultisampledTexture::DepthMultisampledTexture(const DepthMultisampledTexture&) = default; const ast::Type* DepthMultisampledTexture::Build(ProgramBuilder& b) const { return b.ty.depth_multisampled_texture(dims); } MultisampledTexture::MultisampledTexture(ast::TextureDimension d, const Type* t) : Base(d), type(t) {} MultisampledTexture::MultisampledTexture(const MultisampledTexture&) = default; const ast::Type* MultisampledTexture::Build(ProgramBuilder& b) const { return b.ty.multisampled_texture(dims, type->Build(b)); } SampledTexture::SampledTexture(ast::TextureDimension d, const Type* t) : Base(d), type(t) {} SampledTexture::SampledTexture(const SampledTexture&) = default; const ast::Type* SampledTexture::Build(ProgramBuilder& b) const { return b.ty.sampled_texture(dims, type->Build(b)); } StorageTexture::StorageTexture(ast::TextureDimension d, ast::TexelFormat f, ast::Access a) : Base(d), format(f), access(a) {} StorageTexture::StorageTexture(const StorageTexture&) = default; const ast::Type* StorageTexture::Build(ProgramBuilder& b) const { return b.ty.storage_texture(dims, format, access); } Named::Named(Symbol n) : name(n) {} Named::Named(const Named&) = default; Named::~Named() = default; Alias::Alias(Symbol n, const Type* ty) : Base(n), type(ty) {} Alias::Alias(const Alias&) = default; const ast::Type* Alias::Build(ProgramBuilder& b) const { return b.ty.type_name(name); } Struct::Struct(Symbol n, TypeList m) : Base(n), members(std::move(m)) {} Struct::Struct(const Struct&) = default; Struct::~Struct() = default; const ast::Type* Struct::Build(ProgramBuilder& b) const { return b.ty.type_name(name); } /// The PIMPL state of the Types object. 
struct TypeManager::State { /// The allocator of primitive types utils::BlockAllocator<Type> allocator_; /// The lazily-created Void type spirv::Void const* void_ = nullptr; /// The lazily-created Bool type spirv::Bool const* bool_ = nullptr; /// The lazily-created U32 type spirv::U32 const* u32_ = nullptr; /// The lazily-created F32 type spirv::F32 const* f32_ = nullptr; /// The lazily-created I32 type spirv::I32 const* i32_ = nullptr; /// Unique Pointer instances utils::UniqueAllocator<spirv::Pointer, PointerHasher> pointers_; /// Unique Reference instances utils::UniqueAllocator<spirv::Reference, ReferenceHasher> references_; /// Unique Vector instances utils::UniqueAllocator<spirv::Vector, VectorHasher> vectors_; /// Unique Matrix instances utils::UniqueAllocator<spirv::Matrix, MatrixHasher> matrices_; /// Unique Array instances utils::UniqueAllocator<spirv::Array, ArrayHasher> arrays_; /// Unique Alias instances utils::UniqueAllocator<spirv::Alias, AliasHasher> aliases_; /// Unique Struct instances utils::UniqueAllocator<spirv::Struct, StructHasher> structs_; /// Unique Sampler instances utils::UniqueAllocator<spirv::Sampler, SamplerHasher> samplers_; /// Unique DepthTexture instances utils::UniqueAllocator<spirv::DepthTexture, DepthTextureHasher> depth_textures_; /// Unique DepthMultisampledTexture instances utils::UniqueAllocator<spirv::DepthMultisampledTexture, DepthMultisampledTextureHasher> depth_multisampled_textures_; /// Unique MultisampledTexture instances utils::UniqueAllocator<spirv::MultisampledTexture, MultisampledTextureHasher> multisampled_textures_; /// Unique SampledTexture instances utils::UniqueAllocator<spirv::SampledTexture, SampledTextureHasher> sampled_textures_; /// Unique StorageTexture instances utils::UniqueAllocator<spirv::StorageTexture, StorageTextureHasher> storage_textures_; }; const Type* Type::UnwrapPtr() const { const Type* type = this; while (auto* ptr = type->As<Pointer>()) { type = ptr->type; } return type; } const Type* Type::UnwrapRef() const { const Type* type = this; while (auto* ptr = type->As<Reference>()) { type = ptr->type; } return type; } const Type* Type::UnwrapAlias() const { const Type* type = this; while (auto* alias = type->As<Alias>()) { type = alias->type; } return type; } const Type* Type::UnwrapAll() const { auto* type = this; while (true) { if (auto* alias = type->As<Alias>()) { type = alias->type; } else if (auto* ptr = type->As<Pointer>()) { type = ptr->type; } else { break; } } return type; } bool Type::IsFloatScalar() const { return Is<F32>(); } bool Type::IsFloatScalarOrVector() const { return IsFloatScalar() || IsFloatVector(); } bool Type::IsFloatVector() const { return Is([](const Vector* v) { return v->type->IsFloatScalar(); }); } bool Type::IsIntegerScalar() const { return IsAnyOf<U32, I32>(); } bool Type::IsIntegerScalarOrVector() const { return IsUnsignedScalarOrVector() || IsSignedScalarOrVector(); } bool Type::IsScalar() const { return IsAnyOf<F32, U32, I32, Bool>(); } bool Type::IsSignedIntegerVector() const { return Is([](const Vector* v) { return v->type->Is<I32>(); }); } bool Type::IsSignedScalarOrVector() const { return Is<I32>() || IsSignedIntegerVector(); } bool Type::IsUnsignedIntegerVector() const { return Is([](const Vector* v) { return v->type->Is<U32>(); }); } bool Type::IsUnsignedScalarOrVector() const { return Is<U32>() || IsUnsignedIntegerVector(); } TypeManager::TypeManager() { state = std::make_unique<State>(); } TypeManager::~TypeManager() = default; const spirv::Void* TypeManager::Void() { if 
(!state->void_) { state->void_ = state->allocator_.Create<spirv::Void>(); } return state->void_; } const spirv::Bool* TypeManager::Bool() { if (!state->bool_) { state->bool_ = state->allocator_.Create<spirv::Bool>(); } return state->bool_; } const spirv::U32* TypeManager::U32() { if (!state->u32_) { state->u32_ = state->allocator_.Create<spirv::U32>(); } return state->u32_; } const spirv::F32* TypeManager::F32() { if (!state->f32_) { state->f32_ = state->allocator_.Create<spirv::F32>(); } return state->f32_; } const spirv::I32* TypeManager::I32() { if (!state->i32_) { state->i32_ = state->allocator_.Create<spirv::I32>(); } return state->i32_; } const spirv::Pointer* TypeManager::Pointer(const Type* el, ast::StorageClass sc) { return state->pointers_.Get(el, sc); } const spirv::Reference* TypeManager::Reference(const Type* el, ast::StorageClass sc) { return state->references_.Get(el, sc); } const spirv::Vector* TypeManager::Vector(const Type* el, uint32_t size) { return state->vectors_.Get(el, size); } const spirv::Matrix* TypeManager::Matrix(const Type* el, uint32_t columns, uint32_t rows) { return state->matrices_.Get(el, columns, rows); } const spirv::Array* TypeManager::Array(const Type* el, uint32_t size, uint32_t stride) { return state->arrays_.Get(el, size, stride); } const spirv::Alias* TypeManager::Alias(Symbol name, const Type* ty) { return state->aliases_.Get(name, ty); } const spirv::Struct* TypeManager::Struct(Symbol name, TypeList members) { return state->structs_.Get(name, std::move(members)); } const spirv::Sampler* TypeManager::Sampler(ast::SamplerKind kind) { return state->samplers_.Get(kind); } const spirv::DepthTexture* TypeManager::DepthTexture(ast::TextureDimension dims) { return state->depth_textures_.Get(dims); } const spirv::DepthMultisampledTexture* TypeManager::DepthMultisampledTexture( ast::TextureDimension dims) { return state->depth_multisampled_textures_.Get(dims); } const spirv::MultisampledTexture* TypeManager::MultisampledTexture(ast::TextureDimension dims, const Type* ty) { return state->multisampled_textures_.Get(dims, ty); } const spirv::SampledTexture* TypeManager::SampledTexture(ast::TextureDimension dims, const Type* ty) { return state->sampled_textures_.Get(dims, ty); } const spirv::StorageTexture* TypeManager::StorageTexture(ast::TextureDimension dims, ast::TexelFormat fmt, ast::Access access) { return state->storage_textures_.Get(dims, fmt, access); } // Debug String() methods for Type classes. Only enabled in debug builds. 
#ifndef NDEBUG std::string Void::String() const { return "void"; } std::string Bool::String() const { return "bool"; } std::string U32::String() const { return "u32"; } std::string F32::String() const { return "f32"; } std::string I32::String() const { return "i32"; } std::string Pointer::String() const { std::stringstream ss; ss << "ptr<" << std::string(ast::ToString(storage_class)) << ", " << type->String() + ">"; return ss.str(); } std::string Reference::String() const { std::stringstream ss; ss << "ref<" + std::string(ast::ToString(storage_class)) << ", " << type->String() << ">"; return ss.str(); } std::string Vector::String() const { std::stringstream ss; ss << "vec" << size << "<" << type->String() << ">"; return ss.str(); } std::string Matrix::String() const { std::stringstream ss; ss << "mat" << columns << "x" << rows << "<" << type->String() << ">"; return ss.str(); } std::string Array::String() const { std::stringstream ss; ss << "array<" << type->String() << ", " << size << ", " << stride << ">"; return ss.str(); } std::string Sampler::String() const { switch (kind) { case ast::SamplerKind::kSampler: return "sampler"; case ast::SamplerKind::kComparisonSampler: return "sampler_comparison"; } return "<unknown sampler>"; } std::string DepthTexture::String() const { std::stringstream ss; ss << "depth_" << dims; return ss.str(); } std::string DepthMultisampledTexture::String() const { std::stringstream ss; ss << "depth_multisampled_" << dims; return ss.str(); } std::string MultisampledTexture::String() const { std::stringstream ss; ss << "texture_multisampled_" << dims << "<" << type << ">"; return ss.str(); } std::string SampledTexture::String() const { std::stringstream ss; ss << "texture_" << dims << "<" << type << ">"; return ss.str(); } std::string StorageTexture::String() const { std::stringstream ss; ss << "texture_storage_" << dims << "<" << format << ", " << access << ">"; return ss.str(); } std::string Named::String() const { return name.to_str(); } #endif // NDEBUG } // namespace tint::reader::spirv
18,366
6,301
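// Small sketch of the deduplication behaviour provided by the TypeManager
// above: composite types requested through its Get()-backed factory methods
// are unique per key (see the hashers and equality operators defined in this
// file), so repeated requests return the same pointer. The enum value
// ast::StorageClass::kFunction is assumed from the Tint AST of this era.
#include <cassert>

void typeManagerDedupExample()
{
    tint::reader::spirv::TypeManager ty;

    auto* vec2f_a = ty.Vector(ty.F32(), 2);
    auto* vec2f_b = ty.Vector(ty.F32(), 2);
    assert(vec2f_a == vec2f_b);  // same allocation, not merely equal

    auto* ptr_a = ty.Pointer(vec2f_a, tint::ast::StorageClass::kFunction);
    auto* ptr_b = ty.Pointer(vec2f_b, tint::ast::StorageClass::kFunction);
    assert(ptr_a == ptr_b);
}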
// Copyright 2020 Tier IV, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "pointcloud_preprocessor/outlier_filter/voxel_grid_outlier_filter_nodelet.hpp" #include "pcl/kdtree/kdtree_flann.h" #include "pcl/search/kdtree.h" #include "pcl/segmentation/segment_differences.h" namespace pointcloud_preprocessor { VoxelGridOutlierFilterComponent::VoxelGridOutlierFilterComponent( const rclcpp::NodeOptions & options) : Filter("VoxelGridOutlierFilter", options) { // set initial parameters { voxel_size_x_ = static_cast<double>(declare_parameter("voxel_size_x", 0.3)); voxel_size_y_ = static_cast<double>(declare_parameter("voxel_size_y", 0.3)); voxel_size_z_ = static_cast<double>(declare_parameter("voxel_size_z", 0.1)); voxel_points_threshold_ = static_cast<int>(declare_parameter("voxel_points_threshold", 2)); } using std::placeholders::_1; set_param_res_ = this->add_on_set_parameters_callback( std::bind(&VoxelGridOutlierFilterComponent::paramCallback, this, _1)); } void VoxelGridOutlierFilterComponent::filter( const PointCloud2ConstPtr & input, const IndicesPtr & indices, PointCloud2 & output) { boost::mutex::scoped_lock lock(mutex_); pcl::PointCloud<pcl::PointXYZ>::Ptr pcl_input(new pcl::PointCloud<pcl::PointXYZ>); pcl::PointCloud<pcl::PointXYZ>::Ptr pcl_voxelized_input(new pcl::PointCloud<pcl::PointXYZ>); pcl::PointCloud<pcl::PointXYZ>::Ptr pcl_output(new pcl::PointCloud<pcl::PointXYZ>); pcl::fromROSMsg(*input, *pcl_input); pcl_voxelized_input->points.reserve(pcl_input->points.size()); voxel_filter.setInputCloud(pcl_input); voxel_filter.setSaveLeafLayout(true); voxel_filter.setLeafSize(voxel_size_x_, voxel_size_y_, voxel_size_z_); voxel_filter.setMinimumPointsNumberPerVoxel(voxel_points_threshold_); voxel_filter.filter(*pcl_voxelized_input); pcl_output->points.reserve(pcl_input->points.size()); for (size_t i = 0; i < pcl_input->points.size(); ++i) { const int index = voxel_filter.getCentroidIndexAt( voxel_filter.getGridCoordinates( pcl_input->points.at(i).x, pcl_input->points.at(i).y, pcl_input->points.at(i).z)); if (index != -1) { // not empty voxel pcl_output->points.push_back(pcl_input->points.at(i)); } } pcl::toROSMsg(*pcl_output, output); output.header = input->header; } rcl_interfaces::msg::SetParametersResult VoxelGridOutlierFilterComponent::paramCallback( const std::vector<rclcpp::Parameter> & p) { boost::mutex::scoped_lock lock(mutex_); if (get_param(p, "voxel_size_x", voxel_size_x_)) { RCLCPP_DEBUG(get_logger(), "Setting new distance threshold to: %f.", voxel_size_x_); } if (get_param(p, "voxel_size_y", voxel_size_y_)) { RCLCPP_DEBUG(get_logger(), "Setting new distance threshold to: %f.", voxel_size_y_); } if (get_param(p, "voxel_size_z", voxel_size_z_)) { RCLCPP_DEBUG(get_logger(), "Setting new distance threshold to: %f.", voxel_size_z_); } if (get_param(p, "voxel_points_threshold", voxel_points_threshold_)) { RCLCPP_DEBUG(get_logger(), "Setting new distance threshold to: %d.", voxel_points_threshold_); } rcl_interfaces::msg::SetParametersResult result; 
result.successful = true; result.reason = "success"; return result; } } // namespace pointcloud_preprocessor #include "rclcpp_components/register_node_macro.hpp" RCLCPP_COMPONENTS_REGISTER_NODE(pointcloud_preprocessor::VoxelGridOutlierFilterComponent)
3,958
1,485
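// A minimal standalone sketch (not part of the node above) of the voxel-occupancy test the
// filter relies on: a point survives only if its voxel still exists after
// setMinimumPointsNumberPerVoxel() filtering. Assumes PCL is available; the leaf sizes,
// threshold, and sample coordinates are illustrative values, not the node's defaults.
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/filters/voxel_grid.h>
#include <cstdio>

int main()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
  // Three points share one voxel; a fourth sits alone in a distant voxel.
  cloud->push_back(pcl::PointXYZ(0.05f, 0.05f, 0.05f));
  cloud->push_back(pcl::PointXYZ(0.06f, 0.04f, 0.05f));
  cloud->push_back(pcl::PointXYZ(0.04f, 0.06f, 0.05f));
  cloud->push_back(pcl::PointXYZ(5.0f, 5.0f, 5.0f));

  pcl::VoxelGrid<pcl::PointXYZ> voxel_filter;
  pcl::PointCloud<pcl::PointXYZ> voxelized;
  voxel_filter.setInputCloud(cloud);
  voxel_filter.setSaveLeafLayout(true);            // required for getCentroidIndexAt()
  voxel_filter.setLeafSize(0.3f, 0.3f, 0.1f);
  voxel_filter.setMinimumPointsNumberPerVoxel(2);  // voxels with fewer points are dropped
  voxel_filter.filter(voxelized);

  // Same membership test as the node's filter(): keep a point if its voxel survived.
  for (const auto & p : cloud->points) {
    const int index = voxel_filter.getCentroidIndexAt(
      voxel_filter.getGridCoordinates(p.x, p.y, p.z));
    std::printf("(%.2f, %.2f, %.2f) -> %s\n", p.x, p.y, p.z, index != -1 ? "kept" : "removed");
  }
  return 0;
}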
/* * Copyright (c) 2016, The OpenThread Authors. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the copyright holder nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "test_platform.h" #include "openthread-instance.h" #include "common/debug.hpp" #include "common/timer.hpp" enum { kCallCountIndexAlarmStop = 0, kCallCountIndexAlarmStart, kCallCountIndexTimerHandler, kCallCountIndexMax }; uint32_t sNow; uint32_t sPlatT0; uint32_t sPlatDt; bool sTimerOn; uint32_t sCallCount[kCallCountIndexMax]; void testTimerAlarmStop(otInstance *) { sTimerOn = false; sCallCount[kCallCountIndexAlarmStop]++; } void testTimerAlarmStartAt(otInstance *, uint32_t aT0, uint32_t aDt) { sTimerOn = true; sCallCount[kCallCountIndexAlarmStart]++; sPlatT0 = aT0; sPlatDt = aDt; } uint32_t testTimerAlarmGetNow(void) { return sNow; } void InitTestTimer(void) { g_testPlatAlarmStop = testTimerAlarmStop; g_testPlatAlarmStartAt = testTimerAlarmStartAt; g_testPlatAlarmGetNow = testTimerAlarmGetNow; } void InitCounters(void) { memset(sCallCount, 0, sizeof(sCallCount)); } /** * `TestTimer` sub-classes `ot::TimerMilli` and provides a handler and a counter to keep track of number of times timer gets * fired. */ class TestTimer: public ot::TimerMilli { public: TestTimer(otInstance *aInstance): ot::TimerMilli(aInstance, TestTimer::HandleTimerFired, NULL), mFiredCounter(0) { } static void HandleTimerFired(ot::Timer &aTimer) { static_cast<TestTimer &>(aTimer).HandleTimerFired(); } void HandleTimerFired(void) { sCallCount[kCallCountIndexTimerHandler]++; mFiredCounter++; } uint32_t GetFiredCounter(void) { return mFiredCounter; } void ResetFiredCounter(void) { mFiredCounter = 0; } private: uint32_t mFiredCounter; //< Number of times timer has been fired so far }; /** * Test the TimerScheduler's behavior of one timer started and fired. */ int TestOneTimer(void) { const uint32_t kTimeT0 = 1000; const uint32_t kTimerInterval = 10; otInstance *instance = testInitInstance(); TestTimer timer(instance); // Test one Timer basic operation. 
InitTestTimer(); InitCounters(); printf("TestOneTimer() "); sNow = kTimeT0; timer.Start(kTimerInterval); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestOneTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestOneTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestOneTimer: Handler CallCount Failed.\n"); VerifyOrQuit(sPlatT0 == 1000 && sPlatDt == 10, "TestOneTimer: Start params Failed.\n"); VerifyOrQuit(timer.IsRunning(), "TestOneTimer: Timer running Failed.\n"); VerifyOrQuit(sTimerOn, "TestOneTimer: Platform Timer State Failed.\n"); sNow += kTimerInterval; otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestOneTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 1, "TestOneTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 1, "TestOneTimer: Handler CallCount Failed.\n"); VerifyOrQuit(timer.IsRunning() == false, "TestOneTimer: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == false, "TestOneTimer: Platform Timer State Failed.\n"); // Test one Timer that spans the 32-bit wrap. InitCounters(); sNow = 0 - (kTimerInterval - 2); timer.Start(kTimerInterval); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestOneTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestOneTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestOneTimer: Handler CallCount Failed.\n"); VerifyOrQuit(sPlatT0 == 0 - (kTimerInterval - 2) && sPlatDt == 10, "TestOneTimer: Start params Failed.\n"); VerifyOrQuit(timer.IsRunning(), "TestOneTimer: Timer running Failed.\n"); VerifyOrQuit(sTimerOn, "TestOneTimer: Platform Timer State Failed.\n"); sNow += kTimerInterval; otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestOneTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 1, "TestOneTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 1, "TestOneTimer: Handler CallCount Failed.\n"); VerifyOrQuit(timer.IsRunning() == false, "TestOneTimer: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == false, "TestOneTimer: Platform Timer State Failed.\n"); // Test one Timer that is late by several msec InitCounters(); sNow = kTimeT0; timer.Start(kTimerInterval); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestOneTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestOneTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestOneTimer: Handler CallCount Failed.\n"); VerifyOrQuit(sPlatT0 == 1000 && sPlatDt == 10, "TestOneTimer: Start params Failed.\n"); VerifyOrQuit(timer.IsRunning(), "TestOneTimer: Timer running Failed.\n"); VerifyOrQuit(sTimerOn, "TestOneTimer: Platform Timer State Failed.\n"); sNow += kTimerInterval + 5; otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestOneTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 1, "TestOneTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 1, "TestOneTimer: Handler CallCount Failed.\n"); VerifyOrQuit(timer.IsRunning() == false, "TestOneTimer: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == false, "TestOneTimer: Platform Timer State 
Failed.\n"); // Test one Timer that is early by several msec InitCounters(); sNow = kTimeT0; timer.Start(kTimerInterval); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestOneTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestOneTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestOneTimer: Handler CallCount Failed.\n"); VerifyOrQuit(sPlatT0 == 1000 && sPlatDt == 10, "TestOneTimer: Start params Failed.\n"); VerifyOrQuit(timer.IsRunning(), "TestOneTimer: Timer running Failed.\n"); VerifyOrQuit(sTimerOn, "TestOneTimer: Platform Timer State Failed.\n"); sNow += kTimerInterval - 2; otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 2, "TestOneTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestOneTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestOneTimer: Handler CallCount Failed.\n"); VerifyOrQuit(timer.IsRunning() == true, "TestOneTimer: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == true, "TestOneTimer: Platform Timer State Failed.\n"); sNow += kTimerInterval; otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 2, "TestOneTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 1, "TestOneTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 1, "TestOneTimer: Handler CallCount Failed.\n"); VerifyOrQuit(timer.IsRunning() == false, "TestOneTimer: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == false, "TestOneTimer: Platform Timer State Failed.\n"); printf(" --> PASSED\n"); testFreeInstance(instance); return 0; } /** * Test the TimerScheduler's behavior of two timers started and fired. */ int TestTwoTimers(void) { const uint32_t kTimeT0 = 1000; const uint32_t kTimerInterval = 10; otInstance *instance = testInitInstance(); TestTimer timer1(instance); TestTimer timer2(instance); InitTestTimer(); printf("TestTwoTimers() "); // Test when second timer stars at the fire time of first timer (before alarm callback). 
InitCounters(); sNow = kTimeT0; timer1.Start(kTimerInterval); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestTwoTimers: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(sPlatT0 == kTimeT0 && sPlatDt == kTimerInterval, "TestTwoTimers: Start params Failed.\n"); VerifyOrQuit(timer1.IsRunning(), "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn, "TestTwoTimers: Platform Timer State Failed.\n"); sNow += kTimerInterval; timer2.Start(kTimerInterval); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestTwoTimers: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(sPlatT0 == kTimeT0 && sPlatDt == kTimerInterval, "TestTwoTimers: Start params Failed.\n"); VerifyOrQuit(timer1.IsRunning() == true, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == true, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn, "TestTwoTimers: Platform Timer State Failed.\n"); otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 2, "TestTwoTimers: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 1, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(timer1.GetFiredCounter() == 1, "TestTwoTimers: Fire Counter failed.\n"); VerifyOrQuit(sPlatT0 == sNow && sPlatDt == kTimerInterval, "TestTwoTimers: Start params Failed.\n"); VerifyOrQuit(timer1.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == true, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == true, "TestTwoTimers: Platform Timer State Failed.\n"); sNow += kTimerInterval; otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 2, "TestTwoTimers: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 1, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 2, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(timer2.GetFiredCounter() == 1, "TestTwoTimers: Fire Counter failed.\n"); VerifyOrQuit(timer1.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == false, "TestTwoTimers: Platform Timer State Failed.\n"); // Test when second timer starts at the fire time of first timer (before otPlatAlarmMilliFired()) and its fire time // is before the first timer. Ensure that the second timer handler is invoked before the first one. 
InitCounters(); timer1.ResetFiredCounter(); timer2.ResetFiredCounter(); sNow = kTimeT0; timer1.Start(kTimerInterval); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestTwoTimers: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(sPlatT0 == kTimeT0 && sPlatDt == kTimerInterval, "TestTwoTimers: Start params Failed.\n"); VerifyOrQuit(timer1.IsRunning(), "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn, "TestTwoTimers: Platform Timer State Failed.\n"); sNow += kTimerInterval; timer2.StartAt(kTimeT0, kTimerInterval - 2); // Timer 2 is even before timer 1 VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(timer1.IsRunning() == true, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == true, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn, "TestTwoTimers: Platform Timer State Failed.\n"); otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 1, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(timer2.GetFiredCounter() == 1, "TestTwoTimers: Fire Counter failed.\n"); VerifyOrQuit(sPlatT0 == sNow && sPlatDt == 0, "TestTwoTimers: Start params Failed.\n"); VerifyOrQuit(timer1.IsRunning() == true, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == true, "TestTwoTimers: Platform Timer State Failed.\n"); otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 1, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 2, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(timer1.GetFiredCounter() == 1, "TestTwoTimers: Fire Counter failed.\n"); VerifyOrQuit(timer1.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == false, "TestTwoTimers: Platform Timer State Failed.\n"); // Timer 1 fire callback is late by some ticks/ms, and second timer is scheduled (before call to otPlatAlarmMilliFired) // with a maximum interval. This is to test (corner-case) scenario where the fire time of two timers spanning over // the maximum interval. 
InitCounters(); timer1.ResetFiredCounter(); timer2.ResetFiredCounter(); sNow = kTimeT0; timer1.Start(kTimerInterval); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestTwoTimers: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(sPlatT0 == kTimeT0 && sPlatDt == kTimerInterval, "TestTwoTimers: Start params Failed.\n"); VerifyOrQuit(timer1.IsRunning(), "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn, "TestTwoTimers: Platform Timer State Failed.\n"); sNow += kTimerInterval + 5; timer2.Start(ot::Timer::kMaxDt); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 1, "TestTwoTimers: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(timer1.IsRunning() == true, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == true, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn, "TestTwoTimers: Platform Timer State Failed.\n"); otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 2, "TestTwoTimers: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 1, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(timer1.GetFiredCounter() == 1, "TestTwoTimers: Fire Counter failed.\n"); VerifyOrQuit(sPlatT0 == sNow, "TestTwoTimers: Start params Failed.\n"); VerifyOrQuit(sPlatDt == ot::Timer::kMaxDt, "TestTwoTimers: Start params Failed.\n"); VerifyOrQuit(timer1.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == true, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == true, "TestTwoTimers: Platform Timer State Failed.\n"); sNow += ot::Timer::kMaxDt; otPlatAlarmMilliFired(instance); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 2, "TestTwoTimers: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 1, "TestTwoTimers: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 2, "TestTwoTimers: Handler CallCount Failed.\n"); VerifyOrQuit(timer2.GetFiredCounter() == 1, "TestTwoTimers: Fire Counter failed.\n"); VerifyOrQuit(timer1.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(timer2.IsRunning() == false, "TestTwoTimers: Timer running Failed.\n"); VerifyOrQuit(sTimerOn == false, "TestTwoTimers: Platform Timer State Failed.\n"); printf(" --> PASSED\n"); testFreeInstance(instance); return 0; } /** * Test the TimerScheduler's behavior of ten timers started and fired. * * `aTimeShift` is added to the t0 and trigger times for all timers. It can be used to check the ten timer behavior * at different start time (e.g., around a 32-bit wrap). 
*/ static void TenTimers(uint32_t aTimeShift) { const uint32_t kNumTimers = 10; const uint32_t kNumTriggers = 7; const uint32_t kTimeT0[kNumTimers] = { 1000, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008 }; const uint32_t kTimerInterval[kNumTimers] = { 20, 100, (ot::Timer::kMaxDt - kTimeT0[2]), 100000, 1000000, 10, ot::Timer::kMaxDt, 200, 200, 200 }; // Expected timer fire order // timer # Trigger time // 5 1014 // 0 1020 // 1 1100 // 7 1206 // 8 1207 // 9 1208 // 3 101002 // 4 1001003 // 2 kMaxDt // 6 kMaxDt + 1005 const uint32_t kTriggerTimes[kNumTriggers] = { 1014, 1020, 1100, 1207, 101004, ot::Timer::kMaxDt, ot::Timer::kMaxDt + kTimeT0[6] }; // Expected timers fired by each kTriggerTimes[] value // Trigger # Timers Fired // 0 5 // 1 0 // 2 1 // 3 7, 8 // 4 9, 3 // 5 4, 2 // 6 6 const bool kTimerStateAfterTrigger [kNumTriggers][kNumTimers] = { { true, true, true, true, true, false, true, true, true, true}, // 5 { false, true, true, true, true, false, true, true, true, true}, // 0 { false, false, true, true, true, false, true, true, true, true}, // 1 { false, false, true, true, true, false, true, false, false, true}, // 7, 8 { false, false, true, false, true, false, true, false, false, false}, // 9, 3 { false, false, false, false, false, false, true, false, false, false}, // 4, 2 { false, false, false, false, false, false, false, false, false, false} // 6 }; const bool kSchedulerStateAfterTrigger[kNumTriggers] = { true, true, true, true, true, true, false }; const uint32_t kTimerHandlerCountAfterTrigger[kNumTriggers] = { 1, 2, 3, 5, 7, 9, 10 }; const uint32_t kTimerStopCountAfterTrigger[kNumTriggers] = { 0, 0, 0, 0, 0, 0, 1 }; const uint32_t kTimerStartCountAfterTrigger[kNumTriggers] = { 3, 4, 5, 7, 9, 11, 11 }; otInstance *instance = testInitInstance(); TestTimer timer0(instance); TestTimer timer1(instance); TestTimer timer2(instance); TestTimer timer3(instance); TestTimer timer4(instance); TestTimer timer5(instance); TestTimer timer6(instance); TestTimer timer7(instance); TestTimer timer8(instance); TestTimer timer9(instance); TestTimer *timers[kNumTimers] = { &timer0, &timer1, &timer2, &timer3, &timer4, &timer5, &timer6, &timer7, &timer8, &timer9 }; size_t i; printf("TestTenTimer() with aTimeShift=%-10u ", aTimeShift); // Start the Ten timers. InitTestTimer(); InitCounters(); for (i = 0; i < kNumTimers ; i++) { sNow = kTimeT0[i] + aTimeShift; timers[i]->Start(kTimerInterval[i]); } // given the order in which timers are started, the TimerScheduler should call otPlatAlarmMilliStartAt 2 times. // one for timer[0] and one for timer[5] which will supercede timer[0]. VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == 2, "TestTenTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == 0, "TestTenTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == 0, "TestTenTimer: Handler CallCount Failed.\n"); VerifyOrQuit(sPlatT0 == kTimeT0[5] + aTimeShift, "TestTenTimer: Start params Failed.\n"); VerifyOrQuit(sPlatDt == kTimerInterval[5], "TestTenTimer: Start params Failed.\n"); VerifyOrQuit(sTimerOn, "TestTenTimer: Platform Timer State Failed.\n"); for (i = 0 ; i < kNumTimers ; i++) { VerifyOrQuit(timers[i]->IsRunning(), "TestTenTimer: Timer running Failed.\n"); } // Issue the triggers and test the State after each trigger. 
for (size_t trigger = 0 ; trigger < kNumTriggers ; trigger++) { sNow = kTriggerTimes[trigger] + aTimeShift; do { // By design, each call to otPlatAlarmMilliFired() can result in 0 or 1 calls to a timer handler. // For some combinations of sNow and Timers queued, it is necessary to call otPlatAlarmMilliFired() // multiple times in order to handle all the expired timers. It can be determined that another // timer is ready to be triggered by examining the aDt arg passed into otPlatAlarmMilliStartAt(). If // that value is 0, then otPlatAlarmMilliFired should be fired immediately. This loop calls otPlatAlarmMilliFired() // the requisite number of times based on the aDt argument. otPlatAlarmMilliFired(instance); } while (sPlatDt == 0); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStart] == kTimerStartCountAfterTrigger[trigger], "TestTenTimer: Start CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexAlarmStop] == kTimerStopCountAfterTrigger[trigger], "TestTenTimer: Stop CallCount Failed.\n"); VerifyOrQuit(sCallCount[kCallCountIndexTimerHandler] == kTimerHandlerCountAfterTrigger[trigger], "TestTenTimer: Handler CallCount Failed.\n"); VerifyOrQuit(sTimerOn == kSchedulerStateAfterTrigger[trigger], "TestTenTimer: Platform Timer State Failed.\n"); for (i = 0 ; i < kNumTimers ; i++) { VerifyOrQuit( timers[i]->IsRunning() == kTimerStateAfterTrigger[trigger][i], "TestTenTimer: Timer running Failed.\n" ); } } for (i = 0 ; i < kNumTimers ; i++) { VerifyOrQuit(timers[i]->GetFiredCounter() == 1, "TestTenTimer: Timer fired counter Failed.\n"); } printf("--> PASSED\n"); testFreeInstance(instance); } int TestTenTimers(void) { // Time shift to change the start/fire time of ten timers. const uint32_t kTimeShift[] = { 0, 100000U, 0U - 1U, 0U - 1100U, ot::Timer::kMaxDt, ot::Timer::kMaxDt + 1020U, }; size_t i; for (i = 0; i < sizeof(kTimeShift) / sizeof(kTimeShift[0]); i++) { TenTimers(kTimeShift[i]); } return 0; } void RunTimerTests(void) { TestOneTimer(); TestTwoTimers(); TestTenTimers(); } #ifdef ENABLE_TEST_MAIN int main(void) { RunTimerTests(); printf("All tests passed\n"); return 0; } #endif
28,411
9,386
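// Side note on the "spans the 32-bit wrap" case in the timer test above: it relies on
// uint32_t wrap-around arithmetic. A tiny standalone check of that arithmetic:
#include <cstdint>
#include <cstdio>

int main()
{
    const uint32_t kTimerInterval = 10;
    uint32_t t0  = 0 - (kTimerInterval - 2);  // 4294967288, i.e. 8 ticks before the wrap
    uint32_t now = t0 + kTimerInterval;       // wraps around to 2
    // Elapsed time is still 10 because unsigned subtraction wraps the same way,
    // which is why the scheduler can compare times across the 32-bit boundary.
    std::printf("t0=%u now=%u elapsed=%u\n", t0, now, now - t0);
    return 0;
}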
#include "strategy.h" #include "librisk.h" Strategy::Strategy() { this->driver = GameDriver::getInstance(); } /** * @brief A signal sent to the Strategy class from the game driver to indicate * that a computer-controlled player should made their move. * * The AI strategy implementations override each of the fooPhase() methods * which return the name(s) of the country or countries to act upon. * * Empty string indicates the AI wishes to make no move, or there are none * possible. */ void Strategy::takeAction(Mode mode) { RiskMap* map = this->driver->getRiskMap(); if (mode == REINFORCEMENT) { std::string countryName = this->reinforcePhase(); if (countryName == "") { return; } Country* country = map->getCountry(countryName); Player* player = map->getPlayer(country->getPlayer()); this->driver->reinforceCountry(player, country, player->getReinforcements()); } else if (mode == ATTACK) { std::pair<std::string, std::string> countryNames = this->attackPhase(); if (countryNames.first == "" || countryNames.second == "") { return; } driver->attackCountry(map->getCountry(countryNames.first), map->getCountry(countryNames.second)); } else if (mode == FORTIFICATION) { std::pair<std::string, std::string> countryNames = this->fortifyPhase(); if (countryNames.first == "" || countryNames.second == "") { return; } // Given the two countries, fortify so that the armies are as equal as possible. Country* origin = map->getCountry(countryNames.first); Country* destination = map->getCountry(countryNames.second); int splitDifference = std::abs(origin->getArmies() - destination->getArmies()) / 2; this->driver->fortifyCountry(origin, destination, splitDifference); } } /** * @brief Reinforcement phase decision making. Places all reinforcements on the * country with the fewest armies. */ std::string Strategy::reinforcePhase() { RiskMap* map = this->driver->getRiskMap(); std::string playerName = this->driver->getCurrentPlayerName(); int minArmies = 10000; Country* minArmiesCountry = nullptr; // add the reinforcements to the player int numCardsSelected = map->getPlayer(playerName)->getCards(); int armiesEarned = convertCardsToReinforcements(numCardsSelected); if (armiesEarned > 0) { this->driver->addCardsTradeReinforcements(armiesEarned); this->driver->updatePlayerCards(-numCardsSelected); } // Reinforce the weakest country for (const std::string countryName : map->getCountriesOwnedByPlayer(playerName)) { Country* country = map->getCountry(countryName); int armies = country->getArmies(); if (armies < minArmies) { minArmies = armies; minArmiesCountry = country; } } if (minArmiesCountry == nullptr) { return ""; } return minArmiesCountry->getName(); } /** * @brief Fortification phase decision making */ std::pair<std::string, std::string> Strategy::fortifyPhase() { // Not implemented in the current AI. return std::pair<std::string, std::string>("", ""); }
2,983
986
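// Worked example of the fortification arithmetic above ("as equal as possible"), with
// illustrative army counts; assumes fortifyCountry() moves that many armies from the
// origin to the destination.
#include <cstdlib>
#include <iostream>

int main()
{
    int origin = 10, destination = 4;
    int splitDifference = std::abs(origin - destination) / 2;  // (10 - 4) / 2 = 3
    origin -= splitDifference;       // 7
    destination += splitDifference;  // 7
    std::cout << origin << " vs " << destination << " after moving " << splitDifference << std::endl;
    return 0;
}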
//
// Created by buran on 14/03/18.
//

#include <PeriodType.h>
#include <TwitchException.h>
#include <map>
#include <stdexcept>
#include <string>

std::string TwitchXX::PeriodType::toString(TwitchXX::PeriodType::Value v)
{
    static const std::map<Value, std::string> strs{{Value::ALL,   "all"},
                                                   {Value::DAY,   "day"},
                                                   {Value::WEEK,  "week"},
                                                   {Value::MONTH, "month"}};
    try
    {
        return strs.at(v);
    }
    catch(const std::out_of_range& e)
    {
        throw TwitchException("Value type is not supported");
    }
}

TwitchXX::PeriodType::Value TwitchXX::PeriodType::fromString(const std::string &s)
{
    static const std::map<std::string, Value> strs{{"all",   Value::ALL},
                                                   {"day",   Value::DAY},
                                                   {"week",  Value::WEEK},
                                                   {"month", Value::MONTH}};
    try
    {
        return strs.at(s);
    }
    catch(const std::out_of_range& e)
    {
        throw TwitchException("Can not convert string to period type");
    }
}

TwitchXX::PeriodType::Value TwitchXX::PeriodType::fromInt(int i)
{
    if(static_cast<int>(Value::ALL) > i || static_cast<int>(Value::MONTH) < i)
    {
        throw TwitchException("Value is not within PeriodType range");
    }
    return static_cast<Value>(i);
}
1,137
402
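// Usage sketch for the conversions above. Assumes PeriodType exposes these functions as
// statics (as the qualified definitions suggest) and that the TwitchXX headers are on the
// include path.
#include <iostream>
#include <PeriodType.h>
#include <TwitchException.h>

int main()
{
    using TwitchXX::PeriodType;

    std::cout << PeriodType::toString(PeriodType::Value::WEEK) << '\n';  // prints "week"
    PeriodType::Value v = PeriodType::fromString("month");
    std::cout << PeriodType::toString(v) << '\n';                        // prints "month"

    try {
        PeriodType::fromString("year");  // not in the lookup table
    } catch (const TwitchXX::TwitchException &) {
        std::cout << "unknown period rejected\n";
    }
    return 0;
}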
// Copyright (c) Glyn Matthews 2010.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

//[ hello_world_client_main
/*`
  This is a part of the 'Hello World' example. We create a client object and
  make a single HTTP request. If we make this request to the
  `hello_world_server`, then the output is simply "Hello, World!".
*/
#include <boost/network/protocol/http/client.hpp>
#include <iostream>

namespace http = boost::network::http;

int main(int argc, char *argv[]) {
  if (argc != 2) {
    std::cerr << "Usage: " << argv[0] << " url" << std::endl;
    return 1;
  }

  try {
    /*<< Creates the client. >>*/
    http::client client;
    /*<< Creates a request using a URI supplied on the command line. >>*/
    http::client::request request(argv[1]);
    /*<< Gets a response from the HTTP server. >>*/
    http::client::response response = client.get(request);
    /*<< Prints the response body to the console. >>*/
    std::cout << body(response) << std::endl;
  } catch (std::exception &e) {
    std::cerr << e.what() << std::endl;
    return 1;
  }

  return 0;
}
//]
1,307
424
//
// Created by xmac on 18-10-17.
//
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <cerrno>
#include <cstring>

int inet_pton(int family, const char* strptr, void* addrptr)
{
    if (family == AF_INET) {
        struct in_addr in_val;
        if (inet_aton(strptr, &in_val)) {
            memcpy(addrptr, &in_val, sizeof(struct in_addr));
            return (1);
        }
        return 0;
    }
    // unsupported address family
    errno = EAFNOSUPPORT;
    return (-1);
}
464
187
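// Usage sketch for the IPv4-only wrapper above; the parsed address is converted back to
// host byte order here only so the printed value is easy to read.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <cstdio>

int main()
{
    in_addr addr{};
    if (inet_pton(AF_INET, "192.168.1.1", &addr) == 1) {
        // s_addr holds the address in network byte order.
        std::printf("0x%08x\n", static_cast<unsigned>(ntohl(addr.s_addr)));  // 0xc0a80101
    }
    return 0;
}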
#include <iostream>
#include <vector>
#include <utility> // pair
using namespace std;

// Dynamic programming: memoize results so each value is computed only once, and use
// long long because Fibonacci numbers overflow int past n = 46.
long long pibonacci(int n) {
    static vector<long long> memo(90, -1);
    if(n <= 1) {
        return n;
    }
    if(memo[n] != -1) {
        return memo[n];
    }
    return memo[n] = pibonacci(n-1) + pibonacci(n-2);
}

int main(void) {
    // int n;
    // cin>>n;
    for(int i = 0; i < 90; i++) {
        cout<<pibonacci(i)<<endl;
    }
    return 0;
}
387
183
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ // MARKER(update_precomp.py): autogen include statement, do not remove #include "precompiled_sc.hxx" #include "pvlaydlg.hxx" #include <com/sun/star/sheet/DataPilotFieldOrientation.hpp> #include <com/sun/star/sheet/DataPilotFieldSortMode.hpp> #include <sfx2/dispatch.hxx> #include <vcl/mnemonic.hxx> #include <vcl/msgbox.hxx> #include "dbdocfun.hxx" #include "uiitems.hxx" #include "rangeutl.hxx" #include "document.hxx" #include "viewdata.hxx" #include "tabvwsh.hxx" #include "reffact.hxx" #include "scresid.hxx" #include "globstr.hrc" #include "pivot.hrc" #include "dpobject.hxx" #include "dpsave.hxx" #include "dpshttab.hxx" #include "scmod.hxx" #include "sc.hrc" //CHINA001 #include "scabstdlg.hxx" //CHINA001 // ============================================================================ using namespace ::com::sun::star; using ::rtl::OUString; // ============================================================================ namespace { const sal_uInt16 STD_FORMAT = sal_uInt16( SCA_VALID | SCA_TAB_3D | SCA_COL_ABSOLUTE | SCA_ROW_ABSOLUTE | SCA_TAB_ABSOLUTE ); OUString lclGetNameWithoutMnemonic( const FixedText& rFixedText ) { return MnemonicGenerator::EraseAllMnemonicChars( rFixedText.GetText() ); } } // namespace // ============================================================================ ScPivotLayoutDlg::ScPivotLayoutDlg( SfxBindings* pB, SfxChildWindow* pCW, Window* pParent, const ScDPObject& rDPObject ) : ScAnyRefDlg( pB, pCW, pParent, RID_SCDLG_PIVOT_LAYOUT ), maFlLayout( this, ScResId( FL_LAYOUT ) ), maScrPage( this, ScResId( SCROLL_PAGE ) ), maFtPage( this, ScResId( FT_PAGE ) ), maWndPage( this, ScResId( WND_PAGE ), maScrPage, &maFtPage, lclGetNameWithoutMnemonic( maFtPage ), PIVOTFIELDTYPE_PAGE, HID_SC_DPLAY_PAGE, POINTER_PIVOT_FIELD, 5, 2, 1, 0 ), maScrCol( this, ScResId( SCROLL_COL ) ), maFtCol( this, ScResId( FT_COL ) ), maWndCol( this, ScResId( WND_COL ), maScrCol, &maFtCol, lclGetNameWithoutMnemonic( maFtCol ), PIVOTFIELDTYPE_COL, HID_SC_DPLAY_COLUMN, POINTER_PIVOT_COL, 4, 2, 1, 0 ), maScrRow( this, ScResId( SCROLL_ROW ) ), maFtRow( this, ScResId( FT_ROW ) ), maWndRow( this, ScResId( WND_ROW ), maScrRow, &maFtRow, lclGetNameWithoutMnemonic( maFtRow ), PIVOTFIELDTYPE_ROW, HID_SC_DPLAY_ROW, POINTER_PIVOT_ROW, 1, 8, 1, 0 ), maScrData( this, ScResId( SCROLL_DATA ) ), maFtData( this, ScResId( FT_DATA ) ), maWndData( this, ScResId( WND_DATA ), maScrData, &maFtData, lclGetNameWithoutMnemonic( maFtData ), PIVOTFIELDTYPE_DATA, HID_SC_DPLAY_DATA, POINTER_PIVOT_FIELD, 1, 8, 4, 0 ), maFlSelect( this, ScResId( FL_SELECT ) ), maScrSelect( this, ScResId( WND_HSCROLL ) ), maWndSelect( this, ScResId( 
WND_SELECT ), maScrSelect, 0, String( ScResId( STR_SELECT ) ), PIVOTFIELDTYPE_SELECT, HID_SC_DPLAY_SELECT, POINTER_PIVOT_FIELD, 2, 10, 1, 2 ), maFtInfo( this, ScResId( FT_INFO ) ), maFlAreas( this, ScResId( FL_OUTPUT ) ), maFtInArea( this, ScResId( FT_INAREA) ), maEdInPos( this, this, ScResId( ED_INAREA) ), maRbInPos( this, ScResId( RB_INAREA ), &maEdInPos, this ), maLbOutPos( this, ScResId( LB_OUTAREA ) ), maFtOutArea( this, ScResId( FT_OUTAREA ) ), maEdOutPos( this, this, ScResId( ED_OUTAREA ) ), maRbOutPos( this, ScResId( RB_OUTAREA ), &maEdOutPos, this ), maBtnIgnEmptyRows( this, ScResId( BTN_IGNEMPTYROWS ) ), maBtnDetectCat( this, ScResId( BTN_DETECTCAT ) ), maBtnTotalCol( this, ScResId( BTN_TOTALCOL ) ), maBtnTotalRow( this, ScResId( BTN_TOTALROW ) ), maBtnFilter( this, ScResId( BTN_FILTER ) ), maBtnDrillDown( this, ScResId( BTN_DRILLDOWN ) ), maBtnOk( this, ScResId( BTN_OK ) ), maBtnCancel( this, ScResId( BTN_CANCEL ) ), maBtnHelp( this, ScResId( BTN_HELP ) ), maBtnRemove( this, ScResId( BTN_REMOVE ) ), maBtnOptions( this, ScResId( BTN_OPTIONS ) ), maBtnMore( this, ScResId( BTN_MORE ) ), mxDlgDPObject( new ScDPObject( rDPObject ) ), mpViewData( ((ScTabViewShell*)SfxViewShell::Current())->GetViewData() ), mpDoc( ((ScTabViewShell*)SfxViewShell::Current())->GetViewData()->GetDocument() ), mpFocusWindow( 0 ), mpTrackingWindow( 0 ), mpDropWindow( 0 ), mpActiveEdit( 0 ), mbRefInputMode( false ) { DBG_ASSERT( mpViewData && mpDoc, "ScPivotLayoutDlg::ScPivotLayoutDlg - missing document or view data" ); mxDlgDPObject->SetAlive( true ); // needed to get structure information mxDlgDPObject->FillOldParam( maPivotData ); mxDlgDPObject->FillLabelData( maPivotData ); maBtnRemove.SetClickHdl( LINK( this, ScPivotLayoutDlg, ClickHdl ) ); maBtnOptions.SetClickHdl( LINK( this, ScPivotLayoutDlg, ClickHdl ) ); // PIVOT_MAXFUNC defined in sc/inc/dpglobal.hxx maFuncNames.reserve( PIVOT_MAXFUNC ); for( sal_uInt16 i = 1; i <= PIVOT_MAXFUNC; ++i ) maFuncNames.push_back( String( ScResId( i ) ) ); maBtnMore.AddWindow( &maFlAreas ); maBtnMore.AddWindow( &maFtInArea ); maBtnMore.AddWindow( &maEdInPos ); maBtnMore.AddWindow( &maRbInPos ); maBtnMore.AddWindow( &maFtOutArea ); maBtnMore.AddWindow( &maLbOutPos ); maBtnMore.AddWindow( &maEdOutPos ); maBtnMore.AddWindow( &maRbOutPos ); maBtnMore.AddWindow( &maBtnIgnEmptyRows ); maBtnMore.AddWindow( &maBtnDetectCat ); maBtnMore.AddWindow( &maBtnTotalCol ); maBtnMore.AddWindow( &maBtnTotalRow ); maBtnMore.AddWindow( &maBtnFilter ); maBtnMore.AddWindow( &maBtnDrillDown ); maBtnMore.SetClickHdl( LINK( this, ScPivotLayoutDlg, MoreClickHdl ) ); if( mxDlgDPObject->GetSheetDesc() ) { maEdInPos.Enable(); maRbInPos.Enable(); ScRange aRange = mxDlgDPObject->GetSheetDesc()->aSourceRange; String aString; aRange.Format( aString, SCR_ABS_3D, mpDoc, mpDoc->GetAddressConvention() ); maEdInPos.SetText( aString ); } else { // data is not reachable, so could be a remote database maEdInPos.Disable(); maRbInPos.Disable(); } // #i29203# align right border of page window with data window long nPagePosX = maWndData.GetPosPixel().X() + maWndData.GetSizePixel().Width() - maWndPage.GetSizePixel().Width(); maWndPage.SetPosPixel( Point( nPagePosX, maWndPage.GetPosPixel().Y() ) ); maScrPage.SetPosPixel( Point( maScrData.GetPosPixel().X(), maScrPage.GetPosPixel().Y() ) ); InitFieldWindows(); maLbOutPos.SetSelectHdl( LINK( this, ScPivotLayoutDlg, SelAreaHdl ) ); maEdOutPos.SetModifyHdl( LINK( this, ScPivotLayoutDlg, EdOutModifyHdl ) ); maEdInPos.SetModifyHdl( LINK( this, ScPivotLayoutDlg, EdInModifyHdl 
) ); maBtnOk.SetClickHdl( LINK( this, ScPivotLayoutDlg, OkHdl ) ); maBtnCancel.SetClickHdl( LINK( this, ScPivotLayoutDlg, CancelHdl ) ); if( mpViewData && mpDoc ) { /* * Aus den RangeNames des Dokumentes werden nun die * in einem Zeiger-Array gemerkt, bei denen es sich * um sinnvolle Bereiche handelt */ maLbOutPos.Clear(); maLbOutPos.InsertEntry( String( ScResId( SCSTR_UNDEFINED ) ), 0 ); maLbOutPos.InsertEntry( String( ScResId( SCSTR_NEWTABLE ) ), 1 ); ScAreaNameIterator aIter( mpDoc ); String aName; ScRange aRange; String aRefStr; while ( aIter.Next( aName, aRange ) ) { if ( !aIter.WasDBName() ) // hier keine DB-Bereiche ! { sal_uInt16 nInsert = maLbOutPos.InsertEntry( aName ); aRange.aStart.Format( aRefStr, SCA_ABS_3D, mpDoc, mpDoc->GetAddressConvention() ); maLbOutPos.SetEntryData( nInsert, new String( aRefStr ) ); } } } if ( maPivotData.nTab != MAXTAB+1 ) { String aStr; ScAddress( maPivotData.nCol, maPivotData.nRow, maPivotData.nTab ).Format( aStr, STD_FORMAT, mpDoc, mpDoc->GetAddressConvention() ); maEdOutPos.SetText( aStr ); EdOutModifyHdl( 0 ); } else { maLbOutPos.SelectEntryPos( maLbOutPos.GetEntryCount()-1 ); SelAreaHdl(NULL); } maBtnIgnEmptyRows.Check( maPivotData.bIgnoreEmptyRows ); maBtnDetectCat.Check( maPivotData.bDetectCategories ); maBtnTotalCol.Check( maPivotData.bMakeTotalCol ); maBtnTotalRow.Check( maPivotData.bMakeTotalRow ); const ScDPSaveData* pSaveData = mxDlgDPObject->GetSaveData(); maBtnFilter.Check( !pSaveData || pSaveData->GetFilterButton() ); maBtnDrillDown.Check( !pSaveData || pSaveData->GetDrillDown() ); // child event listener handles field movement when keyboard shortcut is pressed AddChildEventListener( LINK( this, ScPivotLayoutDlg, ChildEventListener ) ); GrabFieldFocus( maWndSelect ); FreeResource(); } ScPivotLayoutDlg::~ScPivotLayoutDlg() { RemoveChildEventListener( LINK( this, ScPivotLayoutDlg, ChildEventListener ) ); for( sal_uInt16 i = 2, nEntries = maLbOutPos.GetEntryCount(); i < nEntries; ++i ) delete (String*)maLbOutPos.GetEntryData( i ); } ScDPLabelData* ScPivotLayoutDlg::GetLabelData( SCCOL nCol, size_t* pnIndex ) { ScDPLabelData* pLabelData = 0; for( ScDPLabelDataVector::iterator aIt = maLabelData.begin(), aEnd = maLabelData.end(); !pLabelData && (aIt != aEnd); ++aIt ) { if( aIt->mnCol == nCol ) { pLabelData = &*aIt; if( pnIndex ) *pnIndex = aIt - maLabelData.begin(); } } return pLabelData; } String ScPivotLayoutDlg::GetFuncString( sal_uInt16& rnFuncMask, bool bIsValue ) { String aStr; if( (rnFuncMask == PIVOT_FUNC_NONE) || (rnFuncMask == PIVOT_FUNC_AUTO) ) { if( bIsValue ) { aStr = GetFuncName( PIVOTSTR_SUM ); rnFuncMask = PIVOT_FUNC_SUM; } else { aStr = GetFuncName( PIVOTSTR_COUNT ); rnFuncMask = PIVOT_FUNC_COUNT; } } else if( rnFuncMask == PIVOT_FUNC_SUM ) aStr = GetFuncName( PIVOTSTR_SUM ); else if( rnFuncMask == PIVOT_FUNC_COUNT ) aStr = GetFuncName( PIVOTSTR_COUNT ); else if( rnFuncMask == PIVOT_FUNC_AVERAGE ) aStr = GetFuncName( PIVOTSTR_AVG ); else if( rnFuncMask == PIVOT_FUNC_MAX ) aStr = GetFuncName( PIVOTSTR_MAX ); else if( rnFuncMask == PIVOT_FUNC_MIN ) aStr = GetFuncName( PIVOTSTR_MIN ); else if( rnFuncMask == PIVOT_FUNC_PRODUCT ) aStr = GetFuncName( PIVOTSTR_PROD ); else if( rnFuncMask == PIVOT_FUNC_COUNT_NUM ) aStr = GetFuncName( PIVOTSTR_COUNT2 ); else if( rnFuncMask == PIVOT_FUNC_STD_DEV ) aStr = GetFuncName( PIVOTSTR_DEV ); else if( rnFuncMask == PIVOT_FUNC_STD_DEVP ) aStr = GetFuncName( PIVOTSTR_DEV2 ); else if( rnFuncMask == PIVOT_FUNC_STD_VAR ) aStr = GetFuncName( PIVOTSTR_VAR ); else if( rnFuncMask == PIVOT_FUNC_STD_VARP ) 
aStr = GetFuncName( PIVOTSTR_VAR2 ); else { aStr = ScGlobal::GetRscString( STR_TABLE_ERGEBNIS ); aStr.AppendAscii( RTL_CONSTASCII_STRINGPARAM( " - " ) ); } return aStr; } void ScPivotLayoutDlg::NotifyStartTracking( ScPivotFieldWindow& rSourceWindow ) { mpTrackingWindow = &rSourceWindow; mpDropWindow = 0; rSourceWindow.NotifyStartTracking(); StartTracking( STARTTRACK_BUTTONREPEAT ); SetPointer( Pointer( rSourceWindow.GetDropPointerStyle() ) ); } void ScPivotLayoutDlg::NotifyDoubleClick( ScPivotFieldWindow& rSourceWindow ) { // nothing to do on double-click in selection window if( rSourceWindow.GetType() == PIVOTFIELDTYPE_SELECT ) return; const ScPivotFuncData* pFuncData = rSourceWindow.GetSelectedFuncData(); DBG_ASSERT( pFuncData, "ScPivotLayoutDlg::NotifyDoubleClick - invalid selection" ); if( !pFuncData ) return; ScDPLabelData* pLabelData = GetLabelData( pFuncData->mnCol ); DBG_ASSERT( pLabelData, "ScPivotLayoutDlg::NotifyDoubleClick - missing label data" ); if( !pLabelData ) return; ScAbstractDialogFactory* pFactory = ScAbstractDialogFactory::Create(); DBG_ASSERT( pFactory, "ScPivotLayoutDlg::NotifyDoubleClick - ScAbstractDialogFactory creation failed" ); if( !pFactory ) return; if( rSourceWindow.GetType() == PIVOTFIELDTYPE_DATA ) { ::std::auto_ptr< AbstractScDPFunctionDlg > xDlg( pFactory->CreateScDPFunctionDlg( this, RID_SCDLG_DPDATAFIELD, maLabelData, *pLabelData, *pFuncData ) ); if( xDlg->Execute() == RET_OK ) { ScPivotFuncData aFuncData( *pFuncData ); aFuncData.mnFuncMask = pLabelData->mnFuncMask = xDlg->GetFuncMask(); aFuncData.maFieldRef = xDlg->GetFieldRef(); rSourceWindow.ModifySelectedField( aFuncData ); } } else { // list of plain names of all data fields ScDPNameVec aDataFieldNames; maWndData.WriteFieldNames( aDataFieldNames ); // allow to modify layout options for row fields, if multiple data fields exist, or if it is not the last row field bool bLayout = (rSourceWindow.GetType() == PIVOTFIELDTYPE_ROW) && ((aDataFieldNames.size() > 1) || (rSourceWindow.GetSelectedIndex() + 1 < rSourceWindow.GetFieldCount())); ::std::auto_ptr< AbstractScDPSubtotalDlg > xDlg( pFactory->CreateScDPSubtotalDlg( this, RID_SCDLG_PIVOTSUBT, *mxDlgDPObject, *pLabelData, *pFuncData, aDataFieldNames, bLayout ) ); if( xDlg->Execute() == RET_OK ) { xDlg->FillLabelData( *pLabelData ); ScPivotFuncData aFuncData( *pFuncData ); aFuncData.mnFuncMask = pLabelData->mnFuncMask; rSourceWindow.ModifySelectedField( aFuncData ); } } } void ScPivotLayoutDlg::NotifyFieldRemoved( ScPivotFieldWindow& rSourceWindow ) { // update focus: move to selection window, if source window is empty now GrabFieldFocus( rSourceWindow ); } // protected ------------------------------------------------------------------ void ScPivotLayoutDlg::Tracking( const TrackingEvent& rTEvt ) { DBG_ASSERT( mpTrackingWindow, "ScPivotLayoutDlg::Tracking - missing tracking source window" ); if( !mpTrackingWindow ) return; // find target window const Point& rDialogPos = rTEvt.GetMouseEvent().GetPosPixel(); ScPivotFieldWindow* pTargetWindow = dynamic_cast< ScPivotFieldWindow* >( FindWindow( rDialogPos ) ); // check if the target orientation is allowed for this field if( pTargetWindow && (mpTrackingWindow != pTargetWindow) && !IsInsertAllowed( *mpTrackingWindow, *pTargetWindow ) ) pTargetWindow = 0; // tracking from selection window: do not show "delete" mouse pointer PointerStyle eTargetPointer = pTargetWindow ? pTargetWindow->GetDropPointerStyle() : ((mpTrackingWindow->GetType() == PIVOTFIELDTYPE_SELECT) ? 
POINTER_NOTALLOWED : POINTER_PIVOT_DELETE); // after calculating pointer style, check if target is selection window if( pTargetWindow && (pTargetWindow->GetType() == PIVOTFIELDTYPE_SELECT) ) pTargetWindow = 0; // notify windows about tracking if( mpDropWindow != pTargetWindow ) { // tracking window changed if( mpDropWindow ) mpDropWindow->NotifyEndTracking( ENDTRACKING_SUSPEND ); if( pTargetWindow ) pTargetWindow->NotifyStartTracking(); mpDropWindow = pTargetWindow; } if( mpDropWindow ) mpDropWindow->NotifyTracking( rDialogPos - pTargetWindow->GetPosPixel() ); // end tracking: move or remove field if( rTEvt.IsTrackingEnded() ) { bool bCancelled = rTEvt.IsTrackingCanceled(); if( mpDropWindow ) { mpDropWindow->NotifyEndTracking( bCancelled ? ENDTRACKING_CANCEL : ENDTRACKING_DROP ); if( !bCancelled ) { size_t nInsertIndex = mpDropWindow->GetDropIndex( rDialogPos - mpDropWindow->GetPosPixel() ); bool bMoved = MoveField( *mpTrackingWindow, *mpDropWindow, nInsertIndex, true ); // focus drop window, if move was successful, otherwise back to source window GrabFieldFocus( bMoved ? *mpDropWindow : *mpTrackingWindow ); } } else { // drop target invalid (outside field windows): remove tracked field if( !bCancelled ) mpTrackingWindow->RemoveSelectedField(); // focus source window (or another window, if it is empty now) GrabFieldFocus( *mpTrackingWindow ); } eTargetPointer = POINTER_ARROW; if( mpTrackingWindow != mpDropWindow ) mpTrackingWindow->NotifyEndTracking( ENDTRACKING_CANCEL ); mpTrackingWindow = mpDropWindow = 0; } SetPointer( eTargetPointer ); } void ScPivotLayoutDlg::SetReference( const ScRange& rRef, ScDocument* pDocP ) { if( !mbRefInputMode || !mpActiveEdit ) return; if( rRef.aStart != rRef.aEnd ) RefInputStart( mpActiveEdit ); if( mpActiveEdit == &maEdInPos ) { String aRefStr; rRef.Format( aRefStr, SCR_ABS_3D, pDocP, pDocP->GetAddressConvention() ); mpActiveEdit->SetRefString( aRefStr ); } else if( mpActiveEdit == &maEdOutPos ) { String aRefStr; rRef.aStart.Format( aRefStr, STD_FORMAT, pDocP, pDocP->GetAddressConvention() ); mpActiveEdit->SetRefString( aRefStr ); } } sal_Bool ScPivotLayoutDlg::IsRefInputMode() const { return mbRefInputMode; } void ScPivotLayoutDlg::SetActive() { if( mbRefInputMode ) { if( mpActiveEdit ) mpActiveEdit->GrabFocus(); if( mpActiveEdit == &maEdInPos ) EdInModifyHdl( 0 ); else if( mpActiveEdit == &maEdOutPos ) EdOutModifyHdl( 0 ); } else { GrabFocus(); } RefInputDone(); } sal_Bool ScPivotLayoutDlg::Close() { return DoClose( ScPivotLayoutWrapper::GetChildWindowId() ); } // private -------------------------------------------------------------------- ScPivotFieldWindow& ScPivotLayoutDlg::GetFieldWindow( ScPivotFieldType eFieldType ) { switch( eFieldType ) { case PIVOTFIELDTYPE_PAGE: return maWndPage; case PIVOTFIELDTYPE_ROW: return maWndRow; case PIVOTFIELDTYPE_COL: return maWndCol; case PIVOTFIELDTYPE_DATA: return maWndData; default:; } return maWndSelect; } bool ScPivotLayoutDlg::IsInsertAllowed( const ScPivotFieldWindow& rSourceWindow, const ScPivotFieldWindow& rTargetWindow ) { if( rTargetWindow.GetType() != PIVOTFIELDTYPE_SELECT ) { const ScPivotFuncData* pSourceData = rSourceWindow.GetSelectedFuncData(); ScDPLabelData* pLabelData = pSourceData ? 
GetLabelData( pSourceData->mnCol ) : 0; DBG_ASSERT( pLabelData, "ScPivotLayoutDlg::IsInsertAllowed - label data not found" ); if( pLabelData ) { sheet::DataPilotFieldOrientation eOrient = sheet::DataPilotFieldOrientation_HIDDEN; switch( rTargetWindow.GetType() ) { case PIVOTFIELDTYPE_PAGE: eOrient = sheet::DataPilotFieldOrientation_PAGE; break; case PIVOTFIELDTYPE_COL: eOrient = sheet::DataPilotFieldOrientation_COLUMN; break; case PIVOTFIELDTYPE_ROW: eOrient = sheet::DataPilotFieldOrientation_ROW; break; case PIVOTFIELDTYPE_DATA: eOrient = sheet::DataPilotFieldOrientation_DATA; break; default: return false; } return ScDPObject::IsOrientationAllowed( static_cast< sal_uInt16 >( eOrient ), pLabelData->mnFlags ); } } return false; } void ScPivotLayoutDlg::InitFieldWindows() { maLabelData = maPivotData.maLabelArray; maWndSelect.ReadDataLabels( maLabelData ); maWndPage.ReadPivotFields( maPivotData.maPageArr ); maWndCol.ReadPivotFields( maPivotData.maColArr ); maWndRow.ReadPivotFields( maPivotData.maRowArr ); maWndData.ReadPivotFields( maPivotData.maDataArr ); } void ScPivotLayoutDlg::GrabFieldFocus( ScPivotFieldWindow& rFieldWindow ) { if( rFieldWindow.IsEmpty() ) { if( maWndSelect.IsEmpty() ) maBtnOk.GrabFocus(); else maWndSelect.GrabFocus(); } else rFieldWindow.GrabFocus(); } namespace { void lclFindFieldWindow( ScPivotFieldWindow*& rpFieldWindow, const ScPivotFuncData*& rpFuncData, size_t& rnFieldIndex, ScPivotFieldWindow& rFieldWindow ) { ScPivotFuncDataEntry aEntry = rFieldWindow.FindFuncDataByCol( rpFuncData->mnCol ); if( aEntry.first ) { rpFieldWindow = &rFieldWindow; rpFuncData = aEntry.first; rnFieldIndex = aEntry.second; } } } // namespace bool ScPivotLayoutDlg::MoveField( ScPivotFieldWindow& rSourceWindow, ScPivotFieldWindow& rTargetWindow, size_t nInsertIndex, bool bMoveExisting ) { // move inside the same window if( &rSourceWindow == &rTargetWindow ) return bMoveExisting && rTargetWindow.MoveSelectedField( nInsertIndex ); // do not insert if not supported by target window if( !IsInsertAllowed( rSourceWindow, rTargetWindow ) ) { rSourceWindow.RemoveSelectedField(); return false; } // move from one window to another window if( const ScPivotFuncData* pSourceData = rSourceWindow.GetSelectedFuncData() ) { // move to page/col/row window: try to find existing field in another window ScPivotFieldWindow* pSourceWindow = &rSourceWindow; size_t nSourceIndex = rSourceWindow.GetSelectedIndex(); if( rTargetWindow.GetType() != PIVOTFIELDTYPE_DATA ) { lclFindFieldWindow( pSourceWindow, pSourceData, nSourceIndex, maWndPage ); lclFindFieldWindow( pSourceWindow, pSourceData, nSourceIndex, maWndCol ); lclFindFieldWindow( pSourceWindow, pSourceData, nSourceIndex, maWndRow ); } // found in target window: move to new position if( pSourceWindow == &rTargetWindow ) return bMoveExisting && pSourceWindow->MoveField( nSourceIndex, nInsertIndex ); // insert field into target window rTargetWindow.InsertField( nInsertIndex, *pSourceData ); // remove field from source window pSourceWindow->RemoveField( nSourceIndex ); // remove field from data window, if it is the original source if( (rSourceWindow.GetType() == PIVOTFIELDTYPE_DATA) && (pSourceWindow->GetType() != PIVOTFIELDTYPE_DATA) ) rSourceWindow.RemoveSelectedField(); return true; } return false; } // handlers ------------------------------------------------------------------- IMPL_LINK( ScPivotLayoutDlg, ClickHdl, PushButton *, pBtn ) { if( mpFocusWindow ) { /* Raising sub dialogs (from the NotifyDoubleClick function) triggers VCL child window focus events 
from this sub dialog which may invalidate the member mpFocusWindow pointing to the target field window. This would cause a crash with the following call to the GrabFieldFocus function, if mpFocusWindow is used directly. */ ScPivotFieldWindow& rTargetWindow = *mpFocusWindow; if( pBtn == &maBtnRemove ) { rTargetWindow.RemoveSelectedField(); // focus back to field window GrabFieldFocus( rTargetWindow ); } else if( pBtn == &maBtnOptions ) { NotifyDoubleClick( rTargetWindow ); // focus back to field window GrabFieldFocus( rTargetWindow ); } } return 0; } IMPL_LINK( ScPivotLayoutDlg, OkHdl, OKButton *, EMPTYARG ) { String aOutPosStr = maEdOutPos.GetText(); ScAddress aAdrDest; bool bToNewTable = maLbOutPos.GetSelectEntryPos() == 1; sal_uInt16 nResult = !bToNewTable ? aAdrDest.Parse( aOutPosStr, mpDoc, mpDoc->GetAddressConvention() ) : 0; if( bToNewTable || ((aOutPosStr.Len() > 0) && ((nResult & SCA_VALID) == SCA_VALID)) ) { ScPivotFieldVector aPageFields, aColFields, aRowFields, aDataFields; maWndPage.WritePivotFields( aPageFields ); maWndCol.WritePivotFields( aColFields ); maWndRow.WritePivotFields( aRowFields ); maWndData.WritePivotFields( aDataFields ); // TODO: handle data field in dialog field windows? aRowFields.resize( aRowFields.size() + 1 ); aRowFields.back().nCol = PIVOT_DATA_FIELD; ScDPSaveData* pOldSaveData = mxDlgDPObject->GetSaveData(); ScRange aOutRange( aAdrDest ); // bToNewTable is passed separately ScDPSaveData aSaveData; aSaveData.SetIgnoreEmptyRows( maBtnIgnEmptyRows.IsChecked() ); aSaveData.SetRepeatIfEmpty( maBtnDetectCat.IsChecked() ); aSaveData.SetColumnGrand( maBtnTotalCol.IsChecked() ); aSaveData.SetRowGrand( maBtnTotalRow.IsChecked() ); aSaveData.SetFilterButton( maBtnFilter.IsChecked() ); aSaveData.SetDrillDown( maBtnDrillDown.IsChecked() ); uno::Reference< sheet::XDimensionsSupplier > xSource = mxDlgDPObject->GetSource(); ScDPObject::ConvertOrientation( aSaveData, aPageFields, sheet::DataPilotFieldOrientation_PAGE, 0, 0, 0, xSource, false ); ScDPObject::ConvertOrientation( aSaveData, aColFields, sheet::DataPilotFieldOrientation_COLUMN, 0, 0, 0, xSource, false ); ScDPObject::ConvertOrientation( aSaveData, aRowFields, sheet::DataPilotFieldOrientation_ROW, 0, 0, 0, xSource, false ); ScDPObject::ConvertOrientation( aSaveData, aDataFields, sheet::DataPilotFieldOrientation_DATA, 0, 0, 0, xSource, false, &aColFields, &aRowFields, &aPageFields ); for( ScDPLabelDataVector::const_iterator aIt = maLabelData.begin(), aEnd = maLabelData.end(); aIt != aEnd; ++aIt ) { if( ScDPSaveDimension* pDim = aSaveData.GetExistingDimensionByName( aIt->maName ) ) { pDim->SetUsedHierarchy( aIt->mnUsedHier ); pDim->SetShowEmpty( aIt->mbShowAll ); pDim->SetSortInfo( &aIt->maSortInfo ); pDim->SetLayoutInfo( &aIt->maLayoutInfo ); pDim->SetAutoShowInfo( &aIt->maShowInfo ); ScDPSaveDimension* pOldDim = NULL; if (pOldSaveData) { // Transfer the existing layout names to new dimension instance. 
pOldDim = pOldSaveData->GetExistingDimensionByName(aIt->maName); if (pOldDim) { const OUString* pLayoutName = pOldDim->GetLayoutName(); if (pLayoutName) pDim->SetLayoutName(*pLayoutName); const OUString* pSubtotalName = pOldDim->GetSubtotalName(); if (pSubtotalName) pDim->SetSubtotalName(*pSubtotalName); } } bool bManualSort = ( aIt->maSortInfo.Mode == sheet::DataPilotFieldSortMode::MANUAL ); // visibility of members for (::std::vector<ScDPLabelData::Member>::const_iterator itr = aIt->maMembers.begin(), itrEnd = aIt->maMembers.end(); itr != itrEnd; ++itr) { ScDPSaveMember* pMember = pDim->GetMemberByName(itr->maName); // #i40054# create/access members only if flags are not default // (or in manual sorting mode - to keep the order) if (bManualSort || !itr->mbVisible || !itr->mbShowDetails) { pMember->SetIsVisible(itr->mbVisible); pMember->SetShowDetails(itr->mbShowDetails); } if (pOldDim) { // Transfer the existing layout name. ScDPSaveMember* pOldMember = pOldDim->GetMemberByName(itr->maName); if (pOldMember) { const OUString* pLayoutName = pOldMember->GetLayoutName(); if (pLayoutName) pMember->SetLayoutName(*pLayoutName); } } } } } ScDPSaveDimension* pDim = aSaveData.GetDataLayoutDimension(); if (pDim && pOldSaveData) { ScDPSaveDimension* pOldDim = pOldSaveData->GetDataLayoutDimension(); if (pOldDim) { const OUString* pLayoutName = pOldDim->GetLayoutName(); if (pLayoutName) pDim->SetLayoutName(*pLayoutName); } } // also transfer grand total name if (pOldSaveData) { const OUString* pGrandTotalName = pOldSaveData->GetGrandTotalName(); if (pGrandTotalName) aSaveData.SetGrandTotalName(*pGrandTotalName); } sal_uInt16 nWhichPivot = SC_MOD()->GetPool().GetWhich( SID_PIVOT_TABLE ); ScPivotItem aOutItem( nWhichPivot, &aSaveData, &aOutRange, bToNewTable ); mbRefInputMode = false; // to allow deselecting when switching sheets SetDispatcherLock( false ); SwitchToDocument(); // #95513# don't hide the dialog before executing the slot, instead it is used as // parent for message boxes in ScTabViewShell::GetDialogParent const SfxPoolItem* pRet = GetBindings().GetDispatcher()->Execute( SID_PIVOT_TABLE, SFX_CALLMODE_SLOT | SFX_CALLMODE_RECORD, &aOutItem, 0L, 0L ); bool bSuccess = true; if (pRet) { const SfxBoolItem* pItem = dynamic_cast<const SfxBoolItem*>(pRet); if (pItem) bSuccess = pItem->GetValue(); } if (bSuccess) // Table successfully inserted. Close(); else { // Table insertion failed. Keep the dialog open. 
mbRefInputMode = true; SetDispatcherLock(true); } } else { if( !maBtnMore.GetState() ) maBtnMore.SetState( true ); ErrorBox( this, WinBits( WB_OK | WB_DEF_OK ), ScGlobal::GetRscString( STR_INVALID_TABREF ) ).Execute(); maEdOutPos.GrabFocus(); } return 0; } IMPL_LINK( ScPivotLayoutDlg, CancelHdl, CancelButton *, EMPTYARG ) { Close(); return 0; } IMPL_LINK( ScPivotLayoutDlg, MoreClickHdl, MoreButton *, EMPTYARG ) { if ( maBtnMore.GetState() ) { mbRefInputMode = true; if ( maEdInPos.IsEnabled() ) { maEdInPos.Enable(); maEdInPos.GrabFocus(); maEdInPos.Enable(); } else { maEdOutPos.Enable(); maEdOutPos.GrabFocus(); maEdOutPos.Enable(); } } else { mbRefInputMode = false; } return 0; } IMPL_LINK( ScPivotLayoutDlg, EdOutModifyHdl, Edit *, EMPTYARG ) { String theCurPosStr = maEdOutPos.GetText(); sal_uInt16 nResult = ScAddress().Parse( theCurPosStr, mpDoc, mpDoc->GetAddressConvention() ); if ( SCA_VALID == (nResult & SCA_VALID) ) { String* pStr = 0; bool bFound = false; sal_uInt16 i = 0; sal_uInt16 nCount = maLbOutPos.GetEntryCount(); for ( i=2; i<nCount && !bFound; i++ ) { pStr = (String*)maLbOutPos.GetEntryData( i ); bFound = (theCurPosStr == *pStr); } if ( bFound ) maLbOutPos.SelectEntryPos( --i ); else maLbOutPos.SelectEntryPos( 0 ); } return 0; } IMPL_LINK( ScPivotLayoutDlg, EdInModifyHdl, Edit *, EMPTYARG ) { String theCurPosStr = maEdInPos.GetText(); sal_uInt16 nResult = ScRange().Parse( theCurPosStr, mpDoc, mpDoc->GetAddressConvention() ); // invalid source range if( SCA_VALID != (nResult & SCA_VALID) ) return 0; ScRefAddress start, end; ConvertDoubleRef( mpDoc, theCurPosStr, 1, start, end, mpDoc->GetAddressConvention() ); ScRange aNewRange( start.GetAddress(), end.GetAddress() ); ScSheetSourceDesc inSheet = *mxDlgDPObject->GetSheetDesc(); // new range is identical to the current range if( inSheet.aSourceRange == aNewRange ) return 0; ScTabViewShell* pTabViewShell = mpViewData->GetViewShell(); inSheet.aSourceRange = aNewRange; mxDlgDPObject->SetSheetDesc( inSheet ); mxDlgDPObject->FillOldParam( maPivotData ); mxDlgDPObject->FillLabelData( maPivotData ); // SetDialogDPObject does not take ownership but makes a copy internally pTabViewShell->SetDialogDPObject( mxDlgDPObject.get() ); // re-initialize the field windows from the new data InitFieldWindows(); return 0; } IMPL_LINK( ScPivotLayoutDlg, SelAreaHdl, ListBox *, EMPTYARG ) { String aString; sal_uInt16 nSelPos = maLbOutPos.GetSelectEntryPos(); if( nSelPos > 1 ) { aString = *(String*)maLbOutPos.GetEntryData( nSelPos ); } else { // do not allow to specify output position, if target is "new sheet" bool bNewSheet = nSelPos == 1; maEdOutPos.Enable( !bNewSheet ); maRbOutPos.Enable( !bNewSheet ); } maEdOutPos.SetText( aString ); return 0; } IMPL_LINK( ScPivotLayoutDlg, ChildEventListener, VclWindowEvent*, pEvent ) { Window* pWindow = pEvent->GetWindow(); // check that this dialog is the parent of the window, to ignore focus events from sub dialogs if( (pEvent->GetId() == VCLEVENT_WINDOW_GETFOCUS) && pWindow && (pWindow->GetParent() == this) ) { // check if old window and/or new window are field windows ScPivotFieldWindow* pSourceWindow = mpFocusWindow; ScPivotFieldWindow* pTargetWindow = dynamic_cast< ScPivotFieldWindow* >( pWindow ); /* Enable or disable the Remove/Options buttons. Do nothing if the buttons themselves get the focus. #128113# The TestTool may set the focus into an empty window. Then the Remove/Options buttons must be disabled. 
*/ if( (pWindow != &maBtnRemove) && (pWindow != &maBtnOptions) ) { bool bEnableButtons = pTargetWindow && (pTargetWindow->GetType() != PIVOTFIELDTYPE_SELECT) && !pTargetWindow->IsEmpty(); maBtnRemove.Enable( bEnableButtons ); maBtnOptions.Enable( bEnableButtons ); /* Remember the new focus window (will not be changed, if Remove/Option buttons are getting focus, because they need to know the field window they are working on). */ mpFocusWindow = pTargetWindow; } /* Move the last selected field to target window, if focus changes via keyboard shortcut. */ if( pSourceWindow && pTargetWindow && (pSourceWindow != pTargetWindow) && ((pTargetWindow->GetGetFocusFlags() & GETFOCUS_MNEMONIC) != 0) ) { // append field in target window MoveField( *pSourceWindow, *pTargetWindow, pTargetWindow->GetFieldCount(), false ); // move cursor in selection window to next field if( pSourceWindow->GetType() == PIVOTFIELDTYPE_SELECT ) pSourceWindow->SelectNextField(); // return focus to source window (if it is not empty) GrabFieldFocus( pSourceWindow->IsEmpty() ? *pTargetWindow : *pSourceWindow ); } mpActiveEdit = dynamic_cast< ::formula::RefEdit* >( pEvent->GetWindow() ); } return 0; } // ============================================================================
35,923
12,238
#pragma once #include"./graph_template.hpp" #include"../segment_tree/lazy_segment_tree.hpp" template<typename T,typename E,typename F,typename G,typename H> class HLD_lazy{ int child_size(const graph& v,int n,int p){ int cnt=0; for(auto t:v[n]){ if(t!=p)cnt+=child_size(v,t,n); } return sz[n]=cnt+1; } void make(const graph& v,int root){ sz=new int[v.size()]; vertex=new int[v.size()]; par=new int[v.size()]; head=new int[v.size()]; child_size(v,root,-1); stack<tuple<int,int>>stk; stk.emplace(root,-1); int idx=0; par[root]=root; head[root]=root; while(!stk.empty()){ int n,p; tie(n,p)=stk.top(); stk.pop(); vertex[n]=idx++; int mx=0,heavy=-1; for(auto t:v[n])if(t!=p&&mx<sz[t]){ mx=sz[t]; heavy=t; } for(auto t:v[n]){ if(t!=heavy&&t!=p){ par[t]=n; head[t]=t; stk.emplace(t,n); } } if(heavy!=-1){ par[heavy]=par[n]; head[heavy]=head[n]; stk.emplace(heavy,n); } } } int* sz; int* vertex; int* par; int* head; F _f;G _g;H _h; lazy_segment_tree<T,E,F,G,H>* seg; public: HLD_lazy(const graph& v,int root=0,F f=F(),G g=G(),H h=H()):_f(f),_g(g),_h(h){ make(v,root); seg=new lazy_segment_tree<T,E,F,G,H>(v.size(),f,g,h); } // HLD_lazy(const graph& v,const vector<T>& a,int root=0,F f=F(),G g=G(),H h=H()):_f(f),_g(g),_h(h){ // vector<T>tmp(v.size()); // make(v,root); // for(int i=0;i<(int)v.size();i++){ // tmp[vertex[i]]=a[i]; // } // seg=new lazy_segment_tree(tmp,f,g,h); // } int lca(int l,int r){ while(1){ if(head[l]==head[r])return sz[l]>sz[r]?l:r; else if(sz[head[l]]>sz[head[r]])r=par[r]; else l=par[l]; } } inline void update_vertex(int u,E x){ seg->update(vertex[u],vertex[u],x); } inline maybe<T> get_vertex(int u){ return seg->get(vertex[u],vertex[u]); } inline void update_subtree(int u,E x){ seg->update(vertex[u],vertex[u]+sz[u]-1); } inline maybe<T> get_subtree(int u){ return seg->get(vertex[u],vertex[u]+sz[u]-1); } void update_path(int u,int v,E x){ while(1){ if(head[u]==head[v]){ seg->update(vertex[u],vertex[v],x); break; } else if(sz[head[u]]>sz[head[v]]){ seg->update(vertex[v],vertex[head[v]],x); v=par[v]; } else{ seg->update(vertex[u],vertex[head[u]],x); u=par[u]; } } } T get_path(int u,int v){ auto f=expand<T,F>(_f); maybe<T> res; while(1){ if(head[u]==head[v]){ return f(res,seg->get(vertex[u],vertex[v])); } else if(sz[head[u]]>sz[head[v]]){ res=f(res,seg->get(vertex[v],vertex[head[v]])); v=par[v]; } else{ res=f(res,seg->get(vertex[u],vertex[head[u]])); u=par[u]; } } } };
2,673
1,508
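The HLD_lazy template above exposes only part of the lazy_segment_tree interface it builds on (a (size, F, G, H) constructor, update(l, r, x), and get(l, r) returning maybe<T>), so the following usage sketch is illustrative rather than definitive. It assumes graph behaves like an adjacency list (roughly std::vector<std::vector<int>> with a size constructor) and uses a range-add / range-max pairing; the functor names MaxOp, AddToValue, and ComposeAdd are hypothetical, not part of the original header.

#include <algorithm>
// assumes the HLD_lazy header above and its graph/lazy_segment_tree dependencies are available

// F: combine two values (max), G: apply a pending add to a value,
// H: compose two pending adds.  Range-add / range-max needs no segment sizes.
struct MaxOp      { long long operator()(long long a, long long b) const { return std::max(a, b); } };
struct AddToValue { long long operator()(long long v, long long x) const { return v + x; } };
struct ComposeAdd { long long operator()(long long x, long long y) const { return x + y; } };

int main()
{
    graph g(5);                                   // assumed adjacency-list type from graph_template.hpp
    auto addEdge = [&](int a, int b){ g[a].push_back(b); g[b].push_back(a); };
    addEdge(0, 1); addEdge(1, 2); addEdge(1, 3); addEdge(3, 4);

    HLD_lazy<long long, long long, MaxOp, AddToValue, ComposeAdd> hld(g, 0);
    hld.update_path(2, 4, 5);                     // add 5 to every vertex on the path 2-1-3-4
    hld.update_subtree(3, 2);                     // add 2 to every vertex in the subtree of 3
    long long best = hld.get_path(2, 4);          // maximum value on the path
    (void)best;
    return 0;
}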
#include "PitchParameterField.h" PitchParameterField::PitchParameterField(const char* _name) : ParameterField(_name) { } void PitchParameterField::increment(int16_t amount) { value += amount; if(value > max) { value = max; } dirtyValue = true; } void PitchParameterField::decrement(int16_t amount) { value -= amount; if(value < min) { value = min; } dirtyValue = true; } void PitchParameterField::render(GraphicsContext& g) { if(visible) { ParameterField::render(g); if(dirtyValue || g.full) { Hardware::display.print(noteNames[value]); dirtyValue = false; } } }
676
219
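PitchParameterField::render prints a human-readable note name via a noteNames table that is defined elsewhere in this project. As a point of reference only, the sketch below shows the conventional MIDI note-number to name mapping that such a table typically encodes; kPitchClass and printNoteName are illustrative names, not part of the firmware.

#include <cstdio>

// Conventional 12-tone pitch-class names; a full noteNames table is usually
// built by combining these with the octave number.
static const char* const kPitchClass[12] = {
    "C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"
};

// Print the name of a MIDI note number, e.g. 60 -> "C4", 69 -> "A4".
void printNoteName(int midiNote)
{
    int octave = midiNote / 12 - 1;   // MIDI convention: note 60 is C4
    std::printf("%s%d\n", kPitchClass[midiNote % 12], octave);
}

int main()
{
    printNoteName(60);
    printNoteName(69);
    return 0;
}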
#pragma once

// ARKSurvivalEvolved (329.9) SDK

#ifdef _MSC_VER
	#pragma pack(push, 0x8)
#endif

#include "ARKSurvivalEvolved_Task_StunForestKaiju_classes.hpp"

namespace sdk
{
//---------------------------------------------------------------------------
//Parameters
//---------------------------------------------------------------------------

// Function Task_StunForestKaiju.Task_StunForestKaiju_C.ReceiveExecute
struct UTask_StunForestKaiju_C_ReceiveExecute_Params
{
	class AActor**    OwnerActor;    // (Parm, ZeroConstructor, IsPlainOldData)
};

// Function Task_StunForestKaiju.Task_StunForestKaiju_C.ExecuteUbergraph_Task_StunForestKaiju
struct UTask_StunForestKaiju_C_ExecuteUbergraph_Task_StunForestKaiju_Params
{
	int               EntryPoint;    // (Parm, ZeroConstructor, IsPlainOldData)
};

}

#ifdef _MSC_VER
	#pragma pack(pop)
#endif
1,005
311
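The header above only declares the parameter structs generated for two blueprint functions. In SDKs of this kind, such structs are normally filled in and handed to UObject::ProcessEvent; the exact lookup helper differs between SDK generators, so findFunctionByName below is a hypothetical placeholder, the included header name is assumed, and sdk::UObject/sdk::UFunction are assumed to exist in the generated SDK, as is usual for such dumps.

// Sketch only -- illustrates the usual calling pattern for generated params
// structs; not taken from this SDK.
#include "ARKSurvivalEvolved_Task_StunForestKaiju_parameters.hpp"   // assumed file name for the header above

// Hypothetical helper: generated SDKs expose some way to find a UFunction by
// its full name; the real helper's name and signature vary per generator.
sdk::UFunction* findFunctionByName(const char* fullName);

void callReceiveExecute(sdk::UObject* task, sdk::AActor** ownerActor)
{
    sdk::UFunction* fn = findFunctionByName(
        "Function Task_StunForestKaiju.Task_StunForestKaiju_C.ReceiveExecute");

    sdk::UTask_StunForestKaiju_C_ReceiveExecute_Params params{};
    params.OwnerActor = ownerActor;

    // ProcessEvent(UFunction*, void*) is the standard UE4 reflection entry point.
    task->ProcessEvent(fn, &params);
}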
#include <stdio.h> #include <string> #include "Engine.h" using namespace std; bool Engine::Init(string WindowTitle) { TTF_Init(); _graphics = new Graphics; if (_graphics->Init(WindowTitle) == false) return false; _textureManager = new TextureManager(_graphics->GetRenderer()); _time = new Time; if (_time->Init() == false) return false; _input = new Input; if (_input->Init() == false) return false; _textTextureGenerator = new TextTextureGenerator(_graphics->GetRenderer()); _mainMenu = new MainMenu(this); if (_mainMenu->Init() == false) return false; _scoreGUI = new ScoreGUI(this); if (_scoreGUI->Init() == false) return false; _scoreGUI->UpdateScore(0); _easyDifficulty = new EasyDifficulty; _mediumDifficulty = new MediumDifficulty; _hardDifficulty = new HardDifficulty; _currentDifficulty = _easyDifficulty; //Load helicopter texture SDL_Texture* helicopterTexture = _textureManager->LoadFromFile(Constants::HELICOPTER_IMAGE_NAME); if (helicopterTexture == NULL) return false; //Load background texture SDL_Texture* backgroundTexture = _textureManager->LoadFromFile(Constants::BACKGROUND_IMAGE_NAME); if (backgroundTexture == NULL) return false; //Load obstacle texture SDL_Texture* obstacleTexture = _textureManager->LoadFromFile(Constants::OBSTACLE_IMAGE_NAME); if (obstacleTexture == NULL) return false; _helicopter = new Helicopter(helicopterTexture, this); _background = new Background(backgroundTexture, this); _background2 = new Background(backgroundTexture, this); _background2->SetX(Constants::WINDOW_WIDTH - 1); _obstacleManager = new ObstacleManager(obstacleTexture, this); _obstacleManager->Init(); return true; } void Engine::Cleanup() { if (_graphics != NULL) { _graphics->Cleanup(); delete _graphics; _graphics = NULL; } if (_time != NULL) { _time->Cleanup(); delete _time; _time = NULL; } if (_input != NULL) { _input->Cleanup(); delete _input; _input = NULL; } if (_helicopter != NULL) { _helicopter->Cleanup(); delete _helicopter; _helicopter = NULL; } if (_background != NULL) { _background->Cleanup(); delete _background; _background = NULL; } if (_background2 != NULL) { _background2->Cleanup(); delete _background2; _background2 = NULL; } if (_obstacleManager != NULL) { _obstacleManager->Cleanup(); delete _obstacleManager; _obstacleManager = NULL; } _currentDifficulty = NULL; if (_easyDifficulty != NULL) { delete _easyDifficulty; _easyDifficulty = NULL; } if (_mediumDifficulty != NULL) { delete _mediumDifficulty; _mediumDifficulty = NULL; } if (_hardDifficulty != NULL) { delete _hardDifficulty; _hardDifficulty = NULL; } if (_mainMenu != NULL) { _mainMenu->Cleanup(); delete _mainMenu; _mainMenu = NULL; } if (_scoreGUI != NULL) { _scoreGUI->Cleanup(); delete _scoreGUI; _scoreGUI = NULL; } if (_textTextureGenerator != NULL) { delete _textTextureGenerator; _textTextureGenerator = NULL; } SDL_Quit(); IMG_Quit(); } void Engine::Run() { while (_input->IsQuitRequested() == false) { //Update Timer _time->Update(); //Update Input _input->Poll(); //Update _background->Update(); _background2->Update(); _helicopter->Update(); _obstacleManager->Update(); _mainMenu->Update(); if (GameActive == false && _mainMenu->NewGameRequested == true) NewGame(); //Check collisions if (_obstacleManager->CheckPlayerCollision(_helicopter->GetBoundingRectangle())) GameOver(); //Check helicopter offscreen if (_helicopter->IsOffScreen()) GameOver(); //Clear render target _graphics->Clear(); //Draw helicopter _background->Draw(); _background2->Draw(); _obstacleManager->Draw(); _helicopter->Draw(); _mainMenu->Draw(); 
_scoreGUI->Draw(); //Present render target _graphics->Present(); SDL_Delay(8); } } Input* Engine::GetInput() { return _input; } Graphics* Engine::GetGraphics() { return _graphics; } Time* Engine::GetTime() { return _time; } Difficulty* Engine::GetCurrentDifficulty() { return _currentDifficulty; } void Engine::GameOver() { GameActive = false; _mainMenu->Active = true; _obstacleManager->Active = false; _helicopter->Active = false; _mainMenu->NewGameRequested = false; } void Engine::NewGame() { _mainMenu->NewGameRequested = false; switch (_mainMenu->SelectedDifficulty) { case 0: _currentDifficulty = _easyDifficulty; break; case 1: _currentDifficulty = _mediumDifficulty; break; case 2: _currentDifficulty = _hardDifficulty; break; } GameActive = true; _mainMenu->Active = false; _obstacleManager->Reset(); _helicopter->Reset(); _scoreGUI->UpdateScore(0); } TextTextureGenerator* Engine::GetTextTextureGenerator() { return _textTextureGenerator; } void Engine::IncrementScore() { _scoreGUI->UpdateScore(_scoreGUI->Score + 1); }
4,878
1,857
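The main loop above updates a Time helper each frame and throttles with SDL_Delay(8); the helper's internals are not shown here. For reference, a minimal frame-delta measurement in plain SDL, which a class like Time typically wraps, looks like the sketch below; DeltaClock is an illustrative name, not part of this engine.

#include <SDL.h>

// Minimal frame timer: measures elapsed seconds between consecutive Tick() calls.
class DeltaClock
{
public:
    void Start() { _last = SDL_GetTicks(); }

    float Tick()
    {
        Uint32 now = SDL_GetTicks();
        float deltaSeconds = (now - _last) / 1000.0f;
        _last = now;
        return deltaSeconds;
    }

private:
    Uint32 _last = 0;
};

// Typical use inside a game loop:
//   float dt = clock.Tick();
//   position += velocity * dt;   // movement stays frame-rate independent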
//--------------------------------------------------------------------------------
// EnemyProjectiles.hpp
//--------------------------------------------------------------------------------
// The class that manages the projectiles fired by the enemies
//--------------------------------------------------------------------------------
#pragma once

#include <tonc.h>
#include "Projectile.hpp"
#include "graphics/ObjectTilePointer.hpp"
#include "graphics/PalettePointer.hpp"

class GameScene;

class EnemyProjectiles final
{
    constexpr static u32 MaxProjectiles = 128;

    GameScene& gameScene();

    Projectile projectiles[MaxProjectiles];
    u32 numProjectiles;
    ObjectTilePointer tilePtr;
    SinglePalettePointer palPtr;

public:
    void init();
    void update();
    void pushGraphics();

    void add(vec2<s16f7> pos, vec2<s16f7> vel, u16 type)
    {
        ASSERT(numProjectiles < MaxProjectiles);
        projectiles[numProjectiles++] = { pos, vel, type };
    }

    void add(vec2<s16f7> pos, vec2<s16f7> vel, u16 type, u16 arg)
    {
        ASSERT(numProjectiles < MaxProjectiles);
        projectiles[numProjectiles++] = { pos, vel, type, arg };
    }
};
1,182
347
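EnemyProjectiles stores its projectiles in a fixed array with a running count, and add() simply appends; how update() retires finished projectiles is not visible in this header. A common way to compact such a pool without allocations is unordered swap-and-pop removal, sketched generically below; Particle and updatePool are illustrative names, not the engine's actual types.

#include <cstddef>

struct Particle
{
    float x, y;
    float vx, vy;
    bool  alive;
};

// Advance every live entry; replace a dead slot with the last live entry so
// the active range [0, count) stays contiguous.  Order is not preserved.
void updatePool(Particle* items, std::size_t& count, float dt)
{
    for (std::size_t i = 0; i < count; )
    {
        items[i].x += items[i].vx * dt;
        items[i].y += items[i].vy * dt;

        if (!items[i].alive)
            items[i] = items[--count];   // swap-and-pop
        else
            ++i;
    }
}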
/* This file is part of Jellyfish. Jellyfish is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Jellyfish is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Jellyfish. If not, see <http://www.gnu.org/licenses/>. */ #ifndef __JELLYFISH_TEXT_DUMPER_HPP__ #define __JELLYFISH_TEXT_DUMPER_HPP__ #include <jellyfish/sorted_dumper.hpp> namespace jellyfish { template<typename Key, typename Val> class text_writer { public: void write(std::ostream& out, const Key& key, const Val val) { out << key << " " << val << "\n"; } }; template<typename storage_t> class text_dumper : public sorted_dumper<text_dumper<storage_t>, storage_t> { typedef sorted_dumper<text_dumper<storage_t>, storage_t> super; text_writer<typename super::key_type, uint64_t> writer; public: static const char* format; text_dumper(int nb_threads, const char* file_prefix, file_header* header = 0) : super(nb_threads, file_prefix, header) { } virtual void _dump(storage_t* ary) { if(super::header_) { super::header_->update_from_ary(*ary); super::header_->format(format); } super::_dump(ary); } void write_key_value_pair(std::ostream& out, typename super::heap_item item) { writer.write(out, item->key_, item->val_); } }; template<typename storage_t> const char* jellyfish::text_dumper<storage_t>::format = "text/sorted"; template<typename Key, typename Val> class text_reader { std::istream& is_; char* buffer_; Key key_; Val val_; const RectangularBinaryMatrix m_; const size_t size_mask_; public: text_reader(std::istream& is, file_header* header) : is_(is), buffer_(new char[header->key_len() / 2 + 1]), key_(header->key_len() / 2), m_(header->matrix()), size_mask_(header->size() - 1) { } const Key& key() const { return key_; } const Val& val() const { return val_; } size_t pos() const { return m_.times(key()) & size_mask_; } bool next() { is_ >> key_ >> val_; return is_.good(); } }; } #endif /* __JELLYFISH_TEXT_DUMPER_HPP__ */
2,522
882
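text_writer above emits one "<k-mer> <count>" pair per line, and text_reader parses the same stream back with operator>>. Independently of the jellyfish classes, such a dump can be consumed with nothing but the standard library, as in the sketch below.

#include <cstdint>
#include <fstream>
#include <iostream>
#include <string>

// Reads a text dump in the "<k-mer> <count>" one-pair-per-line format.
int main(int argc, char** argv)
{
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " counts.txt\n";
        return 1;
    }

    std::ifstream in(argv[1]);
    std::string   kmer;
    std::uint64_t count;
    while (in >> kmer >> count)
        std::cout << kmer << " occurs " << count << " times\n";

    return 0;
}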
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ui/app_list/search/arc/arc_playstore_search_result.h" #include <utility> #include "ash/public/cpp/app_list/app_list_config.h" #include "ash/public/cpp/app_list/vector_icons/vector_icons.h" #include "base/bind.h" #include "base/metrics/user_metrics.h" #include "base/strings/utf_string_conversions.h" #include "chrome/browser/apps/app_service/app_icon/app_icon_factory.h" #include "chrome/browser/ash/arc/icon_decode_request.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/ui/app_list/app_list_controller_delegate.h" #include "chrome/browser/ui/app_list/arc/arc_playstore_app_context_menu.h" #include "chrome/browser/ui/app_list/search/search_tags_util.h" #include "components/arc/mojom/app.mojom.h" #include "components/arc/session/arc_bridge_service.h" #include "components/arc/session/arc_service_manager.h" #include "components/crx_file/id_util.h" #include "ui/base/models/image_model.h" #include "ui/gfx/canvas.h" #include "ui/gfx/color_palette.h" #include "ui/gfx/image/canvas_image_source.h" #include "ui/gfx/image/image_skia_operations.h" #include "ui/gfx/paint_vector_icon.h" namespace { // The id prefix to identify a Play Store search result. constexpr char kPlayAppPrefix[] = "play://"; // Badge icon color. constexpr SkColor kBadgeColor = gfx::kGoogleGrey700; // Size of the vector icon inside the badge. constexpr int kBadgeIconSize = 12; // The background image source for badge. class BadgeBackgroundImageSource : public gfx::CanvasImageSource { public: explicit BadgeBackgroundImageSource(int size) : CanvasImageSource(gfx::Size(size, size)) {} BadgeBackgroundImageSource(const BadgeBackgroundImageSource&) = delete; BadgeBackgroundImageSource& operator=(const BadgeBackgroundImageSource&) = delete; ~BadgeBackgroundImageSource() override = default; private: // gfx::CanvasImageSource overrides: void Draw(gfx::Canvas* canvas) override { cc::PaintFlags flags; flags.setColor(SK_ColorWHITE); flags.setAntiAlias(true); flags.setStyle(cc::PaintFlags::kFill_Style); const float origin = static_cast<float>(size().width()) / 2; canvas->DrawCircle(gfx::PointF(origin, origin), origin, flags); } }; gfx::ImageSkia CreateBadgeIcon(const gfx::VectorIcon& vector_icon, int badge_size, int icon_size, SkColor icon_color) { gfx::ImageSkia background( std::make_unique<BadgeBackgroundImageSource>(badge_size), gfx::Size(badge_size, badge_size)); gfx::ImageSkia foreground( gfx::CreateVectorIcon(vector_icon, icon_size, icon_color)); return gfx::ImageSkiaOperations::CreateSuperimposedImage(background, foreground); } bool LaunchIntent(const std::string& intent_uri, int64_t display_id) { auto* arc_service_manager = arc::ArcServiceManager::Get(); if (!arc_service_manager) return false; auto* arc_bridge = arc_service_manager->arc_bridge_service(); if (auto* app_instance = ARC_GET_INSTANCE_FOR_METHOD(arc_bridge->app(), LaunchIntent)) { app_instance->LaunchIntent(intent_uri, display_id); return true; } if (auto* app_instance = ARC_GET_INSTANCE_FOR_METHOD( arc_bridge->app(), LaunchIntentDeprecated)) { app_instance->LaunchIntentDeprecated(intent_uri, absl::nullopt); return true; } return false; } } // namespace namespace app_list { ArcPlayStoreSearchResult::ArcPlayStoreSearchResult( arc::mojom::AppDiscoveryResultPtr data, Profile* profile, AppListControllerDelegate* list_controller, const std::u16string& query) : 
data_(std::move(data)), profile_(profile), list_controller_(list_controller) { const auto title = base::UTF8ToUTF16(label().value()); SetTitle(title); SetTitleTags(CalculateTags(query, title)); set_id(kPlayAppPrefix + crx_file::id_util::GenerateId(install_intent_uri().value())); SetCategory(Category::kPlayStore); SetDisplayType(ash::SearchResultDisplayType::kTile); // TODO: The badge icon should be updated to pass through a vector icon and // color id rather than hardcoding the colors here. SetBadgeIcon(ui::ImageModel::FromImageSkia(CreateBadgeIcon( is_instant_app() ? ash::kBadgeInstantIcon : ash::kBadgePlayIcon, ash::SharedAppListConfig::instance().search_tile_badge_icon_dimension(), kBadgeIconSize, kBadgeColor))); SetFormattedPrice(base::UTF8ToUTF16(formatted_price().value())); SetRating(review_score()); SetResultType(is_instant_app() ? ash::AppListSearchResultType::kInstantApp : ash::AppListSearchResultType::kPlayStoreApp); SetMetricsType(is_instant_app() ? ash::PLAY_STORE_INSTANT_APP : ash::PLAY_STORE_UNINSTALLED_APP); apps::ArcRawIconPngDataToImageSkia( std::move(data_->icon), ash::SharedAppListConfig::instance().search_tile_icon_dimension(), base::BindOnce(&ArcPlayStoreSearchResult::OnIconDecoded, weak_ptr_factory_.GetWeakPtr())); } ArcPlayStoreSearchResult::~ArcPlayStoreSearchResult() = default; void ArcPlayStoreSearchResult::Open(int event_flags) { LaunchIntent(install_intent_uri().value(), list_controller_->GetAppListDisplayId()); } void ArcPlayStoreSearchResult::GetContextMenuModel( GetMenuModelCallback callback) { context_menu_ = std::make_unique<ArcPlayStoreAppContextMenu>( this, profile_, list_controller_); // TODO(755701): Enable context menu once Play Store API starts returning both // install and launch intents. std::move(callback).Run(nullptr); } void ArcPlayStoreSearchResult::ExecuteLaunchCommand(int event_flags) { Open(event_flags); } AppContextMenu* ArcPlayStoreSearchResult::GetAppContextMenu() { return context_menu_.get(); } void ArcPlayStoreSearchResult::OnIconDecoded(const gfx::ImageSkia& icon) { SetIcon(IconInfo(icon)); } } // namespace app_list
6,194
2,014
// bslstl_stack.t.cpp -*-C++-*- #include <bslstl_stack.h> #include <bslstl_vector.h> #include <bsltf_stdstatefulallocator.h> #include <bsltf_stdtestallocator.h> #include <bsltf_templatetestfacility.h> #include <bsltf_testvaluesarray.h> #include <bslma_allocator.h> #include <bslma_default.h> #include <bslma_defaultallocatorguard.h> #include <bslma_mallocfreeallocator.h> #include <bslma_testallocator.h> #include <bslma_testallocatormonitor.h> #include <bslalg_rangecompare.h> #include <bslmf_issame.h> #include <bslmf_haspointersemantics.h> #include <bslmf_movableref.h> #include <bsls_alignmentutil.h> #include <bsls_asserttest.h> #include <bsls_bsltestutil.h> #include <bsls_compilerfeatures.h> #include <bsls_nameof.h> #include <bsls_platform.h> #include <algorithm> #include <functional> #include <typeinfo> #include <cstdio> #include <cstdio> #include <cstdlib> #include <stdlib.h> // atoi #include <string.h> // ============================================================================ // ADL SWAP TEST HELPER // ---------------------------------------------------------------------------- template <class TYPE> void invokeAdlSwap(TYPE& a, TYPE& b) // Exchange the values of the specified 'a' and 'b' objects using the // 'swap' method found by ADL (Argument Dependent Lookup). The behavior // is undefined unless 'a' and 'b' were created with the same allocator. { using namespace bsl; swap(a, b); } // The following 'using' directives must come *after* the definition of // 'invokeAdlSwap' (above). using namespace BloombergLP; using namespace bsl; // ============================================================================ // TEST PLAN // ---------------------------------------------------------------------------- // Overview // -------- // The object under test is a container whose interface and contract is // dictated by the C++ standard. The general concerns are compliance, // exception safety, and proper dispatching (for member function templates such // as assign and insert). This container is implemented in the form of a class // template, and thus its proper instantiation for several types is a concern. // Regarding the allocator template argument, we use mostly a 'bsl::allocator' // together with a 'bslma::TestAllocator' mechanism, but we also verify the C++ // standard. // // The Primary Manipulators and Basic Accessors are decided to be: // // Primary Manipulators: //: o 'push' //: o 'pop // // Basic Accessors: //: o 'empty' //: o 'size' //: o 'top' // // This test plan follows the standard approach for components implementing // value-semantic containers. We have chosen as *primary* *manipulators* the // 'push' and 'pop' methods to be used by the generator functions 'g' and // 'gg'. Note that some manipulators must support aliasing, and those that // perform memory allocation must be tested for exception neutrality via the // 'bslma::TestAllocator' component. After the mandatory sequence of cases // (1--10) for value-semantic types (cases 5 and 10 are not implemented, as // there is not output or streaming below bslstl), we test each individual // constructor, manipulator, and accessor in subsequent cases. // // Certain standard value-semantic-type test cases are omitted: //: o BSLX streaming is not (yet) implemented for this class. // // Global Concerns: //: o The test driver is robust w.r.t. reuse in other, similar components. //: o ACCESSOR methods are declared 'const'. //: o CREATOR & MANIPULATOR pointer/reference parameters are declared 'const'. 
//: o No memory is ever allocated from the global allocator. //: o Any allocated memory is always from the object allocator. //: o An object's value is independent of the allocator used to supply memory. //: o Injected exceptions are safely propagated during memory allocation. //: o Precondition violations are detected in appropriate build modes. // // Global Assumptions: //: o All explicit memory allocations are presumed to use the global, default, //: or object allocator. //: o ACCESSOR methods are 'const' thread-safe. //: o Individual attribute types are presumed to be *alias-safe*; hence, only //: certain methods require the testing of this property: //: o copy-assignment //: o swap // ---------------------------------------------------------------------------- // CREATORS // [ 7] copy c'tor // [ 2] stack, stack(bslma::Allocator *bA) // [17] stack(MovableRef container) // [17] stack(MovableRef container, bslma::Allocator *bA) // [17] stack(MovableRef stack) // [17] stack(MovableRef stack, bslma::Allocator *bA) // // MANIPULATORS // [ 9] operator= // [18] operator=(MovableRef stack) // [ 8] member swap // [ 2] Primary Manipulators -- push and pop // [18] push(MovableRef value) // [18] emplace(Args&&.. args) // // ACCESSORS // [15] testing empty, size // [ 4] Primary Accessors // // FREE FUNCTIONS // [12] inequality comparisons: '<', '>', '<=', '>=' // [ 6] equality comparisons: '==', '!=' // [ 5] operator<< (N/A) // ---------------------------------------------------------------------------- // [16] Usage Example // [14] testing simple container that does not support allocators // [13] testing container override of specified 'VALUE' // [11] type traits // [10] allocator // [ 3] Primary generator functions 'gg' and 'ggg' // [ 1] Breathing Test // [19] CONCERN: Methods qualifed 'noexcept' in standard are so implemented. // // ============================================================================ // STANDARD BDE ASSERT TEST MACROS // ---------------------------------------------------------------------------- // NOTE: THIS IS A LOW-LEVEL COMPONENT AND MAY NOT USE ANY C++ LIBRARY // FUNCTIONS, INCLUDING IOSTREAMS. namespace { int testStatus = 0; void aSsErT(bool b, const char *s, int i) { if (b) { printf("Error " __FILE__ "(%d): %s (failed)\n", i, s); if (testStatus >= 0 && testStatus <= 100) ++testStatus; } } } // close unnamed namespace //============================================================================= // STANDARD BDE TEST DRIVER MACROS //----------------------------------------------------------------------------- #define ASSERT BSLS_BSLTESTUTIL_ASSERT #define LOOP_ASSERT BSLS_BSLTESTUTIL_LOOP_ASSERT #define LOOP0_ASSERT BSLS_BSLTESTUTIL_LOOP0_ASSERT #define LOOP1_ASSERT BSLS_BSLTESTUTIL_LOOP1_ASSERT #define LOOP2_ASSERT BSLS_BSLTESTUTIL_LOOP2_ASSERT #define LOOP3_ASSERT BSLS_BSLTESTUTIL_LOOP3_ASSERT #define LOOP4_ASSERT BSLS_BSLTESTUTIL_LOOP4_ASSERT #define LOOP5_ASSERT BSLS_BSLTESTUTIL_LOOP5_ASSERT #define LOOP6_ASSERT BSLS_BSLTESTUTIL_LOOP6_ASSERT #define ASSERTV BSLS_BSLTESTUTIL_ASSERTV #define Q BSLS_BSLTESTUTIL_Q // Quote identifier literally. #define P BSLS_BSLTESTUTIL_P // Print identifier and value. #define P_ BSLS_BSLTESTUTIL_P_ // P(X) without '\n'. #define T_ BSLS_BSLTESTUTIL_T_ // Print a tab (w/o newline). 
#define L_ BSLS_BSLTESTUTIL_L_ // current Line number #define RUN_EACH_TYPE BSLTF_TEMPLATETESTFACILITY_RUN_EACH_TYPE // ============================================================================ // NEGATIVE-TEST MACRO ABBREVIATIONS // ---------------------------------------------------------------------------- #define ASSERT_SAFE_PASS(EXPR) BSLS_ASSERTTEST_ASSERT_SAFE_PASS(EXPR) #define ASSERT_SAFE_FAIL(EXPR) BSLS_ASSERTTEST_ASSERT_SAFE_FAIL(EXPR) #define ASSERT_PASS(EXPR) BSLS_ASSERTTEST_ASSERT_PASS(EXPR) #define ASSERT_FAIL(EXPR) BSLS_ASSERTTEST_ASSERT_FAIL(EXPR) #define ASSERT_OPT_PASS(EXPR) BSLS_ASSERTTEST_ASSERT_OPT_PASS(EXPR) #define ASSERT_OPT_FAIL(EXPR) BSLS_ASSERTTEST_ASSERT_OPT_FAIL(EXPR) // ============================================================================ // PRINTF FORMAT MACRO ABBREVIATIONS // ---------------------------------------------------------------------------- #define ZU BSLS_BSLTESTUTIL_FORMAT_ZU // ============================================================================ // GLOBAL TEST VALUES // ---------------------------------------------------------------------------- static bool verbose; static bool veryVerbose; static bool veryVeryVerbose; static bool veryVeryVeryVerbose; //============================================================================= // GLOBAL TYPEDEFS/CONSTANTS FOR TESTING //----------------------------------------------------------------------------- // Define DEFAULT DATA used in multiple test cases. static const size_t DEFAULT_MAX_LENGTH = 32; struct DefaultDataRow { int d_line; // source line number const char *d_spec; // specification string, for input to 'gg' function const char *d_results; // expected element values }; static const DefaultDataRow DEFAULT_DATA[] = { //line spec results //---- -------- ------- { L_, "", "" }, { L_, "A", "A" }, { L_, "AA", "A" }, { L_, "B", "B" }, { L_, "AB", "AB" }, { L_, "BA", "AB" }, { L_, "AC", "AC" }, { L_, "CD", "CD" }, { L_, "ABC", "ABC" }, { L_, "ACB", "ABC" }, { L_, "BAC", "ABC" }, { L_, "BCA", "ABC" }, { L_, "CAB", "ABC" }, { L_, "CBA", "ABC" }, { L_, "BAD", "ABD" }, { L_, "ABCA", "ABC" }, { L_, "ABCB", "ABC" }, { L_, "ABCC", "ABC" }, { L_, "ABCABC", "ABC" }, { L_, "AABBCC", "ABC" }, { L_, "ABCD", "ABCD" }, { L_, "ACBD", "ABCD" }, { L_, "BDCA", "ABCD" }, { L_, "DCBA", "ABCD" }, { L_, "BEAD", "ABDE" }, { L_, "BCDE", "BCDE" }, { L_, "ABCDE", "ABCDE" }, { L_, "ACBDE", "ABCDE" }, { L_, "CEBDA", "ABCDE" }, { L_, "EDCBA", "ABCDE" }, { L_, "FEDCB", "BCDEF" }, { L_, "FEDCBA", "ABCDEF" }, { L_, "ABCDEFG", "ABCDEFG" }, { L_, "ABCDEFGH", "ABCDEFGH" }, { L_, "ABCDEFGHI", "ABCDEFGHI" }, { L_, "ABCDEFGHIJKLMNOP", "ABCDEFGHIJKLMNOP" }, { L_, "PONMLKJIGHFEDCBA", "ABCDEFGHIJKLMNOP" }, { L_, "ABCDEFGHIJKLMNOPQ", "ABCDEFGHIJKLMNOPQ" }, { L_, "DHBIMACOPELGFKNJQ", "ABCDEFGHIJKLMNOPQ" } }; static const int DEFAULT_NUM_DATA = sizeof DEFAULT_DATA / sizeof *DEFAULT_DATA; typedef bslmf::MovableRefUtil MoveUtil; //============================================================================= // GLOBAL HELPER FUNCTIONS FOR TESTING //----------------------------------------------------------------------------- #ifndef BSLS_PLATFORM_OS_WINDOWS # define TEST_TYPES_REGULAR(containerArg) \ containerArg<signed char>, \ containerArg<size_t>, \ containerArg<bsltf::TemplateTestFacility::ObjectPtr>, \ containerArg<bsltf::TemplateTestFacility::FunctionPtr>, \ containerArg<bsltf::TemplateTestFacility::MethodPtr>, \ containerArg<bsltf::EnumeratedTestType::Enum>, \ containerArg<bsltf::UnionTestType>, \ 
containerArg<bsltf::SimpleTestType>, \ containerArg<bsltf::AllocTestType>, \ containerArg<bsltf::BitwiseMoveableTestType>, \ containerArg<bsltf::AllocBitwiseMoveableTestType>, \ containerArg<bsltf::NonTypicalOverloadsTestType> #else # define TEST_TYPES_REGULAR(containerArg) \ containerArg<signed char>, \ containerArg<size_t>, \ containerArg<bsltf::TemplateTestFacility::ObjectPtr>, \ containerArg<bsltf::TemplateTestFacility::MethodPtr>, \ containerArg<bsltf::EnumeratedTestType::Enum>, \ containerArg<bsltf::UnionTestType>, \ containerArg<bsltf::SimpleTestType>, \ containerArg<bsltf::AllocTestType>, \ containerArg<bsltf::BitwiseMoveableTestType>, \ containerArg<bsltf::AllocBitwiseMoveableTestType>, \ containerArg<bsltf::NonTypicalOverloadsTestType> #endif #define TEST_TYPES_INEQUAL_COMPARABLE(containerArg) \ containerArg<signed char>, \ containerArg<size_t>, \ containerArg<bsltf::TemplateTestFacility::ObjectPtr>, \ containerArg<bsltf::EnumeratedTestType::Enum> #define TEST_TYPES_MOVABLE(containerArg) \ containerArg<bsltf::MovableTestType>, \ containerArg<bsltf::MovableAllocTestType> namespace bsl { // stack-specific print function. template <class VALUE, class CONTAINER> void debugprint(const bsl::stack<VALUE, CONTAINER>& s) { if (s.empty()) { printf("<empty>"); } else { printf("size: %d, top: ", (int)s.size()); bsls::BslTestUtil::callDebugprint(static_cast<char>( bsltf::TemplateTestFacility::getIdentifier(s.top()))); } fflush(stdout); } } // close namespace bsl template <class VALUE> struct NonAllocCont { // PUBLIC TYPES typedef VALUE value_type; typedef VALUE& reference; typedef const VALUE& const_reference; typedef std::size_t size_type; private: // DATA bsl::vector<VALUE> d_vector; public: // CREATORS NonAllocCont() : d_vector(&bslma::MallocFreeAllocator::singleton()) {} ~NonAllocCont() {} // MANIPULATORS NonAllocCont& operator=(const NonAllocCont& rhs) { d_vector = rhs.d_vector; return *this; } reference back() { return d_vector.back(); } void pop_back() { d_vector.pop_back(); } void push_back(const value_type& value) { d_vector.push_back(value); } bsl::vector<value_type>& contents() { return d_vector; } // ACCESSORS bool operator==(const NonAllocCont& rhs) const { return d_vector == rhs.d_vector; } bool operator!=(const NonAllocCont& rhs) const { return !operator==(rhs); } bool operator<(const NonAllocCont& rhs) const { return d_vector < rhs.d_vector; } bool operator>=(const NonAllocCont& rhs) const { return !operator<(rhs); } bool operator>(const NonAllocCont& rhs) const { return d_vector > rhs.d_vector; } bool operator<=(const NonAllocCont& rhs) const { return !operator>(rhs); } const_reference back() const { return d_vector.back(); } size_type size() const { return d_vector.size(); } }; namespace std { template <class VALUE> void swap(NonAllocCont<VALUE>& lhs, NonAllocCont<VALUE>& rhs) { lhs.contents().swap(rhs.contents()); } } // close namespace std template <class VALUE> struct ValueName { private: // NOT IMPLEMENTED static const char *name(); // Not implemented, so that an attempt to show the name of an // unrecognized type will result in failure to link. 
}; template <> struct ValueName<signed char> { static const char *name() { return "signed char"; } }; template <> struct ValueName<size_t> { static const char *name() { return "size_t"; } }; template <> struct ValueName<bsltf::TemplateTestFacility::ObjectPtr> { static const char *name() { return "TemplateTestFacility::ObjectPtr"; } }; template <> struct ValueName<bsltf::TemplateTestFacility::FunctionPtr> { static const char *name() { return "TemplateTestFacility::FunctionPtr"; } }; template <> struct ValueName<bsltf::TemplateTestFacility::MethodPtr> { static const char *name() { return "TemplateTestFacility::MethodPtr"; } }; template <> struct ValueName<bsltf::EnumeratedTestType::Enum> { static const char *name() { return "EnumeratedTestType::Enum"; } }; template <> struct ValueName<bsltf::UnionTestType> { static const char *name() { return "UnionTestType"; } }; template <> struct ValueName<bsltf::SimpleTestType> { static const char *name() { return "SimpleTestType"; } }; template <> struct ValueName<bsltf::AllocTestType> { static const char *name() { return "AllocTestType"; } }; template <> struct ValueName<bsltf::BitwiseMoveableTestType> { static const char *name() { return "BitwiseMoveableTestType"; } }; template <> struct ValueName<bsltf::AllocBitwiseMoveableTestType> { static const char *name() { return "AllocBitwiseMoveableTestType"; } }; template <> struct ValueName<bsltf::NonTypicalOverloadsTestType> { static const char *name() { return "NonTypicalOverloadsTestType"; } }; template <class CONTAINER> struct ContainerName { static const char *name(); }; template <class VALUE> struct ContainerName<deque<VALUE> > { static const char *name() { static char buf[1000]; strcpy(buf, "deque<"); strcat(buf, ValueName<VALUE>::name()); strcat(buf, ">"); return buf; } }; template <class VALUE> struct ContainerName<vector<VALUE> > { static const char *name() { static char buf[1000]; strcpy(buf, "vector<"); strcat(buf, ValueName<VALUE>::name()); strcat(buf, ">"); return buf; } }; bool expectToAllocate(int n) // Return 'true' if the container is expected to allocate memory on the // specified 'n'th element, and 'false' otherwise. { if (n > 32) { return (0 == n % 32); // RETURN } return (((n - 1) & n) == 0); // Allocate when 'n' is a power of 2 } template<class CONTAINER, class VALUES> void emptyNVerifyStack(stack<typename CONTAINER::value_type, CONTAINER> *pmX, const VALUES& expectedValues, size_t expectedSize, const int LINE) // Verify the specified 'container' has the specified 'expectedSize' and // contains the same values as the array in the specified 'expectedValues'. // Return 0 if 'container' has the expected values, and a non-zero value // otherwise. 
{ const char *cont = ContainerName<CONTAINER>::name(); const char *val = ValueName<typename CONTAINER::value_type>::name(); ASSERTV(cont, val, LINE, expectedSize, pmX->size(), expectedSize == pmX->size()); if (expectedSize != pmX->size()) { return; // RETURN } for (int i = static_cast<int>(expectedSize) - 1; i >= 0; --i) { if (expectedValues[i] != pmX->top()) P_(cont); ASSERTV(val, i, LINE, expectedValues[i], pmX->top(), expectedValues[i] == pmX->top()); pmX->pop(); } } template<class CONTAINER, class VALUES> void verifyStack(const stack<typename CONTAINER::value_type, CONTAINER>& X, const VALUES& expectedValues, size_t expectedSize, const int LINE, bslma::Allocator *allocator = 0) { stack<typename CONTAINER::value_type, CONTAINER> copyX(X, bslma::Default::allocator(allocator)); emptyNVerifyStack(&copyX, expectedValues, expectedSize, LINE); } // ---------------------------------------------------------------------------- // HELPERS: "Called Method" Classes: 'NonMovableVector' and 'MovableVector' // ---------------------------------------------------------------------------- enum CalledMethod // Enumerations used to indicate if appropriate special container's method // has been invoked. { e_NONE = 0 , e_CTOR_DFT_SANS_ALLOC = 1 << 0 , e_CTOR_DFT_AVEC_ALLOC = 1 << 1 , e_CTOR_CPY_SANS_ALLOC = 1 << 3 , e_CTOR_CPY_AVEC_ALLOC = 1 << 4 , e_CTOR_MOV_SANS_ALLOC = 1 << 5 , e_CTOR_MOV_AVEC_ALLOC = 1 << 6 , e_ASSIGN_CREF = 1 << 7 , e_ASSIGN_MOVE = 1 << 8 , e_PUSH_BACK_CREF = 1 << 9 , e_PUSH_BACK_MOVE = 1 << 10 , e_EMPLACE_0 = 1 << 11 , e_EMPLACE_1 = 1 << 12 , e_EMPLACE_2 = 1 << 13 , e_EMPLACE_3 = 1 << 14 , e_EMPLACE_4 = 1 << 15 , e_EMPLACE_5 = 1 << 16 , e_EMPLACE_6 = 1 << 17 , e_EMPLACE_7 = 1 << 18 , e_EMPLACE_8 = 1 << 19 , e_EMPLACE_9 = 1 << 20 , e_EMPLACE_A = 1 << 21 }; void debugprint(enum CalledMethod calledMethod) { const char *ascii; #define CASE(X) case(e_ ## X): ascii = #X; break; switch (calledMethod) { CASE(NONE) CASE(CTOR_DFT_SANS_ALLOC) CASE(CTOR_DFT_AVEC_ALLOC) CASE(CTOR_CPY_SANS_ALLOC) CASE(CTOR_CPY_AVEC_ALLOC) CASE(CTOR_MOV_SANS_ALLOC) CASE(CTOR_MOV_AVEC_ALLOC) CASE(ASSIGN_CREF) CASE(ASSIGN_MOVE) CASE(PUSH_BACK_CREF) CASE(PUSH_BACK_MOVE) CASE(EMPLACE_0) CASE(EMPLACE_1) CASE(EMPLACE_2) CASE(EMPLACE_3) CASE(EMPLACE_4) CASE(EMPLACE_5) CASE(EMPLACE_6) CASE(EMPLACE_7) CASE(EMPLACE_8) CASE(EMPLACE_9) CASE(EMPLACE_A) default: ascii = "(* UNKNOWN *)"; } #undef CASE printf("%s", ascii); } inline CalledMethod operator|=(CalledMethod& lhs, CalledMethod rhs) // Bitwise OR the values of the specified 'lhs' and 'rhs' flags, and return // the resulting value. { lhs = static_cast<CalledMethod>( static_cast<int>(lhs) | static_cast<int>(rhs)); return lhs; } CalledMethod g_calledMethodFlag; // global variable, that stores information // about called methods for special // containers 'NonMovableVector' and // 'MovableVector'. void setupCalledMethodCheck() // Reset 'g_calledMethodFlag' global variable's value. 
{ g_calledMethodFlag = e_NONE; } enum CalledMethod getCalledMethod() { return g_calledMethodFlag; } // ====================== // class NonMovableVector // ====================== template <class VALUE, class ALLOCATOR> class NonMovableVector; template<class VALUE, class ALLOCATOR> bool operator==(const NonMovableVector<VALUE, ALLOCATOR>& lhs, const NonMovableVector<VALUE, ALLOCATOR>& rhs); template <class VALUE, class ALLOCATOR = bsl::allocator<VALUE> > class NonMovableVector { // This class is a value-semantic class template, acting as a transparent // proxy for the underlying 'bsl::vector' container, that holds elements of // the (template parameter) 'VALUE', and recording in the global variable // 'g_calledMethodFlag' methods being invoked. The information recorded is // used to verify that 'stack' invokes expected container methods. // DATA bsl::vector<VALUE> d_vector; // container for it's behaviour simulation // FRIENDS friend bool operator==<VALUE, ALLOCATOR>(const NonMovableVector& lhs, const NonMovableVector& rhs); public: // CLASS METHODS static int GGG(NonMovableVector *object, const char *spec, int verbose = 1); static NonMovableVector GG(NonMovableVector *object, const char *spec); // PUBLIC TYPES typedef ALLOCATOR allocator_type; typedef VALUE value_type; typedef VALUE& reference; typedef const VALUE& const_reference; typedef std::size_t size_type; typedef VALUE *iterator; typedef const VALUE *const_iterator; // CREATORS NonMovableVector() : d_vector() // Create an empty vector. Method invocation is recorded. { g_calledMethodFlag |= e_CTOR_DFT_SANS_ALLOC; } NonMovableVector(const ALLOCATOR& basicAllocator) : d_vector(basicAllocator) // Create an empty vector, using the specified 'basicAllocator' to // supply memory. Method invocation is recorded. { g_calledMethodFlag |= e_CTOR_DFT_AVEC_ALLOC; } NonMovableVector(const NonMovableVector& original) // Create a vector that has the same value as the specified 'original' // vector. Method invocation is recorded. : d_vector(original.d_vector) { g_calledMethodFlag |= e_CTOR_CPY_SANS_ALLOC; } NonMovableVector(const NonMovableVector& original, const ALLOCATOR& basicAllocator) // Create a vector that has the same value as the specified 'original' // vector, using the specified 'basicAllocator' to supply memory. // Method invocation is recorded. : d_vector(original.d_vector, basicAllocator) { g_calledMethodFlag |= e_CTOR_CPY_AVEC_ALLOC; } // MANIPULATORS NonMovableVector& operator=(const NonMovableVector& rhs) // Assign to this vector the value of the specified 'other' vector and // return a reference to this modifiable vector. Method invocation is // recorded. { d_vector = rhs.d_vector; g_calledMethodFlag |= e_ASSIGN_CREF; return *this; } void pop_back() // Erase the last element from this vector. { d_vector.pop_back(); } void push_back(const value_type& value) // Append a copy of the specified 'value' at the end of this vector. // Method invocation is recorded. { g_calledMethodFlag |= e_PUSH_BACK_CREF; d_vector.push_back(value); } template <class INPUT_ITER> iterator insert(const_iterator position, INPUT_ITER first, INPUT_ITER last) // Insert at the specified 'position' in this vector the values in // the range starting at the specified 'first' and ending // immediately before the specified 'last' iterators of the // (template parameter) type 'INPUT_ITER', and return an iterator // to the first newly inserted element. 
{ return d_vector.insert(position, first, last); } iterator begin() // Return an iterator pointing the first element in this modifiable // vector (or the past-the-end iterator if this vector is empty). { return d_vector.begin(); } iterator end() // Return the past-the-end iterator for this modifiable vector. { return d_vector.end(); } reference front() // Return a reference to the modifiable element at the first position // in this vector. The behavior is undefined if this vector is empty. { return d_vector.front(); } reference back() // Return a reference to the modifiable element at the last position in // this vector. The behavior is undefined if this vector is empty. { return d_vector.back(); } #if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES template <class... Args> void emplace_back(Args&&... arguments) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the specified 'arguments'. Note that this method is written only // for testing purposes, it DOESN'T simulate standard vector behavior // and requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { int argumentsNumber = sizeof...(arguments); g_calledMethodFlag |= static_cast<CalledMethod>( static_cast<int>(e_EMPLACE_0) << argumentsNumber); d_vector.push_back(value_type(1)); } #elif BSLS_COMPILERFEATURES_SIMULATE_VARIADIC_TEMPLATES inline void emplace_back() // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter. Note that // this method is written only for testing purposes, it DOESN'T // simulate standard vector behavior and requires that the (template // parameter) type 'VALUE_TYPE' has constructor, accepting integer // value as a parameter. Method invocation is recorded. { g_calledMethodFlag |= e_EMPLACE_0; d_vector.push_back(value_type(1)); } template <class Args_01> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed argument. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; g_calledMethodFlag |= e_EMPLACE_1; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. 
(void)args_01; (void)args_02; g_calledMethodFlag |= e_EMPLACE_2; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; g_calledMethodFlag |= e_EMPLACE_3; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; g_calledMethodFlag |= e_EMPLACE_4; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; g_calledMethodFlag |= e_EMPLACE_5; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05, class Args_06> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05, BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. 
Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; (void)args_06; g_calledMethodFlag |= e_EMPLACE_6; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05, class Args_06, class Args_07> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05, BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06, BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; (void)args_06; (void)args_07; g_calledMethodFlag |= e_EMPLACE_7; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05, class Args_06, class Args_07, class Args_08> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05, BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06, BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07, BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. 
(void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; (void)args_06; (void)args_07; (void)args_08; g_calledMethodFlag |= e_EMPLACE_8; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05, class Args_06, class Args_07, class Args_08, class Args_09> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05, BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06, BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07, BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08, BSLS_COMPILERFEATURES_FORWARD_REF(Args_09) args_09) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; (void)args_06; (void)args_07; (void)args_08; (void)args_09; g_calledMethodFlag |= e_EMPLACE_9; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05, class Args_06, class Args_07, class Args_08, class Args_09, class Args_10> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05, BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06, BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07, BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08, BSLS_COMPILERFEATURES_FORWARD_REF(Args_09) args_09, BSLS_COMPILERFEATURES_FORWARD_REF(Args_10) args_10) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; (void)args_06; (void)args_07; (void)args_08; (void)args_09; (void)args_10; g_calledMethodFlag |= e_EMPLACE_A; d_vector.push_back(value_type(1)); } #else template <class... Args> void emplace_back( BSLS_COMPILERFEATURES_FORWARD_REF(Args)... arguments) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the specified 'arguments'. Note that this method is written only // for testing purposes, it DOESN'T simulate standard vector behavior // and requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. 
{ int argumentsNumber = sizeof...(arguments); g_calledMethodFlag |= static_cast<CalledMethod>( static_cast<int>(e_EMPLACE_0) << argumentsNumber); d_vector.push_back(value_type(1)); } #endif // ACCESSORS const_iterator begin() const // Return an iterator pointing the first element in this non-modifiable // vector (or the past-the-end iterator if this vector is empty). { return d_vector.begin(); } const_iterator end() const // Return the past-the-end iterator for this non-modifiable vector. { return d_vector.end(); } const_reference front() const // Return a reference to the non-modifiable element at the first // position in this vector. The behavior is undefined if this vector // is empty. { return d_vector.front(); } const_reference back() const // Return a reference to the non-modifiable element at the last // position in this vector. The behavior is undefined if this vector // is empty. { return d_vector.back(); } size_type size() const // Return the number of elements in this vector. { return d_vector.size(); } bool empty() const // Return 'true' if this vector has size 0, and 'false' otherwise. { return d_vector.empty(); } }; // ---------------------- // class NonMovableVector // ---------------------- // CLASS METHODS template <class CONTAINER> class TestDriver; template <class VALUE, class ALLOCATOR> int NonMovableVector<VALUE, ALLOCATOR>:: GGG(NonMovableVector *object, const char *spec, int verbose) { bslma::DefaultAllocatorGuard guard( &bslma::NewDeleteAllocator::singleton()); typename TestDriver<NonMovableVector>::TestValues VALUES; enum { SUCCESS = -1 }; for (int i = 0; spec[i]; ++i) { if ('A' <= spec[i] && spec[i] <= 'Z') { object->push_back(VALUES[spec[i] - 'A']); } else { if (verbose) { printf("Error, bad character ('%c') " "in spec \"%s\" at position %d.\n", spec[i], spec, i); } // Discontinue processing this spec. return i; // RETURN } } return SUCCESS; } template <class VALUE, class ALLOCATOR> NonMovableVector<VALUE, ALLOCATOR> NonMovableVector<VALUE, ALLOCATOR>:: GG(NonMovableVector *object, const char *spec) { ASSERTV(GGG(object, spec) < 0); return *object; } // FREE OPERATORS template<class VALUE, class ALLOCATOR> bool operator==(const NonMovableVector<VALUE, ALLOCATOR>& lhs, const NonMovableVector<VALUE, ALLOCATOR>& rhs) { return lhs.d_vector == rhs.d_vector; } // =================== // class MovableVector // =================== template <class VALUE, class ALLOCATOR> class MovableVector; template<class VALUE, class ALLOCATOR> bool operator==(const MovableVector<VALUE, ALLOCATOR>& lhs, const MovableVector<VALUE, ALLOCATOR>& rhs); template <class VALUE, class ALLOCATOR = bsl::allocator<VALUE> > class MovableVector { // TBD // // This class is a value-semantic class template, acting as a transparent // proxy for the underlying 'bsl::vector' container, that holds elements of // the (template parameter) 'VALUE', and recording in the global variable // 'g_calledMethodFlag' methods being invoked. The information recorded is // used to verify that 'stack' invokes expected container methods. 
private: // DATA bsl::vector<VALUE> d_vector; // provides required behavior // FRIENDS friend bool operator==<VALUE, ALLOCATOR>( const MovableVector<VALUE, ALLOCATOR>& lhs, const MovableVector<VALUE, ALLOCATOR>& rhs); public: // CLASS METHODS static int GGG(MovableVector *object, const char *spec, int verbose = 1); static MovableVector GG(MovableVector *object, const char *spec); // PUBLIC TYPES typedef ALLOCATOR allocator_type; typedef VALUE value_type; typedef VALUE& reference; typedef const VALUE& const_reference; typedef std::size_t size_type; typedef VALUE* iterator; typedef const VALUE* const_iterator; // CREATORS MovableVector() : d_vector() // Create an empty vector. Method invocation is recorded. { g_calledMethodFlag |= e_CTOR_DFT_SANS_ALLOC; } MovableVector(const ALLOCATOR& basicAllocator) : d_vector( basicAllocator) // Create an empty vector, using the specified 'basicAllocator' to // supply memory. Method invocation is recorded. { g_calledMethodFlag |= e_CTOR_DFT_AVEC_ALLOC; } MovableVector(const MovableVector& original) // Create a vector that has the same value as the specified 'original' // vector. Method invocation is recorded. : d_vector(original.d_vector) { g_calledMethodFlag |= e_CTOR_CPY_SANS_ALLOC; } MovableVector(bslmf::MovableRef<MovableVector> original) // Create a vector that has the same value as the specified 'original' // vector. Method invocation is recorded. : d_vector(MoveUtil::move(MoveUtil::access(original).d_vector)) { g_calledMethodFlag |= e_CTOR_MOV_SANS_ALLOC; } MovableVector(const MovableVector& original, const ALLOCATOR& basicAllocator) // Create a vector that has the same value as the specified 'original' // vector, using the specified 'basicAllocator' to supply memory. // Method invocation is recorded. : d_vector(original.d_vector, basicAllocator) { g_calledMethodFlag |= e_CTOR_CPY_AVEC_ALLOC; } MovableVector(bslmf::MovableRef<MovableVector> original, const ALLOCATOR& basicAllocator) // Create a vector that has the same value as the specified 'original' // vector, using the specified 'basicAllocator' to supply memory. // Method invocation is recorded. : d_vector(MoveUtil::move(MoveUtil::access(original).d_vector), basicAllocator) { g_calledMethodFlag |= e_CTOR_MOV_AVEC_ALLOC; } // MANIPULATORS MovableVector& operator=(const MovableVector& rhs) // Assign to this vector the value of the specified 'other' vector and // return a reference to this modifiable vector. Method invocation is // recorded. { g_calledMethodFlag |= e_ASSIGN_CREF; d_vector = rhs.d_vector; return *this; } MovableVector& operator=(bslmf::MovableRef<MovableVector> rhs) // Assign to this vector the value of the specified 'other' vector and // return a reference to this modifiable vector. Method invocation is // recorded. { g_calledMethodFlag |= e_ASSIGN_MOVE; d_vector = MoveUtil::move(MoveUtil::access(rhs).d_vector); return *this; } void pop_back() // Erase the last element from this vector. { d_vector.pop_back(); } void push_back(const value_type& value) // Append a copy of the specified 'value' at the end of this vector. // Method invocation is recorded. { g_calledMethodFlag |= e_PUSH_BACK_CREF; d_vector.push_back(value); } void push_back(bslmf::MovableRef<value_type> value) // Append a copy of the specified 'value' at the end of this vector. // Method invocation is recorded. 
{ g_calledMethodFlag |= e_PUSH_BACK_MOVE; d_vector.push_back(MoveUtil::move(value)); } template <class INPUT_ITER> iterator insert(const_iterator position, INPUT_ITER first, INPUT_ITER last) // Insert at the specified 'position' in this vector the values in // the range starting at the specified 'first' and ending // immediately before the specified 'last' iterators of the // (template parameter) type 'INPUT_ITER', and return an iterator // to the first newly inserted element. { return d_vector.insert(position, first, last); } iterator begin() // Return an iterator pointing the first element in this modifiable // vector (or the past-the-end iterator if this vector is empty). { return d_vector.begin(); } iterator end() // Return the past-the-end iterator for this modifiable vector. { return d_vector.end(); } reference front() // Return a reference to the modifiable element at the first position // in this vector. The behavior is undefined if this vector is empty. { return d_vector.front(); } reference back() // Return a reference to the modifiable element at the last position in // this vector. The behavior is undefined if this vector is empty. { return d_vector.back(); } #if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES template <class... Args> void emplace_back(Args&&... arguments) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the specified 'arguments'. Note that this method is written only // for testing purposes, it DOESN'T simulate standard vector behavior // and requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { int argumentsNumber = sizeof...(arguments); g_calledMethodFlag |= static_cast<CalledMethod>( static_cast<int>(e_EMPLACE_0) << argumentsNumber); d_vector.push_back(value_type(1)); } #elif BSLS_COMPILERFEATURES_SIMULATE_VARIADIC_TEMPLATES inline void emplace_back() // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter. Note that // this method is written only for testing purposes, it DOESN'T // simulate standard vector behavior and requires that the (template // parameter) type 'VALUE_TYPE' has constructor, accepting integer // value as a parameter. Method invocation is recorded. { g_calledMethodFlag |= e_EMPLACE_0; d_vector.push_back(value_type(1)); } template <class Args_01> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed argument. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; g_calledMethodFlag |= e_EMPLACE_1; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. 
Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; g_calledMethodFlag |= e_EMPLACE_2; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; g_calledMethodFlag |= e_EMPLACE_3; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; g_calledMethodFlag |= e_EMPLACE_4; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. 
(void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; g_calledMethodFlag |= e_EMPLACE_5; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05, class Args_06> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05, BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; (void)args_06; g_calledMethodFlag |= e_EMPLACE_6; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05, class Args_06, class Args_07> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05, BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06, BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; (void)args_06; (void)args_07; g_calledMethodFlag |= e_EMPLACE_7; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05, class Args_06, class Args_07, class Args_08> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05, BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06, BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07, BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. 
(void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; (void)args_06; (void)args_07; (void)args_08; g_calledMethodFlag |= e_EMPLACE_8; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05, class Args_06, class Args_07, class Args_08, class Args_09> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05, BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06, BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07, BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08, BSLS_COMPILERFEATURES_FORWARD_REF(Args_09) args_09) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; (void)args_06; (void)args_07; (void)args_08; (void)args_09; g_calledMethodFlag |= e_EMPLACE_9; d_vector.push_back(value_type(1)); } template <class Args_01, class Args_02, class Args_03, class Args_04, class Args_05, class Args_06, class Args_07, class Args_08, class Args_09, class Args_10> inline void emplace_back(BSLS_COMPILERFEATURES_FORWARD_REF(Args_01) args_01, BSLS_COMPILERFEATURES_FORWARD_REF(Args_02) args_02, BSLS_COMPILERFEATURES_FORWARD_REF(Args_03) args_03, BSLS_COMPILERFEATURES_FORWARD_REF(Args_04) args_04, BSLS_COMPILERFEATURES_FORWARD_REF(Args_05) args_05, BSLS_COMPILERFEATURES_FORWARD_REF(Args_06) args_06, BSLS_COMPILERFEATURES_FORWARD_REF(Args_07) args_07, BSLS_COMPILERFEATURES_FORWARD_REF(Args_08) args_08, BSLS_COMPILERFEATURES_FORWARD_REF(Args_09) args_09, BSLS_COMPILERFEATURES_FORWARD_REF(Args_10) args_10) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the passed arguments. Note that this method is written only for // testing purposes, it DOESN'T simulate standard vector behavior and // requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. { // Compiler warnings suppression. (void)args_01; (void)args_02; (void)args_03; (void)args_04; (void)args_05; (void)args_06; (void)args_07; (void)args_08; (void)args_09; (void)args_10; g_calledMethodFlag |= e_EMPLACE_A; d_vector.push_back(value_type(1)); } #else template <class... Args> void emplace_back( BSLS_COMPILERFEATURES_FORWARD_REF(Args)... arguments) // Append to the end of this vector a newly created 'value_type' // object, constructed with integer literal as a parameter, despite of // the specified 'arguments'. Note that this method is written only // for testing purposes, it DOESN'T simulate standard vector behavior // and requires that the (template parameter) type 'VALUE_TYPE' has // constructor, accepting integer value as a parameter. Method // invocation is recorded. 
{ int argumentsNumber = sizeof...(arguments); g_calledMethodFlag |= static_cast<CalledMethod>( static_cast<int>(e_EMPLACE_0) << argumentsNumber); d_vector.push_back(value_type(1)); } #endif // ACCESSORS const_iterator begin() const // Return an iterator pointing the first element in this non-modifiable // vector (or the past-the-end iterator if this vector is empty). { return d_vector.begin(); } const_iterator end() const // Return the past-the-end iterator for this non-modifiable vector. { return d_vector.end(); } const_reference front() const // Return a reference to the non-modifiable element at the first // position in this vector. The behavior is undefined if this vector // is empty. { return d_vector.front(); } const_reference back() const // Return a reference to the non-modifiable element at the last // position in this vector. The behavior is undefined if this vector // is empty. { return d_vector.back(); } size_type size() const // Return the number of elements in this vector. { return d_vector.size(); } bool empty() const // Return 'true' if this vector has size 0, and 'false' otherwise. { return d_vector.empty(); } }; // ------------------- // class MovableVector // ------------------- // CLASS METHODS template <class CONTAINER> class TestDriver; template <class VALUE, class ALLOCATOR> int MovableVector<VALUE, ALLOCATOR>:: GGG(MovableVector *object, const char *spec, int verbose) { bslma::DefaultAllocatorGuard guard( &bslma::NewDeleteAllocator::singleton()); typename TestDriver<MovableVector>::TestValues VALUES; enum { SUCCESS = -1 }; for (int i = 0; spec[i]; ++i) { if ('A' <= spec[i] && spec[i] <= 'Z') { object->push_back(VALUES[spec[i] - 'A']); } else { if (verbose) { printf("Error, bad character ('%c') " "in spec \"%s\" at position %d.\n", spec[i], spec, i); } // Discontinue processing this spec. return i; // RETURN } } return SUCCESS; } template <class VALUE, class ALLOCATOR> MovableVector<VALUE, ALLOCATOR> MovableVector<VALUE, ALLOCATOR>:: GG(MovableVector *object, const char *spec) { ASSERTV(GGG(object, spec) < 0); return *object; } // FREE OPERATORS template<class VALUE, class ALLOCATOR> bool operator==(const MovableVector<VALUE, ALLOCATOR>& lhs, const MovableVector<VALUE, ALLOCATOR>& rhs) { return lhs.d_vector == rhs.d_vector; } // ========================== // class StatefulStlAllocator // ========================== template <class VALUE> class StatefulStlAllocator : public bsltf::StdTestAllocator<VALUE> // This class implements a standard compliant allocator that has an // attribute, 'id'. { // DATA int d_id; // identifier private: // TYPES typedef bsltf::StdTestAllocator<VALUE> StlAlloc; // Alias for the base class. public: template <class OTHER_TYPE> struct rebind { // This nested 'struct' template, parameterized by some 'OTHER_TYPE', // provides a namespace for an 'other' type alias, which is an // allocator type following the same template as this one but that // allocates elements of 'OTHER_TYPE'. Note that this allocator type // is convertible to and from 'other' for any 'OTHER_TYPE' including // 'void'. typedef StatefulStlAllocator<OTHER_TYPE> other; }; // CREATORS StatefulStlAllocator() // Create a 'StatefulStlAllocator' object. : StlAlloc() { } //! StatefulStlAllocator(const StatefulStlAllocator& original) = default; // Create a 'StatefulStlAllocator' object having the same id as the // specified 'original'. 
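    // A usage sketch (for exposition only): the 'id' attribute is intended to
    // be preserved when this allocator is converted to an allocator of a
    // different element type via the template constructor that follows:
    //..
    //  StatefulStlAllocator<int>  intAlloc;
    //  intAlloc.setId(5);
    //  StatefulStlAllocator<char> charAlloc(intAlloc);
    //  ASSERT(5 == charAlloc.id());
    //..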
template <class OTHER_TYPE> StatefulStlAllocator(const StatefulStlAllocator<OTHER_TYPE>& original) // Create a 'StatefulStlAllocator' object having the same id as the // specified 'original' with a different template type. : StlAlloc(original) , d_id(original.id()) { } // MANIPULATORS void setId(int value) // Set the 'id' attribute of this object to the specified 'value'. { d_id = value; } // ACCESSORS int id() const // Return the value of the 'id' attribute of this object. { return d_id; } }; template <class T> struct SpecialContainerTrait // A class should declare this trait if it registers it's methods // invocation in 'g_calledMethodFlag' global variable. { static const bool is_special_container = false; }; template <class T> struct SpecialContainerTrait<NonMovableVector<T> > { static const bool is_special_container = true; }; template <class T> struct SpecialContainerTrait<MovableVector<T> > { static const bool is_special_container = true; }; template <class CONTAINER> bool isCalledMethodCheckPassed(CalledMethod flag) // Return 'true' if global variable 'g_calledMethodFlag' has the same value // as the specified 'flag', and 'false' otherwise. Note that this check is // performed only for special containers, defined above. Function always // returns 'true' for all other classes. { if (SpecialContainerTrait<CONTAINER>::is_special_container) { return flag == g_calledMethodFlag; } return true; } //============================================================================= // USAGE EXAMPLE //============================================================================= // Suppose a husband wants to keep track of chores his wife has asked him to // do. Over the years of being married, he has noticed that his wife generally // wants the most recently requested task done first. If she has a new task in // mind that is low-priority, she will avoid asking for it until higher // priority tasks are finished. When he has finished all tasks, he is to // report to his wife that he is ready for more. // First, we define the class implementing the 'to-do' list. class ToDoList { // DATA bsl::stack<const char *> d_stack; public: // MANIPULATORS void enqueueTask(const char *task); // Add the specified 'task', a string describing a task, to the // list. Note the lifetime of the string referred to by 'task' must // exceed the lifetime of the task in this list. bool finishTask(); // Remove the current task from the list. Return 'true' if a task was // removed and it was the last task on the list, and return 'false' // otherwise. // ACCESSORS const char *currentTask() const; // Return the string representing the current task. If there // is no current task, return the string "<EMPTY>", which is // not a valid task. }; // MANIPULATORS void ToDoList::enqueueTask(const char *task) { d_stack.push(task); } bool ToDoList::finishTask() { if (!d_stack.empty()) { d_stack.pop(); return d_stack.empty(); // RETURN } return false; }; // ACCESSORS const char *ToDoList::currentTask() const { if (d_stack.empty()) { return "<EMPTY>"; // RETURN } return d_stack.top(); } //============================================================================= // ==================== // class ExceptionGuard // ==================== template <class OBJECT> struct ExceptionGuard { // This class provide a mechanism to verify the strong exception guarantee // in exception-throwing code. On construction, this class stores the // a copy of an object of the parameterized type 'OBJECT' and the address // of that object. 
On destruction, if 'release' was not invoked, it will // verify the value of the object is the same as the value of the copy // create on construction. This class requires the copy constructor and // 'operator ==' to be tested before use. // DATA int d_line; // the line number at construction OBJECT d_copy; // copy of the object being tested const OBJECT *d_object_p; // address of the original object public: // CREATORS ExceptionGuard(const OBJECT *object, int line, bslma::Allocator *basicAllocator = 0) : d_line(line) , d_copy(*object, basicAllocator) , d_object_p(object) // Create the exception guard for the specified 'object' at the // specified 'line' number. Optionally, specify 'basicAllocator' used // to supply memory. {} ~ExceptionGuard() // Destroy the exception guard. If the guard was not released, verify // that the state of the object supplied at construction has not // change. { if (d_object_p) { const int LINE = d_line; ASSERTV(LINE, d_copy == *d_object_p); } } // MANIPULATORS void release() // Release the guard from verifying the state of the object. { d_object_p = 0; } }; // ============================================================================ // GLOBAL TYPEDEFS FOR TESTING // ---------------------------------------------------------------------------- // // ================ // class TestDriver // ================ template <class CONTAINER> class TestDriver { // TBD // // This templatized struct provide a namespace for testing the 'set' // container. The parameterized 'KEY', 'COMP' and 'ALLOC' specifies the // value type, comparator type and allocator type respectively. Each // "testCase*" method test a specific aspect of 'stack<VALUE, CONTAINER>'. // Every test cases should be invoked with various parameterized type to // fully test the container. public: // PUBLIC TYPES typedef bsl::stack<typename CONTAINER::value_type, CONTAINER> Obj; // Type under test. private: // TYPES typedef typename Obj::value_type value_type; typedef typename Obj::reference reference; typedef typename Obj::const_reference const_reference; typedef typename Obj::size_type size_type; typedef CONTAINER container_type; // Shorthands public: typedef bsltf::TestValuesArray<value_type> TestValues; private: // TEST APPARATUS //------------------------------------------------------------------------- // The generating functions interpret the given 'spec' in order from left // to right to configure the object according to a custom language. // Uppercase letters [A..Z] correspond to arbitrary (but unique) char // values to be appended to the 'stack<VALUE, CONTAINER>' object. //.. // LANGUAGE SPECIFICATION: // ----------------------- // // <SPEC> ::= <EMPTY> | <LIST> // // <EMPTY> ::= // // <LIST> ::= <ITEM> | <ITEM><LIST> // // <ITEM> ::= <ELEMENT> | <CLEAR> // // <ELEMENT> ::= 'A' | 'B' | 'C' | 'D' | 'E' | ... | 'Z' // // unique but otherwise arbitrary // Spec String Description // ----------- ----------------------------------------------------------- // "" Has no effect; leaves the object empty. // "A" Insert the value corresponding to A. // "AA" Insert two values both corresponding to A. // "ABC" Insert three values corresponding to A, B and C. //.. //------------------------------------------------------------------------- static int ggg(Obj *object, const char *spec, int verbose = 1); // Configure the specified 'object' according to the specified 'spec', // using only the primary manipulator function 'insert' and white-box // manipulator 'clear'. 
Optionally specify a zero 'verbose' to // suppress 'spec' syntax error messages. Return the index of the // first invalid character, and a negative value otherwise. Note that // this function is used to implement 'gg' as well as allow for // verification of syntax error detection. static Obj& gg(Obj *object, const char *spec); // Return, by reference, the specified object with its value adjusted // according to the specified 'spec'. static Obj g(const char *spec); // Return, by value, a new object corresponding to the specified // 'spec'. static void emptyAndVerify(Obj *obj, const TestValues& testValues, size_t numTestValues, const int LINE); // Pop the elements out of 'obj', verifying that they exactly match // the first 'numTestValues' elements in 'testValues'. static bool typeAlloc() { return bslma::UsesBslmaAllocator<value_type>::value; } static bool emptyWillAlloc() { // Creating an empty 'deque' allocates memory, creating an empty // 'vector' does not. return bsl::is_same<CONTAINER, deque<value_type> >::value; } static bool use_same_allocator(Obj& object, int TYPE_ALLOC, bslma::TestAllocator *ta); // Return 'true' if the specified 'object' uses the specified 'ta' // allocator for supplying memory. The specified 'TYPE_ALLOC' // identifies, if 'object' uses allocator at all. Return 'false' if // object doesn't use 'ta'. public: // TEST CASES static void testCase19(); // Test 'noexcept' specifications #if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES static void testCase18MoveOnlyType(); // Test move manipulators on move-only types static void testCase17MoveOnlyType(); // Test move manipulators on move-only types #endif // !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES template <bool PROPAGATE_ON_CONTAINER_MOVE_ASSIGNMENT_FLAG, bool OTHER_FLAGS> static void testCase18_propagate_on_container_move_assignment_dispatch(); static void testCase18_propagate_on_container_move_assignment(); // Test 'propagate_on_container_move_assignment'. static void testCase18(bool isMovableContainer); // Test move manipulators static void testCase17(bool isMovableContainer); // Test move constructors static void testCase12(); // Test inequality operators static void testCase11(); // Test type traits. static void testCase10(); // Test bslma::Allocator. template <bool PROPAGATE_ON_CONTAINER_COPY_ASSIGNMENT_FLAG, bool OTHER_FLAGS> static void testCase9_propagate_on_container_copy_assignment_dispatch(); static void testCase9_propagate_on_container_copy_assignment(); // Test 'propagate_on_container_copy_assignment'. static void testCase9(); // Test assignment operator ('operator='). template <bool PROPAGATE_ON_CONTAINER_SWAP_FLAG, bool OTHER_FLAGS> static void testCase8_propagate_on_container_swap_dispatch(); static void testCase8_propagate_on_container_swap(); // Test 'propagate_on_container_swap'. static void testCase8(); // Test 'swap' member. template <bool SELECT_ON_CONTAINER_COPY_CONSTRUCTION_FLAG, bool OTHER_FLAGS> static void testCase7_select_on_container_copy_construction_dispatch(); static void testCase7_select_on_container_copy_construction(); // Test 'select_on_container_copy_construction'. static void testCase7(); // Test copy constructor. static void testCase6(); // Test equality operator ('operator=='). static void testCase5(); // Reserved for (<<) operator. static void testCase4(); // Test basic accessors ('size' and 'top'). static void testCase3(); // Test generator functions 'ggg', and 'gg'. static void testCase2(); // Test primary manipulators ('push' and 'pop'). 
static void testCase1(int *testKeys, size_t numValues); // Breathing test. This test *exercises* basic functionality but // *test* nothing. static void testCase1_NoAlloc(int *testValues, size_t numValues); // Breathing test, except on a non-allocator container. This test // *exercises* basic functionality but *test* nothing. }; // ---------------- // class TestDriver // ---------------- template <class CONTAINER> bool TestDriver<CONTAINER>::use_same_allocator(Obj& object, int TYPE_ALLOC, bslma::TestAllocator *ta) { bslma::DefaultAllocatorGuard guard( &bslma::NewDeleteAllocator::singleton()); const TestValues VALUES; if (0 == TYPE_ALLOC) { // If 'VALUE' does not use allocator, return true. return true; // RETURN } const bsls::Types::Int64 BB = ta->numBlocksTotal(); const bsls::Types::Int64 B = ta->numBlocksInUse(); object.push(VALUES[0]); const bsls::Types::Int64 AA = ta->numBlocksTotal(); const bsls::Types::Int64 A = ta->numBlocksInUse(); if (BB + TYPE_ALLOC <= AA && B + TYPE_ALLOC <= A) { return true; // RETURN } if (veryVerbose) { Q(Did find expected allocator) P(ta->name()) } return false; } template <class CONTAINER> int TestDriver<CONTAINER>::ggg(Obj *object, const char *spec, int verbose) { bslma::DefaultAllocatorGuard guard( &bslma::NewDeleteAllocator::singleton()); const TestValues VALUES; enum { SUCCESS = -1 }; for (int i = 0; spec[i]; ++i) { if ('A' <= spec[i] && spec[i] <= 'Z') { object->push(VALUES[spec[i] - 'A']); } else { if (verbose) { printf("Error, bad character ('%c') " "in spec \"%s\" at position %d.\n", spec[i], spec, i); } // Discontinue processing this spec. return i; // RETURN } } return SUCCESS; } template <class CONTAINER> bsl::stack<typename CONTAINER::value_type, CONTAINER>& TestDriver<CONTAINER>::gg(Obj *object, const char *spec) { ASSERTV(ggg(object, spec) < 0); return *object; } template <class CONTAINER> bsl::stack<typename CONTAINER::value_type, CONTAINER> TestDriver<CONTAINER>::g(const char *spec) { Obj object((bslma::Allocator *)0); return gg(&object, spec); } template <class CONTAINER> void TestDriver<CONTAINER>::emptyAndVerify(Obj *obj, const TestValues& testValues, size_t numTestValues, const int LINE) { ASSERTV(LINE, numTestValues, obj->size(), numTestValues == obj->size()); for (int ti = static_cast<int>(numTestValues) - 1; ti >= 0; --ti) { ASSERTV(LINE, testValues[ti], obj->top(), testValues[ti] == obj->top()); obj->pop(); } ASSERTV(LINE, obj->size(), obj->empty()); ASSERTV(LINE, obj->size(), 0 == obj->size()); } template <class CONTAINER> void TestDriver<CONTAINER>::testCase19() { // ------------------------------------------------------------------------ // 'noexcept' SPECIFICATION // // Concerns: //: 1 The 'noexcept' specification has been applied to all class interfaces //: required by the standard. // // Plan: //: 1 Apply the uniary 'noexcept' operator to expressions that mimic those //: appearing in the standard and confirm that calculated boolean value //: matches the expected value. //: //: 2 Since the 'noexcept' specification does not vary with the 'TYPE' //: of the container, we need test for just one general type and any //: 'TYPE' specializations. // // Testing: // CONCERN: Methods qualifed 'noexcept' in standard are so implemented. // ------------------------------------------------------------------------ if (verbose) { P(bsls::NameOf<CONTAINER>()) } // N4594: 23.6.6.1 'stack' definition // page 905: //.. // void swap(stack& s) noexcept(is_nothrow_swappable_v<Container>) // { using std::swap; swap(c, s.c); } //.. 
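    // Each check below applies 'BSLS_KEYWORD_NOEXCEPT_OPERATOR' to a call
    // expression mimicking the standard declaration quoted above; on a
    // compiler supporting 'noexcept' this amounts to evaluating, e.g.:
    //..
    //  Obj c, s;
    //  const bool isNothrow = noexcept(c.swap(s));  // queried, not executed
    //..
    // and the assertions below expect 'false' for the containers under test.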
{ Obj c; Obj s; ASSERT(false == BSLS_KEYWORD_NOEXCEPT_OPERATOR(c.swap(s))); } // page 905 //.. // template <class T, class Container> // void swap(stack<T, Container>& x, stack<T, Container>& y) // noexcept(noexcept(x.swap(y))); //.. { Obj x; Obj y; ASSERT(false == BSLS_KEYWORD_NOEXCEPT_OPERATOR(swap(x, y))); } } #if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES template <class CONTAINER> void TestDriver<CONTAINER>::testCase18MoveOnlyType() { // ------------------------------------------------------------------------ // MOVE MANIPULATORS FOR MOVE ONLY TYPES // // Concerns: //: 1 The implementation of the move manipulator methods do not rely on //: the (non-existent) copy construction or copy assignment methods of //: the contained type. // // Plan: //: 1 Instantiate this test method for the instrumented helper container //: class, 'MovableVector', using 'bsltf::MoveOnlyAllocTestType' for the //: contained value type. //: //: 2 Recast the tests of 'testCase18' so there is no reliance on copy //: construction or copy assignment. // // Testing: // operator=(MovableRef queue) // emplace(Args&&.. args) // push(MovableRef value) // ------------------------------------------------------------------------ enum { k_MAX_NUM_PARAMS = 10 }; typedef typename CONTAINER::value_type VALUE; const int TYPE_ALLOC = bslma::UsesBslmaAllocator<VALUE>::value; const bool is_special_container = SpecialContainerTrait<CONTAINER>::is_special_container; const bool is_copy_constructible = bsl::is_copy_constructible<VALUE>::value; if (verbose) { P_(bsls::NameOf<CONTAINER>()) P_(bsls::NameOf<VALUE>()) P_(is_special_container) P_(is_copy_constructible) P (TYPE_ALLOC) } ASSERT( is_special_container); ASSERT(!is_copy_constructible); if (verbose) { printf("Movable 'push'"); } { const CalledMethod expectedPushMethod = e_PUSH_BACK_MOVE; const int count = 3; Obj mX; const Obj& X = mX; // test object for 'push' for (int i = 0; i < count; ++i) { if (veryVerbose) { P(i) } static VALUE value0(VALUE(0)); setupCalledMethodCheck(); mX.push(MoveUtil::move(VALUE(i))); ASSERT(isCalledMethodCheckPassed<CONTAINER>(expectedPushMethod)); ASSERT(value0 == X.front()); ASSERT(VALUE(i) == X.back()); } } if (verbose) { printf("Movable 'operator='"); } { const CalledMethod expectedAssignMethod = e_ASSIGN_MOVE; const int count = 3; for (int i = 0; i < count; ++i) { if (veryVerbose) { P(i) } Obj mX; const Obj& X = mX; Obj mY; const Obj& Y = mY; for (int j = 0; j < i; ++j) { mX.push(VALUE(j)); mY.push(VALUE(j)); } Obj mZ; const Obj& Z = mZ; setupCalledMethodCheck(); mZ = MoveUtil::move(mX); ASSERTV( i, bsls::NameOf<CONTAINER>(), expectedAssignMethod, getCalledMethod(), isCalledMethodCheckPassed<CONTAINER>(expectedAssignMethod)); ASSERT(Y == Z); } } if (verbose) { printf("'emplace'"); } { Obj mA; const Obj& A = mA; // test object for 'emplace' Obj mB; const Obj& B = mB; // control object for 'emplace' (void) A; // Compiler warnings suppression. (void) B; // Compiler warnings suppression. 
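        // The expected flag for an 'emplace' forwarding 'N' arguments is
        // computed by shifting 'e_EMPLACE_0' left by 'N', which relies on the
        // 'e_EMPLACE_*' enumerators being consecutive powers of two (a sketch
        // of the encoding, under that assumption):
        //..
        //  CalledMethod expected = static_cast<CalledMethod>(
        //                              static_cast<int>(e_EMPLACE_0) << 3);
        //  ASSERT(expected == e_EMPLACE_3);  // three-argument 'emplace'
        //..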
for (int numArgs = 0; numArgs < k_MAX_NUM_PARAMS; ++numArgs) { if (veryVerbose) { P(numArgs) } VALUE *addressOfResult = 0; CalledMethod expectedEmplacePush = static_cast<CalledMethod>(static_cast<int>(e_EMPLACE_0) << numArgs); setupCalledMethodCheck(); switch (numArgs) { case 0: { VALUE& result = mA.emplace(); addressOfResult = bsls::Util::addressOf(result); } break; case 1: { VALUE& result = mA.emplace(0); addressOfResult = bsls::Util::addressOf(result); } break; case 2: { VALUE& result = mA.emplace(0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 3: { VALUE& result = mA.emplace(0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 4: { VALUE& result = mA.emplace(0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 5: { VALUE& result = mA.emplace(0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 6: { VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 7: { VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 8: { VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 9: { VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 10: { VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; default: ASSERT(!"'value' not in range '[0, k_MAX_NUM_PARAMS]'"); } ASSERTV( numArgs, bsls::NameOf<CONTAINER>(), expectedEmplacePush, getCalledMethod(), isCalledMethodCheckPassed<CONTAINER>(expectedEmplacePush)); const VALUE *ADDRESS_OF_TOP_VALUE = bsls::Util::addressOf(A.top()); ASSERTV(numArgs, bsls::NameOf<CONTAINER>(), ADDRESS_OF_TOP_VALUE == addressOfResult); // Track expected value of 'A'. Note that the 'emplace' methods of // '(Non)?MovableVector' append 'VALUE(1)' regardless the number // and value of their arguments. mB.push(VALUE(1)); ASSERTV(A.size(), B.size(), B == A); } } } template <class CONTAINER> void TestDriver<CONTAINER>::testCase17MoveOnlyType() { // ------------------------------------------------------------------------ // MOVE CONSTRUCTORS FOR MOVE ONLY TYPES // // Concerns: //: 1 The implementation of the move constructors do not rely on the //: (non-existent) copy construction and copy assignment methods of the //: contained type. // // Plan: //: 1 Instantiate this test method for the instrumented helper container //: class, 'MovableVector', using 'bsltf::MoveOnlyAllocTestType' for the //: contained value type. //: //: 2 Recast the tests of 'testCase18' so there is no reliance on copy //: construction or copy assignment. 
// // Testing: // queue(MovableRef container); // queue(MovableRef original); // queue(MovableRef container, const ALLOCATOR& allocator); // queue(MovableRef original, const ALLOCATOR& allocator); // ------------------------------------------------------------------------ typedef typename CONTAINER::value_type VALUE; const int TYPE_ALLOC = bslma::UsesBslmaAllocator<VALUE>::value; const bool is_special_container = SpecialContainerTrait<CONTAINER>::is_special_container; const bool is_copy_constructible = bsl::is_copy_constructible<VALUE> ::value; if (verbose) { P_(bsls::NameOf<CONTAINER>()) P_(bsls::NameOf<VALUE>()) P_(is_special_container) P_(is_copy_constructible) P (TYPE_ALLOC) } ASSERT( is_special_container); ASSERT(!is_copy_constructible); { const int NUM_DATA = DEFAULT_NUM_DATA; const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA; const TestValues VALUES; for (int ti = 0; ti < NUM_DATA; ++ti) { const int LINE = DATA[ti].d_line; // source line number const char *const SPEC = DATA[ti].d_spec; if (veryVerbose) { T_ P_(LINE) P(SPEC); } bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::TestAllocator sa("source" , veryVeryVeryVerbose); for (char cfg = 'a'; cfg <= 'e'; ++cfg) { const char CONFIG = cfg; // how we call the constructor if (veryVerbose) { T_ T_ P(CONFIG); } // Create source object Obj *pX = new Obj(&sa); Obj& mX = *pX; const Obj& X = mX; // Create control object Obj mZ; const Obj& Z = mZ; // Create value ('CONTAINER') object CONTAINER mC(&sa); const CONTAINER& C = mC; // Install default allocator. bslma::DefaultAllocatorGuard dag(&da); bslma::TestAllocator ta("target", veryVeryVeryVerbose); bslma::TestAllocator fa("footprint", veryVeryVeryVerbose); Obj *objPtr; bslma::TestAllocator *objAllocatorPtr; (void)objAllocatorPtr; setupCalledMethodCheck(); CalledMethod expectedCtor; switch (CONFIG) { case 'a': { objPtr = new (fa) Obj(MoveUtil::move(mX)); objAllocatorPtr = &sa; expectedCtor = e_CTOR_MOV_SANS_ALLOC; } break; case 'b': { objPtr = new (fa) Obj(MoveUtil::move(mX), (bslma::Allocator *)0); objAllocatorPtr = &da; expectedCtor = e_CTOR_MOV_AVEC_ALLOC; } break; case 'c': { objPtr = new (fa) Obj(MoveUtil::move(mX), &ta); objAllocatorPtr = &ta; expectedCtor = e_CTOR_MOV_AVEC_ALLOC; } break; case 'd': { objPtr = new (fa) Obj(MoveUtil::move(mC)); objAllocatorPtr = &sa; expectedCtor = e_CTOR_MOV_SANS_ALLOC; } break; case 'e': { objPtr = new (fa) Obj(MoveUtil::move(mC), (bslma::Allocator *)0); objAllocatorPtr = &da; expectedCtor = e_CTOR_MOV_AVEC_ALLOC; } break; case 'f': { objPtr = new (fa) Obj(MoveUtil::move(mC), &ta); objAllocatorPtr = &ta; expectedCtor = e_CTOR_MOV_AVEC_ALLOC; } break; default: { ASSERTV(LINE, SPEC, CONFIG, !"Bad constructor config."); return; // RETURN } break; } Obj& mY = *objPtr; const Obj& Y = mY; // test object ASSERTV( bsls::NameOf<CONTAINER>(), LINE, SPEC, expectedCtor, getCalledMethod(), true == isCalledMethodCheckPassed<CONTAINER>(expectedCtor)); ASSERTV(LINE, SPEC, CONFIG, sizeof(Obj) == fa.numBytesInUse()); // Reclaim dynamically allocated source object. delete pX; // Reclaim dynamically allocated object under test. fa.deleteObject(objPtr); // Verify all memory is released on object destruction. 
ASSERTV(LINE, SPEC, CONFIG, fa.numBlocksInUse(), 0 == fa.numBlocksInUse()); ASSERTV(LINE, SPEC, CONFIG, ta.numBlocksInUse(), 0 == ta.numBlocksInUse()); } ASSERTV(LINE, SPEC, da.numBlocksInUse(), 0 == da.numBlocksInUse()); ASSERTV(LINE, SPEC, sa.numBlocksInUse(), 0 == sa.numBlocksInUse()); } } } #endif // !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES template <class CONTAINER> template <bool PROPAGATE_ON_CONTAINER_MOVE_ASSIGNMENT_FLAG, bool OTHER_FLAGS> void TestDriver<CONTAINER>:: testCase18_propagate_on_container_move_assignment_dispatch() { typedef typename CONTAINER::value_type VALUE; // Set the three properties of 'bsltf::StdStatefulAllocator' that are not // under test in this test case to 'false'. typedef bsltf::StdStatefulAllocator< VALUE, OTHER_FLAGS, OTHER_FLAGS, OTHER_FLAGS, PROPAGATE_ON_CONTAINER_MOVE_ASSIGNMENT_FLAG> StdAlloc; typedef bsl::deque<VALUE, StdAlloc> CObj; typedef bsl::stack<VALUE, CObj> Obj; const bool PROPAGATE = PROPAGATE_ON_CONTAINER_MOVE_ASSIGNMENT_FLAG; static const char *SPECS[] = { "", "A", "BC", "CDE", }; const int NUM_SPECS = static_cast<const int>(sizeof SPECS / sizeof *SPECS); bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); // Create control and source objects. for (int ti = 0; ti < NUM_SPECS; ++ti) { const char *const ISPEC = SPECS[ti]; const size_t ILENGTH = strlen(ISPEC); TestValues IVALUES(ISPEC); bslma::TestAllocator oas("source", veryVeryVeryVerbose); bslma::TestAllocator oat("target", veryVeryVeryVerbose); StdAlloc mas(&oas); StdAlloc mat(&oat); StdAlloc scratch(&da); const CObj CI(IVALUES.begin(), IVALUES.end(), scratch); const Obj W(CI, scratch); // control // Create target object. for (int tj = 0; tj < NUM_SPECS; ++tj) { const char *const JSPEC = SPECS[tj]; const size_t JLENGTH = strlen(JSPEC); TestValues JVALUES(JSPEC); { Obj mY(CI, mas); const Obj& Y = mY; if (veryVerbose) { T_ P_(ISPEC) P_(Y) P(W) } const CObj CJ(JVALUES.begin(), JVALUES.end(), scratch); Obj mX(CJ, mat); const Obj& X = mX; bslma::TestAllocatorMonitor oasm(&oas); bslma::TestAllocatorMonitor oatm(&oat); Obj *mR = &(mX = MoveUtil::move(mY)); ASSERTV(ISPEC, JSPEC, W, X, W == X); ASSERTV(ISPEC, JSPEC, mR, &mX, mR == &mX); // TBD no 'get_allocator' in 'stack' #if 0 ASSERTV(ISPEC, JSPEC, PROPAGATE, !PROPAGATE == (mat == X.get_allocator())); ASSERTV(ISPEC, JSPEC, PROPAGATE, PROPAGATE == (mas == X.get_allocator())); ASSERTV(ISPEC, JSPEC, mas == Y.get_allocator()); #endif if (PROPAGATE) { ASSERTV(ISPEC, JSPEC, 0 == oat.numBlocksInUse()); } else { ASSERTV(ISPEC, JSPEC, oasm.isInUseSame()); } } ASSERTV(ISPEC, 0 == oas.numBlocksInUse()); ASSERTV(ISPEC, 0 == oat.numBlocksInUse()); } } ASSERTV(0 == da.numBlocksInUse()); } template <class CONTAINER> void TestDriver<CONTAINER>::testCase18_propagate_on_container_move_assignment() { // ------------------------------------------------------------------------ // MOVE-ASSIGNMENT OPERATOR: ALLOCATOR PROPAGATION // // Concerns: //: 1 If the 'propagate_on_container_move_assignment' trait is 'false', the //: allocator used by the target object remains unchanged (i.e., the //: source object's allocator is *not* propagated). //: //: 2 If the 'propagate_on_container_move_assignment' trait is 'true', the //: allocator used by the target object is updated to be a copy of that //: used by the source object (i.e., the source object's allocator *is* //: propagated). //: //: 3 The allocator used by the source object remains unchanged whether or //; not it is propagated to the target object. 
//: //: 4 If the allocator is propagated from the source object to the target //: object, all memory allocated from the target object's original //: allocator is released. //: //: 5 The effect of the 'propagate_on_container_move_assignment' trait is //: independent of the other three allocator propagation traits. // // Plan: //: 1 Specify a set S of object values with varied differences, ordered by //: increasing length, to be used in the following tests. //: //: 2 Create two 'bsltf::StdStatefulAllocator' objects with their //: 'propagate_on_container_move_assignment' property configured to //: 'false'. In two successive iterations of P-3, first configure the //: three properties not under test to be 'false', then configure them //: all to be 'true'. //: //: 3 For each value '(x, y)' in the cross product S x S: (C-1) //: //: 1 Initialize an object 'X' from 'x' using one of the allocators from //: P-2. //: //: 2 Initialize two objects from 'y', a control object 'W' using a //: scratch allocator and an object 'Y' using the other allocator from //: P-2. //: //: 3 Move-assign 'Y' to 'X' and use 'operator==' to verify that 'X' //: subsequently has the same value as 'W'. //: //: 4 Use the 'get_allocator' method to verify that the allocator of 'Y' //: is *not* propagated to 'X' and that the allocator used by 'Y' //: remains unchanged. (C-1) //: //: 4 Repeat P-2..3 except that this time configure the allocator property //: under test to 'true' and verify that the allocator of 'Y' *is* //: propagated to 'X'. Also verify that all memory is released to the //: allocator that was in use by 'X' prior to the assignment. (C-2..5) // // Testing: // propagate_on_container_move_assignment // ------------------------------------------------------------------------ if (verbose) printf("\nMOVE-ASSIGNMENT OPERATOR: ALLOCATOR PROPAGATION" "\n===============================================\n"); if (verbose) printf("\n'propagate_on_container_move_assignment::value == false'\n"); testCase18_propagate_on_container_move_assignment_dispatch<false, false>(); testCase18_propagate_on_container_move_assignment_dispatch<false, true>(); if (verbose) printf("\n'propagate_on_container_move_assignment::value == true'\n"); testCase18_propagate_on_container_move_assignment_dispatch<true, false>(); testCase18_propagate_on_container_move_assignment_dispatch<true, true>(); } template <class CONTAINER> void TestDriver<CONTAINER>::testCase18(bool isMovableContainer) { // ------------------------------------------------------------------------ // MOVE MANIPULATORS: // // Concerns: //: 1 Each of the methods under test correctly forwards its arguments //: to the corresponding method of the underlying 'CONTAINER' when //: that container provides those "move" methods, and to the expected //: alternate methods otherwise. //: //: 2 The reference returned from the assignment operator is to the target //: object (i.e., '*this'). //: //: 3 'emplace_back' returns a reference to the inserted element. // // Plan: //: 1 Instantiate this test method for the two instrumented helper //: container classes: 'NonMovableVector' and 'MovableVector'. //: //: 2 Use loop-based tests that iterate for a small number of values. //: Use 3 different values for the 'push' and assignment tests. The //: 'emplace' tests a different number of parameters on each test. //: Those require 10 iterations to address each of the 10 overloads //: used when CPP11 support is not available. 
//: //: 3 For each test create a "control" object that has the expected //: value of the object under test. Create the control object using //: the previously tested (non-moveable) 'push' method. //: //: 4 Invoke the method under test on the object under test. Confirm //: that the expected enumerated value was set in the global variable. //: Confirm that the test object has the expected value. Confirm that //: the expected value is returned (if any). // // Testing: // operator=(MovableRef stack) // emplace(Args&&.. args) // push(MovableRef value) // ------------------------------------------------------------------------ typedef typename CONTAINER::value_type VALUE; enum { k_MAX_NUM_PARAMS = 10 }; const int TYPE_ALLOC = bslma::UsesBslmaAllocator<VALUE>::value; const bool is_special_container = SpecialContainerTrait<CONTAINER>::is_special_container; const TestValues VALUES; if (verbose) { P_(bsls::NameOf<CONTAINER>()) P_(bsls::NameOf<VALUE>()) P_(is_special_container) P (TYPE_ALLOC) } ASSERT(is_special_container); if (verbose) { printf("Movable 'push'"); } { Obj mX; const Obj& X = mX; // test object for 'push' Obj mY; const Obj& Y = mY; // control object for 'push' CalledMethod expectedPushMethod = isMovableContainer ? e_PUSH_BACK_MOVE : e_PUSH_BACK_CREF; for (int i = 0; i < 3; ++i) { if (veryVerbose) { P(i) } VALUE value = VALUES[i]; VALUE valueToBeMoved = value; setupCalledMethodCheck(); mX.push(MoveUtil::move(valueToBeMoved)); ASSERT(isCalledMethodCheckPassed<CONTAINER>(expectedPushMethod)); setupCalledMethodCheck(); mY.push( value); ASSERT(isCalledMethodCheckPassed<CONTAINER>(e_PUSH_BACK_CREF)); ASSERT(Y == X); } } if (verbose) { printf("Movable 'operator='"); } { CalledMethod expectedAssignMethod = isMovableContainer ? e_ASSIGN_MOVE : e_ASSIGN_CREF; Obj mX; const Obj& X = mX; // test object for 'push' for (int i = 0; i < 3; ++i) { if (veryVerbose) { P(i) } VALUE value = VALUES[i]; Obj mU; const Obj& U = mU; // test object Obj mV; const Obj& V = mV; // control object mX.push(value); Obj mT(X); // sacrifice object Obj *mR = 0; setupCalledMethodCheck(); mR = &(mU = MoveUtil::move(mT)); ASSERTV(bsls::Util::addressOf(U) == mR); ASSERTV( i, bsls::NameOf<CONTAINER>(), expectedAssignMethod, getCalledMethod(), isCalledMethodCheckPassed<CONTAINER>(expectedAssignMethod)); ASSERT(U == X); setupCalledMethodCheck(); mV = X; ASSERTV( i, bsls::NameOf<CONTAINER>(), expectedAssignMethod, getCalledMethod(), isCalledMethodCheckPassed<CONTAINER>(e_ASSIGN_CREF)); ASSERT(V == X); ASSERT(U == V); } } if (verbose) { printf("'emplace'"); } { Obj mA; const Obj& A = mA; // test object for 'emplace' Obj mB; const Obj& B = mB; // control object for 'emplace' for (int value = 0; value < k_MAX_NUM_PARAMS; ++value) { if (veryVerbose) { P(value) } CalledMethod expectedEmplacePush = static_cast<CalledMethod>(static_cast<int>(e_EMPLACE_0) << value); setupCalledMethodCheck(); VALUE *addressOfResult = 0; switch (value) { case 0: { VALUE& result = mA.emplace(); addressOfResult = bsls::Util::addressOf(result); } break; case 1: { VALUE& result = mA.emplace(0); addressOfResult = bsls::Util::addressOf(result); } break; case 2: { VALUE& result = mA.emplace(0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 3: { VALUE& result = mA.emplace(0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 4: { VALUE& result = mA.emplace(0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 5: { VALUE& result = mA.emplace(0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); 
} break; case 6: { VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 7: { VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 8: { VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 9: { VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; case 10: { VALUE& result = mA.emplace(0, 0, 0, 0, 0, 0, 0, 0, 0, 0); addressOfResult = bsls::Util::addressOf(result); } break; default: ASSERT(!"'value' not in range '[0, k_MAX_NUM_PARAMS]'"); } const VALUE *ADDRESS_OF_TOP = bsls::Util::addressOf(A.top()); ASSERTV(ADDRESS_OF_TOP == addressOfResult); ASSERTV( value, bsls::NameOf<CONTAINER>(), expectedEmplacePush, getCalledMethod(), isCalledMethodCheckPassed<CONTAINER>(expectedEmplacePush)); // Track expected value of 'A'. Note that the 'emplace' methods of // '(Non)?MovableVector' append 'VALUE(1)' regardless the number // and value of their arguments. mB.push(VALUE(1)); ASSERTV(A.size(), B.size(), B == A); } } } template <class CONTAINER> void TestDriver<CONTAINER>::testCase17(bool isMovableContainer) { // ------------------------------------------------------------------------ // MOVE CONSTRUCTORS: // Ensure that we can construct any object of the class, having other // object of the class as the source. To provide backward compatibility, // copy copnstructor should be used in the absence of move constructor. // We are going to use two special containers 'NonMovableVector' and // 'MovableVector', that register called method, to verify it. // // Concerns: //: 1 Appropriate constructor of underlying container (move or copy) is //: called. //: //: 2 The new object has the same value as the source object. //: //: 3 All internal representations of a given value can be used to create a //: new object of equivalent value. //: //: 4 The source object is left in a valid but unspecified state. //: //: 5 No additional memory is allocated by the target object. //: //: 5 If an allocator is NOT supplied to the constructor, the //: allocator of the source object in effect at the time of construction //: becomes the object allocator for the resulting object. //: //: 6 If an allocator IS supplied to the constructor, that //: allocator becomes the object allocator for the resulting object. //: //: 7 If a null allocator address IS supplied to the constructor, the //: default allocator in effect at the time of construction becomes //: the object allocator for the resulting object. //: //: 8 Supplying an allocator to the constructor has no effect on subsequent //: object values. //: //: 9 Subsequent changes to or destruction of the source object have no //: effect on the move-constructed object and vice-versa. //: //:10 Every object releases any allocated memory at destruction. // // Plan: //: 1 Using the table-driven technique: //: //: 1 Specify a set of (unique) valid source object values. //: //: 2 Specify a set of (unique) valid value ('CONTAINER') objects. 
//: //: 2 For each row (representing a distinct object value, 'V') in the table //: described in P-1: //: //: 1 Execute an inner loop creating three distinct objects, in turn, //: each object having the same value, 'V', but configured differently //: identified by 'CONFIG': //: //: 'a': passing a source object without passing an allocator; //: //: 'b': passing a source object and an explicit null allocator; //: //: 'c': passing a source object and the address of a test allocator //: distinct from the default and source object's allocators. //: //: 'd': passing a value object without passing an allocator; //: //: 'e': passing a value object and an explicit null allocator; //: //: 'f': passing a value object and the address of a test allocator //: distinct from the default and source object's allocators. //: //: 2 For each of the four iterations in P-2.1: //: //: 1 Use the value constructor with 'sa' allocator to create dynamic //: source object 'mX' and control object 'mZ', each having the value //: 'V'. //: //: 2 Create a 'bslma_TestAllocator' object, and install it as the //: default allocator (note that a ubiquitous test allocator is //: already installed as the global allocator). //: //: 3 Choose the move constructor depending on 'CONFIG' to dynamically //: create an object, 'mY', using movable reference of 'mX'. //: //: 4 Verify that the appropriate constructor of underlying container //: has been called. Note that this check is skipped for all classes //: except special containers 'NonMovableVector' and 'MovableVector'. //: (C-1) //: //: 5 Use the appropriate test allocator to verify that no additional //: memory is allocated by the target object. (C-5) //: //: 6 Use the helper function 'use_same_allocator' to verify each //: underlying attribute capable of allocating memory to ensure //: that its object allocator is properly installed. (C-6..9) //: //: 7 Use the helper function 'use_same_comparator' to verify that the //: target object, 'mY', has the same comparator as that of 'mZ', to //: ensure that new object comprator is properly installed. (C-2..3) //: //: 8 Add some values to the source and target object separately. //: Verify that they change independently. Destroy source object. //: Verify that target object is unaffected. (C-4, 10) //: //: 9 Delete the target object and let the control object go out of //: scope to verify, that all memory has been released. 
    //:       (C-11)
    //
    // Testing:
    //   stack(MovableRef container)
    //   stack(MovableRef container, bslma::Allocator *bA)
    //   stack(MovableRef stack)
    //   stack(MovableRef stack, bslma::Allocator *bA)
    // ------------------------------------------------------------------------

    typedef typename CONTAINER::value_type VALUE;

    const int  TYPE_ALLOC = bslma::UsesBslmaAllocator<VALUE>::value;

    const bool is_special_container =
                        SpecialContainerTrait<CONTAINER>::is_special_container;

    if (verbose) {
        P_(bsls::NameOf<CONTAINER>()) P_(bsls::NameOf<VALUE>())
        P_(is_special_container)      P(TYPE_ALLOC)
    }

    {
        ASSERT(is_special_container);

        const int NUM_DATA                     = DEFAULT_NUM_DATA;
        const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA;
        const TestValues VALUES;

        for (int ti = 0; ti < NUM_DATA; ++ti) {
            const int         LINE = DATA[ti].d_line;  // source line number
            const char *const SPEC = DATA[ti].d_spec;

            if (veryVerbose) {
                T_ P_(LINE) P(SPEC);
            }

            bslma::TestAllocator da("default", veryVeryVeryVerbose);
            bslma::TestAllocator sa("source",  veryVeryVeryVerbose);

            for (char cfg = 'a'; cfg <= 'f'; ++cfg) {
                const char CONFIG = cfg;  // how we call the constructor

                if (veryVerbose) {
                    T_ T_ P(CONFIG);
                }

                // Create source object
                Obj        *pX = new Obj(&sa);
                Obj&        mX = gg(pX, SPEC);
                const Obj&  X  = mX;

                // Create control object
                Obj        mZ;
                const Obj& Z = gg(&mZ, SPEC);

                // Create value ('CONTAINER') object
                CONTAINER        mC(&sa);
                const CONTAINER& C = CONTAINER::GG(&mC, SPEC);

                // Install default allocator.
                bslma::DefaultAllocatorGuard dag(&da);

                bslma::TestAllocator ta("target",    veryVeryVeryVerbose);
                bslma::TestAllocator fa("footprint", veryVeryVeryVerbose);

                Obj                  *objPtr;
                bslma::TestAllocator *objAllocatorPtr;

                setupCalledMethodCheck();

                CalledMethod expectedCtor;

                switch (CONFIG) {
                  case 'a': {
                    objPtr = new (fa) Obj(MoveUtil::move(mX));
                    objAllocatorPtr = isMovableContainer ? &sa : &da;
                    expectedCtor    = isMovableContainer
                                          ? e_CTOR_MOV_SANS_ALLOC
                                          : e_CTOR_CPY_SANS_ALLOC;
                  } break;
                  case 'b': {
                    objPtr = new (fa) Obj(MoveUtil::move(mX),
                                          (bslma::Allocator *)0);
                    objAllocatorPtr = &da;
                    expectedCtor    = isMovableContainer
                                          ? e_CTOR_MOV_AVEC_ALLOC
                                          : e_CTOR_CPY_AVEC_ALLOC;
                  } break;
                  case 'c': {
                    objPtr = new (fa) Obj(MoveUtil::move(mX), &ta);
                    objAllocatorPtr = &ta;
                    expectedCtor    = isMovableContainer
                                          ? e_CTOR_MOV_AVEC_ALLOC
                                          : e_CTOR_CPY_AVEC_ALLOC;
                  } break;
                  case 'd': {
                    objPtr = new (fa) Obj(MoveUtil::move(mC));
                    objAllocatorPtr = isMovableContainer ? &sa : &da;
                    expectedCtor    = isMovableContainer
                                          ? e_CTOR_MOV_SANS_ALLOC
                                          : e_CTOR_CPY_SANS_ALLOC;
                  } break;
                  case 'e': {
                    objPtr = new (fa) Obj(MoveUtil::move(mC),
                                          (bslma::Allocator *)0);
                    objAllocatorPtr = &da;
                    expectedCtor    = isMovableContainer
                                          ? e_CTOR_MOV_AVEC_ALLOC
                                          : e_CTOR_CPY_AVEC_ALLOC;
                  } break;
                  case 'f': {
                    objPtr = new (fa) Obj(MoveUtil::move(mC), &ta);
                    objAllocatorPtr = &ta;
                    expectedCtor    = isMovableContainer
                                          ? e_CTOR_MOV_AVEC_ALLOC
                                          : e_CTOR_CPY_AVEC_ALLOC;
                  } break;
                  default: {
                    ASSERTV(LINE, SPEC, CONFIG, !"Bad constructor config.");
                    return;                                           // RETURN
                  } break;
                }

                Obj&       mY = *objPtr;
                const Obj& Y  = mY;

                ASSERTV(
                   bsls::NameOf<CONTAINER>(),
                   LINE,
                   SPEC,
                   expectedCtor,
                   getCalledMethod(),
                   true == isCalledMethodCheckPassed<CONTAINER>(expectedCtor));

                ASSERTV(LINE, SPEC, CONFIG, sizeof(Obj) == fa.numBytesInUse());

                // Verify that the contents were moved correctly.

                ASSERTV(LINE, SPEC, CONFIG, Z == Y);

                // Verify any attribute allocators are installed properly.

                ASSERTV(LINE, SPEC, CONFIG, use_same_allocator(
                                                             mY,
                                                             TYPE_ALLOC,
                                                             objAllocatorPtr));

                // Verify independence of the target object from the source
                // one.
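                // (Push onto the source only and confirm that just the
                // source's size changes; then push onto the target only and
                // confirm that just the target's size changes; finally,
                // destroy the source and confirm that the target's top
                // element and size are intact.)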
size_t sourceSize = X.size(); size_t targetSize = Y.size(); mX.push(VALUES[0]); ASSERTV(LINE, SPEC, CONFIG, sourceSize != X.size()); ASSERTV(LINE, SPEC, CONFIG, targetSize == Y.size()); sourceSize = X.size(); mY.push(VALUES[0]); ASSERTV(LINE, SPEC, CONFIG, sourceSize == X.size()); ASSERTV(LINE, SPEC, CONFIG, targetSize != Y.size()); targetSize = Y.size(); const VALUE top = Y.top(); // Reclaim dynamically allocated source object. delete pX; ASSERTV(LINE, SPEC, CONFIG, top == Y.top()); ASSERTV(LINE, SPEC, CONFIG, targetSize == Y.size()); // Reclaim dynamically allocated object under test. fa.deleteObject(objPtr); // Verify all memory is released on object destruction. ASSERTV(LINE, SPEC, CONFIG, fa.numBlocksInUse(), 0 == fa.numBlocksInUse()); ASSERTV(LINE, SPEC, CONFIG, ta.numBlocksInUse(), 0 == ta.numBlocksInUse()); } ASSERTV(LINE, SPEC, da.numBlocksInUse(), 0 == da.numBlocksInUse()); ASSERTV(LINE, SPEC, sa.numBlocksInUse(), 0 == sa.numBlocksInUse()); } } } template <class CONTAINER> void TestDriver<CONTAINER>::testCase12() { // ------------------------------------------------------------------------ // TESTING INEQUALITY OPERATORS // // Concern: // That the inequality operators function correctly. // // Plan: // Load 2 stack objects according to two SPEC's via the 'ggg' function, // and compare them. It turns out that 'strcmp' comparing the two // 'SPEC's will correspond directly to the result of inequality // operators, which is very convenient. // // Repeat the test a second time, with the second stack object created // with a different allocator than the first, to verify that creation // via different allocators has no impact on value. // ------------------------------------------------------------------------ const char *cont = ContainerName<container_type>::name(); const int NUM_DATA = DEFAULT_NUM_DATA; const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA; bslma::TestAllocator ta("testA", veryVeryVeryVerbose); bslma::TestAllocator tb("testB", veryVeryVeryVerbose); bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); if (veryVerbose) printf(" %s ---------------------------", cont); { // Create first object for (int ti = 0; ti < NUM_DATA; ++ti) { const char *const SPECX = DATA[ti].d_spec; Obj mX(&ta); const Obj& X = gg(&mX, SPECX); for (int tj = 0; tj < NUM_DATA; ++tj) { const char *const SPECY = DATA[tj].d_spec; Obj mY(&ta); const Obj& Y = gg(&mY, SPECY); const int CMP = ti == tj ? 0 : strcmp(SPECX, SPECY) > 0 ? 
1 : -1; const bool EQ = X == Y; const bool NE = X != Y; const bool LT = X < Y; const bool LE = X <= Y; const bool GT = X > Y; const bool GE = X >= Y; ASSERTV(cont, SPECX, SPECY, EQ == (Y == X)); ASSERTV(cont, SPECX, SPECY, NE == (Y != X)); ASSERTV(cont, SPECX, SPECY, LT == (Y > X)); ASSERTV(cont, SPECX, SPECY, LE == (Y >= X)); ASSERTV(cont, SPECX, SPECY, GT == (Y < X)); ASSERTV(cont, SPECX, SPECY, GE == (Y <= X)); ASSERTV(cont, SPECX, SPECY, LT == !GE); ASSERTV(cont, SPECX, SPECY, GT == !LE); ASSERTV(cont, SPECX, SPECY, !(LT && GT)); ASSERTV(cont, SPECX, SPECY, LE || GE); if (0 == CMP) { ASSERTV(cont, SPECX, SPECY, !LT && !GT); ASSERTV(cont, SPECX, SPECY, LE && GE); } else { ASSERTV(cont, SPECX, SPECY, LT || GT); } ASSERTV(cont, SPECX, SPECY, CMP, (CMP < 0) == LT); ASSERTV(cont, SPECX, SPECY, CMP, (CMP < 0) == !GE); ASSERTV(cont, SPECX, SPECY, CMP, !((CMP == 0) && LT)); ASSERTV(cont, SPECX, SPECY, CMP, !((CMP == 0) && GT)); ASSERTV(cont, SPECX, SPECY, CMP, (CMP > 0) == GT); ASSERTV(cont, SPECX, SPECY, CMP, (CMP > 0) == !LE); ASSERTV(cont, SPECX, SPECY, CMP, (CMP == 0) == EQ); ASSERTV(cont, SPECX, SPECY, CMP, (CMP != 0) == NE); } // Do it all over again, this time using a different allocator // for 'mY' to verify changing the allocator has no impact on // comparisons. Note we are re-testing the equality comparators // so this memory allocation aspect is tested for them too. for (int tj = 0; tj < NUM_DATA; ++tj) { const char *const SPECY = DATA[tj].d_spec; Obj mY(g(SPECY), &tb); const Obj& Y = mY; const int CMP = ti == tj ? 0 : strcmp(SPECX, SPECY) > 0 ? 1 : -1; const bool EQ = X == Y; const bool NE = X != Y; const bool LT = X < Y; const bool LE = X <= Y; const bool GT = X > Y; const bool GE = X >= Y; ASSERTV(cont, SPECX, SPECY, EQ == (Y == X)); ASSERTV(cont, SPECX, SPECY, NE == (Y != X)); ASSERTV(cont, SPECX, SPECY, LT == (Y > X)); ASSERTV(cont, SPECX, SPECY, LE == (Y >= X)); ASSERTV(cont, SPECX, SPECY, GT == (Y < X)); ASSERTV(cont, SPECX, SPECY, GE == (Y <= X)); ASSERTV(cont, SPECX, SPECY, LT == !GE); ASSERTV(cont, SPECX, SPECY, GT == !LE); ASSERTV(cont, SPECX, SPECY, !(LT && GT)); ASSERTV(cont, SPECX, SPECY, LE || GE); if (EQ) { ASSERTV(cont, SPECX, SPECY, !LT && !GT); ASSERTV(cont, SPECX, SPECY, LE && GE); } else { ASSERTV(cont, SPECX, SPECY, LT || GT); } ASSERTV(cont, SPECX, SPECY, CMP, (CMP < 0) == LT); ASSERTV(cont, SPECX, SPECY, CMP, (CMP < 0) == !GE); ASSERTV(cont, SPECX, SPECY, CMP, !((CMP == 0) && LT)); ASSERTV(cont, SPECX, SPECY, CMP, !((CMP == 0) && GT)); ASSERTV(cont, SPECX, SPECY, CMP, (CMP > 0) == GT); ASSERTV(cont, SPECX, SPECY, CMP, (CMP > 0) == !LE); ASSERTV(cont, SPECX, SPECY, CMP, (CMP == 0) == EQ); ASSERTV(cont, SPECX, SPECY, CMP, (CMP != 0) == NE); } } } } template <class CONTAINER> void TestDriver<CONTAINER>::testCase11() { // ------------------------------------------------------------------------ // TESTING TYPE TRAITS // // Concern: //: 1 The object has the necessary type traits. // // Plan: //: 1 Use 'BSLMF_ASSERT' to verify all the type traits exists. (C-1) // // Testing: // CONCERN: The object has the necessary type traits // ------------------------------------------------------------------------ // Verify set defines the expected traits. enum { CONTAINER_USES_ALLOC = bslma::UsesBslmaAllocator<CONTAINER>::value }; BSLMF_ASSERT( ((int) CONTAINER_USES_ALLOC == bslma::UsesBslmaAllocator<Obj>::value)); // Verify stack does not define other common traits. 
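    // In particular, 'bsl::stack' should not advertise STL iterators,
    // trivial copyability, bitwise equality comparability, bitwise
    // moveability, pointer semantics, or trivial default constructibility;
    // the absence of each trait is asserted at compile time below.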
    BSLMF_ASSERT((0 == bslalg::HasStlIterators<Obj>::value));
    BSLMF_ASSERT((0 == bsl::is_trivially_copyable<Obj>::value));
    BSLMF_ASSERT((0 == bslmf::IsBitwiseEqualityComparable<Obj>::value));
    BSLMF_ASSERT((0 == bslmf::IsBitwiseMoveable<Obj>::value));
    BSLMF_ASSERT((0 == bslmf::HasPointerSemantics<Obj>::value));
    BSLMF_ASSERT((0 == bsl::is_trivially_default_constructible<Obj>::value));
}

template <class CONTAINER>
void TestDriver<CONTAINER>::testCase10()
{
    // ------------------------------------------------------------------------
    // TESTING BSLMA ALLOCATOR
    //
    // Concern:
    //: 1 A standard-compliant allocator can be used instead of
    //:   'bsl::allocator'.
    //:
    //: 2 Methods that use the allocator (e.g., variations of the
    //:   constructor, 'insert', and 'swap') can successfully populate the
    //:   object.
    //:
    //: 3 'KEY' types that allocate memory use the default allocator instead
    //:   of the object allocator.
    //:
    //: 4 Every object releases any allocated memory at destruction.
    //
    // Plan:
    //: 1 Using a loop-based approach, create a list of specs and their
    //:   expected values.  For each spec:
    //:
    //:   1 Create an object using a standard allocator through multiple
    //:     ways, including: range-based constructor, copy constructor,
    //:     range-based insert, multiple inserts, and swap.
    //:
    //:   2 Verify the value of each object is as expected.
    //:
    //:   3 For types that allocate memory, verify memory for the elements
    //:     comes from the default allocator.
    //
    // Testing:
    //   CONCERN: 'stack' is compatible with a standard allocator.
    // ------------------------------------------------------------------------

    const char *cont = ContainerName<container_type>::name();

    const size_t NUM_DATA                  = DEFAULT_NUM_DATA;
    const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA;

    bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose);

    for (size_t ti = 0; ti < NUM_DATA; ++ti) {
        const int         LINE   = DATA[ti].d_line;
        const char *const SPEC   = DATA[ti].d_spec;
        const size_t      LENGTH = strlen(DATA[ti].d_spec);
        const TestValues  EXP(DATA[ti].d_spec, &scratch);

        TestValues values(SPEC, &scratch);

        bslma::TestAllocator        ta("test", veryVeryVeryVerbose);
        bslma::TestAllocatorMonitor tam(&ta);

        bslma::TestAllocator         da("default", veryVeryVeryVerbose);
        bslma::DefaultAllocatorGuard dag(&da);
        bslma::TestAllocatorMonitor  dam(&da);

        {
            container_type tmpCont(&ta);
            for (size_t tk = 0; tk < LENGTH; ++tk) {
                tmpCont.push_back(values[tk]);
            }

            Obj mX(tmpCont, &ta);  const Obj& X = mX;

            verifyStack(X, EXP, LENGTH, L_, &ta);

            Obj mY(X, &ta);  const Obj& Y = mY;

            verifyStack(Y, EXP, LENGTH, L_, &ta);

            Obj mZ(&ta);  const Obj& Z = mZ;

            mZ.swap(mX);

            verifyStack(Z, EXP, LENGTH, L_, &ta);

            ASSERTV(LINE, X.empty());
            ASSERTV(LINE, 0 == X.size());
        }

        ASSERT(tam.isTotalUp() || 0 == LENGTH);
        ASSERT(tam.isInUseSame());
        tam.reset();

        {
            Obj mX(&ta);  const Obj& X = mX;

            for (size_t tj = 0; tj < LENGTH; ++tj) {
                mX.push(values[tj]);

                ASSERTV(LINE, tj, LENGTH, values[tj] == X.top());
            }

            verifyStack(X, EXP, LENGTH, L_, &ta);
        }

        ASSERT(tam.isTotalUp() || 0 == LENGTH);
        ASSERT(tam.isInUseSame());
        ASSERT(dam.isTotalSame());

        {
            container_type tmpCont;
            for (size_t tk = 0; tk < LENGTH; ++tk) {
                tmpCont.push_back(values[tk]);
            }

            Obj mX(tmpCont);  const Obj& X = mX;

            verifyStack(X, EXP, LENGTH, L_);

            Obj mY(X);  const Obj& Y = mY;

            verifyStack(Y, EXP, LENGTH, L_);

            Obj mZ;  const Obj& Z = mZ;

            mZ.swap(mX);

            verifyStack(Z, EXP, LENGTH, L_);

            ASSERTV(LINE, X.empty());
            ASSERTV(LINE, 0 == X.size());
        }

        ASSERTV(cont, dam.isTotalUp() == (emptyWillAlloc() || LENGTH > 0));
        dam.reset();

        {
            Obj mX;  const Obj& X = mX;

            for (size_t tj = 0; tj < LENGTH; ++tj)
{ mX.push(values[tj]); ASSERTV(LINE, tj, LENGTH, values[tj] == X.top()); } verifyStack(X, EXP, LENGTH, L_); } ASSERTV(cont, dam.isTotalUp() == (emptyWillAlloc() || LENGTH > 0)); ASSERTV(LINE, da.numBlocksInUse(), 0 == da.numBlocksInUse()); } } template <class CONTAINER> template <bool PROPAGATE_ON_CONTAINER_COPY_ASSIGNMENT_FLAG, bool OTHER_FLAGS> void TestDriver<CONTAINER>:: testCase9_propagate_on_container_copy_assignment_dispatch() { typedef typename CONTAINER::value_type VALUE; // Set the three properties of 'bsltf::StdStatefulAllocator' that are not // under test in this test case to 'false'. typedef bsltf::StdStatefulAllocator< VALUE, OTHER_FLAGS, PROPAGATE_ON_CONTAINER_COPY_ASSIGNMENT_FLAG, OTHER_FLAGS, OTHER_FLAGS> StdAlloc; typedef bsl::deque<VALUE, StdAlloc> CObj; typedef bsl::stack<VALUE, CObj> Obj; const bool PROPAGATE = PROPAGATE_ON_CONTAINER_COPY_ASSIGNMENT_FLAG; static const char *SPECS[] = { "", "A", "BC", "CDE", }; const int NUM_SPECS = static_cast<const int>(sizeof SPECS / sizeof *SPECS); bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); // Create control and source objects. for (int ti = 0; ti < NUM_SPECS; ++ti) { const char *const ISPEC = SPECS[ti]; const size_t ILENGTH = strlen(ISPEC); TestValues IVALUES(ISPEC); bslma::TestAllocator oas("source", veryVeryVeryVerbose); bslma::TestAllocator oat("target", veryVeryVeryVerbose); StdAlloc mas(&oas); StdAlloc mat(&oat); StdAlloc scratch(&da); const CObj CI(IVALUES.begin(), IVALUES.end(), scratch); const Obj W(CI, scratch); // control // Create target object. for (int tj = 0; tj < NUM_SPECS; ++tj) { const char *const JSPEC = SPECS[tj]; const size_t JLENGTH = strlen(JSPEC); TestValues JVALUES(JSPEC); { Obj mY(CI, mas); const Obj& Y = mY; if (veryVerbose) { T_ P_(ISPEC) P_(Y) P(W) } const CObj CJ(JVALUES.begin(), JVALUES.end(), scratch); Obj mX(CJ, mat); const Obj& X = mX; bslma::TestAllocatorMonitor oasm(&oas); bslma::TestAllocatorMonitor oatm(&oat); Obj *mR = &(mX = Y); ASSERTV(ISPEC, JSPEC, W, X, W == X); ASSERTV(ISPEC, JSPEC, W, Y, W == Y); ASSERTV(ISPEC, JSPEC, mR, &mX, mR == &mX); // TBD no 'get_allocator' in 'stack' #if 0 ASSERTV(ISPEC, JSPEC, PROPAGATE, !PROPAGATE == (mat == X.get_allocator())); ASSERTV(ISPEC, JSPEC, PROPAGATE, PROPAGATE == (mas == X.get_allocator())); ASSERTV(ISPEC, JSPEC, mas == Y.get_allocator()); #endif if (PROPAGATE) { ASSERTV(ISPEC, JSPEC, 0 == oat.numBlocksInUse()); } else { ASSERTV(ISPEC, JSPEC, oasm.isInUseSame()); } } ASSERTV(ISPEC, 0 == oas.numBlocksInUse()); ASSERTV(ISPEC, 0 == oat.numBlocksInUse()); } } ASSERTV(0 == da.numBlocksInUse()); } template <class CONTAINER> void TestDriver<CONTAINER>::testCase9_propagate_on_container_copy_assignment() { // ------------------------------------------------------------------------ // COPY-ASSIGNMENT OPERATOR: ALLOCATOR PROPAGATION // // Concerns: //: 1 If the 'propagate_on_container_copy_assignment' trait is 'false', the //: allocator used by the target object remains unchanged (i.e., the //: source object's allocator is *not* propagated). //: //: 2 If the 'propagate_on_container_copy_assignment' trait is 'true', the //: allocator used by the target object is updated to be a copy of that //: used by the source object (i.e., the source object's allocator *is* //: propagated). //: //: 3 The allocator used by the source object remains unchanged whether or //; not it is propagated to the target object. 
//: //: 4 If the allocator is propagated from the source object to the target //: object, all memory allocated from the target object's original //: allocator is released. //: //: 5 The effect of the 'propagate_on_container_copy_assignment' trait is //: independent of the other three allocator propagation traits. // // Plan: //: 1 Specify a set S of object values with varied differences, ordered by //: increasing length, to be used in the following tests. //: //: 2 Create two 'bsltf::StdStatefulAllocator' objects with their //: 'propagate_on_container_copy_assignment' property configured to //: 'false'. In two successive iterations of P-3, first configure the //: three properties not under test to be 'false', then configure them //: all to be 'true'. //: //: 3 For each value '(x, y)' in the cross product S x S: (C-1) //: //: 1 Initialize an object 'X' from 'x' using one of the allocators from //: P-2. //: //: 2 Initialize two objects from 'y', a control object 'W' using a //: scratch allocator and an object 'Y' using the other allocator from //: P-2. //: //: 3 Copy-assign 'Y' to 'X' and use 'operator==' to verify that both //: 'X' and 'Y' subsequently have the same value as 'W'. //: //: 4 Use the 'get_allocator' method to verify that the allocator of 'Y' //: is *not* propagated to 'X' and that the allocator used by 'Y' //: remains unchanged. (C-1) //: //: 4 Repeat P-2..3 except that this time configure the allocator property //: under test to 'true' and verify that the allocator of 'Y' *is* //: propagated to 'X'. Also verify that all memory is released to the //: allocator that was in use by 'X' prior to the assignment. (C-2..5) // // Testing: // propagate_on_container_copy_assignment // ------------------------------------------------------------------------ if (verbose) printf("\nCOPY-ASSIGNMENT OPERATOR: ALLOCATOR PROPAGATION" "\n===============================================\n"); if (verbose) printf("\n'propagate_on_container_copy_assignment::value == false'\n"); testCase9_propagate_on_container_copy_assignment_dispatch<false, false>(); testCase9_propagate_on_container_copy_assignment_dispatch<false, true>(); if (verbose) printf("\n'propagate_on_container_copy_assignment::value == true'\n"); testCase9_propagate_on_container_copy_assignment_dispatch<true, false>(); testCase9_propagate_on_container_copy_assignment_dispatch<true, true>(); } template <class CONTAINER> void TestDriver<CONTAINER>::testCase9() { // ------------------------------------------------------------------------ // COPY-ASSIGNMENT OPERATOR: // Ensure that we can assign the value of any object of the class to any // object of the class, such that the two objects subsequently have the // same value. // // Concerns: //: 1 The assignment operator can change the value of any modifiable target //: object to that of any source object. //: //: 2 The allocator address held by the target object is unchanged. //: //: 3 Any memory allocation is from the target object's allocator. //: //: 4 The signature and return type are standard. //: //: 5 The reference returned is to the target object (i.e., '*this'). //: //: 6 The value of the source object is not modified. //: //: 7 The allocator address held by the source object is unchanged. //: //: 8 QoI: Assigning a source object having the default-constructed value //: allocates no memory. //: //: 9 Any memory allocation is exception neutral. //: //:10 Assigning an object to itself behaves as expected (alias-safety). //: //:11 Every object releases any allocated memory at destruction. 
// // Plan: //: 1 Use the address of 'operator=' to initialize a member-function //: pointer having the appropriate signature and return type for the //: copy-assignment operator defined in this component. (C-4) //: //: 2 Create a 'bslma::TestAllocator' object, and install it as the default //: allocator (note that a ubiquitous test allocator is already installed //: as the global allocator). //: //: 3 Using the table-driven technique: //: //: 1 Specify a set of (unique) valid object values. //: //: 4 For each row 'R1' (representing a distinct object value, 'V') in the //: table described in P-3: (C-1..2, 5..8, 11) //: //: 1 Use the value constructor and a "scratch" allocator to create two //: 'const' 'Obj', 'Z' and 'ZZ', each having the value 'V'. //: //: 2 Execute an inner loop that iterates over each row 'R2' //: (representing a distinct object value, 'W') in the table described //: in P-3: //: //: 3 For each of the iterations (P-4.2): (C-1..2, 5..8, 11) //: //: 1 Create a 'bslma::TestAllocator' object, 'oa'. //: //: 2 Use the value constructor and 'oa' to create a modifiable 'Obj', //: 'mX', having the value 'W'. //: //: 3 Assign 'mX' from 'Z' in the presence of injected exceptions //: (using the 'bslma::TestAllocator_EXCEPTION_TEST_*' macros). //: //: 4 Verify that the address of the return value is the same as that //: of 'mX'. (C-5) //: //: 5 Use the equality-comparison operator to verify that: (C-1, 6) //: //: 1 The target object, 'mX', now has the same value as that of 'Z'. //: (C-1) //: //: 2 'Z' still has the same value as that of 'ZZ'. (C-6) //: //: 6 Use the 'allocator' accessor of both 'mX' and 'Z' to verify that //: the respective allocator addresses held by the target and source //: objects are unchanged. (C-2, 7) //: //: 7 Use the appropriate test allocators to verify that: (C-8, 11) //: //: 1 For an object that (a) is initialized with a value that did NOT //: require memory allocation, and (b) is then assigned a value //: that DID require memory allocation, the target object DOES //: allocate memory from its object allocator only (irrespective of //: the specific number of allocations or the total amount of //: memory allocated); also cross check with what is expected for //: 'mX' and 'Z'. //: //: 2 An object that is assigned a value that did NOT require memory //: allocation, does NOT allocate memory from its object allocator; //: also cross check with what is expected for 'Z'. //: //: 3 No additional memory is allocated by the source object. (C-8) //: //: 4 All object memory is released when the object is destroyed. //: (C-11) //: //: 5 Repeat steps similar to those described in P-4 except that, this //: time, there is no inner loop (as in P-4.2); instead, the source //: object, 'Z', is a reference to the target object, 'mX', and both 'mX' //: and 'ZZ' are initialized to have the value 'V'. For each row //: (representing a distinct object value, 'V') in the table described in //: P-3: (C-9) //: //: 1 Create a 'bslma::TestAllocator' object, 'oa'. //: //: 2 Use the value constructor and 'oa' to create a modifiable 'Obj' //: 'mX'; also use the value constructor and a distinct "scratch" //: allocator to create a 'const' 'Obj' 'ZZ'. //: //: 3 Let 'Z' be a reference providing only 'const' access to 'mX'. //: //: 4 Assign 'mX' from 'Z' in the presence of injected exceptions (using //: the 'bslma::TestAllocator_EXCEPTION_TEST_*' macros). (C-9) //: //: 5 Verify that the address of the return value is the same as that of //: 'mX'. 
//: //: 6 Use the equality-comparison operator to verify that the target //: object, 'mX', still has the same value as that of 'ZZ'. //: //: 7 Use the 'allocator' accessor of 'mX' to verify that it is still the //: object allocator. //: //: 8 Use the appropriate test allocators to verify that: //: //: 1 Any memory that is allocated is from the object allocator. //: //: 2 No additional (e.g., temporary) object memory is allocated when //: assigning an object value that did NOT initially require //: allocated memory. //: //: 3 All object memory is released when the object is destroyed. //: //: 6 Use the test allocator from P-2 to verify that no memory is ever //: allocated from the default allocator. (C-3) // // Testing: // set& operator=(const set& rhs); // ------------------------------------------------------------------------ const int NUM_DATA = DEFAULT_NUM_DATA; const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA; bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); if (verbose) printf("\nCompare each pair of similar and different" " values (u, ua, v, va) in S X A X S X A" " without perturbation.\n"); { // Create first object for (int ti = 0; ti < NUM_DATA; ++ti) { const int LINE1 = DATA[ti].d_line; const char *const SPEC1 = DATA[ti].d_spec; bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose); Obj mZ(&scratch); const Obj& Z = gg(&mZ, SPEC1); Obj mZZ(&scratch); const Obj& ZZ = gg(&mZZ, SPEC1); // Ensure the first row of the table contains the // default-constructed value. static bool firstFlag = true; if (firstFlag) { ASSERTV(LINE1, Obj(&scratch) == Z); firstFlag = false; } // Create second object for (int tj = 0; tj < NUM_DATA; ++tj) { const int LINE2 = DATA[tj].d_line; const char *const SPEC2 = DATA[tj].d_spec; bslma::TestAllocator oa("object", veryVeryVeryVerbose); { Obj mX(&oa); const Obj& X = gg(&mX, SPEC2); ASSERTV(LINE1, LINE2, (Z == X) == (LINE1 == LINE2)); bslma::TestAllocatorMonitor oam(&oa), sam(&scratch); BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) { if (veryVeryVerbose) { T_ T_ Q(ExceptionTestBody) } Obj *mR = &(mX = Z); ASSERTV(LINE1, LINE2, Z == X); ASSERTV(LINE1, LINE2, mR == &mX); } BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END ASSERTV(LINE1, LINE2, ZZ == Z); // ASSERTV(LINE1, LINE2, &oa == X.get_allocator()); // ASSERTV(LINE1, LINE2, &scratch == Z.get_allocator()); ASSERTV(LINE1, LINE2, sam.isInUseSame()); ASSERTV(LINE1, LINE2, 0 == da.numBlocksTotal()); } // Verify all memory is released on object destruction. ASSERTV(LINE1, LINE2, oa.numBlocksInUse(), 0 == oa.numBlocksInUse()); } // self-assignment bslma::TestAllocator oa("object", veryVeryVeryVerbose); { bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose); Obj mX(&oa); const Obj& X = gg(&mX, SPEC1); Obj mZZ(&scratch); const Obj& ZZ = gg(&mZZ, SPEC1); const Obj& Z = mX; ASSERTV(LINE1, ZZ == Z); bslma::TestAllocatorMonitor oam(&oa), sam(&scratch); BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) { if (veryVeryVerbose) { T_ T_ Q(ExceptionTestBody) } Obj *mR = &(mX = Z); ASSERTV(LINE1, ZZ == Z); ASSERTV(LINE1, mR == &X); } BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END // ASSERTV(LINE1, &oa == Z.get_allocator()); ASSERTV(LINE1, sam.isTotalSame()); ASSERTV(LINE1, oam.isTotalSame()); ASSERTV(LINE1, 0 == da.numBlocksTotal()); } // Verify all object memory is released on destruction. 
ASSERTV(LINE1, oa.numBlocksInUse(), 0 == oa.numBlocksInUse()); } } } template <class CONTAINER> template <bool PROPAGATE_ON_CONTAINER_SWAP_FLAG, bool OTHER_FLAGS> void TestDriver<CONTAINER>::testCase8_propagate_on_container_swap_dispatch() { typedef typename CONTAINER::value_type VALUE; // Set the three properties of 'bsltf::StdStatefulAllocator' that are not // under test in this test case to 'false'. typedef bsltf::StdStatefulAllocator<VALUE, OTHER_FLAGS, OTHER_FLAGS, PROPAGATE_ON_CONTAINER_SWAP_FLAG, OTHER_FLAGS> StdAlloc; typedef bsl::deque<VALUE, StdAlloc> CObj; typedef bsl::stack<VALUE, CObj> Obj; const bool PROPAGATE = PROPAGATE_ON_CONTAINER_SWAP_FLAG; static const char *SPECS[] = { "", "A", "BC", "CDE", }; const int NUM_SPECS = static_cast<const int>(sizeof SPECS / sizeof *SPECS); bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); for (int ti = 0; ti < NUM_SPECS; ++ti) { const char *const ISPEC = SPECS[ti]; const size_t ILENGTH = strlen(ISPEC); TestValues IVALUES(ISPEC); bslma::TestAllocator xoa("x-original", veryVeryVeryVerbose); bslma::TestAllocator yoa("y-original", veryVeryVeryVerbose); StdAlloc xma(&xoa); StdAlloc yma(&yoa); StdAlloc scratch(&da); const CObj CI(IVALUES.begin(), IVALUES.end(), scratch); const Obj ZZ(CI, scratch); // control for (int tj = 0; tj < NUM_SPECS; ++tj) { const char *const JSPEC = SPECS[tj]; const size_t JLENGTH = strlen(JSPEC); TestValues JVALUES(JSPEC); const CObj CJ(JVALUES.begin(), JVALUES.end(), scratch); const Obj WW(CJ, scratch); // control { Obj mX(CI, xma); const Obj& X = mX; if (veryVerbose) { T_ P_(ISPEC) P_(X) P(ZZ) } Obj mY(CJ, yma); const Obj& Y = mY; ASSERTV(ISPEC, JSPEC, ZZ, X, ZZ == X); ASSERTV(ISPEC, JSPEC, WW, Y, WW == Y); // member 'swap' { bslma::TestAllocatorMonitor dam(&da); bslma::TestAllocatorMonitor xoam(&xoa); bslma::TestAllocatorMonitor yoam(&yoa); mX.swap(mY); ASSERTV(ISPEC, JSPEC, WW, X, WW == X); ASSERTV(ISPEC, JSPEC, ZZ, Y, ZZ == Y); if (PROPAGATE) { // TBD no 'get_allocator' in 'stack' #if 0 ASSERTV(ISPEC, JSPEC, yma == X.get_allocator()); ASSERTV(ISPEC, JSPEC, xma == Y.get_allocator()); #endif ASSERTV(ISPEC, JSPEC, dam.isTotalSame()); ASSERTV(ISPEC, JSPEC, xoam.isTotalSame()); ASSERTV(ISPEC, JSPEC, yoam.isTotalSame()); } // TBD no 'get_allocator' in 'stack' #if 0 else { ASSERTV(ISPEC, JSPEC, xma == X.get_allocator()); ASSERTV(ISPEC, JSPEC, yma == Y.get_allocator()); } #endif } // free function 'swap' { bslma::TestAllocatorMonitor dam(&da); bslma::TestAllocatorMonitor xoam(&xoa); bslma::TestAllocatorMonitor yoam(&yoa); swap(mX, mY); ASSERTV(ISPEC, JSPEC, ZZ, X, ZZ == X); ASSERTV(ISPEC, JSPEC, WW, Y, WW == Y); // TBD no 'get_allocator' in 'stack' #if 0 ASSERTV(ISPEC, JSPEC, xma == X.get_allocator()); ASSERTV(ISPEC, JSPEC, yma == Y.get_allocator()); #endif if (PROPAGATE) { ASSERTV(ISPEC, JSPEC, dam.isTotalSame()); ASSERTV(ISPEC, JSPEC, xoam.isTotalSame()); ASSERTV(ISPEC, JSPEC, yoam.isTotalSame()); } } } ASSERTV(ISPEC, 0 == xoa.numBlocksInUse()); ASSERTV(ISPEC, 0 == yoa.numBlocksInUse()); } } ASSERTV(0 == da.numBlocksInUse()); } template <class CONTAINER> void TestDriver<CONTAINER>::testCase8_propagate_on_container_swap() { // ------------------------------------------------------------------------ // SWAP MEMBER AND FREE FUNCTIONS: ALLOCATOR PROPAGATION // // Concerns: //: 1 If the 'propagate_on_container_swap' trait is 'false', the //: allocators used by the source and target objects remain unchanged //: (i.e., the allocators are *not* exchanged). 
//: //: 2 If the 'propagate_on_container_swap' trait is 'true', the //: allocator used by the target (source) object is updated to be a copy //: of that used by the source (target) object (i.e., the allocators //: *are* exchanged). //: //: 3 If the allocators are propagated (i.e., exchanged), there is no //: additional allocation from any allocator. //: //: 4 The effect of the 'propagate_on_container_swap' trait is independent //: of the other three allocator propagation traits. //: //: 5 Following the swap operation, neither object holds on to memory //: allocated from the other object's allocator. // // Plan: //: 1 Specify a set S of object values with varied differences, ordered by //: increasing length, to be used in the following tests. //: //: 2 Create two 'bsltf::StdStatefulAllocator' objects with their //: 'propagate_on_container_swap' property configured to 'false'. In two //: successive iterations of P-3, first configure the three properties //: not under test to be 'false', then configure them all to be 'true'. //: //: 3 For each value '(x, y)' in the cross product S x S: (C-1) //: //: 1 Initialize two objects from 'x', a control object 'ZZ' using a //: scratch allocator and an object 'X' using one of the allocators //: from P-2. //: //: 2 Initialize two objects from 'y', a control object 'WW' using a //: scratch allocator and an object 'Y' using the other allocator from //: P-2. //: //: 3 Using both member 'swap' and free function 'swap', swap 'X' with //: 'Y' and use 'operator==' to verify that 'X' and 'Y' have the //: expected values. //: //: 4 Use the 'get_allocator' method to verify that the allocators of 'X' //: and 'Y' are *not* exchanged. (C-1) //: //: 4 Repeat P-2..3 except that this time configure the allocator property //: under test to 'true' and verify that the allocators of 'X' and 'Y' //: *are* exchanged. Also verify that there is no additional allocation //: from any allocator. (C-2..5) // // Testing: // propagate_on_container_swap // ------------------------------------------------------------------------ if (verbose) printf("\nSWAP MEMBER AND FREE FUNCTIONS: ALLOCATOR PROPAGATION" "\n=====================================================\n"); if (verbose) printf("\n'propagate_on_container_swap::value == false'\n"); testCase8_propagate_on_container_swap_dispatch<false, false>(); testCase8_propagate_on_container_swap_dispatch<false, true>(); if (verbose) printf("\n'propagate_on_container_swap::value == true'\n"); testCase8_propagate_on_container_swap_dispatch<true, false>(); testCase8_propagate_on_container_swap_dispatch<true, true>(); } template <class CONTAINER> void TestDriver<CONTAINER>::testCase8() { // ------------------------------------------------------------------------ // SWAP MEMBER AND FREE FUNCTIONS // Ensure that, when member and free 'swap' are implemented, we can // exchange the values of any two objects that use the same // allocator. // // Concerns: //: 1 Both functions exchange the values of the (two) supplied objects. //: //: 2 The common object allocator address held by both objects is //: unchanged. //: //: 3 If the two objects being swapped uses the same allocators, neither //: function allocates memory from any allocator. //: //: 4 Both functions have standard signatures and return types. //: //: 5 Two objects with different allocators may be swapped. In which case, //: memory may be allocated. //: //: 6 Using either function to swap an object with itself does not //: affect the value of the object (alias-safety). 
//: //: 7 The free 'swap' function is discoverable through ADL (Argument //: Dependent Lookup). //: //: 8 QoI: Asserted precondition violations are detected when enabled. // // Plan: //: 1 Use the addresses of the 'swap' member and free functions defined //: in this component to initialize, respectively, member-function //: and free-function pointers having the appropriate signatures and //: return types. (C-4) //: //: 2 Create a 'bslma::TestAllocator' object, and install it as the //: default allocator (note that a ubiquitous test allocator is //: already installed as the global allocator). //: //: 3 Using the table-driven technique: //: //: 1 Specify a set of (unique) valid object values (one per row) in //: terms of their individual attributes, including (a) first, the //: default value, (b) boundary values corresponding to every range //: of values that each individual attribute can independently //: attain, and (c) values that should require allocation from each //: individual attribute that can independently allocate memory. //: //: 2 Additionally, provide a (tri-valued) column, 'MEM', indicating //: the expectation of memory allocation for all typical //: implementations of individual attribute types: ('Y') "Yes", //: ('N') "No", or ('?') "implementation-dependent". //: //: 4 For each row 'R1' in the table of P-3: (C-1..2, 6) //: //: 1 Create a 'bslma::TestAllocator' object, 'oa'. //: //: 2 Use the value constructor and 'oa' to create a modifiable //: 'Obj', 'mW', having the value described by 'R1'; also use the //: copy constructor and a "scratch" allocator to create a 'const' //: 'Obj' 'XX' from 'mW'. //: //: 3 Use the member and free 'swap' functions to swap the value of //: 'mW' with itself; verify, after each swap, that: (C-6) //: //: 1 The value is unchanged. (C-6) //: //: 2 The allocator address held by the object is unchanged. //: //: 3 There was no additional object memory allocation. //: //: 4 For each row 'R2' in the table of P-3: (C-1..2) //: //: 1 Use the copy constructor and 'oa' to create a modifiable //: 'Obj', 'mX', from 'XX' (P-4.2). //: //: 2 Use the value constructor and 'oa' to create a modifiable //: 'Obj', 'mY', and having the value described by 'R2'; also use //: the copy constructor to create, using a "scratch" allocator, //: a 'const' 'Obj', 'YY', from 'Y'. //: //: 3 Use, in turn, the member and free 'swap' functions to swap //: the values of 'mX' and 'mY'; verify, after each swap, that: //: (C-1..2) //: //: 1 The values have been exchanged. (C-1) //: //: 2 The common object allocator address held by 'mX' and 'mY' //: is unchanged in both objects. (C-2) //: //: 3 There was no additional object memory allocation. //: //: 5 Create a new object allocator, 'oaz' //: //: 6 Repeat P-4.4.2 with 'oaz' under the presence of exception. //: //: 5 Verify that the free 'swap' function is discoverable through ADL: //: (C-6) //: //: 1 Create a set of attribute values, 'A', distinct from the values //: corresponding to the default-constructed object, choosing //: values that allocate memory if possible. //: //: 2 Create a 'bslma::TestAllocator' object, 'oa'. //: //: 3 Use the default constructor and 'oa' to create a modifiable //: 'Obj' 'mX' (having default attribute values); also use the copy //: constructor and a "scratch" allocator to create a 'const' 'Obj' //: 'XX' from 'mX'. 
//: //: 4 Use the value constructor and 'oa' to create a modifiable 'Obj' //: 'mY' having the value described by the 'Ai' attributes; also //: use the copy constructor and a "scratch" allocator to create a //: 'const' 'Obj' 'YY' from 'mY'. //: //: 5 Use the 'invokeAdlSwap' helper function template to swap the //: values of 'mX' and 'mY', using the free 'swap' function defined //: in this component, then verify that: (C-7) //: //: 1 The values have been exchanged. //: //: 2 There was no additional object memory allocation. (C-7) //: //: 6 Use the test allocator from P-2 to verify that no memory is ever //: allocated from the default allocator. (C-3) //: //: 7 Verify that, in appropriate build modes, defensive checks are //: triggered when an attempt is made to swap objects that do not //: refer to the same allocator, but not when the allocators are the //: same (using the 'BSLS_ASSERTTEST_*' macros). (C-7) // // Testing: // void swap(set& other); // void swap(set<K, C, A>& a, set<K, C, A>& b); // ------------------------------------------------------------------------ if (verbose) printf("\nSWAP MEMBER AND FREE FUNCTIONS" "\n==============================\n"); if (verbose) printf( "\nAssign the address of each function to a variable.\n"); { typedef void (Obj::*funcPtr)(Obj&); typedef void (*freeFuncPtr)(Obj&, Obj&); // Verify that the signatures and return types are standard. funcPtr memberSwap = &Obj::swap; freeFuncPtr freeSwap = bsl::swap; (void) memberSwap; // quash potential compiler warnings (void) freeSwap; } if (verbose) printf( "\nCreate a test allocator and install it as the default.\n"); bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); if (verbose) printf( "\nUse a table of distinct object values and expected memory usage.\n"); const int NUM_DATA = DEFAULT_NUM_DATA; const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA; for (int ti = 0; ti < NUM_DATA; ++ti) { const int LINE1 = DATA[ti].d_line; const char *const SPEC1 = DATA[ti].d_spec; bslma::TestAllocator oa("object", veryVeryVeryVerbose); bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose); Obj mW(&oa); const Obj& W = gg(&mW, SPEC1); const Obj XX(W, &scratch); // Ensure the first row of the table contains the // default-constructed value. 
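        // A function-scope 'static' flag restricts this sanity check to the
        // first iteration of the loop only.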
static bool firstFlag = true; if (firstFlag) { ASSERTV(LINE1, Obj() == W); firstFlag = false; } // member 'swap' { bslma::TestAllocatorMonitor oam(&oa); mW.swap(mW); ASSERTV(LINE1, XX == W); // ASSERTV(LINE1, &oa == W.get_allocator()); ASSERTV(LINE1, oam.isTotalSame()); } // free function 'swap' { bslma::TestAllocatorMonitor oam(&oa); swap(mW, mW); ASSERTV(LINE1, XX == W); // ASSERTV(LINE1, &oa == W.get_allocator()); ASSERTV(LINE1, oam.isTotalSame()); } for (int tj = 0; tj < NUM_DATA; ++tj) { const int LINE2 = DATA[tj].d_line; const char *const SPEC2 = DATA[tj].d_spec; Obj mX(XX, &oa); const Obj& X = mX; Obj mY(&oa); const Obj& Y = gg(&mY, SPEC2); const Obj YY(Y, &scratch); // member 'swap' { bslma::TestAllocatorMonitor oam(&oa); mX.swap(mY); ASSERTV(LINE1, LINE2, YY == X); ASSERTV(LINE1, LINE2, XX == Y); // ASSERTV(LINE1, LINE2, &oa == X.get_allocator()); // ASSERTV(LINE1, LINE2, &oa == Y.get_allocator()); ASSERTV(LINE1, LINE2, oam.isTotalSame()); } // free function 'swap' { bslma::TestAllocatorMonitor oam(&oa); swap(mX, mY); ASSERTV(LINE1, LINE2, XX == X); ASSERTV(LINE1, LINE2, YY == Y); // ASSERTV(LINE1, LINE2, &oa == X.get_allocator()); // ASSERTV(LINE1, LINE2, &oa == Y.get_allocator()); ASSERTV(LINE1, LINE2, oam.isTotalSame()); } bslma::TestAllocator oaz("z_object", veryVeryVeryVerbose); Obj mZ(&oaz); const Obj& Z = gg(&mZ, SPEC2); const Obj ZZ(Z, &scratch); // member 'swap' { bslma::TestAllocatorMonitor oam(&oa); bslma::TestAllocatorMonitor oazm(&oaz); BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) { ExceptionGuard<Obj> guardX(&X, L_, &scratch); ExceptionGuard<Obj> guardZ(&Z, L_, &scratch); mX.swap(mZ); guardX.release(); guardZ.release(); } BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END ASSERTV(LINE1, LINE2, ZZ == X); ASSERTV(LINE1, LINE2, XX == Z); // ASSERTV(LINE1, LINE2, &oa == X.get_allocator()); // ASSERTV(LINE1, LINE2, &oaz == Z.get_allocator()); if (0 == X.size()) { ASSERTV(LINE1, LINE2, emptyWillAlloc()||oam.isTotalSame()); } else { ASSERTV(LINE1, LINE2, oam.isTotalUp()); } if (0 == Z.size()) { ASSERTV(LINE1, LINE2, emptyWillAlloc() || oazm.isTotalSame()); } else { ASSERTV(LINE1, LINE2, oazm.isTotalUp()); } } // free function 'swap' { bslma::TestAllocatorMonitor oam(&oa); bslma::TestAllocatorMonitor oazm(&oaz); BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) { ExceptionGuard<Obj> guardX(&X, L_, &scratch); ExceptionGuard<Obj> guardZ(&Z, L_, &scratch); swap(mX, mZ); guardX.release(); guardZ.release(); } BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END ASSERTV(LINE1, LINE2, XX == X); ASSERTV(LINE1, LINE2, ZZ == Z); // ASSERTV(LINE1, LINE2, &oa == X.get_allocator()); // ASSERTV(LINE1, LINE2, &oaz == Z.get_allocator()); if (0 == X.size()) { ASSERTV(LINE1, LINE2, emptyWillAlloc()||oam.isTotalSame()); } else { ASSERTV(LINE1, LINE2, oam.isTotalUp()); } if (0 == Z.size()) { ASSERTV(LINE1, LINE2, emptyWillAlloc() || oazm.isTotalSame()); } else { ASSERTV(LINE1, LINE2, oazm.isTotalUp()); } } } } if (verbose) printf( "\nInvoke free 'swap' function in a context where ADL is used.\n"); { // 'A' values: Should cause memory allocation if possible. 
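        // The 'invokeAdlSwap' helper used below calls 'swap' unqualified, so
        // the free 'swap' supplied with this component must be discoverable
        // via argument-dependent lookup rather than by explicit
        // qualification (see the plan above).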
bslma::TestAllocator oa("object", veryVeryVeryVerbose); bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose); Obj mX(&oa); const Obj& X = mX; const Obj XX(X, &scratch); Obj mY(&oa); const Obj& Y = gg(&mY, "ABC"); const Obj YY(Y, &scratch); bslma::TestAllocatorMonitor oam(&oa); invokeAdlSwap(mX, mY); ASSERTV(YY == X); ASSERTV(XX == Y); ASSERT(oam.isTotalSame()); } } template <class CONTAINER> template <bool SELECT_ON_CONTAINER_COPY_CONSTRUCTION_FLAG, bool OTHER_FLAGS> void TestDriver<CONTAINER>:: testCase7_select_on_container_copy_construction_dispatch() { typedef typename CONTAINER::value_type VALUE; const int TYPE_ALLOC = bslma::UsesBslmaAllocator<VALUE>::value; // Set the three properties of 'bsltf::StdStatefulAllocator' that are not // under test in this test case to 'false'. typedef bsltf::StdStatefulAllocator< VALUE, SELECT_ON_CONTAINER_COPY_CONSTRUCTION_FLAG, OTHER_FLAGS, OTHER_FLAGS, OTHER_FLAGS> StdAlloc; typedef bsl::deque<VALUE, StdAlloc> CObj; typedef bsl::stack<VALUE, CObj> Obj; const bool PROPAGATE = SELECT_ON_CONTAINER_COPY_CONSTRUCTION_FLAG; static const char *SPECS[] = { "", "A", "BC", "CDE", }; const int NUM_SPECS = static_cast<const int>(sizeof SPECS / sizeof *SPECS); for (int ti = 0; ti < NUM_SPECS; ++ti) { const char *const SPEC = SPECS[ti]; const size_t LENGTH = strlen(SPEC); TestValues VALUES(SPEC); bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::TestAllocator oa("object", veryVeryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); StdAlloc ma(&oa); StdAlloc scratch(&da); { const CObj C(VALUES.begin(), VALUES.end(), scratch); const Obj W(C, ma); // control ASSERTV(ti, LENGTH == W.size()); // same lengths if (veryVerbose) { printf("\tControl Obj: "); P(W); } Obj mX(C, ma); const Obj& X = mX; if (veryVerbose) { printf("\t\tDynamic Obj: "); P(X); } bslma::TestAllocatorMonitor dam(&da); bslma::TestAllocatorMonitor oam(&oa); const Obj Y(X); ASSERTV(SPEC, W == Y); ASSERTV(SPEC, W == X); // TBD no 'get_allocator' in 'stack' #if 0 ASSERTV(SPEC, PROPAGATE, PROPAGATE == (ma == Y.get_allocator())); ASSERTV(SPEC, PROPAGATE, ma == X.get_allocator()); #endif if (PROPAGATE) { ASSERTV(SPEC, 0 != TYPE_ALLOC || dam.isInUseSame()); ASSERTV(SPEC, 0 == LENGTH || oam.isInUseUp()); } else { ASSERTV(SPEC, 0 == LENGTH || dam.isInUseUp()); ASSERTV(SPEC, oam.isTotalSame()); } } ASSERTV(SPEC, 0 == da.numBlocksInUse()); ASSERTV(SPEC, 0 == oa.numBlocksInUse()); } } template <class CONTAINER> void TestDriver<CONTAINER>::testCase7_select_on_container_copy_construction() { // ------------------------------------------------------------------------ // COPY CONSTRUCTOR: ALLOCATOR PROPAGATION // // Concerns: //: 1 The allocator of a source object using a standard allocator is //: propagated to the newly constructed object according to the //: 'select_on_container_copy_construction' method of the allocator. //: //: 2 In the absence of a 'select_on_container_copy_construction' method, //: the allocator of a source object using a standard allocator is always //: propagated to the newly constructed object (C++03 semantics). //: //: 3 The effect of the 'select_on_container_copy_construction' trait is //: independent of the other three allocator propagation traits. // // Plan: //: 1 Specify a set S of object values with varied differences, ordered by //: increasing length, to be used in the following tests. //: //: 2 Create a 'bsltf::StdStatefulAllocator' with its //: 'select_on_container_copy_construction' property configured to //: 'false'. 
In two successive iterations of P-3..5, first configure the //: three properties not under test to be 'false', then confgiure them //: all to be 'true'. //: //: 3 For each value in S, initialize objects 'W' (a control) and 'X' using //: the allocator from P-2. //: //: 4 Copy construct 'Y' from 'X' and use 'operator==' to verify that both //: 'X' and 'Y' subsequently have the same value as 'W'. //: //: 5 Use the 'get_allocator' method to verify that the allocator of 'X' //: is *not* propagated to 'Y'. //: //: 6 Repeat P-2..5 except that this time configure the allocator property //: under test to 'true' and verify that the allocator of 'X' *is* //: propagated to 'Y'. (C-1) //: //: 7 Repeat P-2..5 except that this time use a 'StatefulStlAllocator', //: which does not define a 'select_on_container_copy_construction' //: method, and verify that the allocator of 'X' is *always* propagated //: to 'Y'. (C-2..3) // // Testing: // select_on_container_copy_construction // ------------------------------------------------------------------------ if (verbose) printf("\n'select_on_container_copy_construction' " "propagates *default* allocator.\n"); testCase7_select_on_container_copy_construction_dispatch<false, false>(); testCase7_select_on_container_copy_construction_dispatch<false, true>(); if (verbose) printf("\n'select_on_container_copy_construction' " "propagates allocator of source object.\n"); testCase7_select_on_container_copy_construction_dispatch<true, false>(); testCase7_select_on_container_copy_construction_dispatch<true, true>(); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (verbose) printf("\nVerify C++03 semantics (allocator has no " "'select_on_container_copy_construction' method).\n"); typedef typename CONTAINER::value_type VALUE; typedef StatefulStlAllocator<VALUE> Allocator; typedef bsl::deque<VALUE, Allocator> CObj; typedef bsl::stack<VALUE, CObj> Obj; { static const char *SPECS[] = { "", "A", "BC", "CDE", }; const int NUM_SPECS = static_cast<const int>(sizeof SPECS / sizeof *SPECS); for (int ti = 0; ti < NUM_SPECS; ++ti) { const char *const SPEC = SPECS[ti]; const size_t LENGTH = strlen(SPEC); TestValues VALUES(SPEC); const int ALLOC_ID = ti + 73; Allocator a; a.setId(ALLOC_ID); const CObj C(VALUES.begin(), VALUES.end(), a); const Obj W(C, a); // control ASSERTV(ti, LENGTH == W.size()); // same lengths if (veryVerbose) { printf("\tControl Obj: "); P(W); } Obj mX(C, a); const Obj& X = mX; if (veryVerbose) { printf("\t\tDynamic Obj: "); P(X); } const Obj Y(X); ASSERTV(SPEC, W == Y); ASSERTV(SPEC, W == X); // TBD no 'get_allocator' in 'stack' #if 0 ASSERTV(SPEC, ALLOC_ID == Y.get_allocator().id()); #endif } } } template <class CONTAINER> void TestDriver<CONTAINER>::testCase7() { // ------------------------------------------------------------------------ // TESTING COPY CONSTRUCTOR: //: 1 The new object's value is the same as that of the original object //: (relying on the equality operator) and created with the correct //: capacity. //: //: 2 The value of the original object is left unaffected. //: //: 3 Subsequent changes in or destruction of the source object have no //: effect on the copy-constructed object. //: //: 4 Subsequent changes ('insert's) on the created object have no //: effect on the original and change the capacity of the new object //: correctly. 
//: //: 5 The object has its internal memory management system hooked up //: properly so that *all* internally allocated memory draws from a //: user-supplied allocator whenever one is specified. //: //: 6 The function is exception neutral w.r.t. memory allocation. // // Plan: //: 1 Specify a set S of object values with substantial and varied //: differences, ordered by increasing length, to be used in the //: following tests. //: //: 2 For each value in S, initialize objects w and x, copy construct y //: from x and use 'operator==' to verify that both x and y subsequently //: have the same value as w. Let x go out of scope and again verify //: that w == y. (C-1..4) //: //: 3 For each value in S initialize objects w and x, and copy construct y //: from x. Change the state of y, by using the *primary* *manipulator* //: 'push'. Using the 'operator!=' verify that y differs from x and //: w, and verify that the capacity of y changes correctly. (C-5) //: //: 4 Perform tests performed as P-2: (C-6) //: 1 While passing a testAllocator as a parameter to the new object and //: ascertaining that the new object gets its memory from the provided //: testAllocator. //: 2 Verify neither of global and default allocator is used to supply //: memory. (C-6) //: //: 5 Perform tests as P-2 in the presence of exceptions during memory //: allocations using a 'bslma::TestAllocator' and varying its //: *allocation* *limit*. (C-7) // // Testing: // set(const set& original); // set(const set& original, const A& allocator); // ------------------------------------------------------------------------ const char *cont = ContainerName<container_type>::name(); const char *val = ValueName<value_type>::name(); if (verbose) { P_(cont); P_(val); P(typeAlloc()); } const TestValues VALUES; // contains 52 distinct increasing values bslma::TestAllocator da(veryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); bslma::TestAllocator oa(veryVeryVerbose); { static const char *SPECS[] = { "", "A", "BC", "CDE", "DEAB", "EABCD", "ABCDEFG", "HFGEDCBA", "CFHEBIDGA", "BENCKHGMALJDFOI", "IDMLNEFHOPKGBCJA", "OIQGDNPMLKBACHFEJ" }; const int NUM_SPECS = sizeof SPECS / sizeof *SPECS; for (int ti = 0; ti < NUM_SPECS; ++ti) { const char *const SPEC = SPECS[ti]; const size_t LENGTH = strlen(SPEC); if (verbose) { printf("\nFor an object of length " ZU ":\n", LENGTH); P(SPEC); } // Create control object 'W'. Obj mW; const Obj& W = gg(&mW, SPEC); ASSERTV(ti, LENGTH == W.size()); // same lengths Obj mX(&oa); const Obj& X = gg(&mX, SPEC); ASSERT(W == X); { // Testing concern 1..4. if (veryVerbose) { printf("\t\t\tRegular Case :"); } Obj *pX = new Obj(&oa); gg(pX, SPEC); const Obj Y0(*pX); ASSERTV(SPEC, W == Y0); ASSERTV(SPEC, W == X); // ASSERTV(SPEC, Y0.get_allocator() == // bslma::Default::defaultAllocator()); delete pX; ASSERTV(SPEC, W == Y0); } { // Testing concern 5. if (veryVerbose) printf("\t\t\tInsert into created obj, " "without test allocator:\n"); Obj mY1(X); const Obj& Y1 = mY1; mY1.push(VALUES['Z' - 'A']); ASSERTV(SPEC, Y1.size() == LENGTH + 1); ASSERTV(SPEC, W != Y1); ASSERTV(SPEC, X != Y1); ASSERTV(SPEC, W == X); } { // Testing concern 5 with test allocator. 
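                // Copy-construct with an explicit object allocator, verify
                // that no memory comes from the default allocator, then
                // 'push' onto the copy and verify that the copy now differs
                // from both 'W' and 'X'.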
if (veryVerbose) printf("\t\t\tInsert into created obj, " "with test allocator:\n"); bslma::TestAllocatorMonitor dam(&da); bslma::TestAllocatorMonitor oam(&oa); Obj mY11(X, &oa); const Obj& Y11 = mY11; ASSERT(dam.isTotalSame()); ASSERTV(cont, LENGTH, oam.isTotalSame(), emptyWillAlloc(), oam.isTotalSame() == (!emptyWillAlloc() && 0 == LENGTH)); mY11.push(VALUES['Z' - 'A']); ASSERT(dam.isTotalSame()); ASSERTV(SPEC, Y11.size() == LENGTH + 1); ASSERTV(SPEC, W != Y11); ASSERTV(SPEC, X != Y11); // ASSERTV(SPEC, Y11.get_allocator() == X.get_allocator()); } { // Exception checking. BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) { bslma::TestAllocatorMonitor dam(&da); bslma::TestAllocatorMonitor oam(&oa); const Obj Y2(X, &oa); if (veryVerbose) { printf("\t\t\tException Case :\n"); } ASSERT(dam.isTotalSame()); ASSERT(oam.isTotalUp() || (!emptyWillAlloc() && 0 == LENGTH)); ASSERTV(SPEC, W == Y2); ASSERTV(SPEC, W == X); // ASSERTV(SPEC, Y2.get_allocator() == X.get_allocator()); } BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END } } } } template <class CONTAINER> void TestDriver<CONTAINER>::testCase6() { // --------------------------------------------------------------------- // TESTING EQUALITY OPERATORS: // Concerns: //: 1 Two objects, 'X' and 'Y', compare equal if and only if they contain //: the same values. //: //: 2 No non-salient attributes (i.e., 'allocator') participate. //: //: 3 'true == (X == X)' (i.e., identity) //: //: 4 'false == (X != X)' (i.e., identity) //: //: 5 'X == Y' if and only if 'Y == X' (i.e., commutativity) //: //: 6 'X != Y' if and only if 'Y != X' (i.e., commutativity) //: //: 7 'X != Y' if and only if '!(X == Y)' // // Plan: //: 1 Use the respective addresses of 'operator==' and 'operator!=' to //: initialize function pointers having the appropriate signatures and //: return types for the two homogeneous, free equality- comparison //: operators defined in this component. (C-8..9, 12..13) //: //: 2 Create a 'bslma::TestAllocator' object, and install it as the default //: allocator (note that a ubiquitous test allocator is already installed //: as the global allocator). //: //: 3 Using the table-driven technique, specify a set of distinct //: specifications for the 'gg' function. //: //: 4 For each row 'R1' in the table of P-3: (C-1..7) //: //: 1 Create a single object, using a comparator that can be disabled and //: a"scratch" allocator, and use it to verify the reflexive //: (anti-reflexive) property of equality (inequality) in the presence //: of aliasing. (C-3..4) //: //: 2 For each row 'R2' in the table of P-3: (C-1..2, 5..7) //: //: 1 Record, in 'EXP', whether or not distinct objects created from //: 'R1' and 'R2', respectively, are expected to have the same value. //: //: 2 For each of two configurations, 'a' and 'b': (C-1..2, 5..7) //: //: 1 Create two (object) allocators, 'oax' and 'oay'. //: //: 2 Create an object 'X', using 'oax', having the value 'R1'. //: //: 3 Create an object 'Y', using 'oax' in configuration 'a' and //: 'oay' in configuration 'b', having the value 'R2'. //: //: 4 Disable the comparator so that it will cause an error if it's //: used. //: //: 5 Verify the commutativity property and expected return value for //: both '==' and '!=', while monitoring both 'oax' and 'oay' to //: ensure that no object memory is ever allocated by either //: operator. (C-1..2, 5..7, 10) //: //: 5 Use the test allocator from P-2 to verify that no memory is ever //: allocated from the default allocator. 
(C-11) // // Testing: // bool operator==(const set<K, C, A>& lhs, const set<K, C, A>& rhs); // bool operator!=(const set<K, C, A>& lhs, const set<K, C, A>& rhs); // ------------------------------------------------------------------------ if (verbose) printf("\nEQUALITY-COMPARISON OPERATORS" "\n=============================\n"); if (verbose) printf("\nAssign the address of each operator to a variable.\n"); { typedef bool (*operatorPtr)(const Obj&, const Obj&); // Verify that the signatures and return types are standard. operatorPtr operatorEq = operator==; operatorPtr operatorNe = operator!=; (void) operatorEq; // quash potential compiler warnings (void) operatorNe; } const int NUM_DATA = DEFAULT_NUM_DATA; const DefaultDataRow (&DATA)[NUM_DATA] = DEFAULT_DATA; bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); bslma::TestAllocatorMonitor dam(&da); if (verbose) printf("\nCompare every value with every value.\n"); { // Create first object for (int ti = 0; ti < NUM_DATA; ++ti) { const char *const SPEC1 = DATA[ti].d_spec; const size_t LENGTH1 = strlen(DATA[ti].d_spec); if (veryVerbose) { T_ P(SPEC1) } // Ensure an object compares correctly with itself (alias test). { bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose); Obj mX(&scratch); const Obj& X = gg(&mX, SPEC1); ASSERTV(SPEC1, X == X); ASSERTV(SPEC1, !(X != X)); } for (int tj = 0; tj < NUM_DATA; ++tj) { const char *const SPEC2 = DATA[tj].d_spec; const size_t LENGTH2 = strlen(DATA[tj].d_spec); if (veryVerbose) { T_ T_ P(SPEC2) } const bool EXP = ti == tj; // expected equality for (char cfg = 'a'; cfg <= 'b'; ++cfg) { const char CONFIG = cfg; // Determines 'Y's allocator. // Create two distinct test allocators, 'oax' and 'oay'. bslma::TestAllocator oax("objectx", veryVeryVeryVerbose); bslma::TestAllocator oay("objecty", veryVeryVeryVerbose); // Map allocators above to objects 'X' and 'Y' below. bslma::TestAllocator& xa = oax; bslma::TestAllocator& ya = 'a' == CONFIG ? oax : oay; Obj mX(&xa); const Obj& X = gg(&mX, SPEC1); Obj mY(&ya); const Obj& Y = gg(&mY, SPEC2); ASSERTV(CONFIG, LENGTH1 == X.size()); ASSERTV(CONFIG, LENGTH2 == Y.size()); // Verify value, commutativity, and no memory allocation. bslma::TestAllocatorMonitor oaxm(&xa); bslma::TestAllocatorMonitor oaym(&ya); ASSERTV(CONFIG, EXP == (X == Y)); ASSERTV(CONFIG, EXP == (Y == X)); ASSERTV(CONFIG, !EXP == (X != Y)); ASSERTV(CONFIG, !EXP == (Y != X)); ASSERTV(CONFIG, oaxm.isTotalSame()); ASSERTV(CONFIG, oaym.isTotalSame()); } } } } ASSERT(dam.isTotalSame()); } template <class CONTAINER> void TestDriver<CONTAINER>::testCase4() { // ------------------------------------------------------------------------ // BASIC ACCESSORS // Ensure each basic accessor: // - top // - size // properly interprets object state. // // Concerns: //: 1 Each accessor returns the value of the correct property of the //: object. //: //: 2 Each accessor method is declared 'const'. //: //: 3 No accessor allocates any memory. //: // // Plan: //: 1 For each set of 'SPEC' of different length: //: //: 1 Default construct the object with various configuration: //: //: 2 Add in a series of objects. //: //: 3 Verify 'top' yields the expected result. 
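    //
    // A brief illustrative sketch (not itself executed by this test case) of
    // the accessor behavior verified below, assuming a default-constructible
    // 'CONTAINER' and using the 'VALUES' test array declared in the function
    // body:
    //..
    //  Obj mX;  const Obj& X = mX;
    //  ASSERT(X.empty());   ASSERT(0 == X.size());
    //  mX.push(VALUES[0]);
    //  ASSERT(!X.empty());  ASSERT(1 == X.size());
    //  ASSERT(VALUES[0] == X.top());
    //..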
// // Testing: // const_iterator cbegin(); // const_iterator cend(); // size_type size() const; // ------------------------------------------------------------------------ const char *cont = ContainerName<container_type>::name(); const char *val = ValueName<value_type>::name(); if (verbose) { P_(cont); P_(val); P(typeAlloc()); } const TestValues VALUES; // contains 52 distinct increasing values static const struct { int d_line; // source line number const char *d_spec; // specification string const char *d_results; // expected results } DATA[] = { //line spec result //---- -------- ------ { L_, "", "" }, { L_, "A", "A" }, { L_, "AB", "AB" }, { L_, "ABC", "ABC" }, { L_, "ABCD", "ABCD" }, { L_, "ABCDE", "ABCDE" } }; const int NUM_DATA = sizeof DATA / sizeof *DATA; if (verbose) { printf( "\nCreate objects with various allocator configurations.\n"); } { for (int ti = 0; ti < NUM_DATA; ++ti) { const int LINE = DATA[ti].d_line; const char *const SPEC = DATA[ti].d_spec; const size_t LENGTH = strlen(DATA[ti].d_results); const TestValues EXP(DATA[ti].d_results); if (verbose) { P_(LINE) P_(LENGTH) P(SPEC); } for (char cfg = 'a'; cfg <= 'd'; ++cfg) { const char CONFIG = cfg; bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::TestAllocator fa("footprint", veryVeryVeryVerbose); bslma::TestAllocator sa1("supplied1", veryVeryVeryVerbose); bslma::TestAllocator sa2("supplied2", veryVeryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); bslma::TestAllocator& oa = 'a' == CONFIG || 'b' == CONFIG ? da : 'c' == CONFIG ? sa1 : sa2; bslma::TestAllocator& noa = &oa != &da ? da : sa1; bslma::TestAllocatorMonitor oam(&oa); bslma::TestAllocatorMonitor noam(&noa); Obj& mX = 'a' == CONFIG ? * new (fa) Obj() : 'b' == CONFIG ? * new (fa) Obj((bslma::Allocator *) 0) : 'c' == CONFIG ? * new (fa) Obj(&sa1) : * new (fa) Obj(&sa2); ASSERT( oam.isTotalUp() == emptyWillAlloc()); ASSERT(noam.isTotalSame()); const Obj& X = gg(&mX, SPEC); ASSERT(&X == &mX); oam.reset(); // -------------------------------------------------------- // Verify basic accessors // ASSERTV(LINE, SPEC, CONFIG, &oa == X.get_allocator()); ASSERTV(LINE, SPEC, CONFIG, LENGTH == X.size()); if (LENGTH > 0) { ASSERTV(LINE, SPEC, CONFIG, EXP[LENGTH - 1] == mX.top()); ASSERTV(LINE, SPEC, CONFIG, EXP[LENGTH - 1] == X.top()); } else { bsls::AssertTestHandlerGuard hG; ASSERT_SAFE_FAIL(mX.top()); } ASSERTV(LINE, LENGTH, X.empty(), (0 == LENGTH) == mX.empty()); ASSERTV(LINE, LENGTH, X.empty(), (0 == LENGTH) == X.empty()); ASSERT( oam.isTotalSame()); ASSERT(noam.isTotalSame()); fa.deleteObject(&mX); } } } } template <class CONTAINER> void TestDriver<CONTAINER>::testCase3() { // ------------------------------------------------------------------------ // TESTING PRIMITIVE GENERATOR FUNCTIONS gg AND ggg: // Having demonstrated that our primary manipulators work as expected // under normal conditions // // Concerns: //: 1 Valid generator syntax produces expected results //: //: 2 Invalid syntax is detected and reported. // // Plan: //: 1 For each of an enumerated sequence of 'spec' values, ordered by //: increasing 'spec' length: //: //: 1 Use the primitive generator function 'gg' to set the state of a //: newly created object. //: //: 2 Verify that 'gg' returns a valid reference to the modified argument //: object. //: //: 3 Use the basic accessors to verify that the value of the object is //: as expected. 
(C-1) //: //: 2 For each of an enumerated sequence of 'spec' values, ordered by //: increasing 'spec' length, use the primitive generator function 'ggg' //: to set the state of a newly created object. //: //: 1 Verify that 'ggg' returns the expected value corresponding to the //: location of the first invalid value of the 'spec'. (C-2) // // Testing: // set<K,A>& gg(set<K,A> *object, const char *spec); // int ggg(set<K,A> *object, const char *spec, int verbose = 1); // ------------------------------------------------------------------------ const char *cont = ContainerName<container_type>::name(); const char *val = ValueName<value_type>::name(); if (verbose) { P_(cont); P(val); } bslma::TestAllocator oa(veryVeryVerbose); bslma::TestAllocator da(veryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); if (verbose) printf("\nTesting generator on valid specs.\n"); { static const struct { int d_line; // source line number const char *d_spec; // specification string const char *d_results; // expected element values } DATA[] = { //line spec results //---- -------- ------- { L_, "", "" }, { L_, "A", "A" }, { L_, "B", "B" }, { L_, "AB", "AB" }, { L_, "CD", "CD" }, { L_, "ABC", "ABC" }, { L_, "ABCD", "ABCD" }, { L_, "ABCDE", "ABCDE" }, }; const int NUM_DATA = sizeof DATA / sizeof *DATA; int oldLen = -1; for (int ti = 0; ti < NUM_DATA ; ++ti) { const int LINE = DATA[ti].d_line; const char *const SPEC = DATA[ti].d_spec; const size_t LENGTH = strlen(DATA[ti].d_results); const TestValues EXP(DATA[ti].d_results); const int curLen = (int)strlen(SPEC); bslma::TestAllocatorMonitor oam(&oa); bslma::TestAllocatorMonitor dam(&da); Obj mX(&oa); const Obj& X = gg(&mX, SPEC); LOOP3_ASSERT(oam.isTotalUp(), emptyWillAlloc(), LENGTH, oam.isTotalUp() == (emptyWillAlloc() || LENGTH > 0)); ASSERT(dam.isTotalSame()); const Obj& Y = g( SPEC); ASSERT(&mX == &X); ASSERT(Y == X); if (curLen != oldLen) { if (verbose) printf("\tof length %d:\n", curLen); ASSERTV(LINE, oldLen <= curLen); // non-decreasing oldLen = curLen; } ASSERTV(LINE, LENGTH == X.size()); ASSERTV(LINE, LENGTH == Y.size()); emptyNVerifyStack(&mX, EXP, LENGTH, L_); emptyNVerifyStack(const_cast<Obj *>(&Y), EXP, LENGTH, L_); } } if (verbose) printf("\nTesting generator on invalid specs.\n"); { static const struct { int d_line; // source line number const char *d_spec; // specification string int d_index; // offending character index } DATA[] = { //line spec index //---- -------- ----- { L_, "", -1, }, // control { L_, "A", -1, }, // control { L_, " ", 0, }, { L_, ".", 0, }, { L_, "E", -1, }, // control { L_, "a", 0, }, { L_, "z", 0, }, { L_, "AE", -1, }, // control { L_, "aE", 0, }, { L_, "Ae", 1, }, { L_, ".~", 0, }, { L_, "~!", 0, }, { L_, " ", 0, }, { L_, "ABC", -1, }, // control { L_, " BC", 0, }, { L_, "A C", 1, }, { L_, "AB ", 2, }, { L_, "?#:", 0, }, { L_, " ", 0, }, { L_, "ABCDE", -1, }, // control { L_, "aBCDE", 0, }, { L_, "ABcDE", 2, }, { L_, "ABCDe", 4, }, { L_, "AbCdE", 1, } }; const int NUM_DATA = sizeof DATA / sizeof *DATA; int oldLen = -1; for (int ti = 0; ti < NUM_DATA ; ++ti) { const int LINE = DATA[ti].d_line; const char *const SPEC = DATA[ti].d_spec; const int INDEX = DATA[ti].d_index; const int LENGTH = static_cast<int>(strlen(SPEC)); Obj mX(&oa); if (LENGTH != oldLen) { if (verbose) printf("\tof length %d:\n", LENGTH); ASSERTV(LINE, oldLen <= LENGTH); // non-decreasing oldLen = LENGTH; } if (veryVerbose) printf("\t\tSpec = \"%s\"\n", SPEC); int RESULT = ggg(&mX, SPEC, veryVerbose); ASSERTV(LINE, INDEX == RESULT); } } } template <class 
CONTAINER> void TestDriver<CONTAINER>::testCase2() { // ------------------------------------------------------------------------ // TESTING CONSTRUCTORS AND PRIMARY MANIPULATORS (BOOTSTRAP): // The basic concern is that the default constructor, the destructor, // and, under normal conditions (i.e., no aliasing), the primary // manipulators // - push // - pop // // Concerns: //: 1 An object created with the default constructor (with or without a //: supplied allocator) has the contractually specified default value. //: //: 2 If an allocator is NOT supplied to the default constructor, the //: default allocator in effect at the time of construction becomes the //: object allocator for the resulting object. //: //: 3 If an allocator IS supplied to the default constructor, that //: allocator becomes the object allocator for the resulting object. //: //: 4 Supplying a null allocator address has the same effect as not //: supplying an allocator. //: //: 5 Supplying an allocator to the default constructor has no effect on //: subsequent object values. //: //: 6 Any memory allocation is from the object allocator. //: //: 7 There is no temporary allocation from any allocator. //: //: 8 Every object releases any allocated memory at destruction. //: //: 9 QoI: The default constructor allocates no memory (only true if the //: underlying container is 'vector'). //: //:10 'insert' adds an additional element to the object if the element //: being inserted does not already exist. //: //:11 'push' pushes a sequence of objects into the stack, and 'pop' will //: recover those same values in reverse order. //: //:12 Any argument can be 'const'. //: //:13 Any memory allocation is exception neutral. //: //:14 All version of the copy c'tor produce an object with the same value //: as the original, and allocate memory from the appropriate allocator. //: //:15 All versions of the c'tor from container produce an object with the //: appropriate value, and allocate memory from the appropriate //: allocator. // // Plan: //: 1 For each value of increasing length, 'L': //: //: 2 Using a loop-based approach, default-construct three distinct //: objects, in turn, but configured differently: (a) without passing //: an allocator, (b) passing a null allocator address explicitly, and //: (c) passing the address of a test allocator distinct from the //: default. For each of these three iterations: (C-1..14) //: //: 1 Create three 'bslma::TestAllocator' objects, and install one as //: as the current default allocator (note that a ubiquitous test //: allocator is already installed as the global allocator). //: //: 2 Use the default constructor to dynamically create an object //: 'X', with its object allocator configured appropriately (see //: P-2); use a distinct test allocator for the object's footprint. //: //: 3 Use the (as yet unproven) 'get_allocator' to ensure that its //: object allocator is properly installed. (C-2..4) //: //: 4 Use the appropriate test allocators to verify that no memory is //: allocated by the default constructor. (C-9) //: //: 5 Use the individual (as yet unproven) salient attribute accessors //: to verify the default-constructed value. (C-1) //: //: 6 Insert 'L - 1' elements in order of increasing value into the //: container. //: //: 7 Insert the 'L'th value in the presense of exception and use the //: (as yet unproven) basic accessors to verify the container has the //: expected values. Verify the number of allocation is as expected. 
//: (C-5..6, 13..14) //: //: 8 Verify that no temporary memory is allocated from the object //: allocator. (C-7) //: //: 9 Make a copy of the object using the appropriate copy c'tor. //: //: 10 Verify that all object memory is released when the object is //: destroyed. (Implicit in test allocator). (C-8) //: 11 Verify that calling 'pop' on an empty stack will fail an assert //: in safe mode. // // Testing: // stack; // stack(bslma_Allocator *); // ~stack(); // push(const value_type& value); // stack(const CONTAINER& container, bslma_allocator *); // stack(const stack& stack, bslma_allocator *); // ------------------------------------------------------------------------ const char *cont = ContainerName<container_type>::name(); const char *val = ValueName<value_type>::name(); if (verbose) { P_(cont); P_(val); P(typeAlloc()); } const TestValues VALUES; // contains 52 distinct increasing values const size_t MAX_LENGTH = 9; for (size_t ti = 0; ti < MAX_LENGTH; ++ti) { const size_t LENGTH = ti; for (char cfg = 'a'; cfg <= 'c'; ++cfg) { const char CONFIG = cfg; // how we specify the allocator bslma::TestAllocator da("default", veryVeryVeryVerbose); bslma::TestAllocator fa("footprint", veryVeryVeryVerbose); bslma::TestAllocator sa("supplied", veryVeryVeryVerbose); bslma::DefaultAllocatorGuard dag(&da); // ---------------------------------------------------------------- if (veryVerbose) { printf("\n\tTesting default constructor.\n"); } Obj *objPtr; switch (CONFIG) { case 'a': { objPtr = new (fa) Obj(); } break; case 'b': { objPtr = new (fa) Obj((bslma::Allocator *) 0); } break; case 'c': { objPtr = new (fa) Obj(&sa); } break; default: { ASSERTV(CONFIG, !"Bad allocator config."); return; // RETURN } break; } Obj& mX = *objPtr; const Obj& X = mX; bslma::TestAllocator& oa = 'c' == CONFIG ? sa : da; // Verify any attribute allocators are installed properly. // ASSERTV(LENGTH, CONFIG, &oa == X.get_allocator()); // Verify no allocation from the object/non-object allocators. ASSERTV(CONFIG, emptyWillAlloc() == !!oa.numBlocksTotal()); ASSERTV(LENGTH, CONFIG, 0 == X.size()); ASSERTV(LENGTH, CONFIG, X.empty()); { bsls::AssertTestHandlerGuard hG; ASSERT_SAFE_FAIL(mX.pop()); } // ---------------------------------------------------------------- if (veryVerbose) { printf("\n\tTesting 'push' (bootstrap).\n"); } for (size_t tj = 0; tj + 1 < LENGTH; ++tj) { mX.push(VALUES[tj]); ASSERTV(LENGTH, tj, CONFIG, VALUES[tj] == mX.top()); ASSERTV(LENGTH, tj, CONFIG, VALUES[tj] == X.top()); } if (LENGTH > 1) { ASSERTV(CONFIG, oa.numBlocksTotal() > 0); } if (0 < LENGTH) { ASSERTV(LENGTH, CONFIG, LENGTH - 1 == X.size()); bslma::TestAllocator scratch("scratch", veryVeryVeryVerbose); // insert the last element with an exception guard BSLMA_TESTALLOCATOR_EXCEPTION_TEST_BEGIN(oa) { ExceptionGuard<Obj> guard(&X, L_, &scratch); mX.push(VALUES[LENGTH - 1]); guard.release(); // Verify no temporary memory is allocated from the object // allocator. if (1 == LENGTH && !typeAlloc()) { // If the vector grows, the old vector will be // deallocated, so only do this test on '1 == LENGTH'. 
ASSERTV(LENGTH, CONFIG, oa.numBlocksTotal(), oa.numBlocksInUse(), oa.numBlocksTotal() == oa.numBlocksInUse()); } ASSERTV(LENGTH, CONFIG, VALUES[LENGTH - 1] == mX.top()); ASSERTV(LENGTH, CONFIG, VALUES[LENGTH - 1] == X.top()); } BSLMA_TESTALLOCATOR_EXCEPTION_TEST_END ASSERTV(LENGTH, CONFIG, LENGTH == X.size()); } // Test copy c'tors { bslma::TestAllocatorMonitor oaMonitor(&oa); Obj *copyPtr; switch (CONFIG) { case 'a': { copyPtr = new (fa) Obj(X); } break; case 'b': { copyPtr = new (fa) Obj(X, (bslma::Allocator *) 0); } break; case 'c': { copyPtr = new (fa) Obj(X, &sa); } break; default: { ASSERTV(CONFIG, !"Bad allocator config."); return; // RETURN } break; } ASSERT(X == *copyPtr); ASSERT((0 < LENGTH || emptyWillAlloc()) == oaMonitor.isTotalUp()); emptyAndVerify(copyPtr, VALUES, LENGTH, L_); fa.deleteObject(copyPtr); } // Test container c'tors { bslma::TestAllocatorMonitor oaMonitor(&oa); bslma::TestAllocator ca; CONTAINER c(&ca); const CONTAINER& C = c; // We have to insert the values one at a time, 'vector' has a // problem with range inserts of function ptrs. for (size_t tk = 0; tk < LENGTH; ++tk) { c.push_back(VALUES[tk]); } Obj *cCopyPtr; switch (CONFIG) { case 'a': { cCopyPtr = new (fa) Obj(C); } break; case 'b': { cCopyPtr = new (fa) Obj(C, (bslma::Allocator *) 0); } break; case 'c': { cCopyPtr = new (fa) Obj(C, &sa); } break; default: { ASSERTV(CONFIG, !"Bad allocator config."); return; // RETURN } break; } ASSERT(X == *cCopyPtr); ASSERT((0 < LENGTH || emptyWillAlloc()) == oaMonitor.isTotalUp()); if ('a' == CONFIG) { // Sometimes don't do this, just so we test the case where // we destroy a non-empty object. emptyAndVerify(cCopyPtr, VALUES, LENGTH, L_); } fa.deleteObject(cCopyPtr); } emptyAndVerify(&mX, VALUES, LENGTH, L_); if (&oa != &da) { ASSERT(0 == da.numBlocksTotal()); } // Reclaim dynamically allocated object under test. fa.deleteObject(objPtr); } } } template <class CONTAINER> void TestDriver<CONTAINER>::testCase1_NoAlloc(int *testValues, size_t numValues) { // ------------------------------------------------------------------------ // BREATHING TEST // This case exercises (but does not fully test) basic functionality. // // Concerns: //: 1 The class is sufficiently functional to enable comprehensive //: testing in subsequent test cases. // // Plan: //: 1 Execute each method to verify functionality for simple case. // // Testing: // BREATHING TEST // ------------------------------------------------------------------------ // Sanity check. 
ASSERTV(0 < numValues); ASSERTV(8 > numValues); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (veryVerbose) { printf("Default construct an empty set.\n"); } { Obj x; const Obj& X = x; ASSERTV(0 == X.size()); ASSERTV(true == X.empty()); } // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (veryVerbose) { printf("Test use of allocators.\n"); } { Obj o1; const Obj& O1 = o1; for (size_t i = 0; i < numValues; ++i) { o1.push(value_type(testValues[i])); } ASSERTV(numValues == O1.size()); Obj o2(O1); const Obj& O2 = o2; ASSERTV(numValues == O1.size()); ASSERTV(numValues == O2.size()); Obj o3; const Obj& O3 = o3; ASSERTV(numValues == O1.size()); ASSERTV(numValues == O2.size()); ASSERTV(0 == O3.size()); o1.swap(o3); ASSERTV(0 == O1.size()); ASSERTV(numValues == O2.size()); ASSERTV(numValues == O3.size()); ASSERTV(0 == O1.size()); ASSERTV(numValues == O2.size()); ASSERTV(numValues == O3.size()); } // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (veryVerbose) { printf("Test primary manipulators/accessors on every permutation.\n"); } native_std::sort(testValues, testValues + numValues); do { // For each possible permutation of values, insert values, iterate over // the resulting container, find values, and then erase values. Obj x; const Obj& X = x; for (size_t i = 0; i < numValues; ++i) { Obj y(X); const Obj& Y = y; ASSERTV(X == Y); ASSERTV(!(X != Y)); // Test 'insert'. value_type value(testValues[i]); x.push(value); ASSERTV(testValues[i] == x.top()); // Test size, empty. ASSERTV(i + 1 == X.size()); ASSERTV(false == X.empty()); ASSERTV(X != Y); ASSERTV(!(X == Y)); y = x; ASSERTV(X == Y); ASSERTV(!(X != Y)); } ASSERTV(X.size() == numValues); for (int i = static_cast<int>(numValues) - 1; i >= 0; --i) { testValues[i] = X.top(); x.pop(); } ASSERTV(X.size() == 0); } while (native_std::next_permutation(testValues, testValues + numValues)); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (veryVerbose) { printf("Test class comparison operators.\n"); } { // Iterate over possible selections of elements to add to two // containers, 'X' and 'Y' then compare the results of the comparison // operators to the comparison between two containers equivalent to // the underlying containers in the stack objects. 
for (size_t i = 0; i < numValues; ++i) { for (size_t j = 0; j < numValues; ++j) { for (size_t length = 0; length < numValues; ++length) { for (size_t m = 0; m < j; ++m) { Obj x; const Obj& X = x; Obj y; const Obj& Y = y; CONTAINER xx; const CONTAINER& XX = xx; CONTAINER yy; const CONTAINER& YY = yy; for (size_t k = 0; k < j; ++k) { size_t xIndex = (i + length) % numValues; size_t yIndex = (j + length) % numValues; x.push( testValues[xIndex]); xx.push_back(testValues[xIndex]); if (k < m) { y.push( testValues[yIndex]); yy.push_back(testValues[yIndex]); } } ASSERTV((X == Y) == (XX == YY)); ASSERTV((X != Y) == (XX != YY)); ASSERTV((X < Y) == (XX < YY)); ASSERTV((X > Y) == (XX > YY)); ASSERTV((X <= Y) == (XX <= YY)); ASSERTV((X >= Y) == (XX >= YY)); ASSERTV((X == Y) == !(X != Y)); ASSERTV((X != Y) == !(X == Y)); ASSERTV((X < Y) == !(X >= Y)); ASSERTV((X > Y) == !(X <= Y)); ASSERTV((X <= Y) == !(X > Y)); ASSERTV((X >= Y) == !(X < Y)); ASSERTV((Y == X) == (YY == XX)); ASSERTV((Y != X) == (YY != XX)); ASSERTV((Y < X) == (YY < XX)); ASSERTV((Y > X) == (YY > XX)); ASSERTV((Y <= X) == (YY <= XX)); ASSERTV((Y >= X) == (YY >= XX)); ASSERTV((Y == X) == !(Y != X)); ASSERTV((Y != X) == !(Y == X)); ASSERTV((Y < X) == !(Y >= X)); ASSERTV((Y > X) == !(Y <= X)); ASSERTV((Y <= X) == !(Y > X)); ASSERTV((Y >= X) == !(Y < X)); } } } } } } template <class CONTAINER> void TestDriver<CONTAINER>::testCase1(int *testValues, size_t numValues) { // ------------------------------------------------------------------------ // BREATHING TEST // This case exercises (but does not fully test) basic functionality. // // Concerns: //: 1 The class is sufficiently functional to enable comprehensive //: testing in subsequent test cases. // // Plan: //: 1 Execute each method to verify functionality for simple case. // // Testing: // BREATHING TEST // ------------------------------------------------------------------------ bslma::TestAllocator defaultAllocator("defaultAllocator"); bslma::DefaultAllocatorGuard defaultGuard(&defaultAllocator); bslma::TestAllocator objectAllocator("objectAllocator"); // Sanity check. 
ASSERTV(0 < numValues); ASSERTV(8 > numValues); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (veryVerbose) { printf("Default construct an empty set.\n"); } { Obj x(&objectAllocator); const Obj& X = x; ASSERTV(0 == X.size()); ASSERTV(true == X.empty()); ASSERTV(0 == defaultAllocator.numBytesInUse()); ASSERTV(emptyWillAlloc() == (0 != objectAllocator.numBytesInUse())); } // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (veryVerbose) { printf("Test use of allocators.\n"); } { bslma::TestAllocatorMonitor defaultMonitor(&defaultAllocator); bslma::TestAllocator objectAllocator1("objectAllocator1"); bslma::TestAllocator objectAllocator2("objectAllocator2"); Obj o1(&objectAllocator1); const Obj& O1 = o1; // ASSERTV(&objectAllocator1 == O1.get_allocator().mechanism()); for (size_t i = 0; i < numValues; ++i) { o1.push(value_type(testValues[i])); } ASSERTV(numValues == O1.size()); ASSERTV(0 < objectAllocator1.numBytesInUse()); ASSERTV(0 == objectAllocator2.numBytesInUse()); Obj o2(O1, &objectAllocator2); const Obj& O2 = o2; // ASSERTV(&objectAllocator2 == O2.get_allocator().mechanism()); ASSERTV(numValues == O1.size()); ASSERTV(numValues == O2.size()); ASSERTV(0 < objectAllocator1.numBytesInUse()); ASSERTV(0 < objectAllocator2.numBytesInUse()); Obj o3(&objectAllocator1); const Obj& O3 = o3; // ASSERTV(&objectAllocator1 == O3.get_allocator().mechanism()); bslma::TestAllocatorMonitor monitor1(&objectAllocator1); ASSERTV(numValues == O1.size()); ASSERTV(numValues == O2.size()); ASSERTV(0 == O3.size()); ASSERTV(monitor1.isInUseSame()); ASSERTV(monitor1.isTotalSame()); ASSERTV(0 < objectAllocator1.numBytesInUse()); ASSERTV(0 < objectAllocator2.numBytesInUse()); o1.swap(o3); ASSERTV(0 == O1.size()); ASSERTV(numValues == O2.size()); ASSERTV(numValues == O3.size()); ASSERTV(monitor1.isInUseSame()); ASSERTV(monitor1.isTotalSame()); ASSERTV(0 < objectAllocator1.numBytesInUse()); ASSERTV(0 < objectAllocator2.numBytesInUse()); o3.swap(o2); ASSERTV(0 == O1.size()); ASSERTV(numValues == O2.size()); ASSERTV(numValues == O3.size()); ASSERTV(!monitor1.isInUseUp()); // Memory usage may go down depending // on implementation ASSERTV(monitor1.isTotalUp()); ASSERTV(0 < objectAllocator1.numBytesInUse()); ASSERTV(0 < objectAllocator2.numBytesInUse()); // ASSERTV(&objectAllocator1 == O1.get_allocator().mechanism()); // ASSERTV(&objectAllocator2 == O2.get_allocator().mechanism()); // ASSERTV(&objectAllocator1 == O3.get_allocator().mechanism()); ASSERTV(! defaultMonitor.isTotalUp()); } // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (veryVerbose) { printf("Test primary manipulators/accessors on every permutation.\n"); } native_std::sort(testValues, testValues + numValues); do { // For each possible permutation of values, insert values, iterate over // the resulting container, find values, and then erase values. bslma::TestAllocatorMonitor defaultMonitor(&defaultAllocator); Obj x(&objectAllocator); const Obj& X = x; for (size_t i = 0; i < numValues; ++i) { Obj y(X, &objectAllocator); const Obj& Y = y; ASSERTV(X == Y); ASSERTV(!(X != Y)); // Test 'insert'. value_type value(testValues[i]); x.push(value); ASSERTV(testValues[i] == x.top()); // Test size, empty. 
ASSERTV(i + 1 == X.size()); ASSERTV(false == X.empty()); ASSERTV(X != Y); ASSERTV(!(X == Y)); y = x; ASSERTV(X == Y); ASSERTV(!(X != Y)); } ASSERTV(X.size() == numValues); ASSERTV(0 != objectAllocator.numBytesInUse()); ASSERTV(0 == defaultAllocator.numBytesInUse()); for (int i = static_cast<int>(numValues) - 1; i >= 0; --i) { testValues[i] = (int) X.top(); x.pop(); } ASSERTV(X.size() == 0); ASSERTV(! defaultMonitor.isTotalUp()); } while (native_std::next_permutation(testValues, testValues + numValues)); // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if (veryVerbose) { printf("Test class comparison operators.\n"); } { // Iterate over possible selections of elements to add to two // containers, 'X' and 'Y' then compare the results of the comparison // operators to the comparison between two containers equivalent to // the underlying containers in the stack objects. for (size_t i = 0; i < numValues; ++i) { for (size_t j = 0; j < numValues; ++j) { for (size_t length = 0; length < numValues; ++length) { for (size_t m = 0; m < j; ++m) { Obj x(&objectAllocator); const Obj& X = x; Obj y(&objectAllocator); const Obj& Y = y; CONTAINER xx(&objectAllocator); const CONTAINER& XX = xx; CONTAINER yy(&objectAllocator); const CONTAINER& YY = yy; for (size_t k = 0; k < j; ++k) { size_t xIndex = (i + length) % numValues; size_t yIndex = (j + length) % numValues; x.push( testValues[xIndex]); xx.push_back(testValues[xIndex]); if (k < m) { y.push( testValues[yIndex]); yy.push_back(testValues[yIndex]); } } ASSERTV((X == Y) == (XX == YY)); ASSERTV((X != Y) == (XX != YY)); ASSERTV((X < Y) == (XX < YY)); ASSERTV((X > Y) == (XX > YY)); ASSERTV((X <= Y) == (XX <= YY)); ASSERTV((X >= Y) == (XX >= YY)); ASSERTV((X == Y) == !(X != Y)); ASSERTV((X != Y) == !(X == Y)); ASSERTV((X < Y) == !(X >= Y)); ASSERTV((X > Y) == !(X <= Y)); ASSERTV((X <= Y) == !(X > Y)); ASSERTV((X >= Y) == !(X < Y)); ASSERTV((Y == X) == (YY == XX)); ASSERTV((Y != X) == (YY != XX)); ASSERTV((Y < X) == (YY < XX)); ASSERTV((Y > X) == (YY > XX)); ASSERTV((Y <= X) == (YY <= XX)); ASSERTV((Y >= X) == (YY >= XX)); ASSERTV((Y == X) == !(Y != X)); ASSERTV((Y != X) == !(Y == X)); ASSERTV((Y < X) == !(Y >= X)); ASSERTV((Y > X) == !(Y <= X)); ASSERTV((Y <= X) == !(Y > X)); ASSERTV((Y >= X) == !(Y < X)); } } } } } } // ============================================================================ // MAIN PROGRAM // ---------------------------------------------------------------------------- bool intLessThan(int a, int b) { return a < b; } int main(int argc, char *argv[]) { int test = argc > 1 ? 
atoi(argv[1]) : 0; verbose = argc > 2; veryVerbose = argc > 3; veryVeryVerbose = argc > 4; veryVeryVeryVerbose = argc > 5; printf("TEST " __FILE__ " CASE %d\n", test); bslma::TestAllocator globalAllocator("global", veryVeryVeryVerbose); bslma::Default::setGlobalAllocator(&globalAllocator); bslma::TestAllocator defaultAllocator("default", veryVeryVeryVerbose); ASSERT(0 == bslma::Default::setDefaultAllocator(&defaultAllocator)); switch (test) { case 0: case 19: { // -------------------------------------------------------------------- // 'noexcept' SPECIFICATION // -------------------------------------------------------------------- if (verbose) printf("\n" "'noexcept' SPECIFICATION" "\n" "------------------------" "\n"); TestDriver<bsl::vector<int> >::testCase19(); } break; case 18: { // -------------------------------------------------------------------- // MOVE MANIPULATORS // -------------------------------------------------------------------- if (verbose) printf("\n" "MOVE MANIPULATORS" "\n" "-----------------" "\n"); // TestDriver< MovableVector<int> >::testCase18(true); // TestDriver<NonMovableVector<int> >::testCase18(false); typedef signed char SC; typedef size_t SZ; typedef bsltf::TemplateTestFacility::ObjectPtr TTF_OP; // typedef bsltf::TemplateTestFacility::MethodPtr TTF_MP; typedef bsltf::EnumeratedTestType::Enum ETT; typedef bsltf::SimpleTestType STT; typedef bsltf::AllocTestType ATT; typedef bsltf::BitwiseMoveableTestType BMTT; typedef bsltf::AllocBitwiseMoveableTestType ABMTT; typedef bsltf::NonTypicalOverloadsTestType NTOTT; TestDriver< MovableVector< int> >::testCase18(true ); TestDriver<NonMovableVector< int> >::testCase18(false); TestDriver< MovableVector< SC> >::testCase18(true ); TestDriver<NonMovableVector< SC> >::testCase18(false); TestDriver< MovableVector< SZ> >::testCase18(true ); TestDriver<NonMovableVector< SZ> >::testCase18(false); TestDriver< MovableVector<TTF_OP> >::testCase18(true ); TestDriver<NonMovableVector<TTF_OP> >::testCase18(false); // TestDriver< MovableVector<TTF_MP> >::testCase18(true ); // TestDriver<NonMovableVector<TTF_MP> >::testCase18(false); TestDriver< MovableVector< ETT> >::testCase18(true ); TestDriver<NonMovableVector< ETT> >::testCase18(false); TestDriver< MovableVector< STT> >::testCase18(true ); TestDriver<NonMovableVector< STT> >::testCase18(false); TestDriver< MovableVector< ATT> >::testCase18(true ); TestDriver<NonMovableVector< ATT> >::testCase18(false); TestDriver< MovableVector< BMTT> >::testCase18(true ); TestDriver<NonMovableVector< BMTT> >::testCase18(false); TestDriver< MovableVector< ABMTT> >::testCase18(true ); TestDriver<NonMovableVector< ABMTT> >::testCase18(false); TestDriver< MovableVector< NTOTT> >::testCase18(true ); TestDriver<NonMovableVector< NTOTT> >::testCase18(false); #ifndef BSLS_PLATFORM_OS_WINDOWS typedef bsltf::TemplateTestFacility::ObjectPtr TTF_FP; TestDriver< MovableVector<TTF_FP> >::testCase18(true ); TestDriver<NonMovableVector<TTF_FP> >::testCase18(false); #endif #if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES if (verbose) printf("\n" "Move Only Type" "\n" "--------------" "\n"); // typedef bsltf::MoveOnlyAllocTestType MOATT; // TestDriver<MOATT, MovableVector<MOATT> >::testCase18MoveOnlyType(); #endif // !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES // 'propagate_on_container_move_assignment' testing // TBD enable this #if 0 RUN_EACH_TYPE(TestDriver, testCase18_propagate_on_container_move_assignment, TEST_TYPES_REGULAR(deque)); RUN_EACH_TYPE(TestDriver, 
testCase18_propagate_on_container_move_assignment, TEST_TYPES_MOVABLE(deque)); #endif } break; case 17: { // -------------------------------------------------------------------- // MOVE CONSTRUCTORS // -------------------------------------------------------------------- if (verbose) printf("\n" "MOVE CONSTRUCTORS" "\n" "-----------------" "\n"); // TestDriver< MovableVector<int> >::testCase17(true); // TestDriver<NonMovableVector<int> >::testCase17(false); typedef signed char SC; typedef size_t SZ; typedef bsltf::TemplateTestFacility::ObjectPtr TTF_OP; typedef bsltf::TemplateTestFacility::MethodPtr TTF_MP; typedef bsltf::EnumeratedTestType::Enum ETT; typedef bsltf::SimpleTestType STT; typedef bsltf::AllocTestType ATT; typedef bsltf::BitwiseMoveableTestType BMTT; typedef bsltf::AllocBitwiseMoveableTestType ABMTT; typedef bsltf::NonTypicalOverloadsTestType NTOTT; TestDriver< MovableVector< int> >::testCase17(true ); TestDriver<NonMovableVector< int> >::testCase17(false); TestDriver< MovableVector< SC> >::testCase17(true ); TestDriver<NonMovableVector< SC> >::testCase17(false); TestDriver< MovableVector< SZ> >::testCase17(true ); TestDriver<NonMovableVector< SZ> >::testCase17(false); TestDriver< MovableVector<TTF_OP> >::testCase17(true ); TestDriver<NonMovableVector<TTF_OP> >::testCase17(false); TestDriver< MovableVector<TTF_MP> >::testCase17(true ); TestDriver<NonMovableVector<TTF_MP> >::testCase17(false); TestDriver< MovableVector< ETT> >::testCase17(true ); TestDriver<NonMovableVector< ETT> >::testCase17(false); TestDriver< MovableVector< STT> >::testCase17(true ); TestDriver<NonMovableVector< STT> >::testCase17(false); TestDriver< MovableVector< ATT> >::testCase17(true ); TestDriver<NonMovableVector< ATT> >::testCase17(false); TestDriver< MovableVector< BMTT> >::testCase17(true ); TestDriver<NonMovableVector< BMTT> >::testCase17(false); TestDriver< MovableVector< ABMTT> >::testCase17(true ); TestDriver<NonMovableVector< ABMTT> >::testCase17(false); TestDriver< MovableVector< NTOTT> >::testCase17(true ); TestDriver<NonMovableVector< NTOTT> >::testCase17(false); #ifndef BSLS_PLATFORM_OS_WINDOWS typedef bsltf::TemplateTestFacility::ObjectPtr TTF_FP; TestDriver< MovableVector<TTF_FP> >::testCase17(true ); TestDriver<NonMovableVector<TTF_FP> >::testCase17(false); #endif #if !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES if (verbose) printf("\n" "Move Only Type" "\n" "--------------" "\n"); // typedef bsltf::MoveOnlyAllocTestType MOATT; // TestDriver< MovableVector<MOATT> >::testCase17MoveOnlyType(); #endif // !BSLS_COMPILERFEATURES_SIMULATE_CPP11_FEATURES } break; case 16: { // -------------------------------------------------------------------- // USAGE EXAMPLE // // Concern: // Demonstrate the use of the stack. // // Plan: // Create the class 'ToDoList', which implements a list of chores to // be done, using the 'stack' container adapter, and demonstrate the // use of 'ToDoList'. // -------------------------------------------------------------------- // Then, create an object of type 'ToDoList'. ToDoList toDoList; // Next, a few tasks are requested: toDoList.enqueueTask("Change the car's oil."); toDoList.enqueueTask("Pay the bills."); // Then, the husband watches the Yankee's game on TV. Upon returning // to the list he consults the list to see what task is up next: ASSERT(!strcmp("Pay the bills.", toDoList.currentTask())); // Next, he sees that he has to pay the bills. 
When the bills are // finished, he flushes that task from the list: ASSERT(false == toDoList.finishTask()); // Then, he consults the list for the next task. ASSERT(!strcmp("Change the car's oil.", toDoList.currentTask())); // Next, he sees he has to change the car's oil. Before he can get // started, another request comes: toDoList.enqueueTask("Get some hot dogs."); ASSERT(!strcmp("Get some hot dogs.", toDoList.currentTask())); // Then, he drives the car to the convenience store and picks up some // hot dogs and buns. Upon returning home, he gives the hot dogs to // his wife, updates the list, and consults it for the next task. ASSERT(false == toDoList.finishTask()); ASSERT(!strcmp("Change the car's oil.", toDoList.currentTask())); // Next, he finishes the oil change, updates the list, and consults it // for the next task. ASSERT(true == toDoList.finishTask()); ASSERT(!strcmp("<EMPTY>", toDoList.currentTask())); // Finally, the wife has been informed that everything is done, and she // makes another request: toDoList.enqueueTask("Clean the rain gutters."); } break; case 15: { // -------------------------------------------------------------------- // TESTING EMPTY, SIZE // // Concern: // That the 'empty()' and 'size()' accessors work according to their // specifications. // // Plan: // Manipulate a 'stack' object, and observe that 'empty()' and // 'size()' return the expected values. // -------------------------------------------------------------------- stack<int> mX; const stack<int>& X = mX; ASSERT(mX.empty()); ASSERT(X.empty()); ASSERT(0 == mX.size()); ASSERT(0 == X.size()); for (int i = 7; i < 22; ++i) { mX.push(i); ASSERT(! mX.empty()); ASSERT(! X.empty()); ASSERT(i - 6 == (int) mX.size()); ASSERT(i - 6 == (int) X.size()); ASSERT(i == X.top()); mX.top() = static_cast<int>(X.size()); // 'top()' returns a ref to // modifiable ASSERT((int) X.size() == X.top()); } for (size_t i = X.size(); i > 0; --i, mX.pop()) { ASSERT(! mX.empty()); ASSERT(! X.empty()); ASSERT(i == X.size()); ASSERT(X.top() == static_cast<int>(i)); } ASSERT(mX.empty()); ASSERT(X.empty()); ASSERT(0 == mX.size()); ASSERT(0 == X.size()); } break; case 14: { // -------------------------------------------------------------------- // TESTING NON ALLOCATOR SUPPORTING TYPE // -------------------------------------------------------------------- typedef stack<int, NonAllocCont<int> > IStack; IStack mX; const IStack& X = mX; ASSERT(X.empty()); mX.push(3); mX.push(4); mX.push(5); ASSERT(! X.empty()); ASSERT(3 == X.size()); ASSERT(5 == X.top()); IStack mY(X); const IStack& Y = mY; ASSERT(X == Y); ASSERT(!(X != Y)); ASSERT(X <= Y); ASSERT(!(X > Y)); ASSERT(X >= Y); ASSERT(!(X < Y)); mY.pop(); mY.push(6); ASSERT(X != Y); ASSERT(!(X == Y)); ASSERT(X < Y); ASSERT(!(X >= Y)); ASSERT(X <= Y); ASSERT(!(X > Y)); } break; case 13: { // -------------------------------------------------------------------- // TESTING CONTAINER OVERRIDE // -------------------------------------------------------------------- // Verify that a stack with no container specified is the same as one // we 'deque' specified. typedef stack<int> IStack; typedef stack<int, deque<int> > IDStack; BSLMF_ASSERT((bsl::is_same<IStack, IDStack>::value)); // Verify that if a container is specified, the first template // argument is ignored. 
typedef stack<void, vector<int> > VIVStack; typedef stack<double, vector<int> > DIVStack; BSLMF_ASSERT((bsl::is_same<VIVStack::value_type, int>::value)); BSLMF_ASSERT((bsl::is_same<DIVStack::value_type, int>::value)); VIVStack vivs; const VIVStack& VIVS = vivs; vivs.push(4); ASSERT(4 == VIVS.top()); vivs.push(7); ASSERT(7 == VIVS.top()); ASSERT(2 == VIVS.size()); ASSERT(!VIVS.empty()); vivs.pop(); ASSERT(4 == VIVS.top()); } break; case 12: { // -------------------------------------------------------------------- // TESTING INEQUALITY OPERATORS // -------------------------------------------------------------------- if (verbose) printf("\nTesting Inequality Operators\n" "\n============================\n"); if (verbose) printf("deque ---------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase12, TEST_TYPES_INEQUAL_COMPARABLE(deque)); if (verbose) printf("vector --------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase12, TEST_TYPES_INEQUAL_COMPARABLE(vector)); } break; case 11: { // -------------------------------------------------------------------- // TESTING TYPE TRAITS // -------------------------------------------------------------------- if (verbose) printf("\nTesting Type Traits\n" "\n===================\n"); // Verify the bslma-allocator trait is not defined for non // bslma-allocators. typedef bsltf::StdTestAllocator<bsltf::AllocTestType> StlAlloc; typedef bsltf::AllocTestType ATT; typedef deque< ATT, StlAlloc> WeirdAllocDeque; typedef vector<ATT, StlAlloc> WeirdAllocVector; typedef bsl::stack<ATT, WeirdAllocDeque > WeirdAllocDequeStack; typedef bsl::stack<ATT, WeirdAllocVector> WeirdAllocVectorStack; typedef bsl::stack<int, NonAllocCont<int> > NonAllocStack; if (verbose) printf("NonAllocCont --------------------------------\n"); BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator< NonAllocCont<int> >::value)); BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator< NonAllocStack>::value)); TestDriver<NonAllocCont<int> >::testCase11(); if (verbose) printf("deque ---------------------------------------\n"); BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator< WeirdAllocDeque>::value)); BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator< WeirdAllocDequeStack>::value)); RUN_EACH_TYPE(TestDriver, testCase11, TEST_TYPES_REGULAR(deque)); if (verbose) printf("vector --------------------------------------\n"); BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator< WeirdAllocVector>::value)); BSLMF_ASSERT((0 == bslma::UsesBslmaAllocator< WeirdAllocVectorStack>::value)); RUN_EACH_TYPE(TestDriver, testCase11, TEST_TYPES_REGULAR(vector)); } break; case 10: { // -------------------------------------------------------------------- // TESTING STL ALLOCATOR // -------------------------------------------------------------------- if (verbose) printf("\nTesting STL ALLOCTOR\n" "\n====================\n"); if (verbose) printf("deque ---------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase10,TEST_TYPES_REGULAR(deque)); if (verbose) printf("vector --------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase10,TEST_TYPES_REGULAR(vector)); } break; case 9: { // -------------------------------------------------------------------- // ASSIGNMENT OPERATOR // -------------------------------------------------------------------- if (verbose) printf("\nTesting Assignment Operator" "\n===========================\n"); if (verbose) printf("deque ---------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase9, TEST_TYPES_REGULAR(deque)); if (verbose) 
printf("vector --------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase9, TEST_TYPES_REGULAR(vector)); // 'propagate_on_container_copy_assignment' testing // TBD enable this #if 0 RUN_EACH_TYPE(TestDriver, testCase9_propagate_on_container_copy_assignment, TEST_TYPES_REGULAR(deque)); RUN_EACH_TYPE(TestDriver, testCase9_propagate_on_container_copy_assignment, TEST_TYPES_MOVABLE(deque)); #endif } break; case 8: { // -------------------------------------------------------------------- // MANIPULATOR AND FREE FUNCTION 'swap' // -------------------------------------------------------------------- if (verbose) printf("\nMANIPULATOR AND FREE FUNCTION 'swap'" "\n====================================\n"); if (verbose) printf("deque ---------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase8, TEST_TYPES_REGULAR(deque)); if (verbose) printf("vector --------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase8, TEST_TYPES_REGULAR(vector)); // 'propagate_on_container_swap' testing // TBD enable this #if 0 RUN_EACH_TYPE(TestDriver, testCase8_propagate_on_container_swap, TEST_TYPES_REGULAR(deque)); RUN_EACH_TYPE(TestDriver, testCase8_propagate_on_container_swap, TEST_TYPES_MOVABLE(deque)); #endif } break; case 7: { // -------------------------------------------------------------------- // COPY CONSTRUCTOR // -------------------------------------------------------------------- if (verbose) printf("\nTesting Copy Constructors" "\n=========================\n"); if (verbose) printf("deque ---------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase7, TEST_TYPES_REGULAR(deque)); if (verbose) printf("vector --------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase7, TEST_TYPES_REGULAR(vector)); // 'select_on_container_copy_construction' testing if (verbose) printf("\nCOPY CONSTRUCTOR: ALLOCATOR PROPAGATION" "\n=======================================\n"); // TBD enable this #if 0 RUN_EACH_TYPE(TestDriver, testCase7_select_on_container_copy_construction, TEST_TYPES_REGULAR(deque)); RUN_EACH_TYPE(TestDriver, testCase7_select_on_container_copy_construction, TEST_TYPES_MOVABLE(deque)); #endif } break; case 6: { // -------------------------------------------------------------------- // EQUALITY OPERATORS // -------------------------------------------------------------------- if (verbose) printf("\nTesting Equality Operators" "\n==========================\n"); if (verbose) printf("deque ---------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase6, TEST_TYPES_REGULAR(deque)); if (verbose) printf("vector --------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase6, TEST_TYPES_REGULAR(vector)); } break; case 5: { // -------------------------------------------------------------------- // TESTING OUTPUT (<<) OPERATOR // -------------------------------------------------------------------- if (verbose) printf("\nTesting Output (<<) Operator" "\n============================\n"); if (verbose) printf("There is no output operator for this component.\n"); } break; case 4: { // -------------------------------------------------------------------- // BASIC ACCESSORS // -------------------------------------------------------------------- if (verbose) printf("\nTesting Basic Accessors" "\n=======================\n"); if (verbose) printf("deque ---------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase4, TEST_TYPES_REGULAR(deque)); if (verbose) printf("vector 
--------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase4, TEST_TYPES_REGULAR(vector)); } break; case 3: { // -------------------------------------------------------------------- // GENERATOR FUNCTIONS 'gg' and 'ggg' // -------------------------------------------------------------------- if (verbose) printf("\nTesting 'gg'" "\n============\n"); if (verbose) printf("deque ---------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase3, TEST_TYPES_REGULAR(deque)); if (verbose) printf("vector --------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase3, TEST_TYPES_REGULAR(vector)); } break; case 2: { // -------------------------------------------------------------------- // PRIMARY MANIPULATORS // -------------------------------------------------------------------- if (verbose) printf("\nTesting C'tors and Primary Manipulators\n" "=======================================\n"); if (verbose) printf("deque ---------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase2, TEST_TYPES_REGULAR(deque)); if (verbose) printf("vector --------------------------------------\n"); RUN_EACH_TYPE(TestDriver, testCase2, TEST_TYPES_REGULAR(vector)); if (verbose) printf("NonAllocCont --------------------------------\n"); // RUN_EACH_TYPE(TestDriver, testCase2, TEST_TYPES_REGULAR(NonAllocCont)); } break; case 1: { // -------------------------------------------------------------------- // BREATHING TEST // This case exercises (but does not fully test) basic functionality. // // Concerns: //: 1 The class is sufficiently functional to enable comprehensive //: testing in subsequent test cases. // // Plan: //: 1 Run each method with arbitrary inputs and verify the behavior is //: as expected. // // Testing: // BREATHING TEST // -------------------------------------------------------------------- if (verbose) printf("\nBREATHING TEST" "\n==============\n"); { int INT_VALUES[] = { INT_MIN, -2, -1, 0, 1, 2, INT_MAX }; int NUM_INT_VALUES = sizeof(INT_VALUES) / sizeof(*INT_VALUES); if (verbose) printf("deque:\n"); TestDriver<bsl::deque<int> >::testCase1(INT_VALUES, NUM_INT_VALUES); if (verbose) printf("vector:\n"); TestDriver<bsl::vector<int> >::testCase1(INT_VALUES, NUM_INT_VALUES); if (verbose) printf("deque<double>:\n"); TestDriver<bsl::deque<double> >::testCase1(INT_VALUES, NUM_INT_VALUES); if (verbose) printf("NonAllocCont<int>:\n"); TestDriver<NonAllocCont<int> >::testCase1_NoAlloc(INT_VALUES, NUM_INT_VALUES); #if 0 // add once 'list' is in bslstl, add it if (verbose) printf("list:\n"); TestDriver<bsl::list<int> >::testCase1(INT_VALUES, NUM_INT_VALUES); #endif } } break; default: { fprintf(stderr, "WARNING: CASE `%d' NOT FOUND.\n", test); testStatus = -1; } } // CONCERN: In no case does memory come from the global allocator. ASSERTV(globalAllocator.numBlocksTotal(), 0 == globalAllocator.numBlocksTotal()); if (testStatus > 0) { fprintf(stderr, "Error, non-zero test status = %d.\n", testStatus); } return testStatus; } // ---------------------------------------------------------------------------- // Copyright 2013 Bloomberg Finance L.P. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ----------------------------- END-OF-FILE ----------------------------------
265,388
82,950
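// The bslstl_stack test driver above verifies the primary manipulators
// ('push', 'pop'), the basic accessors ('top', 'size', 'empty'), and the free
// equality operators of 'bsl::stack'.  The following is a minimal,
// self-contained sketch of that same contract using 'std::stack' as a
// stand-in (an assumption for illustration only -- 'bsl::stack' mirrors the
// standard adapter's interface, but this snippet is not part of the test
// driver and omits the allocator and exception-safety concerns it covers).

#include <cassert>
#include <stack>

void illustrateStackContract()
{
    std::stack<int> x, y;

    // An empty stack has size 0 and compares equal to another empty stack.
    assert(x.empty() && 0 == x.size());
    assert(x == y);

    // 'push' grows the stack; 'top' returns the most recently pushed value.
    x.push(1);
    x.push(2);
    assert(2 == x.size());
    assert(2 == x.top());
    assert(x != y);        // differing contents compare unequal

    // 'pop' recovers values in reverse (LIFO) order.
    x.pop();
    assert(1 == x.top());
    x.pop();
    assert(x.empty());
    assert(x == y);        // both empty again
}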
// -*- c++ -*- // // michael a.g. aïvázis <michael.aivazis@para-sim.com> // (c) 1998-2021 all rights reserved // support #include <cassert> // get the grid #include <pyre/grid.h> // type alias using product_t = pyre::grid::product_t<4>; // exercise iterating over products int main(int argc, char * argv[]) { // initialize the journal pyre::journal::init(argc, argv); pyre::journal::application("product_iteration"); // make a channel pyre::journal::debug_t channel("pyre.grid.product"); // make one product_t p {0, 1, 2, 3}; // show me channel << "product before: " << p << pyre::journal::endl(__HERE__); // fill for (auto & f : p) { // with a specific value f = 42; } // show me channel << "product after: " << p << pyre::journal::endl(__HERE__); // check for (const auto f : p) { // that we get what we expect assert(( f == 42 )); } // all done return 0; } // end of file
1,025
378
/*=========================================================================

   Library: iMSTK

   Copyright (c) Kitware, Inc. & Center for Modeling, Simulation,
   & Imaging in Medicine, Rensselaer Polytechnic Institute.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0.txt

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

=========================================================================*/

#include "imstkQuadricDecimate.h"
#include "imstkSurfaceMesh.h"
#include "imstkLogger.h"
#include "imstkGeometryUtilities.h"

#include <vtkQuadricDecimation.h>

namespace imstk
{
QuadricDecimate::QuadricDecimate() :
    m_VolumePreserving(true),
    m_TargetReduction(0.6)
{
    setRequiredInputType<SurfaceMesh>(0);
    setNumInputPorts(1);
    setNumOutputPorts(1);
    setOutput(std::make_shared<SurfaceMesh>());
}

void
QuadricDecimate::setInputMesh(std::shared_ptr<SurfaceMesh> inputMesh)
{
    setInput(inputMesh, 0);
}

void
QuadricDecimate::requestUpdate()
{
    std::shared_ptr<SurfaceMesh> inputMesh = std::dynamic_pointer_cast<SurfaceMesh>(getInput(0));
    if (inputMesh == nullptr)
    {
        LOG(WARNING) << "No inputMesh to decimate";
        return;
    }

    vtkSmartPointer<vtkPolyData> inputMeshVtk =
        GeometryUtils::copyToVtkPolyData(std::dynamic_pointer_cast<SurfaceMesh>(inputMesh));

    vtkNew<vtkQuadricDecimation> filter;
    filter->SetInputData(inputMeshVtk);
    filter->SetVolumePreservation(m_VolumePreserving);
    filter->SetTargetReduction(m_TargetReduction);
    filter->Update();

    setOutput(GeometryUtils::copyToSurfaceMesh(filter->GetOutput()));
}
} // namespace imstk
2,060
670
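// 'QuadricDecimate' above is a thin wrapper around VTK's
// 'vtkQuadricDecimation'.  The sketch below shows the underlying VTK pipeline
// that 'requestUpdate' drives, applied to a sphere source chosen purely for
// illustration (the sphere input and the 0.6 reduction target are
// assumptions for this example; 0.6 happens to match the filter's default).

#include <vtkNew.h>
#include <vtkPolyData.h>
#include <vtkQuadricDecimation.h>
#include <vtkSphereSource.h>

int decimateSphereExample()
{
    // Build an arbitrary triangle mesh to decimate.
    vtkNew<vtkSphereSource> sphere;
    sphere->SetThetaResolution(64);
    sphere->SetPhiResolution(64);
    sphere->Update();

    // Mirror QuadricDecimate::requestUpdate: volume-preserving decimation
    // with a target reduction of 60% of the input triangles.
    vtkNew<vtkQuadricDecimation> decimate;
    decimate->SetInputData(sphere->GetOutput());
    decimate->SetVolumePreservation(true);
    decimate->SetTargetReduction(0.6);
    decimate->Update();

    // The output mesh should contain roughly 40% of the original cells.
    vtkPolyData* reduced = decimate->GetOutput();
    return static_cast<int>(reduced->GetNumberOfCells());
}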
#include "common/http/codec_client.h" #include <cstdint> #include <memory> #include "envoy/http/codec.h" #include "common/common/enum_to_int.h" #include "common/config/utility.h" #include "common/http/exception.h" #include "common/http/http1/codec_impl.h" #include "common/http/http2/codec_impl.h" #include "common/http/status.h" #include "common/http/utility.h" #ifdef ENVOY_ENABLE_QUIC #include "common/quic/codec_impl.h" #endif namespace Envoy { namespace Http { CodecClient::CodecClient(Type type, Network::ClientConnectionPtr&& connection, Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher) : type_(type), host_(host), connection_(std::move(connection)), idle_timeout_(host_->cluster().idleTimeout()) { if (type_ != Type::HTTP3) { // Make sure upstream connections process data and then the FIN, rather than processing // TCP disconnects immediately. (see https://github.com/envoyproxy/envoy/issues/1679 for // details) connection_->detectEarlyCloseWhenReadDisabled(false); } connection_->addConnectionCallbacks(*this); connection_->addReadFilter(Network::ReadFilterSharedPtr{new CodecReadFilter(*this)}); if (idle_timeout_) { idle_timer_ = dispatcher.createTimer([this]() -> void { onIdleTimeout(); }); enableIdleTimer(); } // We just universally set no delay on connections. Theoretically we might at some point want // to make this configurable. connection_->noDelay(true); } CodecClient::~CodecClient() { ASSERT(connect_called_, "CodecClient::connect() is not called through out the life time."); } void CodecClient::connect() { connect_called_ = true; ASSERT(codec_ != nullptr); // In general, codecs are handed new not-yet-connected connections, but in the // case of ALPN, the codec may be handed an already connected connection. if (!connection_->connecting()) { ASSERT(connection_->state() == Network::Connection::State::Open); connected_ = true; } else { ENVOY_CONN_LOG(debug, "connecting", *connection_); connection_->connect(); } } void CodecClient::close() { connection_->close(Network::ConnectionCloseType::NoFlush); } void CodecClient::deleteRequest(ActiveRequest& request) { connection_->dispatcher().deferredDelete(request.removeFromList(active_requests_)); if (codec_client_callbacks_) { codec_client_callbacks_->onStreamDestroy(); } if (numActiveRequests() == 0) { enableIdleTimer(); } } RequestEncoder& CodecClient::newStream(ResponseDecoder& response_decoder) { ActiveRequestPtr request(new ActiveRequest(*this, response_decoder)); request->encoder_ = &codec_->newStream(*request); request->encoder_->getStream().addCallbacks(*request); LinkedList::moveIntoList(std::move(request), active_requests_); disableIdleTimer(); return *active_requests_.front()->encoder_; } void CodecClient::onEvent(Network::ConnectionEvent event) { if (event == Network::ConnectionEvent::Connected) { ENVOY_CONN_LOG(debug, "connected", *connection_); connection_->streamInfo().setDownstreamSslConnection(connection_->ssl()); connected_ = true; } if (event == Network::ConnectionEvent::RemoteClose) { remote_closed_ = true; } // HTTP/1 can signal end of response by disconnecting. We need to handle that case. if (type_ == Type::HTTP1 && event == Network::ConnectionEvent::RemoteClose && !active_requests_.empty()) { Buffer::OwnedImpl empty; onData(empty); } if (event == Network::ConnectionEvent::RemoteClose || event == Network::ConnectionEvent::LocalClose) { ENVOY_CONN_LOG(debug, "disconnect. 
resetting {} pending requests", *connection_, active_requests_.size()); disableIdleTimer(); idle_timer_.reset(); StreamResetReason reason = StreamResetReason::ConnectionFailure; if (connected_) { reason = StreamResetReason::ConnectionTermination; if (protocol_error_) { if (Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.return_502_for_upstream_protocol_errors")) { reason = StreamResetReason::ProtocolError; connection_->streamInfo().setResponseFlag( StreamInfo::ResponseFlag::UpstreamProtocolError); } } } while (!active_requests_.empty()) { // Fake resetting all active streams so that reset() callbacks get invoked. active_requests_.front()->encoder_->getStream().resetStream(reason); } } } void CodecClient::responsePreDecodeComplete(ActiveRequest& request) { ENVOY_CONN_LOG(debug, "response complete", *connection_); if (codec_client_callbacks_) { codec_client_callbacks_->onStreamPreDecodeComplete(); } deleteRequest(request); // HTTP/2 can send us a reset after a complete response if the request was not complete. Users // of CodecClient will deal with the premature response case and we should not handle any // further reset notification. request.encoder_->getStream().removeCallbacks(request); } void CodecClient::onReset(ActiveRequest& request, StreamResetReason reason) { ENVOY_CONN_LOG(debug, "request reset", *connection_); if (codec_client_callbacks_) { codec_client_callbacks_->onStreamReset(reason); } deleteRequest(request); } void CodecClient::onData(Buffer::Instance& data) { const Status status = codec_->dispatch(data); if (!status.ok()) { ENVOY_CONN_LOG(debug, "Error dispatching received data: {}", *connection_, status.message()); // Don't count 408 responses where we have no active requests as protocol errors if (!isPrematureResponseError(status) || (!active_requests_.empty() || getPrematureResponseHttpCode(status) != Code::RequestTimeout)) { host_->cluster().stats().upstream_cx_protocol_error_.inc(); protocol_error_ = true; } close(); } // All data should be consumed at this point if the connection remains open. ASSERT(data.length() == 0 || connection_->state() != Network::Connection::State::Open, absl::StrCat("extraneous bytes after response complete: ", data.length())); } CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& connection, Upstream::HostDescriptionConstSharedPtr host, Event::Dispatcher& dispatcher, Random::RandomGenerator& random_generator) : CodecClient(type, std::move(connection), host, dispatcher) { switch (type) { case Type::HTTP1: { codec_ = std::make_unique<Http1::ClientConnectionImpl>( *connection_, host->cluster().http1CodecStats(), *this, host->cluster().http1Settings(), host->cluster().maxResponseHeadersCount()); break; } case Type::HTTP2: { codec_ = std::make_unique<Http2::ClientConnectionImpl>( *connection_, *this, host->cluster().http2CodecStats(), random_generator, host->cluster().http2Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount(), Http2::ProdNghttp2SessionFactory::get()); break; } case Type::HTTP3: { #ifdef ENVOY_ENABLE_QUIC auto& quic_session = dynamic_cast<Quic::EnvoyQuicClientSession&>(*connection_); codec_ = std::make_unique<Quic::QuicHttpClientConnectionImpl>( quic_session, *this, host->cluster().http3CodecStats(), host->cluster().http3Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, host->cluster().maxResponseHeadersCount()); // Initialize the session after max request header size is changed in above http client // connection creation. 
quic_session.Initialize(); break; #else // Should be blocked by configuration checking at an earlier point. NOT_REACHED_GCOVR_EXCL_LINE; #endif } } connect(); } } // namespace Http } // namespace Envoy
7,859
2,340
/* BugEngine <bugengine.devel@gmail.com> see LICENSE for detail */ #ifndef BE_3D_SHADER_SHADER_SCRIPT_HH_ #define BE_3D_SHADER_SHADER_SCRIPT_HH_ /**************************************************************************************************/ #include <bugengine/plugin.graphics.3d/stdafx.h> #include <bugengine/resource/description.script.hh> namespace BugEngine { namespace Shaders { class Node; class IShaderBuilder; class Output; enum Stage { VertexStage, GeometryStage, TesselationControlStage, TessalationEvaluationStage, FragmentStage }; } // namespace Shaders class be_api(3D) ShaderProgramDescription : public Resource::Description { BE_NOCOPY(ShaderProgramDescription); private: minitl::vector< ref< Shaders::Output > > m_outputs; protected: ShaderProgramDescription(minitl::vector< ref< Shaders::Output > > outputs); ~ShaderProgramDescription(); public: virtual void buildSource(Shaders::IShaderBuilder & builder) const; }; } // namespace BugEngine /**************************************************************************************************/ #endif
1,173
361
/*! @file Defines `boost::hana::tuple`. @copyright Louis Dionne 2013-2017 @copyright Jason Rice 2017 Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt) */ #ifndef BOOST_HANA_TUPLE_HPP #define BOOST_HANA_TUPLE_HPP #include <boost/hana/fwd/tuple.hpp> #include <boost/hana/basic_tuple.hpp> #include <boost/hana/bool.hpp> #include <boost/hana/config.hpp> #include <boost/hana/detail/decay.hpp> #include <boost/hana/detail/fast_and.hpp> #include <boost/hana/detail/index_if.hpp> #include <boost/hana/detail/intrinsics.hpp> #include <boost/hana/detail/operators/adl.hpp> #include <boost/hana/detail/operators/comparable.hpp> #include <boost/hana/detail/operators/iterable.hpp> #include <boost/hana/detail/operators/monad.hpp> #include <boost/hana/detail/operators/orderable.hpp> #include <boost/hana/fwd/at.hpp> #include <boost/hana/fwd/core/make.hpp> #include <boost/hana/fwd/drop_front.hpp> #include <boost/hana/fwd/index_if.hpp> #include <boost/hana/fwd/is_empty.hpp> #include <boost/hana/fwd/length.hpp> #include <boost/hana/fwd/optional.hpp> #include <boost/hana/fwd/unpack.hpp> #include <boost/hana/type.hpp> // required by fwd decl of tuple_t #include <cstddef> #include <type_traits> #include <utility> BOOST_HANA_NAMESPACE_BEGIN namespace detail { template <typename Xs, typename Ys, std::size_t ...n> constexpr void assign(Xs& xs, Ys&& ys, std::index_sequence<n...>) { int sequence[] = {int{}, ((void)( hana::at_c<n>(xs) = hana::at_c<n>(static_cast<Ys&&>(ys)) ), int{})...}; (void)sequence; } struct from_index_sequence_t { }; template <typename Tuple, typename ...Yn> struct is_same_tuple : std::false_type { }; template <typename Tuple> struct is_same_tuple<typename detail::decay<Tuple>::type, Tuple> : std::true_type { }; template <bool SameTuple, bool SameNumberOfElements, typename Tuple, typename ...Yn> struct enable_tuple_variadic_ctor; template <typename ...Xn, typename ...Yn> struct enable_tuple_variadic_ctor<false, true, hana::tuple<Xn...>, Yn...> : std::enable_if< detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Yn&&)...>::value > { }; } ////////////////////////////////////////////////////////////////////////// // tuple ////////////////////////////////////////////////////////////////////////// template <> struct tuple<> : detail::operators::adl<tuple<>> , detail::iterable_operators<tuple<>> { constexpr tuple() { } using hana_tag = tuple_tag; }; template <typename ...Xn> struct tuple : detail::operators::adl<tuple<Xn...>> , detail::iterable_operators<tuple<Xn...>> { basic_tuple<Xn...> storage_; using hana_tag = tuple_tag; private: template <typename Other, std::size_t ...n> explicit constexpr tuple(detail::from_index_sequence_t, std::index_sequence<n...>, Other&& other) : storage_(hana::at_c<n>(static_cast<Other&&>(other))...) { } public: template <typename ...dummy, typename = typename std::enable_if< detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, dummy...)...>::value >::type> constexpr tuple() : storage_() { } template <typename ...dummy, typename = typename std::enable_if< detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Xn const&, dummy...)...>::value >::type> constexpr tuple(Xn const& ...xn) : storage_(xn...) { } template <typename ...Yn, typename = typename detail::enable_tuple_variadic_ctor< detail::is_same_tuple<tuple, Yn...>::value, sizeof...(Xn) == sizeof...(Yn), tuple, Yn... >::type> constexpr tuple(Yn&& ...yn) : storage_(static_cast<Yn&&>(yn)...) 
{ } template <typename ...Yn, typename = typename std::enable_if< detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Yn const&)...>::value >::type> constexpr tuple(tuple<Yn...> const& other) : tuple(detail::from_index_sequence_t{}, std::make_index_sequence<sizeof...(Xn)>{}, other.storage_) { } template <typename ...Yn, typename = typename std::enable_if< detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Yn&&)...>::value >::type> constexpr tuple(tuple<Yn...>&& other) : tuple(detail::from_index_sequence_t{}, std::make_index_sequence<sizeof...(Xn)>{}, static_cast<tuple<Yn...>&&>(other).storage_) { } // The three following constructors are required to make sure that // the tuple(Yn&&...) constructor is _not_ preferred over the copy // constructor for unary tuples containing a type that is constructible // from tuple<...>. See test/tuple/cnstr.trap.cpp template <typename ...dummy, typename = typename std::enable_if< detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Xn const&, dummy...)...>::value >::type> constexpr tuple(tuple const& other) : tuple(detail::from_index_sequence_t{}, std::make_index_sequence<sizeof...(Xn)>{}, other.storage_) { } template <typename ...dummy, typename = typename std::enable_if< detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Xn const&, dummy...)...>::value >::type> constexpr tuple(tuple& other) : tuple(const_cast<tuple const&>(other)) { } template <typename ...dummy, typename = typename std::enable_if< detail::fast_and<BOOST_HANA_TT_IS_CONSTRUCTIBLE(Xn, Xn&&, dummy...)...>::value >::type> constexpr tuple(tuple&& other) : tuple(detail::from_index_sequence_t{}, std::make_index_sequence<sizeof...(Xn)>{}, static_cast<tuple&&>(other).storage_) { } template <typename ...Yn, typename = typename std::enable_if< detail::fast_and<BOOST_HANA_TT_IS_ASSIGNABLE(Xn&, Yn const&)...>::value >::type> constexpr tuple& operator=(tuple<Yn...> const& other) { detail::assign(this->storage_, other.storage_, std::make_index_sequence<sizeof...(Xn)>{}); return *this; } template <typename ...Yn, typename = typename std::enable_if< detail::fast_and<BOOST_HANA_TT_IS_ASSIGNABLE(Xn&, Yn&&)...>::value >::type> constexpr tuple& operator=(tuple<Yn...>&& other) { detail::assign(this->storage_, static_cast<tuple<Yn...>&&>(other).storage_, std::make_index_sequence<sizeof...(Xn)>{}); return *this; } }; ////////////////////////////////////////////////////////////////////////// // Operators ////////////////////////////////////////////////////////////////////////// namespace detail { template <> struct comparable_operators<tuple_tag> { static constexpr bool value = true; }; template <> struct orderable_operators<tuple_tag> { static constexpr bool value = true; }; template <> struct monad_operators<tuple_tag> { static constexpr bool value = true; }; } ////////////////////////////////////////////////////////////////////////// // Foldable ////////////////////////////////////////////////////////////////////////// template <> struct unpack_impl<tuple_tag> { template <typename F> static constexpr decltype(auto) apply(tuple<>&&, F&& f) { return static_cast<F&&>(f)(); } template <typename F> static constexpr decltype(auto) apply(tuple<>&, F&& f) { return static_cast<F&&>(f)(); } template <typename F> static constexpr decltype(auto) apply(tuple<> const&, F&& f) { return static_cast<F&&>(f)(); } template <typename Xs, typename F> static constexpr decltype(auto) apply(Xs&& xs, F&& f) { return hana::unpack(static_cast<Xs&&>(xs).storage_, static_cast<F&&>(f)); } }; template <> struct length_impl<tuple_tag> { 
template <typename ...Xs> static constexpr auto apply(tuple<Xs...> const&) { return hana::size_c<sizeof...(Xs)>; } }; ////////////////////////////////////////////////////////////////////////// // Iterable ////////////////////////////////////////////////////////////////////////// template <> struct at_impl<tuple_tag> { template <typename Xs, typename N> static constexpr decltype(auto) apply(Xs&& xs, N const&) { constexpr std::size_t index = N::value; return hana::at_c<index>(static_cast<Xs&&>(xs).storage_); } }; template <> struct drop_front_impl<tuple_tag> { template <std::size_t N, typename Xs, std::size_t ...i> static constexpr auto helper(Xs&& xs, std::index_sequence<i...>) { return hana::make<tuple_tag>(hana::at_c<i+N>(static_cast<Xs&&>(xs))...); } template <typename Xs, typename N> static constexpr auto apply(Xs&& xs, N const&) { constexpr std::size_t len = decltype(hana::length(xs))::value; return helper<N::value>(static_cast<Xs&&>(xs), std::make_index_sequence< N::value < len ? len - N::value : 0 >{}); } }; template <> struct is_empty_impl<tuple_tag> { template <typename ...Xs> static constexpr auto apply(tuple<Xs...> const&) { return hana::bool_c<sizeof...(Xs) == 0>; } }; // compile-time optimizations (to reduce the # of function instantiations) template <std::size_t n, typename ...Xs> constexpr decltype(auto) at_c(tuple<Xs...> const& xs) { return hana::at_c<n>(xs.storage_); } template <std::size_t n, typename ...Xs> constexpr decltype(auto) at_c(tuple<Xs...>& xs) { return hana::at_c<n>(xs.storage_); } template <std::size_t n, typename ...Xs> constexpr decltype(auto) at_c(tuple<Xs...>&& xs) { return hana::at_c<n>(static_cast<tuple<Xs...>&&>(xs).storage_); } template <> struct index_if_impl<tuple_tag> { template <typename ...Xs, typename Pred> static constexpr auto apply(tuple<Xs...> const&, Pred const&) -> typename detail::index_if<Pred, Xs...>::type { return {}; } }; ////////////////////////////////////////////////////////////////////////// // Sequence ////////////////////////////////////////////////////////////////////////// template <> struct Sequence<tuple_tag> { static constexpr bool value = true; }; template <> struct make_impl<tuple_tag> { template <typename ...Xs> static constexpr tuple<typename detail::decay<Xs>::type...> apply(Xs&& ...xs) { return {static_cast<Xs&&>(xs)...}; } }; BOOST_HANA_NAMESPACE_END #endif // !BOOST_HANA_TUPLE_HPP
11,427
3,637
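The tuple header above only defines the container and its tag-dispatched operations; as a minimal, hedged usage sketch (not part of the library sources, values chosen for illustration), the operations it implements can be exercised like this:

#include <boost/hana/at.hpp>
#include <boost/hana/drop_front.hpp>
#include <boost/hana/integral_constant.hpp>
#include <boost/hana/length.hpp>
#include <boost/hana/tuple.hpp>

namespace hana = boost::hana;

int main() {
    // make_impl<tuple_tag> decays the arguments and stores them in basic_tuple.
    constexpr auto xs = hana::make_tuple(1, 2.5, 'c');
    // at_c uses the optimized overloads defined directly on tuple's storage_.
    static_assert(hana::at_c<0>(xs) == 1, "");
    // length_impl reports the pack size as an integral constant.
    static_assert(hana::length(xs) == hana::size_c<3>, "");
    // drop_front_impl rebuilds a shorter tuple from the remaining indices.
    constexpr auto tail = hana::drop_front(xs, hana::size_c<1>);
    static_assert(hana::length(tail) == hana::size_c<2>, "");
}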
#include "RE/BSShader/BSShaderMaterial/BSLightingShaderMaterialBase/BSLightingShaderMaterialLandscape.h" namespace RE { BSLightingShaderMaterialLandscape* BSLightingShaderMaterialLandscape::CreateMaterial() { auto material = malloc<BSLightingShaderMaterialLandscape>(); material->ctor(); return material; } BSLightingShaderMaterialLandscape* BSLightingShaderMaterialLandscape::ctor() { using func_t = decltype(&BSLightingShaderMaterialLandscape::ctor); REL::Relocation<func_t> func{ Offset::BSLightingShaderMaterialLandscape::Ctor }; return func(this); } }
578
204
// Copyright (c) 2020 John Pursey // // Use of this source code is governed by an MIT-style License that can be found // in the LICENSE file or at https://opensource.org/licenses/MIT. #include "gb/base/flags.h" #include "gtest/gtest.h" namespace gb { namespace { enum BasicEnum { kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two, kBasicEnum_Three, kBasicEnum_Big = 63, }; enum SizedEnum : int8_t { kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two, kSizedEnum_Three, kSizedEnum_Big = 63, }; enum class ClassEnum : int8_t { kZero, kOne, kTwo, kThree, kBig = 63, }; // Helpers to test parameter passing and implicit conversion. Flags<BasicEnum> BasicIdentity(Flags<BasicEnum> flags) { return flags; } Flags<SizedEnum> SizedIdentity(Flags<SizedEnum> flags) { return flags; } Flags<ClassEnum> ClassIdentity(Flags<ClassEnum> flags) { return flags; } // Static assert is used to ensure constexpr-ness. static_assert(Flags<BasicEnum>().IsEmpty(), "BasicEnum default flags not empty"); static_assert(Flags<BasicEnum>().GetMask() == 0, "BasicEnum default mask is not zero"); static_assert(Flags<BasicEnum>(1).GetMask() == 1, "BasicEnum 1 is not 1"); static_assert(Flags<BasicEnum>(kBasicEnum_Zero).GetMask() == 1, "BasicEnum Zero is not 1"); static_assert(Flags<BasicEnum>(kBasicEnum_Big).GetMask() == 1ULL << 63, "BasicEnum Big is not 1 << 63"); static_assert(Flags<BasicEnum>(kBasicEnum_Zero).IsSet(kBasicEnum_Zero), "BasicEnum Zero does not have Zero set"); static_assert(!Flags<BasicEnum>(kBasicEnum_Zero).IsSet(kBasicEnum_One), "BasicEnum Zero has One set"); static_assert(Flags<BasicEnum>({}).GetMask() == 0, "BasicEnum {} is not 0"); static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}).GetMask() == 3, "BasicEnum {Zero,One} is not 3"); static_assert( Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}).IsSet(kBasicEnum_One), "BasicEnum {Zero,One} does not have One set"); static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two}) .IsSet({kBasicEnum_Zero, kBasicEnum_Two}), "BasicEnum {Zero,One,Two} does not have {Zero,Two} set"); static_assert(!Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) .IsSet({kBasicEnum_Zero, kBasicEnum_Two}), "BasicEnum {Zero,One} does have {Zero,Two} set"); static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) .Intersects({kBasicEnum_Zero, kBasicEnum_Two}), "BasicEnum {Zero,One} does not intersect {Zero,Two}"); static_assert(!Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) .Intersects({kBasicEnum_Two, kBasicEnum_Three}), "BasicEnum {Zero,One} intersects {Two, Three}"); static_assert(Flags<BasicEnum>(kBasicEnum_Zero) == Flags<BasicEnum>(kBasicEnum_Zero), "BasicEnum Zero is not equal to Zero"); static_assert(!(Flags<BasicEnum>(kBasicEnum_Zero) == Flags<BasicEnum>(kBasicEnum_One)), "BasicEnum Zero is equal to One"); static_assert(Flags<BasicEnum>(kBasicEnum_Zero) != Flags<BasicEnum>(kBasicEnum_One), "BasicEnum Zero is equal to One"); static_assert(!(Flags<BasicEnum>(kBasicEnum_Zero) != Flags<BasicEnum>(kBasicEnum_Zero)), "BasicEnum Zero is not equal to Zero"); static_assert(Flags<BasicEnum>(kBasicEnum_Zero) < Flags<BasicEnum>(kBasicEnum_One), "BasicEnum Zero is not less than One"); static_assert(!(Flags<BasicEnum>(kBasicEnum_Zero) < Flags<BasicEnum>(kBasicEnum_Zero)), "BasicEnum Zero is less than Zero"); static_assert(Flags<BasicEnum>(kBasicEnum_Zero) <= Flags<BasicEnum>(kBasicEnum_One), "BasicEnum Zero is not less or equal to One"); static_assert(Flags<BasicEnum>(kBasicEnum_Zero) <= Flags<BasicEnum>(kBasicEnum_Zero), "BasicEnum Zero is not less or 
equal to Zero"); static_assert(!(Flags<BasicEnum>(kBasicEnum_One) <= Flags<BasicEnum>(kBasicEnum_Zero)), "BasicEnum One is less or equal to Zero"); static_assert(Flags<BasicEnum>(kBasicEnum_One) > Flags<BasicEnum>(kBasicEnum_Zero), "BasicEnum One is not greater than Zero"); static_assert(!(Flags<BasicEnum>(kBasicEnum_One) > Flags<BasicEnum>(kBasicEnum_One)), "BasicEnum One is greater than One"); static_assert(Flags<BasicEnum>(kBasicEnum_One) >= Flags<BasicEnum>(kBasicEnum_Zero), "BasicEnum One is not greater or equal to Zero"); static_assert(Flags<BasicEnum>(kBasicEnum_One) >= Flags<BasicEnum>(kBasicEnum_One), "BasicEnum One is not greater or equal to One"); static_assert(!(Flags<BasicEnum>(kBasicEnum_Zero) >= Flags<BasicEnum>(kBasicEnum_One)), "BasicEnum Zero is greater or equal to One"); static_assert(Flags<BasicEnum>(kBasicEnum_Zero) + kBasicEnum_One == Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}), "BasicEnum Zero + One is not equal to {Zero, One}"); static_assert(kBasicEnum_Zero + Flags<BasicEnum>(kBasicEnum_One) == Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}), "BasicEnum Zero + One is not equal to {Zero, One}"); static_assert( Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) + Flags<BasicEnum>({kBasicEnum_One, kBasicEnum_Two}) == Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two}), "BasicEnum {Zero,One} + {One,Two} is not equal to {Zero,One,Two}"); static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) - Flags<BasicEnum>(kBasicEnum_Zero) == Flags<BasicEnum>(kBasicEnum_One), "BasicEnum {Zero,One} - Zero is not equal to One"); static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) - kBasicEnum_Zero == Flags<BasicEnum>(kBasicEnum_One), "BasicEnum {Zero,One} - Zero is not equal to One"); static_assert(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}) - Flags<BasicEnum>(kBasicEnum_Two) == Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}), "BasicEnum {Zero,One} - Two is not equal to {Zero,One}"); static_assert(Union(kBasicEnum_Zero, Flags<BasicEnum>(kBasicEnum_One)) == Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}), "BasicEnum Zero union One is not equal to {Zero, One}"); static_assert( Union(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}), Flags<BasicEnum>({kBasicEnum_One, kBasicEnum_Two})) == Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two}), "BasicEnum {Zero,One} union {One,Two} is not equal to {Zero,One,Two}"); static_assert(Intersect(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}), Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_Two})) == Flags<BasicEnum>(kBasicEnum_Zero), "BasicEnum {Zero,One} intersect {Zero,Two} is not equal to Zero"); static_assert(Intersect(Flags<BasicEnum>({kBasicEnum_Zero, kBasicEnum_One}), Flags<BasicEnum>(kBasicEnum_Two)) .IsEmpty(), "BasicEnum {Zero,One} intersect Two is not empty"); static_assert(Flags<BasicEnum>({kBasicEnum_One, kBasicEnum_Two}) == Flags<BasicEnum>(kBasicEnum_One, kBasicEnum_Two), "BasicEnum {One,Two} is not equal to (One,Two)"); static_assert( Flags<BasicEnum>({kBasicEnum_One, kBasicEnum_Two, kBasicEnum_Three}) == Flags<BasicEnum>(kBasicEnum_One, kBasicEnum_Two, kBasicEnum_Three), "BasicEnum {One,Two,Three} is not equal to (One,Two,Three)"); static_assert( Flags<BasicEnum>({Flags<BasicEnum>{kBasicEnum_Zero, kBasicEnum_One}, kBasicEnum_Two, Flags<BasicEnum>{kBasicEnum_Three, kBasicEnum_Big}}) == Flags<BasicEnum>(Flags<BasicEnum>{kBasicEnum_Zero, kBasicEnum_One}, kBasicEnum_Two, Flags<BasicEnum>{kBasicEnum_Three, kBasicEnum_Big}), "BasicEnum 
{{Zero,One},Two,{Three,Big}} is not equal to " "({Zero,One},Two,{Three,Big})"); static_assert(Flags<SizedEnum>().IsEmpty(), "SizedEnum default flags not empty"); static_assert(Flags<SizedEnum>().GetMask() == 0, "SizedEnum default mask is not zero"); static_assert(Flags<SizedEnum>(1).GetMask() == 1, "SizedEnum 1 is not 1"); static_assert(Flags<SizedEnum>(kSizedEnum_Zero).GetMask() == 1, "SizedEnum Zero is not 1"); static_assert(Flags<SizedEnum>(kSizedEnum_Big).GetMask() == 1ULL << 63, "SizedEnum Big is not 1 << 63"); static_assert(Flags<SizedEnum>(kSizedEnum_Zero).IsSet(kSizedEnum_Zero), "SizedEnum Zero does not have Zero set"); static_assert(!Flags<SizedEnum>(kSizedEnum_Zero).IsSet(kSizedEnum_One), "SizedEnum Zero has One set"); static_assert(Flags<SizedEnum>({}).GetMask() == 0, "SizedEnum {} is not 0"); static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}).GetMask() == 3, "SizedEnum {Zero,One} is not 3"); static_assert( Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}).IsSet(kSizedEnum_One), "SizedEnum {Zero,One} does not have One set"); static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two}) .IsSet({kSizedEnum_Zero, kSizedEnum_Two}), "SizedEnum {Zero,One,Two} does not have {Zero,Two} set"); static_assert(!Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) .IsSet({kSizedEnum_Zero, kSizedEnum_Two}), "SizedEnum {Zero,One} does have {Zero,Two} set"); static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) .Intersects({kSizedEnum_Zero, kSizedEnum_Two}), "SizedEnum {Zero,One} does not intersect {Zero,Two}"); static_assert(!Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) .Intersects({kSizedEnum_Two, kSizedEnum_Three}), "SizedEnum {Zero,One} intersects {Two, Three}"); static_assert(Flags<SizedEnum>(kSizedEnum_Zero) == Flags<SizedEnum>(kSizedEnum_Zero), "SizedEnum Zero is not equal to Zero"); static_assert(!(Flags<SizedEnum>(kSizedEnum_Zero) == Flags<SizedEnum>(kSizedEnum_One)), "SizedEnum Zero is equal to One"); static_assert(Flags<SizedEnum>(kSizedEnum_Zero) != Flags<SizedEnum>(kSizedEnum_One), "SizedEnum Zero is equal to One"); static_assert(!(Flags<SizedEnum>(kSizedEnum_Zero) != Flags<SizedEnum>(kSizedEnum_Zero)), "SizedEnum Zero is not equal to Zero"); static_assert(Flags<SizedEnum>(kSizedEnum_Zero) < Flags<SizedEnum>(kSizedEnum_One), "SizedEnum Zero is not less than One"); static_assert(!(Flags<SizedEnum>(kSizedEnum_Zero) < Flags<SizedEnum>(kSizedEnum_Zero)), "SizedEnum Zero is less than Zero"); static_assert(Flags<SizedEnum>(kSizedEnum_Zero) <= Flags<SizedEnum>(kSizedEnum_One), "SizedEnum Zero is not less or equal to One"); static_assert(Flags<SizedEnum>(kSizedEnum_Zero) <= Flags<SizedEnum>(kSizedEnum_Zero), "SizedEnum Zero is not less or equal to Zero"); static_assert(!(Flags<SizedEnum>(kSizedEnum_One) <= Flags<SizedEnum>(kSizedEnum_Zero)), "SizedEnum One is less or equal to Zero"); static_assert(Flags<SizedEnum>(kSizedEnum_One) > Flags<SizedEnum>(kSizedEnum_Zero), "SizedEnum One is not greater than Zero"); static_assert(!(Flags<SizedEnum>(kSizedEnum_One) > Flags<SizedEnum>(kSizedEnum_One)), "SizedEnum One is greater than One"); static_assert(Flags<SizedEnum>(kSizedEnum_One) >= Flags<SizedEnum>(kSizedEnum_Zero), "SizedEnum One is not greater or equal to Zero"); static_assert(Flags<SizedEnum>(kSizedEnum_One) >= Flags<SizedEnum>(kSizedEnum_One), "SizedEnum One is not greater or equal to One"); static_assert(!(Flags<SizedEnum>(kSizedEnum_Zero) >= Flags<SizedEnum>(kSizedEnum_One)), "SizedEnum Zero is greater or equal to One"); 
static_assert(Flags<SizedEnum>(kSizedEnum_Zero) + kSizedEnum_One == Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}), "SizedEnum Zero + One is not equal to {Zero, One}"); static_assert(kSizedEnum_Zero + Flags<SizedEnum>(kSizedEnum_One) == Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}), "SizedEnum Zero + One is not equal to {Zero, One}"); static_assert( Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) + Flags<SizedEnum>({kSizedEnum_One, kSizedEnum_Two}) == Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two}), "SizedEnum {Zero,One} + {One,Two} is not equal to {Zero,One,Two}"); static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) - Flags<SizedEnum>(kSizedEnum_Zero) == Flags<SizedEnum>(kSizedEnum_One), "SizedEnum {Zero,One} - Zero is not equal to One"); static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) - kSizedEnum_Zero == Flags<SizedEnum>(kSizedEnum_One), "SizedEnum {Zero,One} - Zero is not equal to One"); static_assert(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}) - Flags<SizedEnum>(kSizedEnum_Two) == Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}), "SizedEnum {Zero,One} - Two is not equal to {Zero,One}"); static_assert(Union(kSizedEnum_Zero, Flags<SizedEnum>(kSizedEnum_One)) == Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}), "SizedEnum Zero union One is not equal to {Zero, One}"); static_assert( Union(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}), Flags<SizedEnum>({kSizedEnum_One, kSizedEnum_Two})) == Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two}), "SizedEnum {Zero,One} union {One,Two} is not equal to {Zero,One,Two}"); static_assert(Intersect(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}), Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_Two})) == Flags<SizedEnum>(kSizedEnum_Zero), "SizedEnum {Zero,One} intersect {Zero,Two} is not equal to Zero"); static_assert(Intersect(Flags<SizedEnum>({kSizedEnum_Zero, kSizedEnum_One}), Flags<SizedEnum>(kSizedEnum_Two)) .IsEmpty(), "SizedEnum {Zero,One} intersect Two is not empty"); static_assert(Flags<SizedEnum>({kSizedEnum_One, kSizedEnum_Two}) == Flags<SizedEnum>(kSizedEnum_One, kSizedEnum_Two), "SizedEnum {One,Two} is not equal to (One,Two)"); static_assert( Flags<SizedEnum>({kSizedEnum_One, kSizedEnum_Two, kSizedEnum_Three}) == Flags<SizedEnum>(kSizedEnum_One, kSizedEnum_Two, kSizedEnum_Three), "SizedEnum {One,Two,Three} is not equal to (One,Two,Three)"); static_assert( Flags<SizedEnum>({Flags<SizedEnum>{kSizedEnum_Zero, kSizedEnum_One}, kSizedEnum_Two, Flags<SizedEnum>{kSizedEnum_Three, kSizedEnum_Big}}) == Flags<SizedEnum>(Flags<SizedEnum>{kSizedEnum_Zero, kSizedEnum_One}, kSizedEnum_Two, Flags<SizedEnum>{kSizedEnum_Three, kSizedEnum_Big}), "SizedEnum {{Zero,One},Two,{Three,Big}} is not equal to " "({Zero,One},Two,{Three,Big})"); static_assert(Flags<ClassEnum>().IsEmpty(), "ClassEnum default flags not empty"); static_assert(Flags<ClassEnum>().GetMask() == 0, "ClassEnum default mask is not zero"); static_assert(Flags<ClassEnum>(1).GetMask() == 1, "ClassEnum 1 is not 1"); static_assert(Flags<ClassEnum>(ClassEnum::kZero).GetMask() == 1, "ClassEnum Zero is not 1"); static_assert(Flags<ClassEnum>(ClassEnum::kBig).GetMask() == 1ULL << 63, "ClassEnum Big is not 1 << 63"); static_assert(Flags<ClassEnum>(ClassEnum::kZero).IsSet(ClassEnum::kZero), "ClassEnum Zero does not have Zero set"); static_assert(!Flags<ClassEnum>(ClassEnum::kZero).IsSet(ClassEnum::kOne), "ClassEnum Zero has One set"); static_assert(Flags<ClassEnum>({}).GetMask() == 0, "ClassEnum {} is 
not 0"); static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}).GetMask() == 3, "ClassEnum {Zero,One} is not 3"); static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) .IsSet(ClassEnum::kOne), "ClassEnum {Zero,One} does not have One set"); static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne, ClassEnum::kTwo}) .IsSet({ClassEnum::kZero, ClassEnum::kTwo}), "ClassEnum {Zero,One,Two} does not have {Zero,Two} set"); static_assert(!Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) .IsSet({ClassEnum::kZero, ClassEnum::kTwo}), "ClassEnum {Zero,One} does have {Zero,Two} set"); static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) .Intersects({ClassEnum::kZero, ClassEnum::kTwo}), "ClassEnum {Zero,One} does not intersect {Zero,Two}"); static_assert(!Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) .Intersects({ClassEnum::kTwo, ClassEnum::kThree}), "ClassEnum {Zero,One} intersects {Two, Three}"); static_assert(Flags<ClassEnum>(ClassEnum::kZero) == Flags<ClassEnum>(ClassEnum::kZero), "ClassEnum Zero is not equal to Zero"); static_assert(!(Flags<ClassEnum>(ClassEnum::kZero) == Flags<ClassEnum>(ClassEnum::kOne)), "ClassEnum Zero is equal to One"); static_assert(Flags<ClassEnum>(ClassEnum::kZero) == ClassEnum::kZero, "ClassEnum Zero is not equal to Zero"); static_assert(Flags<ClassEnum>(ClassEnum::kZero) != Flags<ClassEnum>(ClassEnum::kOne), "ClassEnum Zero is equal to One"); static_assert(!(Flags<ClassEnum>(ClassEnum::kZero) != Flags<ClassEnum>(ClassEnum::kZero)), "ClassEnum Zero is not equal to Zero"); static_assert(Flags<ClassEnum>(ClassEnum::kZero) != ClassEnum::kOne, "ClassEnum Zero is equal to One"); static_assert(Flags<ClassEnum>(ClassEnum::kZero) < Flags<ClassEnum>(ClassEnum::kOne), "ClassEnum Zero is not less than One"); static_assert(!(Flags<ClassEnum>(ClassEnum::kZero) < Flags<ClassEnum>(ClassEnum::kZero)), "ClassEnum Zero is less than Zero"); static_assert(Flags<ClassEnum>(ClassEnum::kZero) <= Flags<ClassEnum>(ClassEnum::kOne), "ClassEnum Zero is not less or equal to One"); static_assert(Flags<ClassEnum>(ClassEnum::kZero) <= Flags<ClassEnum>(ClassEnum::kZero), "ClassEnum Zero is not less or equal to Zero"); static_assert(!(Flags<ClassEnum>(ClassEnum::kOne) <= Flags<ClassEnum>(ClassEnum::kZero)), "ClassEnum One is less or equal to Zero"); static_assert(Flags<ClassEnum>(ClassEnum::kOne) > Flags<ClassEnum>(ClassEnum::kZero), "ClassEnum One is not greater than Zero"); static_assert(!(Flags<ClassEnum>(ClassEnum::kOne) > Flags<ClassEnum>(ClassEnum::kOne)), "ClassEnum One is greater than One"); static_assert(Flags<ClassEnum>(ClassEnum::kOne) >= Flags<ClassEnum>(ClassEnum::kZero), "ClassEnum One is not greater or equal to Zero"); static_assert(Flags<ClassEnum>(ClassEnum::kOne) >= Flags<ClassEnum>(ClassEnum::kOne), "ClassEnum One is not greater or equal to One"); static_assert(!(Flags<ClassEnum>(ClassEnum::kZero) >= Flags<ClassEnum>(ClassEnum::kOne)), "ClassEnum Zero is greater or equal to One"); static_assert(Flags<ClassEnum>(ClassEnum::kZero) + ClassEnum::kOne == Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}), "ClassEnum Zero + One is not equal to {Zero, One}"); static_assert(ClassEnum::kZero + Flags<ClassEnum>(ClassEnum::kOne) == Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}), "ClassEnum Zero + One is not equal to {Zero, One}"); static_assert( Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) + Flags<ClassEnum>({ClassEnum::kOne, ClassEnum::kTwo}) == Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne, ClassEnum::kTwo}), 
"ClassEnum {Zero,One} + {One,Two} is not equal to {Zero,One,Two}"); static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) - Flags<ClassEnum>(ClassEnum::kZero) == Flags<ClassEnum>(ClassEnum::kOne), "ClassEnum {Zero,One} - Zero is not equal to One"); static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) - ClassEnum::kZero == Flags<ClassEnum>(ClassEnum::kOne), "ClassEnum {Zero,One} - Zero is not equal to One"); static_assert(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}) - Flags<ClassEnum>(ClassEnum::kTwo) == Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}), "ClassEnum {Zero,One} - Two is not equal to {Zero,One}"); static_assert(Union(Flags<ClassEnum>(ClassEnum::kZero), Flags<ClassEnum>(ClassEnum::kOne)) == Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}), "ClassEnum Zero union One is not equal to {Zero, One}"); static_assert( Union(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}), Flags<ClassEnum>({ClassEnum::kOne, ClassEnum::kTwo})) == Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne, ClassEnum::kTwo}), "ClassEnum {Zero,One} union {One,Two} is not equal to {Zero,One,Two}"); static_assert(Intersect(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}), Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kTwo})) == Flags<ClassEnum>(ClassEnum::kZero), "ClassEnum {Zero,One} intersect {Zero,Two} is not equal to Zero"); static_assert(Intersect(Flags<ClassEnum>({ClassEnum::kZero, ClassEnum::kOne}), Flags<ClassEnum>(ClassEnum::kTwo)) .IsEmpty(), "ClassEnum {Zero,One} intersect Two is not empty"); static_assert(Flags<ClassEnum>({ClassEnum::kOne, ClassEnum::kTwo}) == Flags<ClassEnum>(ClassEnum::kOne, ClassEnum::kTwo), "ClassEnum {One,Two} is not equal to (One,Two)"); static_assert( Flags<ClassEnum>({ClassEnum::kOne, ClassEnum::kTwo, ClassEnum::kThree}) == Flags<ClassEnum>(ClassEnum::kOne, ClassEnum::kTwo, ClassEnum::kThree), "ClassEnum {One,Two,Three} is not equal to (One,Two,Three)"); static_assert( Flags<ClassEnum>({Flags<ClassEnum>{ClassEnum::kZero, ClassEnum::kOne}, ClassEnum::kTwo, Flags<ClassEnum>{ClassEnum::kThree, ClassEnum::kBig}}) == Flags<ClassEnum>(Flags<ClassEnum>{ClassEnum::kZero, ClassEnum::kOne}, ClassEnum::kTwo, Flags<ClassEnum>{ClassEnum::kThree, ClassEnum::kBig}), "ClassEnum {{Zero,One},Two,{Three,Big}} is not equal to " "({Zero,One},Two,{Three,Big})"); TEST(FlagsTest, BasicImplicitParameterConversions) { EXPECT_EQ(BasicIdentity({}), Flags<BasicEnum>({})); EXPECT_EQ(BasicIdentity(kBasicEnum_One), Flags<BasicEnum>(kBasicEnum_One)); EXPECT_EQ(BasicIdentity({kBasicEnum_One, kBasicEnum_Two}), Flags<BasicEnum>(kBasicEnum_One, kBasicEnum_Two)); } TEST(FlagsTest, BasicSet) { Flags<BasicEnum> flags; flags.Set(kBasicEnum_Zero); EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero)); flags.Set({kBasicEnum_One, kBasicEnum_Two}); EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two)); flags.Set({kBasicEnum_One, kBasicEnum_Three}); EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two, kBasicEnum_Three)); } TEST(FlagsTest, BasicClear) { Flags<BasicEnum> flags(kBasicEnum_Zero); flags.Clear(); EXPECT_TRUE(flags.IsEmpty()); flags.Set({kBasicEnum_One, kBasicEnum_Two}); flags.Clear(kBasicEnum_One); EXPECT_EQ(flags, kBasicEnum_Two); } TEST(FlagsTest, BasicAssign) { Flags<BasicEnum> flags; flags = kBasicEnum_Zero; EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero)); flags = {kBasicEnum_One, kBasicEnum_Two}; EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_One, kBasicEnum_Two)); } TEST(FlagsTest, 
BasicAddAssign) { Flags<BasicEnum> flags; flags += kBasicEnum_Zero; EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero)); flags += {kBasicEnum_One, kBasicEnum_Two}; EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two)); flags += {kBasicEnum_One, kBasicEnum_Three}; EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two, kBasicEnum_Three)); flags += {}; EXPECT_EQ(flags, Flags<BasicEnum>(kBasicEnum_Zero, kBasicEnum_One, kBasicEnum_Two, kBasicEnum_Three)); } TEST(FlagsTest, BasicSubAssign) { Flags<BasicEnum> flags(kBasicEnum_Zero); flags -= kBasicEnum_Zero; EXPECT_TRUE(flags.IsEmpty()); flags.Set({kBasicEnum_One, kBasicEnum_Two, kBasicEnum_Three}); flags -= {kBasicEnum_One, kBasicEnum_Three}; EXPECT_EQ(flags, kBasicEnum_Two); flags -= {}; EXPECT_EQ(flags, kBasicEnum_Two); } TEST(FlagsTest, SizedImplicitParameterConversions) { EXPECT_EQ(SizedIdentity({}), Flags<SizedEnum>({})); EXPECT_EQ(SizedIdentity(kSizedEnum_One), Flags<SizedEnum>(kSizedEnum_One)); EXPECT_EQ(SizedIdentity({kSizedEnum_One, kSizedEnum_Two}), Flags<SizedEnum>(kSizedEnum_One, kSizedEnum_Two)); } TEST(FlagsTest, SizedSet) { Flags<SizedEnum> flags; flags.Set(kSizedEnum_Zero); EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero)); flags.Set({kSizedEnum_One, kSizedEnum_Two}); EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two)); flags.Set({kSizedEnum_One, kSizedEnum_Three}); EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two, kSizedEnum_Three)); } TEST(FlagsTest, SizedClear) { Flags<SizedEnum> flags(kSizedEnum_Zero); flags.Clear(); EXPECT_TRUE(flags.IsEmpty()); flags.Set({kSizedEnum_One, kSizedEnum_Two}); flags.Clear(kSizedEnum_One); EXPECT_EQ(flags, kSizedEnum_Two); } TEST(FlagsTest, SizedAssign) { Flags<SizedEnum> flags; flags = kSizedEnum_Zero; EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero)); flags = {kSizedEnum_One, kSizedEnum_Two}; EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_One, kSizedEnum_Two)); } TEST(FlagsTest, SizedAddAssign) { Flags<SizedEnum> flags; flags += kSizedEnum_Zero; EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero)); flags += {kSizedEnum_One, kSizedEnum_Two}; EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two)); flags += {kSizedEnum_One, kSizedEnum_Three}; EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two, kSizedEnum_Three)); flags += {}; EXPECT_EQ(flags, Flags<SizedEnum>(kSizedEnum_Zero, kSizedEnum_One, kSizedEnum_Two, kSizedEnum_Three)); } TEST(FlagsTest, SizedSubAssign) { Flags<SizedEnum> flags(kSizedEnum_Zero); flags -= kSizedEnum_Zero; EXPECT_TRUE(flags.IsEmpty()); flags.Set({kSizedEnum_One, kSizedEnum_Two, kSizedEnum_Three}); flags -= {kSizedEnum_One, kSizedEnum_Three}; EXPECT_EQ(flags, kSizedEnum_Two); flags -= {}; EXPECT_EQ(flags, kSizedEnum_Two); } TEST(FlagsTest, ClassImplicitParameterConversions) { EXPECT_EQ(ClassIdentity({}), Flags<ClassEnum>({})); EXPECT_EQ(ClassIdentity(ClassEnum::kOne), Flags<ClassEnum>(ClassEnum::kOne)); EXPECT_EQ(ClassIdentity({ClassEnum::kOne, ClassEnum::kTwo}), Flags<ClassEnum>(ClassEnum::kOne, ClassEnum::kTwo)); } TEST(FlagsTest, ClassSet) { Flags<ClassEnum> flags; flags.Set(ClassEnum::kZero); EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero)); flags.Set({ClassEnum::kOne, ClassEnum::kTwo}); EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero, ClassEnum::kOne, ClassEnum::kTwo)); flags.Set({ClassEnum::kOne, ClassEnum::kThree}); EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero, 
ClassEnum::kOne, ClassEnum::kTwo, ClassEnum::kThree)); } TEST(FlagsTest, ClassClear) { Flags<ClassEnum> flags(ClassEnum::kZero); flags.Clear(); EXPECT_TRUE(flags.IsEmpty()); flags.Set({ClassEnum::kOne, ClassEnum::kTwo}); flags.Clear(ClassEnum::kOne); EXPECT_EQ(flags, ClassEnum::kTwo); } TEST(FlagsTest, ClassAssign) { Flags<ClassEnum> flags; flags = ClassEnum::kZero; EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero)); flags = {ClassEnum::kOne, ClassEnum::kTwo}; EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kOne, ClassEnum::kTwo)); } TEST(FlagsTest, ClassAddAssign) { Flags<ClassEnum> flags; flags += ClassEnum::kZero; EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero)); flags += {ClassEnum::kOne, ClassEnum::kTwo}; EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero, ClassEnum::kOne, ClassEnum::kTwo)); flags += {ClassEnum::kOne, ClassEnum::kThree}; EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero, ClassEnum::kOne, ClassEnum::kTwo, ClassEnum::kThree)); flags += {}; EXPECT_EQ(flags, Flags<ClassEnum>(ClassEnum::kZero, ClassEnum::kOne, ClassEnum::kTwo, ClassEnum::kThree)); } TEST(FlagsTest, ClassSubAssign) { Flags<ClassEnum> flags(ClassEnum::kZero); flags -= ClassEnum::kZero; EXPECT_TRUE(flags.IsEmpty()); flags.Set({ClassEnum::kOne, ClassEnum::kTwo, ClassEnum::kThree}); flags -= {ClassEnum::kOne, ClassEnum::kThree}; EXPECT_EQ(flags, ClassEnum::kTwo); flags -= {}; EXPECT_EQ(flags, ClassEnum::kTwo); } } // namespace } // namespace gb
31,089
10,202
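The static_asserts and tests above already specify the Flags API exhaustively; as a compact, hedged call-site sketch (the RenderOption enum and function are made up for illustration, not from the test file):

#include "gb/base/flags.h"

enum class RenderOption { kWireframe, kShadows, kBloom };

// Mirrors the Set/Clear/IsSet/operator+= behavior the tests verify.
gb::Flags<RenderOption> DefaultOptions() {
  gb::Flags<RenderOption> options = {RenderOption::kWireframe, RenderOption::kShadows};
  options += RenderOption::kBloom;          // add one more flag
  options.Clear(RenderOption::kWireframe);  // drop a flag again
  if (options.IsSet(RenderOption::kShadows)) {
    // ... shadows are enabled ...
  }
  return options;
}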
// Copyright (c) 2022 Feng Yang
//
// I am making my contributions/submissions to this project solely in my
// personal capacity and am not conveying any rights to any intellectual
// property of any third parties.

#include "profiler_spy.h"

namespace vox {
// RAII scope timer: capture the start timestamp when the spy is created.
ProfilerSpy::ProfilerSpy(const std::string &p_name) :
name(p_name),
start(std::chrono::steady_clock::now()) {
}

// On destruction, capture the end timestamp and hand the finished sample to the profiler.
ProfilerSpy::~ProfilerSpy() {
    end = std::chrono::steady_clock::now();
    Profiler::save(*this);
}

}
482
179
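ProfilerSpy above is a plain RAII scope timer; a minimal usage sketch (the function and label below are illustrative, not from the sources) looks like this:

#include "profiler_spy.h"

void UpdatePhysics() {
    // Start timestamp is taken in the constructor.
    vox::ProfilerSpy spy("UpdatePhysics");
    // ... work to be measured ...
}   // Destructor records the end timestamp and forwards the sample via Profiler::save().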