Columns (each record below is one commit): diff (string, 41 to 2.03M chars), msg (string, 1 to 1.5k chars), repo (string, 5 to 40 chars), sha (string, 40 chars), time (string, 20 chars).
--- a/tensorflow/compiler/xla/service/cpu_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/cpu_transfer_manager.cc
@@ Status CpuTransferManager::TransferLiteralToInfeed(se::StreamExecutor* executor,
                          ShapeUtil::HumanString(literal.shape()).c_str());
   }
 
-  cpu::runtime::InfeedManager* infeed_manager =
-      cpu::runtime::GetInfeedManager();
-
   int64 size = GetByteSizeRequirement(shape);
   if (size > std::numeric_limits<int32>::max()) {
     return Unimplemented("Infeed shape is too large: %s needs %lld bytes",
                          ShapeUtil::HumanString(literal.shape()).c_str(), size);
   }
+
+  return TransferBufferToInfeed(executor, size,
+                                LiteralUtil::InternalData(literal));
+}
+
+Status CpuTransferManager::TransferBufferToInfeed(se::StreamExecutor* executor,
+                                                  int64 size,
+                                                  const void* source) {
   int32 size_32 = static_cast<int32>(size);
   CpuInfeedBuffer* queued_buffer = new CpuInfeedBuffer(size_32);
-  TF_RETURN_IF_ERROR(TransferBufferToDevice(
-      executor, /*size=*/size, /*source=*/LiteralUtil::InternalData(literal),
-      queued_buffer->device_memory()));
+  TF_RETURN_IF_ERROR(TransferBufferToDevice(executor, /*size=*/size,
+                                            /*source=*/source,
+                                            queued_buffer->device_memory()));
 
+  cpu::runtime::InfeedManager* infeed_manager =
+      cpu::runtime::GetInfeedManager();
   infeed_manager->EnqueueBuffer(queued_buffer);
 
   return Status::OK();
--- a/tensorflow/compiler/xla/service/cpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/cpu_transfer_manager.h
@@ class CpuTransferManager : public GenericTransferManager {
 
   Status TransferLiteralToInfeed(perftools::gputools::StreamExecutor* executor,
                                  const Literal& literal) override;
+  Status TransferBufferToInfeed(perftools::gputools::StreamExecutor* executor,
+                                int64 size, const void* source) override;
 
  private:
   TF_DISALLOW_COPY_AND_ASSIGN(CpuTransferManager);
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
@@ Status GenericTransferManager::TransferLiteralToDevice(
 
 Status GenericTransferManager::TransferLiteralToInfeed(
     se::StreamExecutor* executor, const Literal& literal) {
-  return Unimplemented("Infeed is not supported on GPU (b/30467474)");
+  return Unimplemented("Generic transfer to Infeed");
+}
+
+Status GenericTransferManager::TransferBufferToInfeed(
+    perftools::gputools::StreamExecutor* executor, int64 size,
+    const void* source) {
+  return Unimplemented("Generic transfer to Infeed");
 }
 
 Status GenericTransferManager::TransferLiteralFromOutfeed(
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.h
@@ class GenericTransferManager : public TransferManager {
 
   Status TransferLiteralToInfeed(perftools::gputools::StreamExecutor* executor,
                                  const Literal& literal) override;
+  Status TransferBufferToInfeed(perftools::gputools::StreamExecutor* executor,
+                                int64 size, const void* source) override;
 
   Status TransferLiteralFromOutfeed(
       perftools::gputools::StreamExecutor* executor, const Shape& literal_shape,
--- a/tensorflow/compiler/xla/service/gpu_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/gpu_transfer_manager.cc
@@ Status GpuTransferManager::TransferLiteralToInfeed(se::StreamExecutor* executor,
   return Status::OK();
 }
 
+Status GpuTransferManager::TransferBufferToInfeed(se::StreamExecutor* executor,
+                                                  int64 size,
+                                                  const void* source) {
+  return TransferBufferToInfeedInternal(executor, size, source).status();
+}
+
 StatusOr<gpu::InfeedBuffer*>
 GpuTransferManager::TransferLiteralToInfeedInternal(
     se::StreamExecutor* executor, const Literal& literal) {
@@ GpuTransferManager::TransferLiteralToInfeedInternal(
                          ShapeUtil::HumanString(literal.shape()).c_str());
   }
 
+  return TransferBufferToInfeedInternal(executor, size,
+                                        LiteralUtil::InternalData(literal));
+}
+
+StatusOr<gpu::InfeedBuffer*> GpuTransferManager::TransferBufferToInfeedInternal(
+    se::StreamExecutor* executor, int64 size, const void* source) {
   gpu::InfeedManager* infeed_manager = gpu::GetOrCreateInfeedManager();
   se::Stream* stream = infeed_manager->GetStream(executor);
   if (stream == nullptr) {
@@ GpuTransferManager::TransferLiteralToInfeedInternal(
   }
 
   gpu::InfeedBuffer* buffer = new gpu::InfeedBuffer(executor, size);
-  stream->ThenMemcpy(buffer->device_memory(),
-                     LiteralUtil::InternalData(literal), size);
+  stream->ThenMemcpy(buffer->device_memory(), source, size);
 
   VLOG(2) << "Queued infeed data on stream " << stream;
 
--- a/tensorflow/compiler/xla/service/gpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/gpu_transfer_manager.h
@@ class GpuTransferManager : public GenericTransferManager {
 
   Status TransferLiteralToInfeed(perftools::gputools::StreamExecutor* executor,
                                  const Literal& literal) override;
+  Status TransferBufferToInfeed(perftools::gputools::StreamExecutor* executor,
+                                int64 size, const void* source) override;
 
  private:
   // Internal helper function for TransferLiteralToInfeed(). Input
@@ class GpuTransferManager : public GenericTransferManager {
   StatusOr<gpu::InfeedBuffer*> TransferLiteralToInfeedInternal(
       perftools::gputools::StreamExecutor* executor, const Literal& literal);
 
+  // Internal helper function for TransferLiteralToInfeed().
+  StatusOr<gpu::InfeedBuffer*> TransferBufferToInfeedInternal(
+      perftools::gputools::StreamExecutor* executor, int64 size,
+      const void* source);
+
   TF_DISALLOW_COPY_AND_ASSIGN(GpuTransferManager);
 };
 
--- a/tensorflow/compiler/xla/service/transfer_manager.h
+++ b/tensorflow/compiler/xla/service/transfer_manager.h
@@ class TransferManager {
       perftools::gputools::StreamExecutor* executor,
       const Literal& literal) = 0;
 
+  // Transfer a memory block of the given size from 'source' buffer to the
+  // Infeed interface of the device using the given executor.
+  //
+  // size is the size to transfer from source in bytes.
+  //
+  // source is the source data that must be in the target-dependent layout that
+  // the Infeed HLO used in the computation expects.
+  virtual Status TransferBufferToInfeed(
+      perftools::gputools::StreamExecutor* executor, int64 size,
+      const void* source) = 0;
+
   // Transfers the given literal from the Outfeed interface of the device,
   // using the given executor.
   virtual Status TransferLiteralFromOutfeed(
[XLA] Add transfer buffer to infeed.
tensorflow/tensorflow
b52debb4e63cce1e0733d6d34975d4efb9934680
2017-06-15T21:20:48Z
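A minimal, self-contained C++ sketch of the pattern this commit introduces: the literal-based infeed entry point now delegates to a generic (size, source-pointer) entry point. The types here (Status, Infeed) are toy stand-ins, not the real XLA classes, which are assumptions for illustration only.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

struct Status {
  bool ok = true;
  std::string message;
};

class Infeed {
 public:
  // New generic entry point: any raw buffer can be enqueued.
  Status TransferBufferToInfeed(int64_t size, const void* source) {
    std::vector<char> buffer(static_cast<size_t>(size));
    std::memcpy(buffer.data(), source, static_cast<size_t>(size));
    queue_.push_back(std::move(buffer));
    return Status{};
  }

  // Existing literal-based entry point, now a thin wrapper that computes the
  // byte size and forwards the literal's backing storage.
  Status TransferLiteralToInfeed(const std::vector<float>& literal) {
    return TransferBufferToInfeed(
        static_cast<int64_t>(literal.size() * sizeof(float)), literal.data());
  }

 private:
  std::vector<std::vector<char>> queue_;
};

int main() {
  Infeed infeed;
  std::vector<float> literal = {1.0f, 2.0f, 3.0f};
  Status s = infeed.TransferLiteralToInfeed(literal);
  std::cout << "enqueued ok: " << s.ok << "\n";
}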
--- a/scripts/filter_planning.sh
+++ b/scripts/filter_planning.sh
@@ perception_topic="topic == '/apollo/perception/obstacles' \
 perfect_control_topic="$perception_topic \
    or $routing_topic \
    or topic == '/apollo/perception/obstacles' \
+   or topic == '/apollo/prediction' \
    or topic == '/apollo/perception/traffic_light'"
 
 planning_deps="$perfect_control_topic \
@@ for bag in $@; do
   fi
   filter $bag $folder
 done
-
script: add prediction topic into filter_planning.sh
ApolloAuto/apollo
e8cc84d98b81a8cc2158f36652cc67ce6aed4d38
2017-12-27T19:48:40Z
--- a/include/LightGBM/bin.h
+++ b/include/LightGBM/bin.h
@@ class BinMapper {
   /*!
    * \brief Get bin info
    */
-  inline std::string bin_info() const {
+  inline std::string bin_info_string() const {
     if (bin_type_ == BinType::CategoricalBin) {
       return Common::Join(bin_2_categorical_, ":");
     } else {
--- a/include/LightGBM/dataset.h
+++ b/include/LightGBM/dataset.h
@@ class Dataset {
 
   inline std::vector<std::string> feature_infos() const {
     std::vector<std::string> bufs;
-    for (int i = 0; i < num_total_features_; i++) {
+    for (int i = 0; i < num_total_features_; ++i) {
       int fidx = used_feature_map_[i];
-      if (fidx == -1) {
+      if (fidx < 0) {
         bufs.push_back("none");
       } else {
         const auto bin_mapper = FeatureBinMapper(fidx);
-        bufs.push_back(bin_mapper->bin_info());
+        bufs.push_back(bin_mapper->bin_info_string());
       }
     }
     return bufs;
--- a/include/LightGBM/utils/array_args.h
+++ b/include/LightGBM/utils/array_args.h
@@ class ArrayArgs {
 }  // namespace LightGBM
 
 #endif  // LightGBM_UTILS_ARRAY_AGRS_H_
-
--- a/src/boosting/gbdt_model_text.cpp
+++ b/src/boosting/gbdt_model_text.cpp
@@
 #include <LightGBM/config.h>
 #include <LightGBM/metric.h>
 #include <LightGBM/objective_function.h>
+#include <LightGBM/utils/array_args.h>
 #include <LightGBM/utils/common.h>
 
 #include <string>
@@ std::string GBDT::DumpModel(int start_iteration, int num_iteration) const {
   str_buf << "\"monotone_constraints\": ["
           << Common::Join(monotone_constraints_, ",") << "]," << '\n';
 
+  str_buf << "\"feature_infos\":" << "{";
+  bool first_obj = true;
+  for (size_t i = 0; i < feature_infos_.size(); ++i) {
+    std::stringstream json_str_buf;
+    auto strs = Common::Split(feature_infos_[i].c_str(), ":");
+    if (strs[0][0] == '[') {
+      strs[0].erase(0, 1);  // remove '['
+      strs[1].erase(strs[1].size() - 1);  // remove ']'
+      json_str_buf << "{\"min_value\":" << strs[0] << ",";
+      json_str_buf << "\"max_value\":" << strs[1] << ",";
+      json_str_buf << "\"values\":[]}";
+    } else if (strs[0] != "none") {  // categorical feature
+      auto vals = Common::StringToArray<int>(feature_infos_[i], ':');
+      auto max_idx = ArrayArgs<int>::ArgMax(vals);
+      auto min_idx = ArrayArgs<int>::ArgMin(vals);
+      json_str_buf << "{\"min_value\":" << vals[min_idx] << ",";
+      json_str_buf << "\"max_value\":" << vals[max_idx] << ",";
+      json_str_buf << "\"values\":[" << Common::Join(vals, ",") << "]}";
+    } else {  // unused feature
+      continue;
+    }
+    if (!first_obj) {
+      str_buf << ",";
+    }
+    str_buf << "\"" << feature_names_[i] << "\":";
+    str_buf << json_str_buf.str();
+    first_obj = false;
+  }
+  str_buf << "}," << '\n';
+
   str_buf << "\"tree_info\": [";
   int num_used_model = static_cast<int>(models_.size());
   int total_iteration = num_used_model / num_tree_per_iteration_;
@@ std::string GBDT::DumpModel(int start_iteration, int num_iteration) const {
     }
   }
   str_buf << '\n' << "\"feature_importances\":" << "{";
-  if (!pairs.empty()) {
-    str_buf << "\"" << pairs[0].second << "\":" << std::to_string(pairs[0].first);
-    for (size_t i = 1; i < pairs.size(); ++i) {
+  for (size_t i = 0; i < pairs.size(); ++i) {
+    if (i > 0) {
       str_buf << ",";
-      str_buf << "\"" << pairs[i].second << "\":" << std::to_string(pairs[i].first);
     }
+    str_buf << "\"" << pairs[i].second << "\":" << std::to_string(pairs[i].first);
   }
   str_buf << "}" << '\n';
 
added feature infos to JSON dump()
microsoft/LightGBM
c4a7ab81702d0119de55d74f9d848f62435f3a1d
2020-02-20T05:51:05Z
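A hedged, stand-alone C++ sketch of the feature_infos-to-JSON logic added above: numeric features arrive as "[min:max]" strings, categorical features as a ":"-joined list of category values, and "none" marks an unused feature. Split here is a local stand-in for LightGBM's Common:: helpers, an assumption for illustration.

#include <algorithm>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static std::vector<std::string> Split(const std::string& s, char delim) {
  std::vector<std::string> out;
  std::stringstream ss(s);
  std::string tok;
  while (std::getline(ss, tok, delim)) out.push_back(tok);
  return out;
}

static std::string FeatureInfoToJson(const std::string& info) {
  if (info == "none") return "";  // unused feature: skipped in the dump
  std::ostringstream json;
  if (!info.empty() && info.front() == '[') {
    // Numeric feature: strip the surrounding brackets and split on ':'.
    auto parts = Split(info.substr(1, info.size() - 2), ':');
    json << "{\"min_value\":" << parts[0]
         << ",\"max_value\":" << parts[1] << ",\"values\":[]}";
  } else {  // categorical feature: min/max plus the full value list
    auto parts = Split(info, ':');
    std::vector<int> vals;
    for (const auto& p : parts) vals.push_back(std::stoi(p));
    auto mm = std::minmax_element(vals.begin(), vals.end());
    json << "{\"min_value\":" << *mm.first << ",\"max_value\":" << *mm.second
         << ",\"values\":[";
    for (size_t i = 0; i < vals.size(); ++i) {
      if (i) json << ",";
      json << vals[i];
    }
    json << "]}";
  }
  return json.str();
}

int main() {
  std::cout << FeatureInfoToJson("[0.5:10]") << "\n";  // numeric
  std::cout << FeatureInfoToJson("2:7:4") << "\n";     // categorical
}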
--- a/tensorflow/tools/compatibility/ast_edits.py
+++ b/tensorflow/tools/compatibility/ast_edits.py
@@ class APIChangeSpec(object):
   * `function_warnings`: maps full names of functions to warnings that will be
     printed out if the function is used. (e.g. tf.nn.convolution())
   * `function_transformers`: maps function names to custom handlers
-  * `leftover_warnings`: These warnings are printed if a matching Attribute
-    still exists after all other transformations have run.
+  * `module_deprecations`: maps module names to warnings that will be printed
+    if the module is still used after all other transformations have run
 
   For an example, see `TFAPIChangeSpec`.
   """
--- a/tensorflow/tools/compatibility/tf_upgrade_v2.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2.py
@@ def __init__(self):
         "the required code."
     )
 
+    flags_warning = (
+        ast_edits.ERROR,
+        "tf.flags has been removed, please use the argparse or absl"
+        " modules if you need command line parsing.")
+
     decay_function_comment = (
         ast_edits.INFO,
         "To use learning rate decay schedules with TensorFlow 2.0, switch to "
@@ def __init__(self):
             assert_rank_comment,
         "tf.debugging.assert_rank_in":
             assert_rank_comment,
-        "tf.flags": (
-            ast_edits.ERROR,
-            "tf.flags has been removed, please use the argparse or absl"
-            " modules if you need command line parsing."),
         "tf.train.exponential_decay":
             decay_function_comment,
         "tf.train.piecewise_constant_decay":
@@ def __init__(self):
 
     self.module_deprecations = {
         "tf.contrib": contrib_warning,
+        "tf.flags": flags_warning,
     }
 
 
--- a/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
@@ def test_contrib_framework_argsort(self):
     _, _, _, new_text = self._upgrade(text)
     self.assertEqual(expected, new_text)
 
+  def test_flags_bare(self):
+    _, _, errors, _ = self._upgrade("tf.flags")
+    self.assertIn("tf.flags has been removed", errors[0])
+
+  def test_flags_flags(self):
+    _, _, errors, _ = self._upgrade("tf.flags.FLAGS")
+    self.assertIn("tf.flags has been removed", errors[0])
+
 
 class TestUpgradeFiles(test_util.TensorFlowTestCase):
 
Make converter handle tf.flags as a module deprecation warning
tensorflow/tensorflow
f291fa19e7d9a3e73c7895a073a6acbf2dd7e15d
2019-02-01T20:14:25Z
--- a/modules/gdscript/gdscript_parser.cpp
+++ b/modules/gdscript/gdscript_parser.cpp
@@ void GDScriptParser::_parse_class(ClassNode *p_class) {
 			member.line = tokenizer->get_token_line();
 			member.usages = 0;
 			member.rpc_mode = rpc_mode;
-#ifdef TOOLS_ENABLED
-			Variant::CallError ce;
-			member.default_value = Variant::construct(member._export.type, NULL, 0, ce);
-#endif
 
 			if (current_class->constant_expressions.has(member.identifier)) {
 				_set_error("A constant named \"" + String(member.identifier) + "\" already exists in this class (at line: " +
@@ void GDScriptParser::_parse_class(ClassNode *p_class) {
 				}
 			}
 
+			if (autoexport && member.data_type.has_type) {
+				if (member.data_type.kind == DataType::BUILTIN) {
+					member._export.type = member.data_type.builtin_type;
+				} else if (member.data_type.kind == DataType::NATIVE) {
+					if (ClassDB::is_parent_class(member.data_type.native_type, "Resource")) {
+						member._export.type = Variant::OBJECT;
+						member._export.hint = PROPERTY_HINT_RESOURCE_TYPE;
+						member._export.usage |= PROPERTY_USAGE_SCRIPT_VARIABLE;
+						member._export.hint_string = member.data_type.native_type;
+						member._export.class_name = member.data_type.native_type;
+					} else {
+						_set_error("Invalid export type. Only built-in and native resource types can be exported.", member.line);
+						return;
+					}
+
+				} else {
+					_set_error("Invalid export type. Only built-in and native resource types can be exported.", member.line);
+					return;
+				}
+			}
+
+#ifdef TOOLS_ENABLED
+			Variant::CallError ce;
+			member.default_value = Variant::construct(member._export.type, NULL, 0, ce);
+#endif
+
 			if (tokenizer->get_token() == GDScriptTokenizer::TK_OP_ASSIGN) {
 
 #ifdef DEBUG_ENABLED
@@ void GDScriptParser::_parse_class(ClassNode *p_class) {
 				member.initial_assignment = op;
 			}
 
-			if (autoexport && member.data_type.has_type) {
-				if (member.data_type.kind == DataType::BUILTIN) {
-					member._export.type = member.data_type.builtin_type;
-				} else if (member.data_type.kind == DataType::NATIVE) {
-					if (ClassDB::is_parent_class(member.data_type.native_type, "Resource")) {
-						member._export.type = Variant::OBJECT;
-						member._export.hint = PROPERTY_HINT_RESOURCE_TYPE;
-						member._export.usage |= PROPERTY_USAGE_SCRIPT_VARIABLE;
-						member._export.hint_string = member.data_type.native_type;
-						member._export.class_name = member.data_type.native_type;
-					} else {
-						_set_error("Invalid export type. Only built-in and native resource types can be exported.", member.line);
-						return;
-					}
-
-				} else {
-					_set_error("Invalid export type. Only built-in and native resource types can be exported.", member.line);
-					return;
-				}
-			}
-
 			if (tokenizer->get_token() == GDScriptTokenizer::TK_PR_SETGET) {
 
 				tokenizer->advance();
Merge pull request from bojidar-bg/27575-poolarrayexport-default
godotengine/godot
9986f3804caf32dc092ac7f094381745dc3ae8f8
2020-01-16T13:58:06Z
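The reorder above matters because the default value is built from member._export.type, which autoexport only fills in afterwards. A minimal stand-alone C++ sketch of that ordering hazard, with Type and construct_default() as toy stand-ins for Godot's Variant machinery:

#include <iostream>
#include <string>

enum class Type { NIL, INT, FLOAT };

// Builds a default value for the given type, like Variant::construct().
static std::string construct_default(Type t) {
  switch (t) {
    case Type::INT:   return "0";
    case Type::FLOAT: return "0.0";
    default:          return "null";
  }
}

int main() {
  Type export_type = Type::NIL;  // not yet deduced
  std::string too_early = construct_default(export_type);  // "null" - wrong
  export_type = Type::INT;       // autoexport deduces the real type
  std::string correct = construct_default(export_type);    // "0"
  std::cout << too_early << " vs " << correct << "\n";
}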
--- a/torch/_torch_docs.py
+++ b/torch/_torch_docs.py
@@ def merge_dicts(*dicts):
 See :meth:`~torch.svd` for more details.
 
 Arguments:
-    input (Tensor): The input 2D tensor of dimensions :math:`m \times n`
+    input (Tensor): The input tensor of size :math:`(*, m, n)` where :math:`*` is zero or more batch dimensions
     rcond (float): A floating point value to determine the cutoff for small singular values.
                    Default: 1e-15
 
 Returns:
-    The pseudo-inverse of :attr:`input` of dimensions :math:`n \times m`
+    The pseudo-inverse of :attr:`input` of dimensions :math:`(*, n, m)`
 
 Example::
 
@@ def merge_dicts(*dicts):
             [-0.7124, -0.1631, -0.2272],
             [ 0.1356,  0.3933, -0.5023],
             [-0.0308, -0.1725, -0.5216]])
+    >>> # Batched pinverse example
+    >>> a = torch.randn(2, 6, 3)
+    >>> b = torch.pinverse(a)
+    >>> torch.matmul(b, a)
+    tensor([[[ 1.0000e+00,  1.6391e-07, -1.1548e-07],
+             [ 8.3121e-08,  1.0000e+00, -2.7567e-07],
+             [ 3.5390e-08,  1.4901e-08,  1.0000e+00]],
+
+            [[ 1.0000e+00, -8.9407e-08,  2.9802e-08],
+             [-2.2352e-07,  1.0000e+00,  1.1921e-07],
+             [ 0.0000e+00,  8.9407e-08,  1.0000e+00]]])
 
 .. _Moore-Penrose inverse: https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse
 
Update pinverse doc for recent commit
pytorch/pytorch
1dcf1b8938fa0cf70373fb84a853c51517db46cc
2019-10-31T14:36:35Z
--- a/Docker/Dockerfile
+++ b/Docker/Dockerfile
@@ RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \
     && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \
     && cmake -H. -B"/tmp/build" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \
        -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/tmp/build -DSecp256k1_ROOT_DIR=/usr/local -DBUILD_MONGO_DB_PLUGIN=true \
-    && cmake --build /tmp/build --target install
+    && cmake --build /tmp/build --target install && rm /tmp/build/bin/eosiocpp
 
 
 FROM ubuntu:18.04
remove eosiocpp from eosio/eos
EOSIO/eos
d3c78bc1e6bb3d7f4c114a4008bdb459656d88cf
2018-05-17T16:52:19Z
--- a/include/gmock/gmock-matchers.h
+++ b/include/gmock/gmock-matchers.h
@@ class GTEST_API_ Matcher<internal::string>
   Matcher(const char* s);  // NOLINT
 };
 
+#if GTEST_HAS_STRING_PIECE_
+// The following two specializations allow the user to write str
+// instead of Eq(str) and "foo" instead of Eq("foo") when a StringPiece
+// matcher is expected.
+template <>
+class GTEST_API_ Matcher<const StringPiece&>
+    : public internal::MatcherBase<const StringPiece&> {
+ public:
+  Matcher() {}
+
+  explicit Matcher(const MatcherInterface<const StringPiece&>* impl)
+      : internal::MatcherBase<const StringPiece&>(impl) {}
+
+  // Allows the user to write str instead of Eq(str) sometimes, where
+  // str is a string object.
+  Matcher(const internal::string& s);  // NOLINT
+
+  // Allows the user to write "foo" instead of Eq("foo") sometimes.
+  Matcher(const char* s);  // NOLINT
+
+  // Allows the user to pass StringPieces directly.
+  Matcher(StringPiece s);  // NOLINT
+};
+
+template <>
+class GTEST_API_ Matcher<StringPiece>
+    : public internal::MatcherBase<StringPiece> {
+ public:
+  Matcher() {}
+
+  explicit Matcher(const MatcherInterface<StringPiece>* impl)
+      : internal::MatcherBase<StringPiece>(impl) {}
+
+  // Allows the user to write str instead of Eq(str) sometimes, where
+  // str is a string object.
+  Matcher(const internal::string& s);  // NOLINT
+
+  // Allows the user to write "foo" instead of Eq("foo") sometimes.
+  Matcher(const char* s);  // NOLINT
+
+  // Allows the user to pass StringPieces directly.
+  Matcher(StringPiece s);  // NOLINT
+};
+#endif  // GTEST_HAS_STRING_PIECE_
+
 // The PolymorphicMatcher class template makes it easy to implement a
 // polymorphic matcher (i.e. a matcher that can match values of more
 // than one type, e.g. Eq(n) and NotNull()).
--- a/src/gmock-matchers.cc
+++ b/src/gmock-matchers.cc
@@ Matcher<internal::string>::Matcher(const char* s) {
   *this = Eq(internal::string(s));
 }
 
+#if GTEST_HAS_STRING_PIECE_
+// Constructs a matcher that matches a const StringPiece& whose value is
+// equal to s.
+Matcher<const StringPiece&>::Matcher(const internal::string& s) {
+  *this = Eq(s);
+}
+
+// Constructs a matcher that matches a const StringPiece& whose value is
+// equal to s.
+Matcher<const StringPiece&>::Matcher(const char* s) {
+  *this = Eq(internal::string(s));
+}
+
+// Constructs a matcher that matches a const StringPiece& whose value is
+// equal to s.
+Matcher<const StringPiece&>::Matcher(StringPiece s) {
+  *this = Eq(s.ToString());
+}
+
+// Constructs a matcher that matches a StringPiece whose value is equal to s.
+Matcher<StringPiece>::Matcher(const internal::string& s) {
+  *this = Eq(s);
+}
+
+// Constructs a matcher that matches a StringPiece whose value is equal to s.
+Matcher<StringPiece>::Matcher(const char* s) {
+  *this = Eq(internal::string(s));
+}
+
+// Constructs a matcher that matches a StringPiece whose value is equal to s.
+Matcher<StringPiece>::Matcher(StringPiece s) {
+  *this = Eq(s.ToString());
+}
+#endif  // GTEST_HAS_STRING_PIECE_
+
 namespace internal {
 
 // Joins a vector of strings as if they are fields of a tuple; returns
--- a/test/gmock-matchers_test.cc
+++ b/test/gmock-matchers_test.cc
@@ TEST(StringMatcherTest, CanBeImplicitlyConstructedFromString) {
   EXPECT_FALSE(m2.Matches("hello"));
 }
 
+#if GTEST_HAS_STRING_PIECE_
+// Tests that a C-string literal can be implicitly converted to a
+// Matcher<StringPiece> or Matcher<const StringPiece&>.
+TEST(StringPieceMatcherTest, CanBeImplicitlyConstructedFromCStringLiteral) {
+  Matcher<StringPiece> m1 = "cats";
+  EXPECT_TRUE(m1.Matches("cats"));
+  EXPECT_FALSE(m1.Matches("dogs"));
+
+  Matcher<const StringPiece&> m2 = "cats";
+  EXPECT_TRUE(m2.Matches("cats"));
+  EXPECT_FALSE(m2.Matches("dogs"));
+}
+
+// Tests that a string object can be implicitly converted to a
+// Matcher<StringPiece> or Matcher<const StringPiece&>.
+TEST(StringPieceMatcherTest, CanBeImplicitlyConstructedFromString) {
+  Matcher<StringPiece> m1 = string("cats");
+  EXPECT_TRUE(m1.Matches("cats"));
+  EXPECT_FALSE(m1.Matches("dogs"));
+
+  Matcher<const StringPiece&> m2 = string("cats");
+  EXPECT_TRUE(m2.Matches("cats"));
+  EXPECT_FALSE(m2.Matches("dogs"));
+}
+
+// Tests that a StringPiece object can be implicitly converted to a
+// Matcher<StringPiece> or Matcher<const StringPiece&>.
+TEST(StringPieceMatcherTest, CanBeImplicitlyConstructedFromStringPiece) {
+  Matcher<StringPiece> m1 = StringPiece("cats");
+  EXPECT_TRUE(m1.Matches("cats"));
+  EXPECT_FALSE(m1.Matches("dogs"));
+
+  Matcher<const StringPiece&> m2 = StringPiece("cats");
+  EXPECT_TRUE(m2.Matches("cats"));
+  EXPECT_FALSE(m2.Matches("dogs"));
+}
+#endif  // GTEST_HAS_STRING_PIECE_
+
 // Tests that MakeMatcher() constructs a Matcher<T> from a
 // MatcherInterface* without requiring the user to explicitly
 // write the type.
Adds special support for matching StringPiece. Pulls in gtest r646.
google/googletest
1f122a06e6aad4d234123d2d8c1e352029ce0742
2013-03-25T16:27:03Z
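The convenience this change buys, reduced to a self-contained C++ sketch: a matcher type with implicit converting constructors lets call sites pass a bare literal where a matcher is expected, instead of wrapping it in Eq(). StringPiece and Matcher here are tiny stand-ins for the real gmock classes, assumed for illustration only.

#include <iostream>
#include <string>

struct StringPiece {
  const char* data;
  StringPiece(const char* d) : data(d) {}  // implicit, like the real class
  std::string ToString() const { return data; }
};

class Matcher {
 public:
  // Each constructor mirrors one of the specializations added in the diff.
  Matcher(const std::string& s) : expected_(s) {}
  Matcher(const char* s) : expected_(s) {}
  Matcher(StringPiece s) : expected_(s.ToString()) {}

  bool Matches(StringPiece value) const { return value.ToString() == expected_; }

 private:
  std::string expected_;
};

// A function expecting a matcher; callers can hand it a plain literal.
static bool ExpectCats(const Matcher& m) { return m.Matches("cats"); }

int main() {
  std::cout << ExpectCats("cats") << " "
            << ExpectCats(std::string("cats")) << " "
            << ExpectCats(StringPiece("dogs")) << "\n";  // prints: 1 1 0
}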
--- a/tensorflow/compiler/jit/xla_device_context.cc
+++ b/tensorflow/compiler/jit/xla_device_context.cc
@@ void XlaDeviceAllocator::DeallocateRaw(void* ptr) {
   delete XlaTensor::FromOpaquePointer(ptr);
 }
 
-AllocatorStats XlaDeviceAllocator::GetStats() {
-  stream_executor::AllocatorStats se_stats =
+absl::optional<AllocatorStats> XlaDeviceAllocator::GetStats() {
+  absl::optional<stream_executor::AllocatorStats> se_stats =
       stream_executor_->GetAllocatorStats();
+  if (!se_stats) {
+    return absl::nullopt;
+  }
 
   tensorflow::AllocatorStats tf_stats;
-  tf_stats.num_allocs = se_stats.num_allocs;
-  tf_stats.bytes_in_use = se_stats.bytes_in_use;
-  tf_stats.peak_bytes_in_use = se_stats.peak_bytes_in_use;
-  tf_stats.largest_alloc_size = se_stats.largest_alloc_size;
-  tf_stats.bytes_limit = se_stats.bytes_limit;
+  tf_stats.num_allocs = se_stats->num_allocs;
+  tf_stats.bytes_in_use = se_stats->bytes_in_use;
+  tf_stats.peak_bytes_in_use = se_stats->peak_bytes_in_use;
+  tf_stats.largest_alloc_size = se_stats->largest_alloc_size;
+  tf_stats.bytes_limit = se_stats->bytes_limit;
   return tf_stats;
 }
 
--- a/tensorflow/compiler/jit/xla_device_context.h
+++ b/tensorflow/compiler/jit/xla_device_context.h
@@ class XlaDeviceAllocator : public Allocator {
 
   void* AllocateRaw(size_t alignment, size_t num_bytes) override;
   void DeallocateRaw(void* ptr) override;
-  AllocatorStats GetStats() override;
+  absl::optional<AllocatorStats> GetStats() override;
 
  private:
   // The stream executor of the device.
--- a/tensorflow/contrib/memory_stats/kernels/memory_stats_ops.cc
+++ b/tensorflow/contrib/memory_stats/kernels/memory_stats_ops.cc
@@ class MemoryStatsOp : public OpKernel {
   void Compute(OpKernelContext* context) override {
     Allocator* allocator =
         context->device()->GetAllocator(AllocatorAttributes());
-    AllocatorStats allocator_stats = allocator->GetStats();
+    absl::optional<AllocatorStats> allocator_stats = allocator->GetStats();
+    if (!allocator_stats) {
+      *allocator_stats = AllocatorStats();
+    }
 
     Tensor* output_tensor = nullptr;
     OP_REQUIRES_OK(
         context, context->allocate_output(0, TensorShape({}), &output_tensor));
-    output_tensor->scalar<int64>()() = ExtractAllocatorStats(allocator_stats);
+    output_tensor->scalar<int64>()() = ExtractAllocatorStats(*allocator_stats);
   }
 
  protected:
@@ class BytesLimitOp : public MemoryStatsOp {
  private:
   int64 ExtractAllocatorStats(
       const AllocatorStats& allocator_stats) const override {
-    return allocator_stats.bytes_limit;
+    return allocator_stats.bytes_limit ? *allocator_stats.bytes_limit : -1;
   }
 };
 
--- a/tensorflow/core/common_runtime/bfc_allocator.cc
+++ b/tensorflow/core/common_runtime/bfc_allocator.cc
@@ void BFCAllocator::DumpMemoryLog(size_t num_bytes) {
   LOG(INFO) << "Stats: \n" << stats_.DebugString();
 }
 
-AllocatorStats BFCAllocator::GetStats() {
+absl::optional<AllocatorStats> BFCAllocator::GetStats() {
   mutex_lock l(lock_);
   return stats_;
 }
--- a/tensorflow/core/common_runtime/bfc_allocator.h
+++ b/tensorflow/core/common_runtime/bfc_allocator.h
@@ class BFCAllocator : public Allocator {
 
   int64 AllocationId(const void* ptr) override;
 
-  AllocatorStats GetStats() override;
+  absl::optional<AllocatorStats> GetStats() override;
 
   void ClearStats() override;
 
--- a/tensorflow/core/common_runtime/eager/kernel_and_device.cc
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device.cc
@@ void UpdateStats(OpKernelContext* context,
     memory->set_peak_bytes(std::get<1>(sizes));
     memory->set_live_bytes(std::get<2>(sizes));
 
-    AllocatorStats allocator_stats = allocator_pair.first->GetStats();
-    memory->set_allocator_bytes_in_use(allocator_stats.bytes_in_use);
+    absl::optional<AllocatorStats> allocator_stats =
+        allocator_pair.first->GetStats();
+    if (stats) {
+      memory->set_allocator_bytes_in_use(allocator_stats->bytes_in_use);
+    }
     allocator_pair.second->GetRecordsAndUnRef();
   }
   auto* ms = stats->mutable_memory_stats();
--- a/tensorflow/core/common_runtime/gpu/gpu_bfc_allocator_test.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_bfc_allocator_test.cc
@@ namespace {
 
 static void CheckStats(Allocator* a, int64 num_allocs, int64 bytes_in_use,
                        int64 peak_bytes_in_use, int64 largest_alloc_size) {
-  AllocatorStats stats = a->GetStats();
-  LOG(INFO) << "Alloc stats: " << std::endl << stats.DebugString();
-  EXPECT_EQ(stats.bytes_in_use, bytes_in_use);
-  EXPECT_EQ(stats.peak_bytes_in_use, peak_bytes_in_use);
-  EXPECT_EQ(stats.num_allocs, num_allocs);
-  EXPECT_EQ(stats.largest_alloc_size, largest_alloc_size);
+  absl::optional<AllocatorStats> stats = a->GetStats();
+  EXPECT_TRUE(stats);
+  if (!stats) {
+    return;
+  }
+  LOG(INFO) << "Alloc stats: " << std::endl << stats->DebugString();
+  EXPECT_EQ(stats->bytes_in_use, bytes_in_use);
+  EXPECT_EQ(stats->peak_bytes_in_use, peak_bytes_in_use);
+  EXPECT_EQ(stats->num_allocs, num_allocs);
+  EXPECT_EQ(stats->largest_alloc_size, largest_alloc_size);
 }
 
 TEST(GPUBFCAllocatorTest, NoDups) {
@@ TEST(GPUBFCAllocatorTest, AllocationsAndDeallocationsWithGrowth) {
     a.DeallocateRaw(existing_ptrs[i]);
   }
 
-  AllocatorStats stats = a.GetStats();
-  LOG(INFO) << "Alloc stats: \n" << stats.DebugString();
+  absl::optional<AllocatorStats> stats = a.GetStats();
+  if (stats) {
+    LOG(INFO) << "Alloc stats: \n" << stats->DebugString();
+  }
 }
 
 TEST(GPUBFCAllocatorTest, DISABLED_AllocatorReceivesZeroMemory) {
--- a/tensorflow/core/common_runtime/gpu/gpu_debug_allocator.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_debug_allocator.cc
@@ int64 GPUDebugAllocator::AllocationId(const void* ptr) {
                                 MASK_BYTES);
 }
 
-AllocatorStats GPUDebugAllocator::GetStats() {
+absl::optional<AllocatorStats> GPUDebugAllocator::GetStats() {
   return base_allocator_->GetStats();
 }
 
@@ size_t GPUNanResetAllocator::AllocatedSize(const void* ptr) {
   return base_allocator_->AllocatedSize(ptr);
 }
 
-AllocatorStats GPUNanResetAllocator::GetStats() {
+absl::optional<AllocatorStats> GPUNanResetAllocator::GetStats() {
   return base_allocator_->GetStats();
 }
 
--- a/tensorflow/core/common_runtime/gpu/gpu_debug_allocator.h
+++ b/tensorflow/core/common_runtime/gpu/gpu_debug_allocator.h
@@ class GPUDebugAllocator : public Allocator {
   size_t RequestedSize(const void* ptr) override;
   size_t AllocatedSize(const void* ptr) override;
   int64 AllocationId(const void* ptr) override;
-  AllocatorStats GetStats() override;
+  absl::optional<AllocatorStats> GetStats() override;
   void ClearStats() override;
 
   // For testing.
@@ class GPUNanResetAllocator : public Allocator {
   void DeallocateRaw(void* ptr) override;
   size_t RequestedSize(const void* ptr) override;
   size_t AllocatedSize(const void* ptr) override;
-  AllocatorStats GetStats() override;
+  absl::optional<AllocatorStats> GetStats() override;
   void ClearStats() override;
 
  private:
--- a/tensorflow/core/common_runtime/gpu/gpu_device.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_device.cc
@@ Status BaseGPUDeviceFactory::CreateGPUDevice(
                                    tf_gpu_id.value(), " with ", memory_limit,
                                    " bytes of memory.");
   }
-  AllocatorStats stats = gpu_allocator->GetStats();
+  absl::optional<AllocatorStats> stats = gpu_allocator->GetStats();
+  if (!stats) {
+    return errors::Internal("No allocator statistics");
+  }
   // 'memory_limit' is the required memory size, but if the allocator with given
   // tf_gpu_id was created before, we'll use it instead of creating a new one
   // (as TF gpu device is a shared resource), in which case the actual memory
@@ Status BaseGPUDeviceFactory::CreateGPUDevice(
   // different (which should be an error).
   //
   // TODO(laigd): report error if memory_limit doesn't match stats->bytes_limit.
-  int64 bytes_limit = stats.bytes_limit;
+  int64 bytes_limit = stats->bytes_limit ? *stats->bytes_limit : 0;
   std::unique_ptr<BaseGPUDevice> gpu_device = CreateGPUDevice(
       options, device_name, static_cast<Bytes>(bytes_limit), dev_locality,
       tf_gpu_id, GetShortDeviceDescription(platform_gpu_id, desc),
--- a/tensorflow/core/common_runtime/process_state.h
+++ b/tensorflow/core/common_runtime/process_state.h
@@ class RecordingAllocator : public Allocator {
   bool TracksAllocationSizes() override { return a_->TracksAllocationSizes(); }
   size_t RequestedSize(const void* p) override { return a_->RequestedSize(p); }
   size_t AllocatedSize(const void* p) override { return a_->AllocatedSize(p); }
-  AllocatorStats GetStats() override { return a_->GetStats(); }
+  absl::optional<AllocatorStats> GetStats() override { return a_->GetStats(); }
   void ClearStats() override { a_->ClearStats(); }
   ProcessState::MDMap* mm_;  // not owned
   Allocator* a_;             // not owned
--- a/tensorflow/core/common_runtime/step_stats_collector.cc
+++ b/tensorflow/core/common_runtime/step_stats_collector.cc
@@ void NodeExecStatsWrapper::AddAllocation(
   memory->set_peak_bytes(std::get<1>(sizes));
   memory->set_live_bytes(std::get<2>(sizes));
 
-  AllocatorStats stats = allocator->GetStats();
-  memory->set_allocator_bytes_in_use(stats.bytes_in_use);
+  absl::optional<AllocatorStats> stats = allocator->GetStats();
+  if (stats) {
+    memory->set_allocator_bytes_in_use(stats->bytes_in_use);
+  }
   allocations_.push_back(std::make_pair(memory, tracking_allocator));
 }
 
--- a/tensorflow/core/framework/allocator.cc
+++ b/tensorflow/core/framework/allocator.cc
@@ string AllocatorStats::DebugString() const {
       "MaxInUse:       %20lld\n"
       "NumAllocs:      %20lld\n"
       "MaxAllocSize:   %20lld\n",
-      this->bytes_limit, this->bytes_in_use, this->peak_bytes_in_use,
-      this->num_allocs, this->largest_alloc_size);
+      this->bytes_limit ? *this->bytes_limit : 0, this->bytes_in_use,
+      this->peak_bytes_in_use, this->num_allocs, this->largest_alloc_size);
 }
 
 constexpr size_t Allocator::kAllocatorAlignment;
@@ class CPUAllocator : public Allocator {
     port::AlignedFree(ptr);
   }
 
-  AllocatorStats GetStats() override {
+  absl::optional<AllocatorStats> GetStats() override {
     mutex_lock l(mu_);
     return stats_;
   }
--- a/tensorflow/core/framework/allocator.h
+++ b/tensorflow/core/framework/allocator.h
@@ limitations under the License.
 
 #include <limits>
 
+#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
 #include "tensorflow/core/framework/numeric_types.h"
 #include "tensorflow/core/framework/resource_handle.h"
 #include "tensorflow/core/framework/type_traits.h"
@@ struct AllocatorStats {
   int64 largest_alloc_size;  // The largest single allocation seen.
 
   // The upper limit of bytes of user allocatable device memory, if such a limit
-  // is known. Certain allocators may return 0 to indicate the limit is unknown.
-  int64 bytes_limit;
+  // is known.
+  absl::optional<int64> bytes_limit;
 
   AllocatorStats()
       : num_allocs(0),
         bytes_in_use(0),
         peak_bytes_in_use(0),
-        largest_alloc_size(0),
-        bytes_limit(0) {}
+        largest_alloc_size(0) {}
 
   string DebugString() const;
 };
@@ class Allocator {
   }
 
   // Fills in 'stats' with statistics collected by this allocator.
-  virtual AllocatorStats GetStats() { return AllocatorStats(); }
+  virtual absl::optional<AllocatorStats> GetStats() { return absl::nullopt; }
 
   // Clears the internal stats except for the `in_use` field.
   virtual void ClearStats() {}
--- a/tensorflow/core/framework/allocator_test.cc
+++ b/tensorflow/core/framework/allocator_test.cc
@@ namespace tensorflow {
 
 static void CheckStats(Allocator* a, int64 num_allocs, int64 bytes_in_use,
                        int64 peak_bytes_in_use, int64 largest_alloc_size) {
-  AllocatorStats stats = a->GetStats();
-  LOG(INFO) << "Alloc stats: \n" << stats.DebugString();
+  absl::optional<AllocatorStats> stats = a->GetStats();
+  EXPECT_TRUE(stats);
+  if (!stats) {
+    return;
+  }
+  LOG(INFO) << "Alloc stats: \n" << stats->DebugString();
 #if defined(PLATFORM_GOOGLE) && defined(NDEBUG)
   // NOTE: allocator stats expectation depends on the system malloc,
   // and can vary as that changes.
   static const int64 kSlop = 5 * 1024;
-  EXPECT_GT(stats.bytes_in_use, bytes_in_use - kSlop);
-  EXPECT_LT(stats.bytes_in_use, bytes_in_use + kSlop);
-  EXPECT_GT(stats.peak_bytes_in_use, peak_bytes_in_use - kSlop);
-  EXPECT_LT(stats.peak_bytes_in_use, peak_bytes_in_use + kSlop);
-  EXPECT_EQ(stats.num_allocs, num_allocs);
-  EXPECT_EQ(stats.largest_alloc_size, largest_alloc_size);
+  EXPECT_GT(stats->bytes_in_use, bytes_in_use - kSlop);
+  EXPECT_LT(stats->bytes_in_use, bytes_in_use + kSlop);
+  EXPECT_GT(stats->peak_bytes_in_use, peak_bytes_in_use - kSlop);
+  EXPECT_LT(stats->peak_bytes_in_use, peak_bytes_in_use + kSlop);
+  EXPECT_EQ(stats->num_allocs, num_allocs);
+  EXPECT_EQ(stats->largest_alloc_size, largest_alloc_size);
 #endif
 }
 
--- a/tensorflow/core/framework/tracking_allocator.cc
+++ b/tensorflow/core/framework/tracking_allocator.cc
@@ int64 TrackingAllocator::AllocationId(const void* ptr) {
   }
 }
 
-AllocatorStats TrackingAllocator::GetStats() { return allocator_->GetStats(); }
+absl::optional<AllocatorStats> TrackingAllocator::GetStats() {
+  return allocator_->GetStats();
+}
 
 void TrackingAllocator::ClearStats() { allocator_->ClearStats(); }
 
--- a/tensorflow/core/framework/tracking_allocator.h
+++ b/tensorflow/core/framework/tracking_allocator.h
@@ class TrackingAllocator : public Allocator {
   size_t RequestedSize(const void* ptr) override;
   size_t AllocatedSize(const void* ptr) override;
   int64 AllocationId(const void* ptr) override;
-  AllocatorStats GetStats() override;
+  absl::optional<AllocatorStats> GetStats() override;
   void ClearStats() override;
 
   // If the underlying allocator tracks allocation sizes, this returns
--- a/tensorflow/core/framework/tracking_allocator_test.cc
+++ b/tensorflow/core/framework/tracking_allocator_test.cc
@@ class TestableSizeTrackingAllocator : public Allocator {
     EXPECT_NE(size_map_.end(), iter);
     return iter->second;
   }
+  absl::optional<AllocatorStats> GetStats() override { return absl::nullopt; }
 
  private:
   std::unordered_map<const void*, size_t> size_map_;
@@ class NoMemoryAllocator : public Allocator {
   }
   void DeallocateRaw(void* ptr) override {}
   bool TracksAllocationSizes() override { return true; }
+  absl::optional<AllocatorStats> GetStats() override { return absl::nullopt; }
 };
 
 TEST(TrackingAllocatorTest, SimpleNoTracking) {
--- a/tensorflow/core/grappler/clusters/single_machine.cc
+++ b/tensorflow/core/grappler/clusters/single_machine.cc
@@ Status SingleMachine::GetPeakMemoryUsage(
       return Status(error::INVALID_ARGUMENT,
                     "Tracking allocation is not enabled.");
     }
-    AllocatorStats stats = allocator->GetStats();
-    (*device_peak_memory)[device->name()] = stats.peak_bytes_in_use;
+    absl::optional<AllocatorStats> stats = allocator->GetStats();
+    (*device_peak_memory)[device->name()] =
+        (stats ? stats->peak_bytes_in_use : 0);
   }
 
   return Status::OK();
--- a/tensorflow/core/kernels/stack.cc
+++ b/tensorflow/core/kernels/stack.cc
@@ void StackPushOp::ComputeAsync(OpKernelContext* ctx, DoneCallback done) {
     DeviceContext* device_ctxt = ctx->op_device_context();
     auto device = static_cast<tensorflow::Device*>(ctx->device());
     Allocator* allocator = device->GetAllocator(alloc_attrs);
-    AllocatorStats stats = allocator->GetStats();
-    if (stats.bytes_limit &&
-        stats.bytes_in_use > (stats.bytes_limit * kOccupancy)) {
+    absl::optional<AllocatorStats> stats = allocator->GetStats();
+    if (stats && *stats->bytes_limit &&
+        stats->bytes_in_use > (*stats->bytes_limit * kOccupancy)) {
       // Asynchronously copy the tensor from GPU to CPU memory.
      // TODO(yuanbyu): Swap the oldest tensor first.
       AllocatorAttributes host_alloc_attrs;
--- a/tensorflow/stream_executor/BUILD
+++ b/tensorflow/stream_executor/BUILD
@@ cc_library(
         "//tensorflow/core:lib",
         "//tensorflow/stream_executor/lib",
        "//tensorflow/stream_executor/platform",
+        "@com_google_absl//absl/base:core_headers",
         "@com_google_absl//absl/types:optional",
         "@com_google_absl//absl/types:span",
     ],
@@ cc_library(
         "//tensorflow/stream_executor/platform",
         "@com_google_absl//absl/base:core_headers",
         "@com_google_absl//absl/strings",
+        "@com_google_absl//absl/types:optional",
     ],
 )
 
@@ cc_library(
     deps = [
         "//tensorflow/stream_executor/platform",
         "@com_google_absl//absl/strings:str_format",
+        "@com_google_absl//absl/types:optional",
     ],
 )
 
--- a/tensorflow/stream_executor/allocator_stats.cc
+++ b/tensorflow/stream_executor/allocator_stats.cc
@@ string AllocatorStats::DebugString() const {
       "MaxInUse:       %20lld\n"
       "NumAllocs:      %20lld\n"
       "MaxAllocSize:   %20lld\n",
-      this->bytes_limit, this->bytes_in_use, this->peak_bytes_in_use,
-      this->num_allocs, this->largest_alloc_size);
+      this->bytes_limit ? *this->bytes_limit : 0, this->bytes_in_use,
+      this->peak_bytes_in_use, this->num_allocs, this->largest_alloc_size);
 }
 
 }  // namespace stream_executor
--- a/tensorflow/stream_executor/allocator_stats.h
+++ b/tensorflow/stream_executor/allocator_stats.h
@@ limitations under the License.
 
 #include <string>
 
+#include "absl/types/optional.h"
 #include "tensorflow/stream_executor/platform/port.h"
 
 namespace stream_executor {
@@ struct AllocatorStats {
   int64 largest_alloc_size;  // The largest single allocation seen.
 
   // The upper limit of bytes of user allocatable device memory, if such a limit
-  // is known. Certain allocators may return 0 to indicate the limit is unknown.
-  int64 bytes_limit;
+  // is known.
+  absl::optional<int64> bytes_limit;
 
   AllocatorStats()
       : num_allocs(0),
         bytes_in_use(0),
         peak_bytes_in_use(0),
-        largest_alloc_size(0),
-        bytes_limit(0) {}
+        largest_alloc_size(0) {}
 
   string DebugString() const;
 };
--- a/tensorflow/stream_executor/stream_executor_internal.h
+++ b/tensorflow/stream_executor/stream_executor_internal.h
@@ class StreamExecutorInterface {
   virtual void* GpuContextHack() { return nullptr; }
 
   // Return allocator statistics.
-  virtual AllocatorStats GetAllocatorStats() { return AllocatorStats(); }
+  virtual absl::optional<AllocatorStats> GetAllocatorStats() {
+    return absl::nullopt;
+  }
 
  private:
   SE_DISALLOW_COPY_AND_ASSIGN(StreamExecutorInterface);
--- a/tensorflow/stream_executor/stream_executor_pimpl.cc
+++ b/tensorflow/stream_executor/stream_executor_pimpl.cc
@@ bool StreamExecutor::UnregisterTraceListener(TraceListener* listener) {
   return true;
 }
 
-AllocatorStats StreamExecutor::GetAllocatorStats() {
+absl::optional<AllocatorStats> StreamExecutor::GetAllocatorStats() {
   return implementation_->GetAllocatorStats();
 }
 
--- a/tensorflow/stream_executor/stream_executor_pimpl.h
+++ b/tensorflow/stream_executor/stream_executor_pimpl.h
@@ limitations under the License.
 #include <vector>
 
 #include "absl/base/macros.h"
+#include "absl/types/optional.h"
 #include "tensorflow/stream_executor/lib/status.h"
 #include "tensorflow/stream_executor/lib/statusor.h"
 #include "tensorflow/stream_executor/lib/threadpool.h"
@@ class StreamExecutor {
   bool UnregisterTraceListener(TraceListener* listener);
 
   // Return allocator statistics.
-  AllocatorStats GetAllocatorStats();
+  absl::optional<AllocatorStats> GetAllocatorStats();
 
  private:
   template <typename BeginCallT, typename CompleteCallT,
Automated rollback of commit 9ca8321d68b74ce4f30756366919c80282ca2b68
tensorflow/tensorflow
d10b88cd4a487624221d5960657bc15cf8a0e2d3
2019-02-24T22:58:49Z
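The caller-side pattern this rollback reinstates, sketched as self-contained C++17 with std::optional standing in for absl::optional (an assumption for portability): GetStats() may now report "no statistics available" rather than a zero-filled struct, so callers check before dereferencing, and bytes_limit is itself optional.

#include <cstdint>
#include <iostream>
#include <optional>

struct AllocatorStats {
  int64_t num_allocs = 0;
  int64_t bytes_in_use = 0;
  std::optional<int64_t> bytes_limit;  // unset unless the allocator knows it
};

struct Allocator {
  // Base default: statistics are simply not available.
  virtual std::optional<AllocatorStats> GetStats() { return std::nullopt; }
  virtual ~Allocator() = default;
};

struct TrackingAllocator : Allocator {
  std::optional<AllocatorStats> GetStats() override {
    AllocatorStats s;
    s.num_allocs = 42;
    s.bytes_in_use = 1024;
    return s;  // bytes_limit stays unset: limit unknown
  }
};

int main() {
  TrackingAllocator a;
  if (std::optional<AllocatorStats> stats = a.GetStats()) {
    // bytes_limit is optional too; use a sentinel when it is unknown.
    int64_t limit = stats->bytes_limit ? *stats->bytes_limit : -1;
    std::cout << stats->bytes_in_use << " bytes in use, limit " << limit << "\n";
  } else {
    std::cout << "allocator does not track stats\n";
  }
}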
--- a/cocos/platform/android/jni/JniHelper.cpp
+++ b/cocos/platform/android/jni/JniHelper.cpp
@@ THE SOFTWARE.
 
 #define LOG_TAG "JniHelper"
 #define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)
+#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
 
 static pthread_key_t g_key;
 
@@ jclass _getClassID(const char *className) {
                                                _jstrClassName);
 
     if (NULL == _clazz) {
-        LOGD("Classloader failed to find class of %s", className);
+        LOGE("Classloader failed to find class of %s", className);
+        env->ExceptionClear();
     }
 
     env->DeleteLocalRef(_jstrClassName);
@@ namespace cocos2d {
 
         if (jvm->AttachCurrentThread(&_env, NULL) < 0)
         {
-            LOGD("Failed to get the environment using AttachCurrentThread()");
+            LOGE("Failed to get the environment using AttachCurrentThread()");
 
             return NULL;
         } else {
@@ namespace cocos2d {
 
             case JNI_EVERSION:
                 // Cannot recover from this error
-                LOGD("JNI interface version 1.4 not supported");
+                LOGE("JNI interface version 1.4 not supported");
             default:
-                LOGD("Failed to get the environment using GetEnv()");
+                LOGE("Failed to get the environment using GetEnv()");
                 return NULL;
         }
     }
@@ namespace cocos2d {
 
         JNIEnv *pEnv = JniHelper::getEnv();
         if (!pEnv) {
-            LOGD("Failed to get JNIEnv");
+            LOGE("Failed to get JNIEnv");
             return false;
         }
 
         jclass classID = _getClassID(className);
         if (!classID) {
-            LOGD("Failed to find class %s", className);
+            LOGE("Failed to find class %s", className);
+            pEnv->ExceptionClear();
             return false;
         }
 
         jmethodID methodID = pEnv->GetStaticMethodID(classID, methodName, paramCode);
         if (!methodID) {
-            LOGD("Failed to find static method id of %s", methodName);
+            LOGE("Failed to find static method id of %s", methodName);
+            pEnv->ExceptionClear();
             return false;
         }
 
@@ namespace cocos2d {
 
         jclass classID = pEnv->FindClass(className);
         if (!classID) {
-            LOGD("Failed to find class %s", className);
+            LOGE("Failed to find class %s", className);
+            pEnv->ExceptionClear();
             return false;
         }
 
         jmethodID methodID = pEnv->GetMethodID(classID, methodName, paramCode);
         if (!methodID) {
-            LOGD("Failed to find method id of %s", methodName);
+            LOGE("Failed to find method id of %s", methodName);
+            pEnv->ExceptionClear();
             return false;
         }
 
@@ namespace cocos2d {
 
         jclass classID = _getClassID(className);
         if (!classID) {
-            LOGD("Failed to find class %s", className);
+            LOGE("Failed to find class %s", className);
             pEnv->ExceptionClear();
             return false;
         }
 
         jmethodID methodID = pEnv->GetMethodID(classID, methodName, paramCode);
         if (!methodID) {
-            LOGD("Failed to find method id of %s", methodName);
+            LOGE("Failed to find method id of %s", methodName);
             pEnv->ExceptionClear();
             return false;
         }
 
android jni error clear & more readable log
cocos2d/cocos2d-x
cd892abc7781e1c46fabaf1c19528b1a4e3b647b
2014-05-20T10:52:21Z
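The failure-handling pattern the diff standardizes, as a runnable C++ stand-in: a failed lookup leaves a pending JNI exception that must be cleared before any further calls, and failures are logged at error rather than debug severity. FakeEnv mimics only the relevant slice of JNIEnv and is an assumption for illustration, not Android code.

#include <cstdio>

// Error-level logging, analogous to the LOGE macro added in the diff.
#define LOGE(...) std::fprintf(stderr, "E/JniHelper: " __VA_ARGS__)

struct FakeEnv {
  bool exception_pending = false;
  void* FindClass(const char* name) {
    (void)name;
    exception_pending = true;  // simulate a failed class lookup
    return nullptr;
  }
  void ExceptionClear() { exception_pending = false; }
};

static bool getClassID(FakeEnv* env, const char* className) {
  void* clazz = env->FindClass(className);
  if (clazz == nullptr) {
    LOGE("Classloader failed to find class of %s\n", className);
    env->ExceptionClear();  // keep the env usable for subsequent calls
    return false;
  }
  return true;
}

int main() {
  FakeEnv env;
  getClassID(&env, "org/example/Missing");
  // After ExceptionClear() the env can safely service further calls.
  return env.exception_pending ? 1 : 0;
}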
new file mode 100644
index 0000000000..066774d5bf
--- /dev/null
+++ b/java/src/main/java/com/google/protobuf/nano/ExtendableMessageNano.java
@@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/protobuf/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package com.google.protobuf.nano;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Base class of those Protocol Buffer messages that need to store unknown fields,
+ * such as extensions.
+ */
+public abstract class ExtendableMessageNano extends MessageNano {
+    /**
+     * A container for fields unknown to the message, including extensions. Extension fields can
+     * can be accessed through the {@link getExtension()} and {@link setExtension()} methods.
+     */
+    protected List<UnknownFieldData> unknownFieldData;
+
+    @Override
+    public int getSerializedSize() {
+        int size = WireFormatNano.computeWireSize(unknownFieldData);
+        cachedSize = size;
+        return size;
+    }
+
+    /**
+     * Gets the value stored in the specified extension of this message.
+     */
+    public <T> T getExtension(Extension<T> extension) {
+        return WireFormatNano.getExtension(extension, unknownFieldData);
+    }
+
+    /**
+     * Sets the value of the specified extension of this message.
+     */
+    public <T> void setExtension(Extension<T> extension, T value) {
+        if (unknownFieldData == null) {
+            unknownFieldData = new ArrayList<UnknownFieldData>();
+        }
+        WireFormatNano.setExtension(extension, value, unknownFieldData);
+    }
+}
\ No newline at end of file
--- a/java/src/main/java/com/google/protobuf/nano/MessageNano.java
+++ b/java/src/main/java/com/google/protobuf/nano/MessageNano.java
@@
  * @author wink@google.com Wink Saville
  */
 public abstract class MessageNano {
+    protected int cachedSize = -1;
+
     /**
      * Get the number of bytes required to encode this message.
      * Returns the cached size or calls getSerializedSize which
@@
      * so the size is only computed once. If a member is modified
      * then this could be stale call getSerializedSize if in doubt.
      */
-    abstract public int getCachedSize();
+    public int getCachedSize() {
+        if (cachedSize < 0) {
+            // getSerializedSize sets cachedSize
+            getSerializedSize();
+        }
+        return cachedSize;
+    }
 
     /**
      * Computes the number of bytes required to encode this message.
      * The size is cached and the cached result can be retrieved
      * using getCachedSize().
      */
-    abstract public int getSerializedSize();
+    public int getSerializedSize() {
+        // This is overridden if the generated message has serialized fields.
+        cachedSize = 0;
+        return 0;
+    }
 
     /**
      * Serializes the message and writes it to {@code output}. This does not
--- a/src/google/protobuf/compiler/javanano/javanano_file.cc
+++ b/src/google/protobuf/compiler/javanano/javanano_file.cc
@@ void FileGenerator::Generate(io::Printer* printer) {
   printer->Print(
     "\n"
     "@SuppressWarnings(\"hiding\")\n"
-    "public final class $classname$ {\n"
-    "\n"
-    "private $classname$() {}\n",
+    "public interface $classname$ {\n",
     "classname", classname_);
   printer->Indent();
 
--- a/src/google/protobuf/compiler/javanano/javanano_message.cc
+++ b/src/google/protobuf/compiler/javanano/javanano_message.cc
@@ void MessageGenerator::Generate(io::Printer* printer) {
     printer->Print(
       "\n"
       "@SuppressWarnings(\"hiding\")\n"
-      "public final class $classname$ extends\n"
-      "    com.google.protobuf.nano.MessageNano {\n",
+      "public final class $classname$ extends\n",
       "classname", descriptor_->name());
   } else {
     printer->Print(
       "\n"
-      "public static final class $classname$ extends\n"
-      "    com.google.protobuf.nano.MessageNano {\n",
+      "public static final class $classname$ extends\n",
       "classname", descriptor_->name());
   }
+  if (params_.store_unknown_fields()) {
+    printer->Print(
+      "    com.google.protobuf.nano.ExtendableMessageNano {\n");
+  } else {
+    printer->Print(
+      "    com.google.protobuf.nano.MessageNano {\n");
+  }
   printer->Indent();
   printer->Print(
     "\n"
@@ void MessageGenerator::Generate(io::Printer* printer) {
     "}\n",
     "classname", descriptor_->name());
 
-  if (params_.store_unknown_fields()) {
-    printer->Print(
-      "\n"
-      "private java.util.List<com.google.protobuf.nano.UnknownFieldData>\n"
-      "    unknownFieldData;\n");
-  }
-
   // Nested types and extensions
   for (int i = 0; i < descriptor_->extension_count(); i++) {
     ExtensionGenerator(descriptor_->extension(i), params_).Generate(printer);
@@ void MessageGenerator::Generate(io::Printer* printer) {
     GenerateHashCode(printer);
   }
 
-  // If we have an extension range, generate accessors for extensions.
-  if (params_.store_unknown_fields()
-      && descriptor_->extension_range_count() > 0) {
-    printer->Print(
-      "\n"
-      "public <T> T getExtension(com.google.protobuf.nano.Extension<T> extension) {\n"
-      "  return com.google.protobuf.nano.WireFormatNano.getExtension(\n"
-      "      extension, unknownFieldData);\n"
-      "}\n"
-      "\n"
-      "public <T> void setExtension(com.google.protobuf.nano.Extension<T> extension, T value) {\n"
-      "  if (unknownFieldData == null) {\n"
-      "    unknownFieldData =\n"
-      "        new java.util.ArrayList<com.google.protobuf.nano.UnknownFieldData>();\n"
-      "  }\n"
-      "  com.google.protobuf.nano.WireFormatNano.setExtension(\n"
-      "      extension, value, unknownFieldData);\n"
-      "}\n");
-  }
   GenerateMessageSerializationMethods(printer);
   GenerateMergeFromMethods(printer);
   GenerateParseFromMethods(printer);
@@ GenerateMessageSerializationMethods(io::Printer* printer) {
   }
 
   printer->Outdent();
-  printer->Print(
-    "}\n"
-    "\n"
-    "private int cachedSize;\n"
-    "@Override\n"
-    "public int getCachedSize() {\n"
-    "  if (cachedSize < 0) {\n"
-    "    // getSerializedSize sets cachedSize\n"
-    "    getSerializedSize();\n"
-    "  }\n"
-    "  return cachedSize;\n"
-    "}\n"
-    "\n"
-    "@Override\n"
-    "public int getSerializedSize() {\n"
-    "  int size = 0;\n");
-  printer->Indent();
+  printer->Print("}\n");
 
-  for (int i = 0; i < descriptor_->field_count(); i++) {
-    field_generators_.get(sorted_fields[i]).GenerateSerializedSizeCode(printer);
-  }
+  // Rely on the parent implementation of getSerializedSize if there are no fields to
+  // serialize in this MessageNano.
+  if (descriptor_->field_count() != 0) {
+    printer->Print(
+      "\n"
+      "@Override\n"
+      "public int getSerializedSize() {\n"
+      "  int size = super.getSerializedSize();\n");
+    printer->Indent();
 
-  if (params_.store_unknown_fields()) {
+    for (int i = 0; i < descriptor_->field_count(); i++) {
+      field_generators_.get(sorted_fields[i]).GenerateSerializedSizeCode(printer);
+    }
+
+    printer->Outdent();
     printer->Print(
-      "size += com.google.protobuf.nano.WireFormatNano.
computeWireSize ( unknownFieldData ) ; \ n " ) ; <nl> + " cachedSize = size ; \ n " <nl> + " return size ; \ n " <nl> + " } \ n " ) ; <nl> } <nl> - <nl> - printer - > Outdent ( ) ; <nl> - printer - > Print ( <nl> - " cachedSize = size ; \ n " <nl> - " return size ; \ n " <nl> - " } \ n " ) ; <nl> } <nl> <nl> void MessageGenerator : : GenerateMergeFromMethods ( io : : Printer * printer ) { <nl>
am ac8e2e15: Merge "Minimize method count for nanoproto."
protocolbuffers/protobuf
1bca0c0cb9d3c3fce8c67d77c65e9711a9f1a27a
2013-11-12T17:15:57Z
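The commit above trims per-message method count in protobuf nano by hoisting getCachedSize() and getSerializedSize() out of every generated class into the MessageNano base, which now owns the cachedSize field; generated classes override getSerializedSize() only when they actually have fields to serialize. Method count is at a premium in nano protos' Android target, which is presumably the motivation. The caching idiom itself is language-neutral; below is a minimal C++ sketch of it (MessageBase and PingMessage are illustrative names, single-threaded use assumed — this is not the real protobuf-nano API).

#include <cstdio>
#include <cstring>

// Base class owns the size cache; subclasses override only the actual
// size computation, mirroring the MessageNano change above.
class MessageBase {
 public:
    virtual ~MessageBase() {}

    // Returns the cached size, computing it on first use. The value can
    // go stale if a field is modified afterwards -- call serializedSize()
    // directly if in doubt, as the original javadoc warns.
    int cachedSize() {
        if (cached_size_ < 0) serializedSize();  // sets cached_size_
        return cached_size_;
    }

    // Default: a message with no serialized fields occupies zero bytes.
    virtual int serializedSize() { return cached_size_ = 0; }

 protected:
    int cached_size_ = -1;
};

class PingMessage : public MessageBase {  // stands in for a generated type
 public:
    int serializedSize() override {
        int size = MessageBase::serializedSize();
        size += 1 + static_cast<int>(strlen(payload_));  // tag + payload
        return cached_size_ = size;
    }

 private:
    const char* payload_ = "ping";
};

int main() {
    PingMessage m;
    printf("%d\n", m.cachedSize());  // computes once and caches
    printf("%d\n", m.cachedSize());  // served from the cache
}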
mmm a / dbms / src / Functions / FunctionsURL . cpp <nl> ppp b / dbms / src / Functions / FunctionsURL . cpp <nl> void DecodeURLComponentImpl : : vector ( const ColumnString : : Chars_t & data , const Co <nl> void DecodeURLComponentImpl : : constant ( const std : : string & str , <nl> std : : string & res_data ) <nl> { <nl> - res_data . resize ( str . size ( ) + 15 ) ; / / / This is needed for memcpySmallAllowReadWriteOverflow15 function , that is used inside decodeURL . <nl> - size_t len = decodeURL ( str . data ( ) , str . size ( ) , & res_data [ 0 ] ) ; <nl> - res_data . resize ( len ) ; <nl> + ColumnString src ; <nl> + ColumnString dst ; <nl> + src . insert ( str ) ; <nl> + <nl> + vector ( src . getChars ( ) , src . getOffsets ( ) , dst . getChars ( ) , dst . getOffsets ( ) ) ; <nl> + <nl> + res_data = dst [ 0 ] . get < String > ( ) ; <nl> } <nl> <nl> <nl>
Fixed error (tnx. artpaul) [#METR-23793].
ClickHouse/ClickHouse
989d542f6605b2aefe9c64a5fe7a1a799ee0ef4e
2016-12-15T20:33:35Z
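The fix above deletes a hand-rolled scalar path in DecodeURLComponentImpl::constant — which had to reproduce the 15-byte over-allocation that memcpySmallAllowReadWriteOverflow15 relies on — and instead routes the single constant string through the already-correct vector() implementation. A minimal sketch of that "scalar delegates to batch" refactor follows, with a toy percent-decoder standing in for decodeURL (decodeBatch and decodeOne are illustrative names, not ClickHouse API).

#include <cctype>
#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for the batch implementation: percent-decodes each input.
// In the real code this is the optimized columnar path whose buffer
// requirements the scalar caller kept getting wrong.
static void decodeBatch(const std::vector<std::string>& in,
                        std::vector<std::string>& out) {
    out.clear();
    for (const std::string& s : in) {
        std::string r;
        for (size_t i = 0; i < s.size(); ++i) {
            if (s[i] == '%' && i + 2 < s.size()
                && isxdigit(static_cast<unsigned char>(s[i + 1]))
                && isxdigit(static_cast<unsigned char>(s[i + 2]))) {
                r += static_cast<char>(std::stoi(s.substr(i + 1, 2), nullptr, 16));
                i += 2;
            } else {
                r += s[i];
            }
        }
        out.push_back(r);
    }
}

// Scalar wrapper in the spirit of the fixed constant(): wrap the single
// value in a one-element batch instead of duplicating the batch logic.
static std::string decodeOne(const std::string& s) {
    std::vector<std::string> in{s}, out;
    decodeBatch(in, out);
    return out.at(0);
}

int main() { std::cout << decodeOne("a%20b%2Fc") << '\n'; }  // prints "a b/c"

With this shape the scalar path can no longer drift out of sync with the batch path's memory-layout requirements, which is exactly the class of bug the commit fixes.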
mmm a / src / random . h <nl> ppp b / src / random . h <nl> <nl> # include < uint256 . h > <nl> <nl> # include < stdint . h > <nl> + # include < limits > <nl> <nl> / * Seed OpenSSL PRNG with additional entropy data * / <nl> void RandAddSeed ( ) ; <nl> class FastRandomContext { <nl> <nl> / * * Generate a random boolean . * / <nl> bool randbool ( ) { return randbits ( 1 ) ; } <nl> + <nl> + / / Compatibility with the C + + 11 UniformRandomBitGenerator concept <nl> + typedef uint64_t result_type ; <nl> + static constexpr uint64_t min ( ) { return 0 ; } <nl> + static constexpr uint64_t max ( ) { return std : : numeric_limits < uint64_t > : : max ( ) ; } <nl> + inline uint64_t operator ( ) ( ) { return rand64 ( ) ; } <nl> } ; <nl> <nl> / * Number of random bytes returned by GetOSRand . <nl> mmm a / src / test / random_tests . cpp <nl> ppp b / src / test / random_tests . cpp <nl> <nl> <nl> # include < boost / test / unit_test . hpp > <nl> <nl> + # include < random > <nl> + # include < algorithm > <nl> + <nl> BOOST_FIXTURE_TEST_SUITE ( random_tests , BasicTestingSetup ) <nl> <nl> BOOST_AUTO_TEST_CASE ( osrandom_tests ) <nl> BOOST_AUTO_TEST_CASE ( fastrandom_randbits ) <nl> } <nl> } <nl> <nl> + / * * Does - it - compile test for compatibility with standard C + + 11 RNG interface . * / <nl> + BOOST_AUTO_TEST_CASE ( stdrandom_test ) <nl> + { <nl> + FastRandomContext ctx ; <nl> + std : : uniform_int_distribution < int > distribution ( 3 , 9 ) ; <nl> + for ( int i = 0 ; i < 100 ; + + i ) { <nl> + int x = distribution ( ctx ) ; <nl> + BOOST_CHECK ( x > = 3 ) ; <nl> + BOOST_CHECK ( x < = 9 ) ; <nl> + <nl> + std : : vector < int > test { 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 } ; <nl> + std : : shuffle ( test . begin ( ) , test . end ( ) , ctx ) ; <nl> + for ( int j = 1 ; j < = 10 ; + + j ) { <nl> + BOOST_CHECK ( std : : find ( test . begin ( ) , test . end ( ) , j ) ! = test . end ( ) ) ; <nl> + } <nl> + } <nl> + <nl> + } <nl> + <nl> BOOST_AUTO_TEST_SUITE_END ( ) <nl>
Merge: Make FastRandomContext support standard C++11 RNG interface
bitcoin/bitcoin
f686002a8eba820a40ac2f34a6e8f57b2b5cc54c
2018-03-22T16:21:46Z
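The four members added above — result_type, static constexpr min() and max(), and operator() — are exactly what the C++11 UniformRandomBitGenerator concept requires, which is why FastRandomContext can now be handed to std::uniform_int_distribution and std::shuffle as the new test does. Here is a self-contained sketch with a toy xorshift64 engine standing in for Bitcoin's actual generator (ToyRng is an illustrative name).

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <random>
#include <vector>

// Toy engine satisfying the UniformRandomBitGenerator concept -- the
// same four members the commit adds to FastRandomContext.
class ToyRng {
 public:
    explicit ToyRng(uint64_t seed) : state_(seed ? seed : 1) {}

    typedef uint64_t result_type;
    static constexpr uint64_t min() { return 0; }
    static constexpr uint64_t max() { return std::numeric_limits<uint64_t>::max(); }
    uint64_t operator()() {  // xorshift64 step
        state_ ^= state_ << 13;
        state_ ^= state_ >> 7;
        state_ ^= state_ << 17;
        return state_;
    }

 private:
    uint64_t state_;
};

int main() {
    ToyRng rng(42);

    // Works with any <random> distribution...
    std::uniform_int_distribution<int> dist(3, 9);
    std::cout << dist(rng) << '\n';

    // ...and with algorithms that take a URBG, like std::shuffle,
    // mirroring the stdrandom_test case in the diff above.
    std::vector<int> v{1, 2, 3, 4, 5};
    std::shuffle(v.begin(), v.end(), rng);
    for (int x : v) std::cout << x << ' ';
    std::cout << '\n';
}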
mmm a / tools / sil - opt / SILOpt . cpp <nl> ppp b / tools / sil - opt / SILOpt . cpp <nl> SDKPath ( " sdk " , llvm : : cl : : desc ( " The path to the SDK for use with the clang " <nl> llvm : : cl : : init ( " " ) ) ; <nl> <nl> static llvm : : cl : : opt < std : : string > <nl> - Target ( " target " , llvm : : cl : : desc ( " target triple " ) ) ; <nl> + Target ( " target " , llvm : : cl : : desc ( " target triple " ) , <nl> + llvm : : cl : : init ( llvm : : sys : : getDefaultTargetTriple ( ) ) ) ; <nl> <nl> static llvm : : cl : : opt < OptGroup > OptimizationGroup ( <nl> llvm : : cl : : desc ( " Predefined optimization groups : " ) , <nl>
Merge remote-tracking branch 'origin/master' into master-next
apple/swift
ca428a00ef5d5872e7bb59c65db050e782f18c3b
2018-01-06T02:29:43Z
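The one-line change above makes sil-opt's -target option default to the host triple instead of an empty string, via llvm::cl::init(llvm::sys::getDefaultTargetTriple()). A minimal standalone tool using the same pattern is sketched below; it assumes an LLVM of roughly this vintage, where getDefaultTargetTriple() is declared in llvm/Support/Host.h (header locations vary across LLVM versions).

#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/raw_ostream.h"

// CLI option that defaults to the host target triple, as in the diff above.
static llvm::cl::opt<std::string> Target(
    "target", llvm::cl::desc("target triple"),
    llvm::cl::init(llvm::sys::getDefaultTargetTriple()));

int main(int argc, char **argv) {
    llvm::cl::ParseCommandLineOptions(argc, argv, "target triple demo\n");
    // With no -target on the command line, this prints the host triple
    // rather than an empty string.
    llvm::outs() << "target: " << Target.getValue() << "\n";
    return 0;
}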
mmm a / Marlin / Conditionals_LCD . h <nl> ppp b / Marlin / Conditionals_LCD . h <nl> <nl> # define LCD_STR_SPECIAL_MAX ' \ x09 ' <nl> / / Maximum here is 0x1f because 0x20 is ' ' ( space ) and the normal charsets begin . <nl> / / Better stay below 0x10 because DISPLAY_CHARSET_HD44780_WESTERN begins here . <nl> + <nl> + / / Symbol characters <nl> + # define LCD_STR_FILAM_DIA " \ xf8 " <nl> + # define LCD_STR_FILAM_MUL " \ xa4 " <nl> # else <nl> / * Custom characters defined in the first 8 characters of the LCD * / <nl> # define LCD_STR_BEDTEMP " \ x00 " / / Print only as a char . This will have ' unexpected ' results when used in a string ! <nl> mmm a / Marlin / ultralcd . cpp <nl> ppp b / Marlin / ultralcd . cpp <nl> extern float zprobe_zoffset ; <nl> <nl> int lcd_preheat_hotend_temp [ 2 ] , lcd_preheat_bed_temp [ 2 ] , lcd_preheat_fan_speed [ 2 ] ; <nl> <nl> - # if ENABLED ( FILAMENT_LCD_DISPLAY ) <nl> + # if ENABLED ( FILAMENT_LCD_DISPLAY ) & & ENABLED ( SDSUPPORT ) <nl> millis_t previous_lcd_status_ms = 0 ; <nl> # endif <nl> <nl> void lcd_status_screen ( ) { <nl> # if ENABLED ( ULTIPANEL ) <nl> <nl> if ( lcd_clicked ) { <nl> - # if ENABLED ( FILAMENT_LCD_DISPLAY ) <nl> + # if ENABLED ( FILAMENT_LCD_DISPLAY ) & & ENABLED ( SDSUPPORT ) <nl> previous_lcd_status_ms = millis ( ) ; / / get status message to show up for a while <nl> # endif <nl> lcd_implementation_init ( / / to maybe revive the LCD if static electricity killed it . <nl> void lcd_finishstatus ( bool persist = false ) { <nl> # endif <nl> lcdDrawUpdate = LCDVIEW_CLEAR_CALL_REDRAW ; <nl> <nl> - # if ENABLED ( FILAMENT_LCD_DISPLAY ) <nl> + # if ENABLED ( FILAMENT_LCD_DISPLAY ) & & ENABLED ( SDSUPPORT ) <nl> previous_lcd_status_ms = millis ( ) ; / / get status message to show up for a while <nl> # endif <nl> } <nl> mmm a / Marlin / ultralcd . h <nl> ppp b / Marlin / ultralcd . h <nl> <nl> <nl> # endif <nl> <nl> - # if ENABLED ( FILAMENT_LCD_DISPLAY ) <nl> + # if ENABLED ( FILAMENT_LCD_DISPLAY ) & & ENABLED ( SDSUPPORT ) <nl> extern millis_t previous_lcd_status_ms ; <nl> # endif <nl> <nl> mmm a / Marlin / ultralcd_impl_DOGM . h <nl> ppp b / Marlin / ultralcd_impl_DOGM . h <nl> static void lcd_implementation_status_screen ( ) { <nl> / / When everything is ok you see a constant ' X ' . <nl> <nl> static char xstring [ 5 ] , ystring [ 5 ] , zstring [ 7 ] ; <nl> + # if ENABLED ( FILAMENT_LCD_DISPLAY ) & & DISABLED ( SDSUPPORT ) <nl> + static char wstring [ 5 ] , mstring [ 4 ] ; <nl> + # endif <nl> <nl> / / At the first page , regenerate the XYZ strings <nl> if ( page . page = = 0 ) { <nl> strcpy ( xstring , ftostr4sign ( current_position [ X_AXIS ] ) ) ; <nl> strcpy ( ystring , ftostr4sign ( current_position [ Y_AXIS ] ) ) ; <nl> strcpy ( zstring , ftostr52sp ( current_position [ Z_AXIS ] + 0 . 00001 ) ) ; <nl> + # if ENABLED ( FILAMENT_LCD_DISPLAY ) & & DISABLED ( SDSUPPORT ) <nl> + strcpy ( wstring , ftostr12ns ( filament_width_meas ) ) ; <nl> + strcpy ( mstring , itostr3 ( 100 . 0 * volumetric_multiplier [ FILAMENT_SENSOR_EXTRUDER_NUM ] ) ) ; <nl> + # endif <nl> } <nl> <nl> if ( PAGE_CONTAINS ( XYZ_FRAME_TOP , XYZ_FRAME_TOP + XYZ_FRAME_HEIGHT - 1 ) ) { <nl> static void lcd_implementation_status_screen ( ) { <nl> u8g . setPrintPos ( 12 , 50 ) ; <nl> lcd_print ( itostr3 ( feedrate_percentage ) ) ; <nl> u8g . print ( ' % ' ) ; <nl> + <nl> + / / <nl> + / / Filament sensor display if SD is disabled <nl> + / / <nl> + # if DISABLED ( SDSUPPORT ) & & ENABLED ( FILAMENT_LCD_DISPLAY ) <nl> + u8g . 
setPrintPos ( 56 , 50 ) ; <nl> + lcd_print ( wstring ) ; <nl> + u8g . setPrintPos ( 102 , 50 ) ; <nl> + lcd_print ( mstring ) ; <nl> + u8g . print ( ' % ' ) ; <nl> + lcd_setFont ( FONT_MENU ) ; <nl> + u8g . setPrintPos ( 47 , 50 ) ; <nl> + lcd_print ( LCD_STR_FILAM_DIA ) ; <nl> + u8g . setPrintPos ( 93 , 50 ) ; <nl> + lcd_print ( LCD_STR_FILAM_MUL ) ; <nl> + # endif <nl> } <nl> <nl> / / <nl> static void lcd_implementation_status_screen ( ) { <nl> if ( PAGE_CONTAINS ( STATUS_BASELINE + 1 - INFO_FONT_HEIGHT , STATUS_BASELINE ) ) { <nl> u8g . setPrintPos ( 0 , STATUS_BASELINE ) ; <nl> <nl> - # if DISABLED ( FILAMENT_LCD_DISPLAY ) <nl> - lcd_print ( lcd_status_message ) ; <nl> - # else <nl> + # if ENABLED ( FILAMENT_LCD_DISPLAY ) & & ENABLED ( SDSUPPORT ) <nl> if ( PENDING ( millis ( ) , previous_lcd_status_ms + 5000UL ) ) { / / Display both Status message line and Filament display on the last line <nl> lcd_print ( lcd_status_message ) ; <nl> } <nl> else { <nl> - lcd_printPGM ( PSTR ( " dia : " ) ) ; <nl> + lcd_printPGM ( PSTR ( LCD_STR_FILAM_DIA ) ) ; <nl> + u8g . print ( ' : ' ) ; <nl> lcd_print ( ftostr12ns ( filament_width_meas ) ) ; <nl> - lcd_printPGM ( PSTR ( " factor : " ) ) ; <nl> + lcd_printPGM ( PSTR ( " " LCD_STR_FILAM_MUL ) ) ; <nl> + u8g . print ( ' : ' ) ; <nl> lcd_print ( itostr3 ( 100 . 0 * volumetric_multiplier [ FILAMENT_SENSOR_EXTRUDER_NUM ] ) ) ; <nl> u8g . print ( ' % ' ) ; <nl> } <nl> + # else <nl> + lcd_print ( lcd_status_message ) ; <nl> # endif <nl> } <nl> } <nl> mmm a / Marlin / ultralcd_impl_HD44780 . h <nl> ppp b / Marlin / ultralcd_impl_HD44780 . h <nl> void lcd_print ( char c ) { charset_mapper ( c ) ; } <nl> void lcd_erase_line ( const int line ) { <nl> lcd . setCursor ( 0 , line ) ; <nl> for ( uint8_t i = LCD_WIDTH + 1 ; - - i ; ) <nl> - lcd_print ( ' ' ) ; <nl> + lcd . print ( ' ' ) ; <nl> } <nl> <nl> / / Scroll the PSTR ' text ' in a ' len ' wide field for ' time ' milliseconds at position col , line <nl> static void lcd_implementation_status_screen ( ) { <nl> <nl> # if ENABLED ( LCD_PROGRESS_BAR ) <nl> <nl> - if ( card . isFileOpen ( ) ) { <nl> - / / Draw the progress bar if the message has shown long enough <nl> - / / or if there is no message set . <nl> - if ( ELAPSED ( millis ( ) , progress_bar_ms + PROGRESS_BAR_MSG_TIME ) | | ! lcd_status_message [ 0 ] ) <nl> - return lcd_draw_progress_bar ( card . percentDone ( ) ) ; <nl> - } / / card . isFileOpen <nl> + / / Draw the progress bar if the message has shown long enough <nl> + / / or if there is no message set . <nl> + if ( card . isFileOpen ( ) & & ELAPSED ( millis ( ) , progress_bar_ms + PROGRESS_BAR_MSG_TIME ) | | ! lcd_status_message [ 0 ] ) <nl> + return lcd_draw_progress_bar ( card . percentDone ( ) ) ; <nl> <nl> - # elif ENABLED ( FILAMENT_LCD_DISPLAY ) <nl> + # elif ENABLED ( FILAMENT_LCD_DISPLAY ) & & ENABLED ( SDSUPPORT ) <nl> <nl> / / Show Filament Diameter and Volumetric Multiplier % <nl> / / After allowing lcd_status_message to show for 5 seconds <nl> static void lcd_implementation_status_screen ( ) { <nl> return ; <nl> } <nl> <nl> - # endif / / FILAMENT_LCD_DISPLAY <nl> + # endif / / FILAMENT_LCD_DISPLAY & & SDSUPPORT <nl> <nl> lcd_print ( lcd_status_message ) ; <nl> } <nl>
Show filament width sensor values on the LCD constantly if SD card support is not enabled.
MarlinFirmware/Marlin
d38b1bc4b1d6fda3f758bd972bcd226dab4c9b65
2017-03-29T09:03:08Z
new file mode 100644 <nl> index 00000000000 . . 13f769c9ca5 <nl> mmm / dev / null <nl> ppp b / ports / 3fd / CONTROL <nl> <nl> + Source : 3fd <nl> + Version : 2 . 6 . 2 <nl> + Description : C + + Framework For Fast Development <nl> + Build - Depends : boost - lockfree ( windows ) , boost - regex ( windows ) , poco ( windows ) , sqlite3 , rapidxml <nl> new file mode 100644 <nl> index 00000000000 . . 69fc60ccb6d <nl> mmm / dev / null <nl> ppp b / ports / 3fd / portfile . cmake <nl> <nl> + include ( vcpkg_common_functions ) <nl> + <nl> + # Check architecture : <nl> + if ( VCPKG_TARGET_ARCHITECTURE STREQUAL " x86 " ) <nl> + set ( BUILD_ARCH " Win32 " ) <nl> + elseif ( VCPKG_TARGET_ARCHITECTURE STREQUAL " x64 " ) <nl> + set ( BUILD_ARCH " x64 " ) <nl> + elseif ( VCPKG_TARGET_ARCHITECTURE STREQUAL " arm " ) <nl> + set ( BUILD_ARCH " ARM " ) <nl> + else ( ) <nl> + message ( FATAL_ERROR " Unsupported architecture : $ { VCPKG_TARGET_ARCHITECTURE } " ) <nl> + endif ( ) <nl> + <nl> + # Check library linkage : <nl> + if ( VCPKG_LIBRARY_LINKAGE STREQUAL " dynamic " ) <nl> + message ( " 3FD only supports static library linkage . Building static . " ) <nl> + set ( VCPKG_LIBRARY_LINKAGE static ) <nl> + endif ( ) <nl> + <nl> + # Check CRT linkage : <nl> + if ( VCPKG_CRT_LINKAGE STREQUAL " static " ) <nl> + message ( FATAL_ERROR " 3FD can only be built with dynamic linkage to CRT ! " ) <nl> + endif ( ) <nl> + <nl> + # Get source code : <nl> + vcpkg_from_github ( <nl> + OUT_SOURCE_PATH SOURCE_PATH <nl> + REPO faburaya / 3FD <nl> + REF v2 . 6 . 2 <nl> + SHA512 a2444cc07d8741540c6071ac59bc8c63785db52e412a843aa18a5dfa0144b5001d428e44bcb520238e3d476440bc74526343f025005f05d534e732645f59cbe0 <nl> + HEAD_REF master <nl> + PATCHES <nl> + " $ { CMAKE_CURRENT_LIST_DIR } / remove - seekpos . patch " <nl> + ) <nl> + <nl> + # Copy the sources to ensure a clean , out - of - source build <nl> + file ( REMOVE_RECURSE $ { CURRENT_BUILDTREES_DIR } / $ { TARGET_TRIPLET } - all ) <nl> + file ( MAKE_DIRECTORY $ { CURRENT_BUILDTREES_DIR } / $ { TARGET_TRIPLET } - all ) <nl> + file ( COPY $ { SOURCE_PATH } DESTINATION $ { CURRENT_BUILDTREES_DIR } / $ { TARGET_TRIPLET } - all ) <nl> + get_filename_component ( LAST_DIR_NAME " $ { SOURCE_PATH } " NAME ) <nl> + set ( SOURCE_PATH " $ { CURRENT_BUILDTREES_DIR } / $ { TARGET_TRIPLET } - all / $ { LAST_DIR_NAME } " ) <nl> + <nl> + # Build : <nl> + if ( VCPKG_CMAKE_SYSTEM_NAME STREQUAL " WindowsStore " ) # UWP : <nl> + vcpkg_build_msbuild ( <nl> + USE_VCPKG_INTEGRATION <nl> + PROJECT_PATH $ { SOURCE_PATH } / 3FD / 3FD . WinRT . UWP . vcxproj <nl> + PLATFORM $ { BUILD_ARCH } <nl> + ) <nl> + elseif ( NOT VCPKG_CMAKE_SYSTEM_NAME ) # Win32 : <nl> + vcpkg_build_msbuild ( <nl> + USE_VCPKG_INTEGRATION <nl> + PROJECT_PATH $ { SOURCE_PATH } / 3FD / 3FD . vcxproj <nl> + PLATFORM $ { BUILD_ARCH } <nl> + TARGET Build <nl> + ) <nl> + else ( ) <nl> + message ( FATAL_ERROR " Unsupported system : 3FD is not currently ported to VCPKG in $ { VCPKG_CMAKE_SYSTEM_NAME } ! " ) <nl> + endif ( ) <nl> + <nl> + # Install : <nl> + file ( GLOB HEADER_FILES LIST_DIRECTORIES false " $ { SOURCE_PATH } / 3FD / * . h " ) <nl> + file ( INSTALL <nl> + $ { HEADER_FILES } <nl> + DESTINATION $ { CURRENT_PACKAGES_DIR } / include / 3FD <nl> + PATTERN " * _impl * . h " EXCLUDE <nl> + PATTERN " * example * . h " EXCLUDE <nl> + PATTERN " stdafx . h " EXCLUDE <nl> + PATTERN " targetver . 
h " EXCLUDE <nl> + ) <nl> + <nl> + file ( INSTALL $ { SOURCE_PATH } / btree DESTINATION $ { CURRENT_PACKAGES_DIR } / include / 3FD ) <nl> + file ( INSTALL $ { SOURCE_PATH } / OpenCL / CL DESTINATION $ { CURRENT_PACKAGES_DIR } / include / 3FD ) <nl> + <nl> + file ( MAKE_DIRECTORY $ { CURRENT_PACKAGES_DIR } / share / 3FD ) <nl> + file ( INSTALL <nl> + $ { SOURCE_PATH } / 3FD / 3fd - config - template . xml <nl> + DESTINATION $ { CURRENT_PACKAGES_DIR } / share / 3FD <nl> + ) <nl> + <nl> + if ( VCPKG_CMAKE_SYSTEM_NAME STREQUAL " WindowsStore " ) # Visual C + + , UWP app : <nl> + file ( INSTALL <nl> + $ { SOURCE_PATH } / 3FD / $ { BUILD_ARCH } / Debug / 3FD . WinRT . UWP / 3FD . WinRT . UWP . lib <nl> + $ { SOURCE_PATH } / 3FD / $ { BUILD_ARCH } / Debug / 3FD . WinRT . UWP / _3FD_WinRT_UWP . pri <nl> + $ { SOURCE_PATH } / 3FD / $ { BUILD_ARCH } / Debug / WinRT . UWP / 3FD . WinRT . UWP . pdb <nl> + DESTINATION $ { CURRENT_PACKAGES_DIR } / debug / lib <nl> + ) <nl> + file ( INSTALL <nl> + $ { SOURCE_PATH } / 3FD / $ { BUILD_ARCH } / Release / 3FD . WinRT . UWP / 3FD . WinRT . UWP . lib <nl> + $ { SOURCE_PATH } / 3FD / $ { BUILD_ARCH } / Release / 3FD . WinRT . UWP / _3FD_WinRT_UWP . pri <nl> + $ { SOURCE_PATH } / 3FD / $ { BUILD_ARCH } / Release / WinRT . UWP / 3FD . WinRT . UWP . pdb <nl> + DESTINATION $ { CURRENT_PACKAGES_DIR } / lib <nl> + ) <nl> + else ( ) # Visual C + + , Win32 app : <nl> + file ( INSTALL <nl> + $ { SOURCE_PATH } / 3FD / $ { BUILD_ARCH } / Debug / 3FD . lib <nl> + $ { SOURCE_PATH } / 3FD / $ { BUILD_ARCH } / Debug / 3FD . pdb <nl> + DESTINATION $ { CURRENT_PACKAGES_DIR } / debug / lib <nl> + ) <nl> + file ( INSTALL <nl> + $ { SOURCE_PATH } / 3FD / $ { BUILD_ARCH } / Release / 3FD . lib <nl> + $ { SOURCE_PATH } / 3FD / $ { BUILD_ARCH } / Release / 3FD . pdb <nl> + DESTINATION $ { CURRENT_PACKAGES_DIR } / lib <nl> + ) <nl> + endif ( ) <nl> + <nl> + # Handle copyright <nl> + file ( INSTALL $ { SOURCE_PATH } / LICENSE DESTINATION $ { CURRENT_PACKAGES_DIR } / share / 3fd RENAME copyright ) <nl> + file ( INSTALL $ { SOURCE_PATH } / Acknowledgements . txt DESTINATION $ { CURRENT_PACKAGES_DIR } / share / 3fd ) <nl> + <nl> + vcpkg_copy_pdbs ( ) <nl> new file mode 100644 <nl> index 00000000000 . . 6c77cdc64a5 <nl> mmm / dev / null <nl> ppp b / ports / 3fd / remove - seekpos . patch <nl> <nl> + diff - - git a / 3fd / web_wws_impl_host . cpp b / 3fd / web_wws_impl_host . cpp <nl> + index b87dc75 . . 1cff45f 100644 <nl> + mmm a / 3fd / web_wws_impl_host . cpp <nl> ppp + b / 3fd / web_wws_impl_host . cpp <nl> + namespace wws <nl> + throw AppException < std : : runtime_error > ( oss . str ( ) ) ; <nl> + } <nl> + <nl> + - const auto fileSizeBytes = inputStream . seekg ( 0 , std : : ios : : end ) . tellg ( ) . seekpos ( ) ; / / move cursor to the end to get the zize <nl> + + const auto fileSizeBytes = inputStream . seekg ( 0 , std : : ios : : end ) . tellg ( ) ; / / move cursor to the end to get the zize <nl> + <nl> + / / File is not trunked : <nl> + if ( fileSizeBytes > 0 ) <nl> mmm a / scripts / cmake / vcpkg_build_msbuild . cmake <nl> ppp b / scripts / cmake / vcpkg_build_msbuild . cmake <nl> <nl> # # [ OPTIONS < / p : ZLIB_INCLUDE_PATH = X > . . . ] <nl> # # [ OPTIONS_RELEASE < / p : ZLIB_LIB = X > . . . ] <nl> # # [ OPTIONS_DEBUG < / p : ZLIB_LIB = X > . . . 
] <nl> + # # [ USE_VCPKG_INTEGRATION ] <nl> # # ) <nl> # # ` ` ` <nl> # # <nl> function ( vcpkg_build_msbuild ) <nl> endif ( ) <nl> <nl> if ( _csc_USE_VCPKG_INTEGRATION ) <nl> - list ( APPEND _csc_OPTIONS / p : ForceImportBeforeCppTargets = $ { VCPKG_ROOT_DIR } / scripts / buildsystems / msbuild / vcpkg . targets ) <nl> + list ( <nl> + APPEND _csc_OPTIONS <nl> + / p : ForceImportBeforeCppTargets = $ { VCPKG_ROOT_DIR } / scripts / buildsystems / msbuild / vcpkg . targets <nl> + " / p : VcpkgTriplet = $ { TARGET_TRIPLET } " <nl> + ) <nl> endif ( ) <nl> <nl> if ( NOT DEFINED VCPKG_BUILD_TYPE OR VCPKG_BUILD_TYPE STREQUAL " release " ) <nl>
[3fd] Initial port of 3FD project ()
microsoft/vcpkg
4ebdbe7e88ad93423e9d0d75eb066ecf8ab5fcad
2018-07-03T13:39:13Z
mmm a / UnitTests / HttpInterface / rest - edge - spec . rb <nl> ppp b / UnitTests / HttpInterface / rest - edge - spec . rb <nl> <nl> <nl> context " known collection name : " do <nl> before do <nl> - @ cn = " UnitTestsCollectionEdge " <nl> - @ cid = ArangoDB . create_collection ( @ cn , true , 3 ) # type 3 = edge collection <nl> + @ ce = " UnitTestsCollectionEdge " <nl> + @ eid = ArangoDB . create_collection ( @ ce , true , 3 ) # type 3 = edge collection <nl> + @ cv = " UnitTestsCollectionVertex " <nl> + @ vid = ArangoDB . create_collection ( @ cv , true , 2 ) # type 2 = document collection <nl> end <nl> <nl> after do <nl> - ArangoDB . drop_collection ( @ cn ) <nl> + ArangoDB . drop_collection ( @ ce ) <nl> + ArangoDB . drop_collection ( @ cv ) <nl> end <nl> <nl> it " creating an edge " do <nl> - cmd = " / _api / document ? collection = # { @ cid } " <nl> + cmd = " / _api / document ? collection = # { @ vid } " <nl> <nl> # create first vertex <nl> body = " { \ " a \ " : 1 } " <nl> <nl> id2 = doc . parsed_response [ ' _id ' ] <nl> <nl> # create edge <nl> - cmd = " / _api / edge ? collection = # { @ cid } & from = # { id1 } & to = # { id2 } " <nl> + cmd = " / _api / edge ? collection = # { @ eid } & from = # { id1 } & to = # { id2 } " <nl> body = " { } " <nl> doc = ArangoDB . log_post ( " # { prefix } - create - edge " , cmd , : body = > body ) <nl> <nl> <nl> doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> <nl> # create another edge <nl> - cmd = " / _api / edge ? collection = # { @ cid } & from = # { id1 } & to = # { id2 } " <nl> + cmd = " / _api / edge ? collection = # { @ eid } & from = # { id1 } & to = # { id2 } " <nl> body = " { \ " e \ " : 1 } " <nl> doc = ArangoDB . log_post ( " # { prefix } - create - edge " , cmd , : body = > body ) <nl> <nl> <nl> doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> <nl> # create third edge <nl> - cmd = " / _api / edge ? collection = # { @ cid } & from = # { id2 } & to = # { id1 } " <nl> + cmd = " / _api / edge ? collection = # { @ eid } & from = # { id2 } & to = # { id1 } " <nl> body = " { \ " e \ " : 2 } " <nl> doc = ArangoDB . log_post ( " # { prefix } - create - edge " , cmd , : body = > body ) <nl> <nl> <nl> doc . headers [ ' content - type ' ] . should eq ( " application / json ; charset = utf - 8 " ) <nl> <nl> # check ANY edges <nl> - cmd = " / _api / edges / # { @ cid } ? vertex = # { id1 } " <nl> + cmd = " / _api / edges / # { @ eid } ? vertex = # { id1 } " <nl> doc = ArangoDB . log_get ( " # { prefix } - read - edges - any " , cmd ) ; <nl> <nl> doc . code . should eq ( 200 ) <nl> <nl> doc . parsed_response [ ' edges ' ] . length . should be ( 3 ) <nl> <nl> # check IN edges <nl> - cmd = " / _api / edges / # { @ cid } ? vertex = # { id1 } & direction = in " <nl> + cmd = " / _api / edges / # { @ eid } ? vertex = # { id1 } & direction = in " <nl> doc = ArangoDB . log_get ( " # { prefix } - read - edges - in " , cmd ) ; <nl> <nl> doc . code . should eq ( 200 ) <nl> <nl> doc . parsed_response [ ' edges ' ] . length . should be ( 1 ) <nl> <nl> # check OUT edges <nl> - cmd = " / _api / edges / # { @ cid } ? vertex = # { id1 } & direction = out " <nl> + cmd = " / _api / edges / # { @ eid } ? vertex = # { id1 } & direction = out " <nl> doc = ArangoDB . log_get ( " # { prefix } - read - edges - out " , cmd ) ; <nl> <nl> doc . code . should eq ( 200 ) <nl> mmm a / arangod / RestHandler / RestDocumentHandler . 
cpp <nl> ppp b / arangod / RestHandler / RestDocumentHandler . cpp <nl> bool RestDocumentHandler : : createDocument ( ) { <nl> } <nl> <nl> / / find and load collection given by name or identifier <nl> - int res = useCollection ( collection , create ) ; <nl> + int res = useCollection ( collection , getCollectionType ( ) , create ) ; <nl> <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> - releaseCollection ( ) ; <nl> return false ; <nl> } <nl> <nl> bool RestDocumentHandler : : createDocument ( ) { <nl> / / outside write transaction <nl> / / . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <nl> <nl> - / / release collection and free json <nl> + / / release collection <nl> releaseCollection ( ) ; <nl> <nl> / / generate result <nl> bool RestDocumentHandler : : readSingleDocument ( bool generateBody ) { <nl> string collection = suffix [ 0 ] ; <nl> string did = suffix [ 1 ] ; <nl> <nl> - / / find and load collection given by name oder identifier <nl> - int res = useCollection ( collection ) ; <nl> + / / find and load collection given by name or identifier <nl> + int res = useCollection ( collection , getCollectionType ( ) ) ; <nl> <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> - releaseCollection ( ) ; <nl> return false ; <nl> } <nl> <nl> bool RestDocumentHandler : : readSingleDocument ( bool generateBody ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> bool RestDocumentHandler : : readAllDocuments ( ) { <nl> - <nl> / / extract the cid <nl> bool found ; <nl> string collection = _request - > value ( " collection " , found ) ; <nl> <nl> - / / find and load collection given by name oder identifier <nl> - int res = useCollection ( collection ) ; <nl> + / / find and load collection given by name or identifier <nl> + int res = useCollection ( collection , getCollectionType ( ) ) ; <nl> <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> - releaseCollection ( ) ; <nl> return false ; <nl> } <nl> <nl> bool RestDocumentHandler : : modifyDocument ( bool isPatch ) { <nl> / / extract or chose the update policy <nl> TRI_doc_update_policy_e policy = extractUpdatePolicy ( ) ; <nl> <nl> - / / find and load collection given by name oder identifier <nl> - int res = useCollection ( collection ) ; <nl> + / / find and load collection given by name or identifier <nl> + int res = useCollection ( collection , getCollectionType ( ) ) ; <nl> <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> - releaseCollection ( ) ; <nl> return false ; <nl> } <nl> <nl> bool RestDocumentHandler : : deleteDocument ( ) { <nl> return false ; <nl> } <nl> <nl> - / / find and load collection given by name oder identifier <nl> - int res = useCollection ( collection ) ; <nl> + / / find and load collection given by name or identifier <nl> + int res = useCollection ( collection , getCollectionType ( ) ) ; <nl> <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> - releaseCollection ( ) ; <nl> return false ; <nl> } <nl> <nl> mmm a / arangod / RestHandler / RestDocumentHandler . h <nl> ppp b / arangod / RestHandler / RestDocumentHandler . 
h <nl> namespace triagens { <nl> <nl> protected : <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief get collection type <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + virtual TRI_col_type_e getCollectionType ( ) const { <nl> + return TRI_COL_TYPE_SIMPLE_DOCUMENT ; <nl> + } <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief creates a document <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> namespace triagens { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> virtual bool checkDocument ( ) ; <nl> + <nl> } ; <nl> } <nl> } <nl> mmm a / arangod / RestHandler / RestEdgeHandler . cpp <nl> ppp b / arangod / RestHandler / RestEdgeHandler . cpp <nl> bool RestEdgeHandler : : createDocument ( ) { <nl> } <nl> <nl> / / find and load collection given by name or identifier <nl> - int res = useCollection ( collection , create ) ; <nl> + int res = useCollection ( collection , getCollectionType ( ) , create ) ; <nl> <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> - releaseCollection ( ) ; <nl> return false ; <nl> } <nl> <nl> mmm a / arangod / RestHandler / RestEdgeHandler . h <nl> ppp b / arangod / RestHandler / RestEdgeHandler . 
h <nl> namespace triagens { <nl> / / - - SECTION - - protected methods <nl> / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ addtogroup ArangoDB <nl> + / / / @ { <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + protected : <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief get collection type <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + virtual TRI_col_type_e getCollectionType ( ) const { <nl> + return TRI_COL_TYPE_SIMPLE_EDGE ; <nl> + } <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ } <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + / / - - SECTION - - private methods <nl> + / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ addtogroup ArangoDB <nl> / / / @ { <nl> mmm a / arangod / RestHandler / RestImportHandler . cpp <nl> ppp b / arangod / RestHandler / RestImportHandler . cpp <nl> bool RestImportHandler : : createByArray ( ) { <nl> bool reuseId = found ? StringUtils : : boolean ( valueStr ) : false ; <nl> <nl> / / find and load collection given by name or identifier <nl> - int res = useCollection ( collection , create ) ; <nl> + int res = useCollection ( collection , TRI_COL_TYPE_SIMPLE_DOCUMENT , create ) ; <nl> <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> - releaseCollection ( ) ; <nl> - <nl> / / error is already generated by useCollection ! <nl> return false ; <nl> } <nl> bool RestImportHandler : : createByList ( ) { <nl> } <nl> <nl> / / find and load collection given by name or identifier <nl> - int res = useCollection ( collection , create ) ; <nl> + int res = useCollection ( collection , TRI_COL_TYPE_SIMPLE_DOCUMENT , create ) ; <nl> <nl> if ( res ! = TRI_ERROR_NO_ERROR ) { <nl> - releaseCollection ( ) ; <nl> - <nl> if ( keys ) { <nl> TRI_FreeJson ( TRI_UNKNOWN_MEM_ZONE , keys ) ; <nl> } <nl> mmm a / arangod / RestHandler / RestVocbaseBaseHandler . cpp <nl> ppp b / arangod / RestHandler / RestVocbaseBaseHandler . 
cpp <nl> TRI_doc_update_policy_e RestVocbaseBaseHandler : : extractUpdatePolicy ( ) { <nl> } <nl> } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief uses a collection , loading or manifesting and locking it <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - int RestVocbaseBaseHandler : : useCollection ( string const & name , bool create ) { <nl> - _collection = 0 ; <nl> - _documentCollection = 0 ; <nl> - <nl> - / / sanity check <nl> - if ( name . empty ( ) ) { <nl> - generateError ( HttpResponse : : BAD , <nl> - TRI_ERROR_HTTP_CORRUPTED_JSON , <nl> - " collection identifier is empty " ) ; <nl> - return TRI_set_errno ( TRI_ERROR_HTTP_CORRUPTED_JSON ) ; <nl> - } <nl> - <nl> - / / try to find the collection <nl> - if ( isdigit ( name [ 0 ] ) ) { <nl> - TRI_voc_cid_t id = StringUtils : : uint64 ( name ) ; <nl> - <nl> - _collection = TRI_LookupCollectionByIdVocBase ( _vocbase , id ) ; <nl> - } <nl> - else { <nl> - _collection = TRI_FindCollectionByNameVocBase ( _vocbase , name . c_str ( ) , create ) ; <nl> - } <nl> - <nl> - if ( _collection = = 0 ) { <nl> - generateCollectionNotFound ( name ) ; <nl> - return TRI_set_errno ( TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND ) ; <nl> - } <nl> - <nl> - / / and use the collection <nl> - int res = TRI_UseCollectionVocBase ( _vocbase , const_cast < TRI_vocbase_col_s * > ( _collection ) ) ; <nl> - <nl> - if ( res = = TRI_ERROR_NO_ERROR ) { <nl> - _documentCollection = _collection - > _collection ; <nl> - assert ( _documentCollection ! = 0 ) ; <nl> - } <nl> - <nl> - return res ; <nl> - } <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief releases a collection <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> int RestVocbaseBaseHandler : : parseDocumentId ( string const & handle , <nl> return TRI_errno ( ) ; <nl> } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief uses a collection , loading or manifesting and locking it <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + int RestVocbaseBaseHandler : : useCollection ( string const & name , <nl> + TRI_col_type_e type , <nl> + bool create ) { <nl> + _collection = 0 ; <nl> + _documentCollection = 0 ; <nl> + <nl> + / / sanity check <nl> + if ( name . 
empty ( ) ) { <nl> + generateError ( HttpResponse : : BAD , <nl> + TRI_ERROR_HTTP_CORRUPTED_JSON , <nl> + " collection identifier is empty " ) ; <nl> + return TRI_set_errno ( TRI_ERROR_HTTP_CORRUPTED_JSON ) ; <nl> + } <nl> + <nl> + / / try to find the collection <nl> + if ( isdigit ( name [ 0 ] ) ) { <nl> + TRI_voc_cid_t id = StringUtils : : uint64 ( name ) ; <nl> + <nl> + _collection = TRI_LookupCollectionByIdVocBase ( _vocbase , id ) ; <nl> + } <nl> + else { <nl> + if ( type = = TRI_COL_TYPE_SIMPLE_DOCUMENT ) { <nl> + _collection = TRI_FindDocumentCollectionByNameVocBase ( _vocbase , name . c_str ( ) , create ) ; <nl> + } <nl> + else if ( type = = TRI_COL_TYPE_SIMPLE_EDGE ) { <nl> + _collection = TRI_FindEdgeCollectionByNameVocBase ( _vocbase , name . c_str ( ) , create ) ; <nl> + } <nl> + } <nl> + <nl> + if ( _collection = = 0 ) { <nl> + generateCollectionNotFound ( name ) ; <nl> + return TRI_set_errno ( TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND ) ; <nl> + } <nl> + <nl> + / / and use the collection <nl> + int res = TRI_UseCollectionVocBase ( _vocbase , const_cast < TRI_vocbase_col_s * > ( _collection ) ) ; <nl> + <nl> + if ( res = = TRI_ERROR_NO_ERROR ) { <nl> + assert ( _collection ! = 0 ) ; <nl> + <nl> + _documentCollection = _collection - > _collection ; <nl> + assert ( _documentCollection ! = 0 ) ; <nl> + } <nl> + <nl> + return res ; <nl> + } <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / arangod / RestHandler / RestVocbaseBaseHandler . h <nl> ppp b / arangod / RestHandler / RestVocbaseBaseHandler . 
h <nl> namespace triagens { <nl> TRI_doc_update_policy_e extractUpdatePolicy ( ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief creates or loads a collection <nl> + / / / @ brief creates or loads an edge collection <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - int useCollection ( string const & name , bool create = false ) ; <nl> + int useEdgeCollection ( string const & name , bool create = false ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief releases a collection <nl> namespace triagens { <nl> TRI_voc_cid_t & cid , <nl> TRI_voc_did_t & did ) ; <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief creates or loads a collection <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + int useCollection ( string const & name , TRI_col_type_e type , bool create = false ) ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ } <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / arangod / VocBase / document - collection . c <nl> ppp b / arangod / VocBase / document - collection . c <nl> static TRI_doc_mptr_t CreateJson ( TRI_doc_collection_t * collection , <nl> TRI_doc_mptr_t result ; <nl> TRI_voc_did_t did = 0 ; <nl> TRI_voc_rid_t rid = 0 ; <nl> - <nl> + <nl> shaped = TRI_ShapedJsonJson ( collection - > _shaper , json ) ; <nl> <nl> if ( shaped = = 0 ) { <nl> mmm a / arangod / VocBase / vocbase . c <nl> ppp b / arangod / VocBase / vocbase . 
c <nl> static int ScanPath ( TRI_vocbase_t * vocbase , char const * path ) { <nl> / / / @ brief bears a new collection or returns an existing one by name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - static TRI_vocbase_col_t * BearCollectionVocBase ( TRI_vocbase_t * vocbase , char const * name ) { <nl> + static TRI_vocbase_col_t * BearCollectionVocBase ( TRI_vocbase_t * vocbase , <nl> + char const * name , <nl> + TRI_col_type_e type ) { <nl> union { void const * v ; TRI_vocbase_col_t * c ; } found ; <nl> TRI_vocbase_col_t * collection ; <nl> TRI_col_parameter_t parameter ; <nl> static TRI_vocbase_col_t * BearCollectionVocBase ( TRI_vocbase_t * vocbase , char co <nl> } <nl> <nl> / / create a new collection <nl> - collection = AddCollection ( vocbase , TRI_COL_TYPE_SIMPLE_DOCUMENT , name , TRI_NewTickVocBase ( ) , NULL ) ; <nl> + collection = AddCollection ( vocbase , type , name , TRI_NewTickVocBase ( ) , NULL ) ; <nl> <nl> if ( collection = = NULL ) { <nl> TRI_WRITE_UNLOCK_COLLECTIONS_VOCBASE ( vocbase ) ; <nl> static int ManifestCollectionVocBase ( TRI_vocbase_t * vocbase , TRI_vocbase_col_t * <nl> } <nl> } <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief finds a collection by name or creates it <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + static TRI_vocbase_col_t * FindCollectionByNameVocBase ( TRI_vocbase_t * vocbase , <nl> + char const * name , <nl> + bool bear , <nl> + TRI_col_type_e type ) { <nl> + union { void const * v ; TRI_vocbase_col_t * c ; } found ; <nl> + <nl> + TRI_READ_LOCK_COLLECTIONS_VOCBASE ( vocbase ) ; <nl> + found . v = TRI_LookupByKeyAssociativePointer ( & vocbase - > _collectionsByName , name ) ; <nl> + TRI_READ_UNLOCK_COLLECTIONS_VOCBASE ( vocbase ) ; <nl> + <nl> + if ( found . v ! = NULL ) { <nl> + return found . c ; <nl> + } <nl> + <nl> + if ( ! bear ) { <nl> + TRI_set_errno ( TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND ) ; <nl> + return NULL ; <nl> + } <nl> + <nl> + return BearCollectionVocBase ( vocbase , name , type ) ; <nl> + } <nl> + <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief loads an existing ( document ) collection <nl> / / / <nl> TRI_vocbase_col_t * TRI_LookupCollectionByIdVocBase ( TRI_vocbase_t * vocbase , TRI_ <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief finds a ( document ) collection by name <nl> + / / / @ brief finds a collection by name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> TRI_vocbase_col_t * TRI_FindCollectionByNameVocBase ( TRI_vocbase_t * vocbase , char const * name , bool bear ) { <nl> - union { void const * v ; TRI_vocbase_col_t * c ; } found ; <nl> + return TRI_FindDocumentCollectionByNameVocBase ( vocbase , name , bear ) ; <nl> + } <nl> <nl> - TRI_READ_LOCK_COLLECTIONS_VOCBASE ( vocbase ) ; <nl> - found . 
v = TRI_LookupByKeyAssociativePointer ( & vocbase - > _collectionsByName , name ) ; <nl> - TRI_READ_UNLOCK_COLLECTIONS_VOCBASE ( vocbase ) ; <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief finds a document collection by name <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - if ( found . v ! = NULL ) { <nl> - return found . c ; <nl> - } <nl> + TRI_vocbase_col_t * TRI_FindDocumentCollectionByNameVocBase ( TRI_vocbase_t * vocbase , char const * name , bool bear ) { <nl> + return FindCollectionByNameVocBase ( vocbase , name , bear , TRI_COL_TYPE_SIMPLE_DOCUMENT ) ; <nl> + } <nl> <nl> - if ( ! bear ) { <nl> - TRI_set_errno ( TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND ) ; <nl> - return NULL ; <nl> - } <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief finds an edge collection by name <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - return BearCollectionVocBase ( vocbase , name ) ; <nl> + TRI_vocbase_col_t * TRI_FindEdgeCollectionByNameVocBase ( TRI_vocbase_t * vocbase , char const * name , bool bear ) { <nl> + return FindCollectionByNameVocBase ( vocbase , name , bear , TRI_COL_TYPE_SIMPLE_EDGE ) ; <nl> } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> mmm a / arangod / VocBase / vocbase . h <nl> ppp b / arangod / VocBase / vocbase . 
h <nl> TRI_vocbase_col_t * TRI_LookupCollectionByNameVocBase ( TRI_vocbase_t * , char const <nl> TRI_vocbase_col_t * TRI_LookupCollectionByIdVocBase ( TRI_vocbase_t * , TRI_voc_cid_t ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief finds a ( document ) collection by name <nl> + / / / @ brief finds a collection by name <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> TRI_vocbase_col_t * TRI_FindCollectionByNameVocBase ( TRI_vocbase_t * , char const * , bool bear ) ; <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief finds a document collection by name <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + TRI_vocbase_col_t * TRI_FindDocumentCollectionByNameVocBase ( TRI_vocbase_t * , char const * , bool bear ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief finds an edge collection by name <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + TRI_vocbase_col_t * TRI_FindEdgeCollectionByNameVocBase ( TRI_vocbase_t * , char const * , bool bear ) ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief creates a new ( document ) collection from parameter set <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl>
issue
arangodb/arangodb
ed395ab8ce3c2257ee47ae49c40809c298a1efe1
2012-08-28T11:06:04Z
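Despite the terse message, the commit above is a substantive change: useCollection() now takes a TRI_col_type_e, and each REST handler reports its expected type through a virtual getCollectionType() hook (document for RestDocumentHandler, edge for RestEdgeHandler), so the edge API can no longer create or bind to a document collection by name — which is also why the updated spec now creates separate vertex and edge collections. A minimal sketch of that template-method pattern follows (CollectionType, BaseHandler, and EdgeHandler are illustrative names, not the ArangoDB API).

#include <iostream>
#include <stdexcept>
#include <string>

enum class CollectionType { Document, Edge };

class BaseHandler {
 public:
    virtual ~BaseHandler() {}

    // Shared lookup logic; the virtual hook supplies the expected type,
    // as useCollection(name, getCollectionType(), create) does above.
    void useCollection(const std::string& name) {
        if (name.empty())
            throw std::runtime_error("collection identifier is empty");
        // A real implementation would look up or create a collection of
        // collectionType(), so an edge endpoint can never silently bind
        // to a document collection.
        std::cout << name << " opened as "
                  << (collectionType() == CollectionType::Edge ? "edge"
                                                               : "document")
                  << " collection\n";
    }

 protected:
    virtual CollectionType collectionType() const {
        return CollectionType::Document;
    }
};

class EdgeHandler : public BaseHandler {
 protected:
    CollectionType collectionType() const override {
        return CollectionType::Edge;
    }
};

int main() {
    BaseHandler doc;
    EdgeHandler edge;
    doc.useCollection("UnitTestsCollectionVertex");
    edge.useCollection("UnitTestsCollectionEdge");
}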
new file mode 100644 <nl> index 00000000000 . . 811ee2ee7ec <nl> mmm / dev / null <nl> ppp b / src / rdb_protocol / err . hpp <nl> <nl> + # include < list > <nl> + # include < string > <nl> + <nl> + # include " utils . hpp " <nl> + <nl> + # include " rdb_protocol / ql2 . pb . h " <nl> + <nl> + # ifndef RDB_PROTOCOL_ERR_HPP_ <nl> + # define RDB_PROTOCOL_ERR_HPP_ <nl> + namespace ql { <nl> + <nl> + void _runtime_check ( const char * test , const char * file , int line , <nl> + bool pred , std : : string msg = " " ) ; <nl> + # define rcheck ( pred , msg ) \ <nl> + _runtime_check ( stringify ( pred ) , __FILE__ , __LINE__ , pred , msg ) <nl> + / / TODO : do something smarter ? <nl> + # define rfail ( args . . . ) rcheck ( false , strprintf ( args ) ) <nl> + / / TODO : make this crash in debug mode <nl> + # define r_sanity_check ( test ) rcheck ( test , " SANITY_CHECK " ) <nl> + <nl> + struct backtrace_t { <nl> + struct frame_t { <nl> + public : <nl> + frame_t ( int _pos ) : type ( POS ) , pos ( _pos ) { } <nl> + frame_t ( const std : : string & _opt ) : type ( OPT ) , opt ( _opt ) { } <nl> + Response2_Frame toproto ( ) const ; <nl> + private : <nl> + enum type_t { POS = 0 , OPT = 1 } ; <nl> + type_t type ; <nl> + int pos ; <nl> + std : : string opt ; <nl> + } ; <nl> + std : : list < frame_t > frames ; <nl> + } ; <nl> + <nl> + class exc_t : public std : : exception { <nl> + public : <nl> + exc_t ( const std : : string & _msg ) : msg ( _msg ) { } <nl> + virtual ~ exc_t ( ) throw ( ) { } <nl> + backtrace_t backtrace ; <nl> + const char * what ( ) const throw ( ) { return msg . c_str ( ) ; } <nl> + private : <nl> + const std : : string msg ; <nl> + } ; <nl> + <nl> + void fill_error ( Response2 * res , Response2_ResponseType type , std : : string msg , <nl> + const backtrace_t & bt = backtrace_t ( ) ) ; <nl> + <nl> + } <nl> + # endif / / RDB_PROTOCOL_ERR_HPP_ <nl> mmm a / src / rdb_protocol / term . cc <nl> ppp b / src / rdb_protocol / term . cc <nl> val_t * term_t : : eval ( bool _use_cached_val ) { <nl> } <nl> <nl> val_t * term_t : : new_val ( datum_t * d ) { return env - > add_and_ret ( d , this ) ; } <nl> + val_t * term_t : : new_val ( datum_stream_t * s ) { return env - > add_and_ret ( s , this ) ; } <nl> val_t * term_t : : new_val ( uuid_t db ) { return env - > add_and_ret ( db , this ) ; } <nl> val_t * term_t : : new_val ( table_t * t ) { return env - > add_and_ret ( t , this ) ; } <nl> val_t * term_t : : new_val ( func_t * f ) { return env - > add_and_ret ( f , this ) ; } <nl> mmm a / src / rdb_protocol / term . hpp <nl> ppp b / src / rdb_protocol / term . hpp <nl> <nl> <nl> namespace ql { <nl> class env_t ; <nl> + class datum_stream_t ; <nl> class func_t ; <nl> class val_t ; <nl> class table_t ; <nl> class term_t { <nl> val_t * eval ( bool _use_cached_val ) ; <nl> <nl> val_t * new_val ( datum_t * d ) ; <nl> + val_t * new_val ( datum_stream_t * s ) ; <nl> val_t * new_val ( uuid_t db ) ; <nl> val_t * new_val ( table_t * t ) ; <nl> val_t * new_val ( func_t * f ) ; <nl> new file mode 100644 <nl> index 00000000000 . . d5e54b4a459 <nl> mmm / dev / null <nl> ppp b / src / rdb_protocol / terms / db_table . hpp <nl> <nl> + # include " rdb_protocol / op . 
hpp " <nl> + namespace ql { <nl> + class db_term_t : public op_term_t { <nl> + public : <nl> + db_term_t ( env_t * env , const Term2 * term ) : op_term_t ( env , term , argspec_t ( 1 ) ) { } <nl> + private : <nl> + virtual val_t * eval_impl ( ) { <nl> + std : : string name = arg ( 0 ) - > as_datum ( ) - > as_str ( ) ; <nl> + return new_val ( get_db_uuid ( env , name ) ) ; <nl> + } <nl> + RDB_NAME ( " DB " ) <nl> + } ; <nl> + <nl> + <nl> + static const char * const table_optargs [ ] = { " use_outdated " } ; <nl> + class table_term_t : public op_term_t { <nl> + public : <nl> + table_term_t ( env_t * env , const Term2 * term ) <nl> + : op_term_t ( env , term , argspec_t ( 2 ) , LEGAL_OPTARGS ( table_optargs ) ) { } <nl> + private : <nl> + virtual val_t * eval_impl ( ) { <nl> + val_t * t = optarg ( " use_outdated " , 0 ) ; <nl> + bool use_outdated = t ? t - > as_datum ( ) - > as_bool ( ) : false ; <nl> + uuid_t db = arg ( 0 ) - > as_db ( ) ; <nl> + std : : string name = arg ( 1 ) - > as_datum ( ) - > as_str ( ) ; <nl> + return new_val ( new table_t ( env , db , name , use_outdated ) ) ; <nl> + } <nl> + RDB_NAME ( " table " ) <nl> + } ; <nl> + <nl> + } <nl> mmm a / src / rdb_protocol / transform_visitors . cc <nl> ppp b / src / rdb_protocol / transform_visitors . cc <nl> void transform_visitor_t : : operator ( ) ( Builtin_Range range ) const { <nl> } <nl> } <nl> <nl> - void transform_visitor_t : : operator ( ) ( ql : : wire_func_t & func ) const { <nl> + void transform_visitor_t : : operator ( ) ( UNUSED ql : : wire_func_t & func ) const { <nl> + / * <nl> ql : : func_t * f = func . compile ( ql_env ) ; <nl> ql : : datum_t arg ( json ) ; <nl> std : : vector < ql : : datum_t * > args ; <nl> args . push_back ( & arg ) ; <nl> ql : : val_t * v = f - > call ( args ) ; <nl> out - > push_back ( v - > as_datum ( ) - > as_json ( ) ) ; <nl> + * / <nl> } <nl> <nl> terminal_initializer_visitor_t : : terminal_initializer_visitor_t ( rget_read_response_t : : result_t * _out , <nl> mmm a / src / rdb_protocol / val . cc <nl> ppp b / src / rdb_protocol / val . 
cc <nl> datum_stream_t : : datum_stream_t ( env_t * _env , bool use_outdated , <nl> { } <nl> <nl> datum_stream_t : : datum_stream_t ( datum_stream_t * src , func_t * f ) <nl> - : env ( src - > env ) , json_stream ( src - > json_stream ) { <nl> - query_language : : scopes_t _s ; <nl> - query_language : : backtrace_t _b ; <nl> - rdb_protocol_details : : transform_variant_t trans ( wire_func_t ( env , f ) ) ; <nl> - json_stream = json_stream - > add_transformation ( trans , 0 , env , _s , _b ) ; <nl> + : env ( src - > env ) , trans ( wire_func_t ( env , f ) ) { <nl> + json_stream = src - > json_stream ; <nl> + / / json_stream = src - > json_stream - > add_transformation ( trans , 0 , env , _s , _b ) ; <nl> } <nl> <nl> const datum_t * datum_stream_t : : next ( ) { <nl> val_t : : val_t ( const datum_t * _datum , const term_t * _parent ) <nl> : type ( type_t : : DATUM ) , datum ( _datum ) , parent ( _parent ) { <nl> guarantee ( _datum ) ; <nl> } <nl> + val_t : : val_t ( datum_stream_t * _sequence , const term_t * _parent ) <nl> + : type ( type_t : : SEQUENCE ) , sequence ( _sequence ) , parent ( _parent ) { <nl> + guarantee ( _sequence ) ; <nl> + } <nl> val_t : : val_t ( table_t * _table , const term_t * _parent ) <nl> : type ( type_t : : TABLE ) , table ( _table ) , parent ( _parent ) { <nl> guarantee ( _table ) ; <nl> val_t : : val_t ( uuid_t _db , const term_t * _parent ) <nl> } <nl> val_t : : val_t ( func_t * _func , const term_t * _parent ) <nl> : type ( type_t : : FUNC ) , func ( _func ) , parent ( _parent ) { <nl> + guarantee ( _func ) ; <nl> } <nl> <nl> uuid_t get_db_uuid ( env_t * env , const std : : string & dbs ) { <nl> mmm a / src / rdb_protocol / val . hpp <nl> ppp b / src / rdb_protocol / val . hpp <nl> class datum_stream_t { <nl> / / We have to do a const_cast here to make bosot happy <nl> void register_data ( const datum_t * d ) { data . push_back ( const_cast < datum_t * > ( d ) ) ; } <nl> boost : : ptr_vector < datum_t > data ; <nl> + <nl> + rdb_protocol_details : : transform_variant_t trans ; <nl> + query_language : : scopes_t _s ; <nl> + query_language : : backtrace_t _b ; <nl> } ; <nl> <nl> class table_t { <nl> class val_t { <nl> type_t get_type ( ) const ; <nl> <nl> val_t ( const datum_t * _datum , const term_t * _parent ) ; <nl> + val_t ( datum_stream_t * _sequence , const term_t * _parent ) ; <nl> val_t ( table_t * _table , const term_t * _parent ) ; <nl> val_t ( uuid_t _db , const term_t * _parent ) ; <nl> val_t ( func_t * _func , const term_t * _parent ) ; <nl> mmm a / src / rpc / mailbox / typed . hpp <nl> ppp b / src / rpc / mailbox / typed . hpp <nl> <nl> # ifndef RPC_MAILBOX_TYPED_HPP_ <nl> # define RPC_MAILBOX_TYPED_HPP_ <nl> <nl> - / * This file is automatically generated by ' scripts / generate_rpc_templates . py ' . <nl> - Please modify ' scripts / generate_rpc_templates . py ' instead of modifying this file . * / <nl> + / * This file is automatically generated by ' . . / scripts / generate_rpc_templates . py ' . <nl> + Please modify ' . . / scripts / generate_rpc_templates . py ' instead of modifying this file . * / <nl> <nl> # include " errors . hpp " <nl> # include < boost / bind . hpp > <nl>
1
rethinkdb/rethinkdb
d58834ea3cdf8140367486ea23e64d1b858b3cdc
2013-01-02T22:30:11Z
mmm a / xbmc / addons / GUIViewStateAddonBrowser . cpp <nl> ppp b / xbmc / addons / GUIViewStateAddonBrowser . cpp <nl> CGUIViewStateAddonBrowser : : CGUIViewStateAddonBrowser ( const CFileItemList & items ) <nl> } <nl> SetViewAsControl ( DEFAULT_VIEW_AUTO ) ; <nl> <nl> - SetSortOrder ( SortOrderAscending ) ; <nl> LoadViewState ( items . GetPath ( ) , WINDOW_ADDON_BROWSER ) ; <nl> } <nl> <nl> mmm a / xbmc / filesystem / AddonsDirectory . cpp <nl> ppp b / xbmc / filesystem / AddonsDirectory . cpp <nl> bool CAddonsDirectory : : GetDirectory ( const CURL & url , CFileItemList & items ) <nl> if ( ! GetRecentlyUpdatedAddons ( addons ) ) <nl> return false ; <nl> <nl> - std : : sort ( addons . begin ( ) , addons . end ( ) , <nl> - [ ] ( const AddonPtr & a , const AddonPtr & b ) { return a - > LastUpdated ( ) > b - > LastUpdated ( ) ; } ) ; <nl> - <nl> CAddonsDirectory : : GenerateAddonListing ( path , addons , items , g_localizeStrings . Get ( 24004 ) ) ; <nl> return true ; <nl> <nl>
Merge pull request from tamland / fix_addons_sortorder
xbmc/xbmc
207db6afbb39934677f089ec116240ecb5f5b612
2016-07-02T17:03:44Z
mmm a / tensorflow / python / grappler / layout_optimizer_test . py <nl> ppp b / tensorflow / python / grappler / layout_optimizer_test . py <nl> <nl> from __future__ import division <nl> from __future__ import print_function <nl> <nl> + import numpy as np <nl> + <nl> from tensorflow . core . protobuf import config_pb2 <nl> from tensorflow . core . protobuf import rewriter_config_pb2 <nl> + from tensorflow . core . protobuf import saver_pb2 <nl> from tensorflow . python . client import session <nl> from tensorflow . python . framework import constant_op <nl> from tensorflow . python . framework import dtypes <nl> <nl> from tensorflow . python . ops import math_ops <nl> from tensorflow . python . ops import nn <nl> from tensorflow . python . ops import random_ops <nl> + from tensorflow . python . ops import variables <nl> from tensorflow . python . platform import test <nl> from tensorflow . python . training import gradient_descent <nl> - from tensorflow . python . training import saver <nl> + from tensorflow . python . training import saver as saver_lib <nl> <nl> <nl> def weight ( shape ) : <nl> def loop ( ) : <nl> return outputs <nl> <nl> <nl> - def get_config ( ) : <nl> + def get_config ( layout_optimizer = True ) : <nl> rewrite_options = rewriter_config_pb2 . RewriterConfig ( <nl> - optimize_tensor_layout = True ) <nl> + optimize_tensor_layout = layout_optimizer ) <nl> graph_options = config_pb2 . GraphOptions ( <nl> rewrite_options = rewrite_options , build_cost_model = 1 ) <nl> config = config_pb2 . ConfigProto ( graph_options = graph_options ) <nl> def get_config ( ) : <nl> class LayoutOptimizerTest ( test . TestCase ) : <nl> " " " Tests the Grappler layout optimizer . " " " <nl> <nl> + def _train ( self , checkpoint_path , layout_optimizer = False , restore = False ) : <nl> + ops . reset_default_graph ( ) <nl> + graph = ops . get_default_graph ( ) <nl> + with session . Session ( <nl> + config = get_config ( layout_optimizer ) , graph = graph ) as sess : <nl> + batch = 2 <nl> + height = 6 <nl> + width = 7 <nl> + input_channels = 3 <nl> + shape = [ batch , height , width , input_channels ] <nl> + image = array_ops . placeholder ( dtype = ' float32 ' , shape = shape ) <nl> + conv1 = conv_layers . conv2d ( image , 32 , [ 3 , 3 ] ) <nl> + conv2 = conv_layers . conv2d ( conv1 , 32 , [ 3 , 3 ] ) <nl> + optimizer = gradient_descent . GradientDescentOptimizer ( 0 . 01 ) <nl> + loss = math_ops . reduce_mean ( conv2 ) <nl> + train_op = optimizer . minimize ( loss ) <nl> + saver = saver_lib . Saver ( write_version = saver_pb2 . SaverDef . V2 ) <nl> + <nl> + if restore : <nl> + saver . restore ( sess , checkpoint_path ) <nl> + else : <nl> + sess . run ( variables . global_variables_initializer ( ) ) <nl> + <nl> + np . random . seed ( 0 ) <nl> + for _ in range ( 2 ) : <nl> + image_val = np . random . rand ( * shape ) . astype ( np . float32 ) <nl> + sess . run ( [ loss , train_op ] , feed_dict = { image : image_val } ) <nl> + <nl> + if restore : <nl> + all_vars = ops . get_collection ( ops . GraphKeys . GLOBAL_VARIABLES ) <nl> + all_vars_values = [ var . eval ( session = sess ) for var in all_vars ] <nl> + return all_vars_values <nl> + else : <nl> + saver . save ( sess , checkpoint_path ) <nl> + <nl> def testTwoConvLayers ( self ) : <nl> if test . is_gpu_available ( cuda_only = True ) : <nl> random_seed . set_random_seed ( 0 ) <nl> def testGradient ( self ) : <nl> train_op = optimizer . minimize ( loss ) <nl> graph = ops . get_default_graph ( ) <nl> graph . 
add_to_collection ( ' train_op ' , train_op ) <nl> - meta_graph = saver . export_meta_graph ( graph_def = graph . as_graph_def ( ) ) <nl> + meta_graph = saver_lib . export_meta_graph ( graph_def = graph . as_graph_def ( ) ) <nl> <nl> rewrite_options = rewriter_config_pb2 . RewriterConfig ( <nl> optimize_tensor_layout = True ) <nl> def testGradient ( self ) : <nl> self . assertEqual ( node . attr [ ' data_format ' ] . s , ' NCHW ' ) <nl> self . assertEqual ( found , 5 ) <nl> <nl> + def testCheckpointCompatibility ( self ) : <nl> + checkpoint_path = self . get_temp_dir ( ) <nl> + self . _train ( checkpoint_path ) <nl> + vars_expected = self . _train ( checkpoint_path , restore = True ) <nl> + vars_layout_optimized = self . _train ( <nl> + checkpoint_path , restore = True , layout_optimizer = True ) <nl> + <nl> + for var_expected , var_layout_optimized in zip ( vars_expected , <nl> + vars_layout_optimized ) : <nl> + self . assertAllClose ( var_expected , var_layout_optimized , atol = 1e - 6 ) <nl> + <nl> <nl> if __name__ = = ' __main__ ' : <nl> test . main ( ) <nl>
Add a checkpoint compatibility test for layout optimizer .
tensorflow/tensorflow
2c26c98f8d1f15d064c76548393137f058043dc1
2017-11-14T06:36:28Z
mmm a / Marlin / src / feature / fwretract . cpp <nl> ppp b / Marlin / src / feature / fwretract . cpp <nl> float FWRetract : : retract_length , / / M207 S - G10 Retract len <nl> FWRetract : : swap_retract_length , / / M207 W - G10 Swap Retract length <nl> FWRetract : : swap_retract_recover_length , / / M208 W - G11 Swap Recover length <nl> FWRetract : : swap_retract_recover_feedrate_mm_s , / / M208 R - G11 Swap Recover feedrate <nl> - FWRetract : : hop_amount ; <nl> + FWRetract : : current_hop ; <nl> <nl> void FWRetract : : reset ( ) { <nl> autoretract_enabled = false ; <nl> void FWRetract : : reset ( ) { <nl> swap_retract_length = RETRACT_LENGTH_SWAP ; <nl> swap_retract_recover_length = RETRACT_RECOVER_LENGTH_SWAP ; <nl> swap_retract_recover_feedrate_mm_s = RETRACT_RECOVER_FEEDRATE_SWAP ; <nl> - hop_amount = 0 . 0 ; <nl> + current_hop = 0 . 0 ; <nl> <nl> for ( uint8_t i = 0 ; i < EXTRUDERS ; + + i ) { <nl> retracted [ i ] = false ; <nl> void FWRetract : : retract ( const bool retracting <nl> # endif <nl> ) { <nl> <nl> - static float hop_amount = 0 . 0 ; / / Total amount lifted , for use in recover <nl> + static float current_hop = 0 . 0 ; / / Total amount lifted , for use in recover <nl> <nl> / / Prevent two retracts or recovers in a row <nl> if ( retracted [ active_extruder ] = = retracting ) return ; <nl> void FWRetract : : retract ( const bool retracting <nl> } <nl> SERIAL_ECHOLNPAIR ( " current_position [ z ] " , current_position [ Z_AXIS ] ) ; <nl> SERIAL_ECHOLNPAIR ( " current_position [ e ] " , current_position [ E_AXIS ] ) ; <nl> - SERIAL_ECHOLNPAIR ( " hop_amount " , hop_amount ) ; <nl> + SERIAL_ECHOLNPAIR ( " current_hop " , current_hop ) ; <nl> / / * / <nl> <nl> const float old_feedrate_mm_s = feedrate_mm_s , <nl> void FWRetract : : retract ( const bool retracting <nl> prepare_move_to_destination ( ) ; / / set_current_to_destination <nl> <nl> / / Is a Z hop set , and has the hop not yet been done ? <nl> - if ( retract_zlift > 0 . 01 & & ! hop_amount ) { / / Apply hop only once <nl> - hop_amount + = retract_zlift ; / / Add to the hop total ( again , only once ) <nl> + if ( retract_zlift > 0 . 01 & & ! current_hop ) { / / Apply hop only once <nl> + current_hop + = retract_zlift ; / / Add to the hop total ( again , only once ) <nl> destination [ Z_AXIS ] + = retract_zlift ; / / Raise Z by the zlift ( M207 Z ) amount <nl> feedrate_mm_s = planner . max_feedrate_mm_s [ Z_AXIS ] ; / / Maximum Z feedrate <nl> prepare_move_to_destination ( ) ; / / Raise up , set_current_to_destination <nl> void FWRetract : : retract ( const bool retracting <nl> } <nl> else { <nl> / / If a hop was done and Z hasn ' t changed , undo the Z hop <nl> - if ( hop_amount ) { <nl> - current_position [ Z_AXIS ] + = hop_amount ; / / Restore the actual Z position <nl> + if ( current_hop ) { <nl> + current_position [ Z_AXIS ] + = current_hop ; / / Restore the actual Z position <nl> SYNC_PLAN_POSITION_KINEMATIC ( ) ; / / Unspoof the position planner <nl> feedrate_mm_s = planner . max_feedrate_mm_s [ Z_AXIS ] ; / / Z feedrate to max <nl> prepare_move_to_destination ( ) ; / / Lower Z , set_current_to_destination <nl> - hop_amount = 0 . 0 ; / / Clear the hop amount <nl> + current_hop = 0 . 0 ; / / Clear the hop amount <nl> } <nl> <nl> destination [ E_AXIS ] + = ( base_retract + ( swapping ? 
swap_retract_recover_length : retract_recover_length ) ) * renormalize ; <nl> void FWRetract : : retract ( const bool retracting <nl> } <nl> SERIAL_ECHOLNPAIR ( " current_position [ z ] " , current_position [ Z_AXIS ] ) ; <nl> SERIAL_ECHOLNPAIR ( " current_position [ e ] " , current_position [ E_AXIS ] ) ; <nl> - SERIAL_ECHOLNPAIR ( " hop_amount " , hop_amount ) ; <nl> + SERIAL_ECHOLNPAIR ( " current_hop " , current_hop ) ; <nl> / / * / <nl> <nl> } <nl> mmm a / Marlin / src / feature / fwretract . h <nl> ppp b / Marlin / src / feature / fwretract . h <nl> class FWRetract { <nl> swap_retract_length , / / M207 W - G10 Swap Retract length <nl> swap_retract_recover_length , / / M208 W - G11 Swap Recover length <nl> swap_retract_recover_feedrate_mm_s , / / M208 R - G11 Swap Recover feedrate <nl> - hop_amount ; <nl> + current_hop ; <nl> <nl> FWRetract ( ) { reset ( ) ; } <nl> <nl> mmm a / Marlin / src / module / motion . cpp <nl> ppp b / Marlin / src / module / motion . cpp <nl> void homeaxis ( const AxisEnum axis ) { <nl> <nl> / / Clear retracted status if homing the Z axis <nl> # if ENABLED ( FWRETRACT ) <nl> - if ( axis = = Z_AXIS ) fwretract . hop_amount = 0 . 0 ; <nl> + if ( axis = = Z_AXIS ) fwretract . current_hop = 0 . 0 ; <nl> # endif <nl> <nl> # if ENABLED ( DEBUG_LEVELING_FEATURE ) <nl>
hop_amount = > current_hop
MarlinFirmware/Marlin
0c01099f17685cffc3777e1bf89363f39492022f
2018-09-09T02:20:37Z
mmm a / tensorflow / lite / python / BUILD <nl> ppp b / tensorflow / lite / python / BUILD <nl> py_test ( <nl> name = " lite_test " , <nl> srcs = [ " lite_test . py " ] , <nl> data = [ " @ tflite_mobilenet_ssd_quant_protobuf / / : tflite_graph . pb " ] , <nl> + shard_count = 4 , <nl> srcs_version = " PY2AND3 " , <nl> tags = [ <nl> " no_oss " , <nl> mmm a / tensorflow / lite / python / lite . py <nl> ppp b / tensorflow / lite / python / lite . py <nl> def convert ( self ) : <nl> if self . _has_valid_tensors ( ) : <nl> for tensor in self . _input_tensors : <nl> shape = tensor . get_shape ( ) <nl> - if not shape or not shape . as_list ( ) : <nl> + if not shape : <nl> raise ValueError ( " Provide an input shape for input array " <nl> " ' { 0 } ' . " . format ( _tensor_name ( tensor ) ) ) <nl> + # Note that shape_list might be empty for scalar shapes . <nl> shape_list = shape . as_list ( ) <nl> if None in shape_list [ 1 : ] : <nl> raise ValueError ( <nl> " None is only supported in the 1st dimension . Tensor ' { 0 } ' has " <nl> " invalid shape ' { 1 } ' . " . format ( _tensor_name ( tensor ) , shape_list ) ) <nl> - elif shape_list [ 0 ] is None : <nl> + elif shape_list and shape_list [ 0 ] is None : <nl> self . _set_batch_size ( batch_size = 1 ) <nl> <nl> # Get quantization stats . Ensures there is one stat per name if the stats <nl> mmm a / tensorflow / lite / python / lite_test . py <nl> ppp b / tensorflow / lite / python / lite_test . py <nl> def testFloat ( self ) : <nl> self . assertTrue ( ( [ 1 , 16 , 16 , 3 ] = = output_details [ 0 ] [ ' shape ' ] ) . all ( ) ) <nl> self . assertEqual ( ( 0 . , 0 . ) , output_details [ 0 ] [ ' quantization ' ] ) <nl> <nl> + def testString ( self ) : <nl> + in_tensor = array_ops . placeholder ( shape = [ 4 ] , dtype = dtypes . string ) <nl> + out_tensor = array_ops . reshape ( in_tensor , shape = [ 2 , 2 ] ) <nl> + sess = session . Session ( ) <nl> + <nl> + # Convert model and ensure model is not None . <nl> + converter = lite . TFLiteConverter . from_session ( sess , [ in_tensor ] , <nl> + [ out_tensor ] ) <nl> + tflite_model = converter . convert ( ) <nl> + self . assertTrue ( tflite_model ) <nl> + <nl> + # Check values from converted model . <nl> + interpreter = Interpreter ( model_content = tflite_model ) <nl> + interpreter . allocate_tensors ( ) <nl> + <nl> + input_details = interpreter . get_input_details ( ) <nl> + self . assertEqual ( 1 , len ( input_details ) ) <nl> + self . assertEqual ( ' Placeholder ' , input_details [ 0 ] [ ' name ' ] ) <nl> + self . assertEqual ( np . object_ , input_details [ 0 ] [ ' dtype ' ] ) <nl> + self . assertTrue ( ( [ 4 ] = = input_details [ 0 ] [ ' shape ' ] ) . all ( ) ) <nl> + <nl> + output_details = interpreter . get_output_details ( ) <nl> + self . assertEqual ( 1 , len ( output_details ) ) <nl> + self . assertEqual ( ' Reshape ' , output_details [ 0 ] [ ' name ' ] ) <nl> + self . assertEqual ( np . object_ , output_details [ 0 ] [ ' dtype ' ] ) <nl> + self . assertTrue ( ( [ 2 , 2 ] = = output_details [ 0 ] [ ' shape ' ] ) . all ( ) ) <nl> + # TODO ( b / 122659643 ) : Test setting / getting string data via the python <nl> + # interpreter API after support has been added . <nl> + <nl> def testQuantization ( self ) : <nl> in_tensor_1 = array_ops . placeholder ( <nl> shape = [ 1 , 16 , 16 , 3 ] , dtype = dtypes . float32 , name = ' inputA ' ) <nl> def testSizeNoneInvalid ( self ) : <nl> self . assertEqual ( ' Provide an input shape for input array \ ' Placeholder \ ' . ' , <nl> str ( error . 
exception ) ) <nl> <nl> - def testSizeEmptyInvalid ( self ) : <nl> + def testScalarValid ( self ) : <nl> + # Construct a graph using a scalar ( empty shape ) input . <nl> in_tensor = array_ops . placeholder ( dtype = dtypes . float32 , shape = [ ] ) <nl> out_tensor = in_tensor + in_tensor <nl> sess = session . Session ( ) <nl> <nl> - # Test empty shape . <nl> + # Test conversion with the scalar input shape . <nl> converter = lite . TFLiteConverter . from_session ( sess , [ in_tensor ] , <nl> [ out_tensor ] ) <nl> - with self . assertRaises ( ValueError ) as error : <nl> - converter . convert ( ) <nl> - self . assertEqual ( ' Provide an input shape for input array \ ' Placeholder \ ' . ' , <nl> - str ( error . exception ) ) <nl> + tflite_model = converter . convert ( ) <nl> + self . assertTrue ( tflite_model ) <nl> + <nl> + # Check values from converted model . <nl> + interpreter = Interpreter ( model_content = tflite_model ) <nl> + interpreter . allocate_tensors ( ) <nl> + <nl> + input_details = interpreter . get_input_details ( ) <nl> + self . assertEqual ( 1 , len ( input_details ) ) <nl> + self . assertEqual ( ' Placeholder ' , input_details [ 0 ] [ ' name ' ] ) <nl> + self . assertEqual ( np . float32 , input_details [ 0 ] [ ' dtype ' ] ) <nl> + self . assertTrue ( ( [ ] = = input_details [ 0 ] [ ' shape ' ] ) . all ( ) ) <nl> + <nl> + output_details = interpreter . get_output_details ( ) <nl> + self . assertEqual ( 1 , len ( output_details ) ) <nl> + self . assertEqual ( ' add ' , output_details [ 0 ] [ ' name ' ] ) <nl> + self . assertEqual ( np . float32 , output_details [ 0 ] [ ' dtype ' ] ) <nl> + self . assertTrue ( ( [ ] = = input_details [ 0 ] [ ' shape ' ] ) . all ( ) ) <nl> + <nl> + # Validate inference using the scalar inputs / outputs . <nl> + test_input = np . array ( 4 . 0 , dtype = np . float32 ) <nl> + expected_output = np . array ( 8 . 0 , dtype = np . float32 ) <nl> + interpreter . set_tensor ( input_details [ 0 ] [ ' index ' ] , test_input ) <nl> + interpreter . invoke ( ) <nl> + <nl> + output_data = interpreter . get_tensor ( output_details [ 0 ] [ ' index ' ] ) <nl> + self . assertTrue ( ( expected_output = = output_data ) . all ( ) ) <nl> <nl> def testSizeInvalid ( self ) : <nl> in_tensor = array_ops . placeholder ( <nl>
Allow scalar shapes in the Python converter API
tensorflow/tensorflow
a7443e6ad57bd60693cf58a9dad3f462fdf5bc3d
2019-01-11T00:34:26Z
mmm a / examples / cpp_module / test . cpp <nl> ppp b / examples / cpp_module / test . cpp <nl> using namespace std ; <nl> <nl> extern " C " <nl> { <nl> - # include " swoole . h " <nl> + # include " swoole . h " <nl> # include " module . h " <nl> - int swModule_init ( swModule * ) ; <nl> + int swModule_init ( swModule * ) ; <nl> } <nl> <nl> swVal * cppMethod ( swModule * module , swString * args , int argc ) ; <nl>
code format .
swoole/swoole-src
180c55d55d7e91d8d1d3728191455e0158551201
2016-07-13T08:13:20Z
mmm a / src / json . hpp <nl> ppp b / src / json . hpp <nl> struct has_mapped_type <nl> std : : is_integral < decltype ( detect ( std : : declval < T > ( ) ) ) > : : value ; <nl> } ; <nl> <nl> + template < template < typename , typename > class JSONSerializer , typename Json , <nl> + typename T > <nl> + struct has_from_json <nl> + { <nl> + private : <nl> + template < typename U , typename = decltype ( uncvref_t < U > : : from_json ( <nl> + std : : declval < Json > ( ) , std : : declval < T & > ( ) ) ) > <nl> + static int detect ( U & & ) ; <nl> + <nl> + static void detect ( . . . ) ; <nl> + <nl> + public : <nl> + static constexpr bool value = std : : is_integral < decltype ( <nl> + detect ( std : : declval < JSONSerializer < T , void > > ( ) ) ) > : : value ; <nl> + } ; <nl> + <nl> + template < template < typename , typename > class JSONSerializer , typename Json , <nl> + typename T > <nl> + struct has_to_json <nl> + { <nl> + private : <nl> + template < typename U , typename = decltype ( uncvref_t < U > : : to_json ( <nl> + std : : declval < Json & > ( ) , std : : declval < T > ( ) ) ) > <nl> + static int detect ( U & & ) ; <nl> + <nl> + static void detect ( . . . ) ; <nl> + <nl> + public : <nl> + static constexpr bool value = std : : is_integral < decltype ( <nl> + detect ( std : : declval < JSONSerializer < T , void > > ( ) ) ) > : : value ; <nl> + } ; <nl> + <nl> void to_json ( ) ; <nl> void from_json ( ) ; <nl> <nl> class basic_json <nl> } <nl> <nl> / / constructor chosen when JSONSerializer : : to_json exists for type T <nl> - template < typename T , typename = decltype ( JSONSerializer < uncvref_t < T > > : : to_json ( std : : declval < basic_json & > ( ) , std : : declval < uncvref_t < T > > ( ) ) ) > <nl> + template < typename T , typename = typename std : : enable_if < detail : : has_to_json < <nl> + JSONSerializer , basic_json , uncvref_t < T > > : : value > : : type > <nl> explicit basic_json ( T & & val ) <nl> { <nl> JSONSerializer < uncvref_t < T > > : : to_json ( * this , std : : forward < T > ( val ) ) ; <nl> class basic_json <nl> <nl> @ since version 1 . 0 . 
0 <nl> * / <nl> - template < typename ValueType , typename std : : enable_if < <nl> - not std : : is_pointer < ValueType > : : value , int > : : type = 0 > <nl> - auto get ( ) const - > decltype ( this - > get_impl ( static_cast < ValueType * > ( nullptr ) ) ) <nl> - { <nl> - return get_impl ( static_cast < ValueType * > ( nullptr ) ) ; <nl> - } <nl> - <nl> - template < typename ValueType , typename = decltype ( JSONSerializer < uncvref_t < ValueType > > : : from_json ( std : : declval < basic_json > ( ) , std : : declval < ValueType & > ( ) ) ) > <nl> - auto get ( ) const - > uncvref_t < ValueType > <nl> - { <nl> + template < typename ValueType , <nl> + typename std : : enable_if < <nl> + not std : : is_pointer < ValueType > : : value and <nl> + not detail : : has_from_json < JSONSerializer , basic_json , <nl> + uncvref_t < ValueType > > : : value , <nl> + int > : : type = 0 > <nl> + auto get ( ) const <nl> + - > decltype ( this - > get_impl ( static_cast < ValueType * > ( nullptr ) ) ) { <nl> + return get_impl ( static_cast < ValueType * > ( nullptr ) ) ; <nl> + } <nl> + <nl> + template < typename ValueType , <nl> + typename = enable_if_t < detail : : has_from_json < <nl> + JSONSerializer , basic_json , uncvref_t < ValueType > > : : value > > <nl> + auto get ( ) const - > uncvref_t < ValueType > { <nl> using type = uncvref_t < ValueType > ; <nl> - static_assert ( std : : is_default_constructible < type > : : value & & std : : is_copy_constructible < type > : : value , <nl> - " user - defined types must be DefaultConstructible and CopyConstructible when used with get " ) ; <nl> + static_assert ( std : : is_default_constructible < type > : : value & & <nl> + std : : is_copy_constructible < type > : : value , <nl> + " user - defined types must be DefaultConstructible and " <nl> + " CopyConstructible when used with get " ) ; <nl> type ret ; <nl> JSONSerializer < type > : : from_json ( * this , ret ) ; <nl> return ret ; <nl> mmm a / test / src / unit - udt . cpp <nl> ppp b / test / src / unit - udt . cpp <nl> TEST_CASE ( " from_json free function " , " [ udt ] " ) <nl> } <nl> } <nl> } <nl> + <nl> + / / custom serializer , uses adl by default <nl> + template < typename T , typename = void > <nl> + struct my_serializer ; <nl> + <nl> + template < > <nl> + struct my_serializer < udt : : pod_type > <nl> + { <nl> + template < typename Json > <nl> + static void from_json ( Json const & j , udt : : pod_type & val ) <nl> + { <nl> + nlohmann : : from_json ( j , val ) ; <nl> + } <nl> + <nl> + template < typename Json > <nl> + static void to_json ( Json & j , udt : : pod_type const & val ) <nl> + { <nl> + nlohmann : : to_json ( j , val ) ; <nl> + } <nl> + } ; <nl> + <nl> + using my_json = nlohmann : : basic_json < std : : map , std : : vector , std : : string , bool , <nl> + std : : int64_t , std : : uint64_t , double , <nl> + std : : allocator , my_serializer > ; <nl> + <nl> + namespace udt <nl> + { <nl> + void to_json ( my_json & j , pod_type const & val ) <nl> + { <nl> + j = my_json { { " a " , val . a } , { " b " , val . b } , { " c " , val . c } } ; <nl> + } <nl> + <nl> + void from_json ( my_json const & j , pod_type & val ) <nl> + { <nl> + val = { j [ " a " ] . get < int > ( ) , j [ " b " ] . get < char > ( ) , j [ " c " ] . 
get < short > ( ) } ; <nl> + } <nl> + } <nl> + <nl> + TEST_CASE ( " custom serializer " ) <nl> + { <nl> + <nl> + <nl> + SECTION ( " default use works like default serializer " ) <nl> + { <nl> + udt : : pod_type pod { 1 , 2 , 3 } ; <nl> + auto const j = my_json { pod } ; <nl> + <nl> + auto const j2 = json { pod } ; <nl> + CHECK ( j . dump ( ) = = j2 . dump ( ) ) ; <nl> + <nl> + auto const pod2 = j . get < udt : : pod_type > ( ) ; <nl> + auto const pod3 = j2 . get < udt : : pod_type > ( ) ; <nl> + CHECK ( pod2 = = pod3 ) ; <nl> + CHECK ( pod2 = = pod ) ; <nl> + } <nl> + } <nl>
add basic test for custom serializer
nlohmann/json
178441cdfd12a8b7f4c49778259470bd40d4f32a
2017-01-21T15:14:21Z
mmm a / test / cpp / qps / smoke_test . cc <nl> ppp b / test / cpp / qps / smoke_test . cc <nl> <nl> <nl> # include < grpc / support / log . h > <nl> <nl> + # include < signal . h > <nl> + <nl> # include " test / cpp / qps / driver . h " <nl> # include " test / cpp / qps / report . h " <nl> <nl> static void RunQPS ( ) { <nl> } / / namespace grpc <nl> <nl> int main ( int argc , char * * argv ) { <nl> + signal ( SIGPIPE , SIG_IGN ) ; <nl> using namespace grpc : : testing ; <nl> RunSynchronousStreamingPingPong ( ) ; <nl> RunSynchronousUnaryPingPong ( ) ; <nl>
Merge pull request from ctiller / sigign
grpc/grpc
6ea9e290e3b29b75359dc8b6cae92f9d96f25b66
2015-04-27T23:34:56Z
mmm a / . gitignore <nl> ppp b / . gitignore <nl> cppclient / watchmanclient . pc <nl> * . filters <nl> / . vs <nl> / external <nl> + / common <nl> + / eden <nl> new file mode 100644 <nl> index 000000000 . . a668bbac2 <nl> mmm / dev / null <nl> ppp b / CMake / ThriftCppLibrary . cmake <nl> <nl> + # NOTE : If you change this file , fbcode / fboss / github / ThriftCppLibrary . cmake also <nl> + # needs to be changed . TODO : this should be handled via shipit . <nl> + function ( add_thrift_cpp2_library LIB_NAME THRIFT_FILE ) <nl> + # Parse the arguments <nl> + set ( SERVICES ) <nl> + set ( DEPENDS ) <nl> + set ( GEN_ARGS ) <nl> + set ( mode " UNSET " ) <nl> + foreach ( arg IN LISTS ARGN ) <nl> + if ( " $ { arg } " STREQUAL " SERVICES " ) <nl> + set ( mode " SERVICES " ) <nl> + elseif ( " $ { arg } " STREQUAL " DEPENDS " ) <nl> + set ( mode " DEPENDS " ) <nl> + elseif ( " $ { arg } " STREQUAL " OPTIONS " ) <nl> + set ( mode " OPTIONS " ) <nl> + else ( ) <nl> + if ( " $ { mode } " STREQUAL " SERVICES " ) <nl> + list ( APPEND SERVICES " $ { arg } " ) <nl> + elseif ( " $ { mode } " STREQUAL " DEPENDS " ) <nl> + list ( APPEND DEPENDS " $ { arg } " ) <nl> + elseif ( " $ { mode } " STREQUAL " OPTIONS " ) <nl> + list ( APPEND GEN_ARGS " $ { arg } " ) <nl> + else ( ) <nl> + message ( <nl> + FATAL_ERROR <nl> + " expected SERVICES , DEPENDS , or OPTIONS argument , found $ { arg } " <nl> + ) <nl> + endif ( ) <nl> + endif ( ) <nl> + endforeach ( ) <nl> + <nl> + get_filename_component ( base $ { THRIFT_FILE } NAME_WE ) <nl> + get_filename_component ( <nl> + output_dir <nl> + $ { CMAKE_CURRENT_BINARY_DIR } / $ { THRIFT_FILE } <nl> + DIRECTORY <nl> + ) <nl> + <nl> + list ( APPEND GEN_ARGS " include_prefix = $ { output_dir } " ) <nl> + # CMake 3 . 12 is finally getting a list ( JOIN ) function , but until then <nl> + # treating the list as a string and replacing the semicolons is good enough . <nl> + string ( REPLACE " ; " " , " GEN_ARG_STR " $ { GEN_ARGS } " ) <nl> + <nl> + # Compute the list of generated files <nl> + list ( APPEND generated_headers <nl> + $ { output_dir } / gen - cpp2 / $ { base } _constants . h <nl> + $ { output_dir } / gen - cpp2 / $ { base } _constants . cpp <nl> + $ { output_dir } / gen - cpp2 / $ { base } _types . h <nl> + $ { output_dir } / gen - cpp2 / $ { base } _types . tcc <nl> + $ { output_dir } / gen - cpp2 / $ { base } _types_custom_protocol . h <nl> + ) <nl> + list ( APPEND generated_sources <nl> + $ { output_dir } / gen - cpp2 / $ { base } _data . h <nl> + $ { output_dir } / gen - cpp2 / $ { base } _data . cpp <nl> + $ { output_dir } / gen - cpp2 / $ { base } _types . cpp <nl> + ) <nl> + foreach ( service IN LISTS SERVICES ) <nl> + list ( APPEND generated_headers <nl> + $ { output_dir } / gen - cpp2 / $ { service } . h <nl> + $ { output_dir } / gen - cpp2 / $ { service } . tcc <nl> + $ { output_dir } / gen - cpp2 / $ { service } AsyncClient . h <nl> + $ { output_dir } / gen - cpp2 / $ { service } _custom_protocol . h <nl> + ) <nl> + list ( APPEND generated_sources <nl> + $ { output_dir } / gen - cpp2 / $ { service } . cpp <nl> + $ { output_dir } / gen - cpp2 / $ { service } AsyncClient . cpp <nl> + $ { output_dir } / gen - cpp2 / $ { service } _processmap_binary . cpp <nl> + $ { output_dir } / gen - cpp2 / $ { service } _processmap_compact . 
cpp <nl> + ) <nl> + endforeach ( ) <nl> + <nl> + # Emit the rule to run the thrift compiler <nl> + add_custom_command ( <nl> + OUTPUT <nl> + $ { generated_headers } <nl> + $ { generated_sources } <nl> + COMMAND <nl> + $ { CMAKE_COMMAND } - E make_directory $ { output_dir } <nl> + COMMAND <nl> + $ { FBTHRIFT_COMPILER } <nl> + - - strict <nl> + - - templates $ { FBTHRIFT_TEMPLATES_DIR } <nl> + - - gen " mstch_cpp2 : $ { GEN_ARG_STR } " <nl> + - I $ { CMAKE_SOURCE_DIR } <nl> + - o $ { output_dir } <nl> + $ { CMAKE_CURRENT_SOURCE_DIR } / $ { THRIFT_FILE } <nl> + WORKING_DIRECTORY <nl> + $ { CMAKE_BINARY_DIR } <nl> + MAIN_DEPENDENCY <nl> + $ { THRIFT_FILE } <nl> + DEPENDS <nl> + $ { DEPENDS } <nl> + ) <nl> + <nl> + # Now emit the library rule to compile the sources <nl> + add_library ( $ { LIB_NAME } STATIC <nl> + $ { generated_sources } <nl> + ) <nl> + set_property ( <nl> + TARGET $ { LIB_NAME } <nl> + PROPERTY PUBLIC_HEADER <nl> + $ { generated_headers } <nl> + ) <nl> + target_include_directories ( <nl> + $ { LIB_NAME } <nl> + PUBLIC <nl> + $ { CMAKE_SOURCE_DIR } <nl> + $ { CMAKE_BINARY_DIR } <nl> + ) <nl> + target_link_libraries ( <nl> + $ { LIB_NAME } <nl> + PUBLIC <nl> + $ { DEPENDS } <nl> + FBThrift : : thriftcpp2 <nl> + ) <nl> + endfunction ( ) <nl> mmm a / CMakeLists . txt <nl> ppp b / CMakeLists . txt <nl> <nl> # This is an experimental cmakefile and doesn ' t offer all of the options <nl> # available in the configure script ! At this time you should prefer to <nl> # use the configure script to build and install watchman ! <nl> - cmake_minimum_required ( VERSION 3 . 0 . 2 FATAL_ERROR ) <nl> + cmake_minimum_required ( VERSION 3 . 1 . 3 FATAL_ERROR ) <nl> + <nl> set ( CMAKE_MODULE_PATH " $ { CMAKE_CURRENT_SOURCE_DIR } / CMake " $ { CMAKE_MODULE_PATH } ) <nl> <nl> + set ( CMAKE_CXX_STANDARD 14 ) <nl> + <nl> # Tell CMake to also look in the directories where getdeps . py installs <nl> # our third - party dependencies . <nl> list ( APPEND CMAKE_PREFIX_PATH " $ { CMAKE_CURRENT_SOURCE_DIR } / external / install " ) <nl> include_directories ( $ { CMAKE_CURRENT_SOURCE_DIR } ) <nl> include_directories ( $ { CMAKE_CURRENT_BINARY_DIR } ) <nl> include_directories ( " $ { CMAKE_CURRENT_SOURCE_DIR } / external / install / include " ) <nl> <nl> + if ( NOT WIN32 ) <nl> + set ( WANT_THRIFT ON ) <nl> + else ( ) <nl> + set ( WANT_THRIFT OFF ) <nl> + endif ( ) <nl> + <nl> + # Determine whether we are the git repo produced by shipit , a staging <nl> + # area produced by shipit in the FB internal CI , or whether <nl> + # we are building in the source monorepo . <nl> + # For the FB internal CI flavor running shipit , CMAKE_CURRENT_SOURCE_DIR <nl> + # will have a value like " . . . . / shipit_projects / watchman " . <nl> + if ( IS_DIRECTORY " $ { CMAKE_CURRENT_SOURCE_DIR } / . git " OR <nl> + " $ { CMAKE_CURRENT_SOURCE_DIR } " MATCHES " shipit_projects " ) <nl> + set ( IS_SHIPPED_IT TRUE ) <nl> + else ( ) <nl> + set ( IS_SHIPPED_IT FALSE ) <nl> + endif ( ) <nl> + <nl> + # If we ' re building from inside the monorepo , make the local directory <nl> + # look like the shipit - transformed source in the git repo . <nl> + # On windows we do a dumb recursive copy of the files because we cannot <nl> + # guarantee that we ' ll be successful in setting up a symlink . <nl> + # On everything else we set up a simple symlink . 
<nl> + # In theory we can tell cmake to add a non - child subdir and avoid the <nl> + # copy / symlink thing , but we ' d need to teach various targets how to resolve <nl> + # the path and that is rather a lot of work ( I spent a couple of hours on this <nl> + # before throwing in the towel ) . <nl> + function ( maybe_shipit_dir MONOREPO_RELATIVE_PATH ) <nl> + get_filename_component ( base " $ { MONOREPO_RELATIVE_PATH } " NAME ) <nl> + if ( NOT IS_SHIPPED_IT AND <nl> + NOT IS_DIRECTORY $ { CMAKE_CURRENT_SOURCE_DIR } / $ { base } ) <nl> + if ( WIN32 ) <nl> + file ( COPY <nl> + " $ { CMAKE_CURRENT_SOURCE_DIR } / $ { MONOREPO_RELATIVE_PATH } " <nl> + DESTINATION $ { CMAKE_CURRENT_SOURCE_DIR } ) <nl> + else ( ) <nl> + execute_process ( COMMAND <nl> + ln - s $ { MONOREPO_RELATIVE_PATH } $ { CMAKE_CURRENT_SOURCE_DIR } / $ { base } ) <nl> + endif ( ) <nl> + endif ( ) <nl> + endfunction ( ) <nl> + <nl> + if ( WANT_THRIFT ) <nl> + # We use shipit to mirror in these locations from the monorepo <nl> + maybe_shipit_dir ( " . . / fboss / common " ) <nl> + maybe_shipit_dir ( " . . / eden " ) <nl> + endif ( ) <nl> + <nl> set ( PACKAGE_VERSION " 4 . 9 . 4 " ) <nl> set ( WATCHMAN_VERSION_OVERRIDE " " CACHE STRING " Use this version code for \ <nl> Watchman instead of the default ( $ { PACKAGE_VERSION } ) " ) <nl> set ( PACKAGE_STRING " $ { PACKAGE_NAME } $ { PACKAGE_VERSION } " ) <nl> set ( PACKAGE_TARNAME " $ { PACKAGE_NAME } - $ { PACKAGE_VERSION } " ) <nl> set ( PACKAGE_BUGREPORT " https : / / github . com / facebook / watchman / issues " ) <nl> project ( $ { PACKAGE_NAME } CXX C ) <nl> + include ( ThriftCppLibrary ) <nl> include ( CheckFunctionExists ) <nl> include ( CheckIncludeFiles ) <nl> include ( CheckStructHasMember ) <nl> if ( THREADS_FOUND AND NOT TARGET Threads : : Threads ) <nl> endif ( ) <nl> endif ( ) <nl> <nl> - <nl> find_package ( OpenSSL ) <nl> <nl> # This block is for cmake 3 . 0 which doesn ' t define the OpenSSL : : Crypto <nl> find_package ( Glog REQUIRED ) <nl> find_package ( LibEvent REQUIRED ) <nl> get_filename_component ( LIBEVENT_LIBDIR " $ { LIBEVENT_LIB } " DIRECTORY ) <nl> link_directories ( $ { LIBEVENT_LIBDIR } ) <nl> - <nl> find_package ( folly CONFIG REQUIRED ) <nl> <nl> + <nl> + if ( WANT_THRIFT ) <nl> + find_package ( fizz CONFIG REQUIRED ) <nl> + find_package ( wangle CONFIG REQUIRED ) <nl> + find_package ( FBThrift CONFIG REQUIRED ) <nl> + find_package ( yarpl CONFIG REQUIRED ) <nl> + find_package ( rsocket CONFIG REQUIRED ) <nl> + endif ( ) <nl> + <nl> find_package ( PythonInterp REQUIRED ) <nl> message ( STATUS " Found python $ { PYTHON_VERSION_STRING } " ) <nl> <nl> else ( ) <nl> set ( CMAKE_CXX_FLAGS_RELEASE " $ { CMAKE_CXX_FLAGS_COMMON } - O3 " ) <nl> endif ( ) <nl> <nl> + if ( WANT_THRIFT ) <nl> + add_subdirectory ( common ) <nl> + endif ( ) <nl> + <nl> add_library ( wildmatch STATIC <nl> thirdparty / wildmatch / wildmatch . c <nl> thirdparty / wildmatch / wildmatch . h <nl> endif ( ) <nl> add_library ( testsupport STATIC $ { testsupport_sources } ) <nl> target_link_libraries ( testsupport log string jansson Folly : : folly glog : : glog ) <nl> <nl> + if ( WANT_THRIFT ) <nl> + add_thrift_cpp2_library ( <nl> + eden_service_thrift <nl> + eden / fs / service / eden . thrift <nl> + SERVICES <nl> + EdenService <nl> + DEPENDS <nl> + fb303_thrift_cpp2 <nl> + ) <nl> + add_thrift_cpp2_library ( <nl> + streamingeden_thrift <nl> + eden / fs / service / streamingeden . 
thrift <nl> + SERVICES <nl> + StreamingEdenService <nl> + DEPENDS <nl> + eden_service_thrift <nl> + ) <nl> + endif ( ) <nl> + <nl> list ( APPEND watchman_sources <nl> ChildProcess . cpp <nl> ContentHash . cpp <nl> saved_state / SavedStateInterface . cpp <nl> scm / Mercurial . cpp <nl> scm / SCM . cpp <nl> watcher / auto . cpp <nl> - # watcher / eden . cpp <nl> watcher / fsevents . cpp <nl> watcher / inotify . cpp <nl> watcher / kqueue . cpp <nl> watcher / portfs . cpp <nl> ) <nl> <nl> - if ( CMAKE_SYSTEM_NAME STREQUAL " Windows " ) <nl> + if ( WANT_THRIFT ) <nl> + # We currently only support talking to eden on posix systems <nl> + list ( APPEND watchman_sources watcher / eden . cpp ) <nl> + endif ( ) <nl> + <nl> + if ( WIN32 ) <nl> list ( APPEND watchman_sources <nl> stream_win . cpp <nl> watcher / win32 . cpp <nl> endif ( ) <nl> add_executable ( watchman $ { watchman_sources } ) <nl> target_link_libraries ( <nl> watchman <nl> - log hash string err jansson wildmatch Folly : : folly glog : : glog ) <nl> + log <nl> + hash <nl> + string <nl> + err <nl> + jansson <nl> + wildmatch <nl> + Folly : : folly <nl> + glog : : glog <nl> + ) <nl> + <nl> + if ( WANT_THRIFT ) <nl> + target_link_libraries ( <nl> + watchman <nl> + streamingeden_thrift <nl> + $ { YARPL_LIBRARIES } <nl> + rsocket : : ReactiveSocket <nl> + FBThrift : : thriftcpp2 <nl> + ) <nl> + endif ( ) <nl> + <nl> if ( CMAKE_SYSTEM_NAME STREQUAL " Darwin " ) <nl> target_link_libraries ( watchman " - framework CoreServices " ) <nl> elseif ( CMAKE_SYSTEM_NAME STREQUAL " Windows " ) <nl> mmm a / build / fbcode_builder_config . py <nl> ppp b / build / fbcode_builder_config . py <nl> <nl> <nl> import os <nl> <nl> + import specs . fbthrift as fbthrift <nl> import specs . folly as folly <nl> import specs . gmock as gmock <nl> from shell_quoting import ShellQuoted , path_join <nl> def fbcode_builder_spec ( builder ) : <nl> projects = builder . option ( " projects_dir " ) <nl> <nl> return { <nl> - " depends_on " : [ gmock , folly ] , <nl> + " depends_on " : [ gmock , folly , fbthrift ] , <nl> " steps " : [ <nl> builder . fb_github_cmake_install ( " watchman / _build " , " . . " ) , <nl> builder . step ( <nl> new file mode 100644 <nl> index 000000000 . . 7f870ab98 <nl> mmm / dev / null <nl> ppp b / common / . gitignore <nl> <nl> + / CMakeFiles <nl> + / CTestTestfile . cmake <nl> + / Makefile <nl> + / cmake_install . cmake <nl> + / lib * . a <nl> new file mode 100644 <nl> index 000000000 . . 0edf1ccba <nl> mmm / dev / null <nl> ppp b / common / fb303 / if / . gitignore <nl> @ @ - 0 , 0 + 1 @ @ <nl> + / gen - cpp2 <nl> mmm a / getdeps . py <nl> ppp b / getdeps . py <nl> def _build ( self , project ) : <nl> self . _run_cmd ( [ " make " , " install " ] ) <nl> <nl> <nl> - class JeMallocBuilder ( BuilderBase ) : <nl> - def __init__ ( self , subdir = None , env = None , args = None ) : <nl> - super ( JeMallocBuilder , self ) . __init__ ( subdir = subdir , env = env ) <nl> - self . args = args or [ ] <nl> - <nl> - def _build ( self , project ) : <nl> - self . _run_cmd ( <nl> - [ " . / autogen . sh " ] + [ " - - prefix = " + project . opts . install_dir ] + self . args <nl> - ) <nl> - self . _run_cmd ( [ " make " , " - j % s " % project . opts . num_jobs ] ) <nl> - self . _run_cmd ( [ " make " , " install_bin " , " install_include " , " install_lib " ] ) <nl> - <nl> - <nl> class CMakeBuilder ( BuilderBase ) : <nl> def __init__ ( self , subdir = None , env = None , defines = None ) : <nl> super ( CMakeBuilder , self ) . 
__init__ ( subdir = subdir , env = env , build_dir = " _build " ) <nl> def _build ( self , project ) : <nl> defines = { <nl> " CMAKE_INSTALL_PREFIX " : project . opts . install_dir , <nl> " BUILD_SHARED_LIBS " : " OFF " , <nl> + # Some of the deps ( rsocket ) default to UBSAN enabled if left <nl> + # unspecified . Some of the deps fail to compile in release mode <nl> + # due to warning - > error promotion . RelWithDebInfo is the happy <nl> + # medium . <nl> + " CMAKE_BUILD_TYPE " : " RelWithDebInfo " , <nl> } <nl> <nl> # If any of these env vars are set , set the corresponding cmake def . <nl> def install_vcpkg ( pkgs ) : <nl> <nl> <nl> def get_projects ( opts ) : <nl> - projects = [ ] <nl> - if os . path . exists ( " / usr / include / jemalloc / jemalloc . h " ) : <nl> - # ubuntu 16 has a very old jemalloc installed , and folly doesn ' t <nl> - # currently have a way to be told not to try linking against it . <nl> - # To workaround this snafu , we build our own current version <nl> - # of jemalloc for folly to find and use . <nl> - # If we don ' t have jemalloc installed we don ' t need to install it . <nl> - # Confusing ! <nl> - projects . append ( <nl> - Project ( <nl> - " jemalloc " , <nl> - opts , <nl> - GitUpdater ( " https : / / github . com / jemalloc / jemalloc . git " ) , <nl> - JeMallocBuilder ( ) , <nl> - ) <nl> - ) <nl> - <nl> - projects + = [ <nl> + projects = [ <nl> Project ( <nl> " mstch " , <nl> opts , <nl> def get_projects ( opts ) : <nl> ] <nl> <nl> if not is_win ( ) : <nl> - # Ubuntu 16 also has an old version of zstd , so build our own . <nl> + # Ubuntu 16 has an old version of zstd , so build our own . <nl> # We can ' t use the MakeBuilder on windows , but we can get zstd <nl> # from vcpkg so we ' re ok there . <nl> - projects . append ( <nl> + projects + = [ <nl> Project ( <nl> " zstd " , <nl> opts , <nl> GitUpdater ( " https : / / github . com / facebook / zstd . git " ) , <nl> MakeBuilder ( ) , <nl> ) <nl> - ) <nl> + ] <nl> <nl> projects + = [ <nl> # TODO : see if we can get get a faster and / or static build working <nl> def get_projects ( opts ) : <nl> ) <nl> ] <nl> <nl> - # We ' ll add this in a later diff ; there are some glog issues in these <nl> - # projects that need to be resolved before we can guarantee success <nl> - need_thrift = False <nl> + need_thrift = not is_win ( ) <nl> if need_thrift : <nl> projects + = [ <nl> Project ( <nl> " libsodium " , <nl> opts , <nl> GitUpdater ( " https : / / github . com / jedisct1 / libsodium . git " ) , <nl> - AutoconfBuilder ( ) , <nl> + AutoconfBuilder ( args = [ " - - disable - shared " ] ) , <nl> ) , <nl> Project ( <nl> " fizz " , <nl> mmm a / watcher / eden . cpp <nl> ppp b / watcher / eden . cpp <nl> class EdenView : public QueryableView { <nl> } ; <nl> <nl> std : : shared_ptr < watchman : : QueryableView > detectEden ( w_root_t * root ) { <nl> - if ( root - > fs_type ! = " fuse " ) { <nl> + if ( root - > fs_type ! = " fuse " & & root - > fs_type ! = " osxfuse_eden " ) { <nl> throw std : : runtime_error ( to < std : : string > ( " not a FUSE file system " ) ) ; <nl> } <nl> <nl>
watchman : pull in thrift in the oss build for eden support
facebook/watchman
631bdda241a136d975883de7d78aacce59dc0721
2019-02-05T05:38:10Z
mmm a / tensorflow / contrib / data / kernels / prefetching_kernels . cc <nl> ppp b / tensorflow / contrib / data / kernels / prefetching_kernels . cc <nl> class FunctionBufferingResource : public ResourceBase { <nl> source_device_ ( source_device ) , <nl> target_device_ ( target_device ) , <nl> func_args_ ( func_args ) , <nl> - thread_pool_ ( new thread : : ThreadPool ( Env : : Default ( ) , ThreadOptions ( ) , <nl> - " buffer_resource " , thread_pool_size , <nl> - false / * low_latency_hint * / ) ) , <nl> handle_ ( kInvalidHandle ) , <nl> is_buffering_ ( false ) , <nl> end_of_sequence_ ( false ) , <nl> cancelled_ ( false ) { <nl> - runner_ = [ this ] ( std : : function < void ( ) > c ) { <nl> - thread_pool_ - > Schedule ( std : : move ( c ) ) ; <nl> - } ; <nl> + if ( thread_pool_size > 0 ) { <nl> + thread_pool_ = new thread : : ThreadPool ( Env : : Default ( ) , ThreadOptions ( ) , <nl> + " buffer_resource " , thread_pool_size , <nl> + false / * low_latency_hint * / ) ; <nl> + runner_ = [ this ] ( std : : function < void ( ) > c ) { <nl> + thread_pool_ - > Schedule ( std : : move ( c ) ) ; <nl> + } ; <nl> + } <nl> } <nl> <nl> ~ FunctionBufferingResource ( ) override { <nl> class FunctionBufferingResource : public ResourceBase { <nl> cond_var_ . wait ( l ) ; <nl> } <nl> } <nl> - delete thread_pool_ ; <nl> + if ( thread_pool_ ! = nullptr ) { <nl> + delete thread_pool_ ; <nl> + } <nl> } <nl> <nl> string DebugString ( ) override { <nl> class FunctionBufferingResource : public ResourceBase { <nl> FunctionLibraryRuntime : : Options opts ; <nl> / / Copied from CapturedFunction : : generate_step_id ( ) ; <nl> opts . step_id = - std : : abs ( static_cast < int64 > ( random : : New64 ( ) ) ) ; <nl> - opts . runner = & runner_ ; <nl> + if ( runner_ ! = nullptr ) { <nl> + opts . runner = & runner_ ; <nl> + } <nl> opts . source_device = source_device_ ; <nl> AllocatorAttributes arg_alloc_attr ; <nl> arg_alloc_attr . set_on_host ( true ) ; <nl> class FunctionBufferingResource : public ResourceBase { <nl> const string source_device_ ; <nl> const string target_device_ ; <nl> const std : : vector < Tensor > func_args_ ; <nl> - thread : : ThreadPool * thread_pool_ ; <nl> + thread : : ThreadPool * thread_pool_ = nullptr ; <nl> FunctionLibraryRuntime : : Handle handle_ GUARDED_BY ( mu_ ) ; <nl> std : : deque < BufferElement > buffer_ GUARDED_BY ( mu_ ) ; <nl> std : : deque < FunctionBufferCallback > requests_ GUARDED_BY ( mu_ ) ; <nl> mmm a / tensorflow / contrib / data / python / ops / prefetching_ops . py <nl> ppp b / tensorflow / contrib / data / python / ops / prefetching_ops . py <nl> def function_buffering_resource ( string_arg , <nl> target_device , <nl> f , <nl> buffer_size , <nl> - thread_pool_size = 1 , <nl> + thread_pool_size = 0 , <nl> container = " " , <nl> shared_name = None , <nl> name = None ) : <nl>
Allowing the FunctionBufferingResource to be passed in thread_pool_size = 0 in which case we wouldn ' t pass in a runner to the FLR : : Run call and rely on the underlying device threadpool instead .
tensorflow/tensorflow
dee1bc350ac0826822161f211f7fa8a1e1ae62f0
2018-03-19T22:11:07Z
mmm a / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxFFmpeg . cpp <nl> ppp b / xbmc / cores / dvdplayer / DVDDemuxers / DVDDemuxFFmpeg . cpp <nl> bool CDVDDemuxFFmpeg : : SeekChapter ( int chapter , double * startpts ) <nl> <nl> AVChapter * ch = m_pFormatContext - > chapters [ chapter - 1 ] ; <nl> double dts = ConvertTimestamp ( ch - > start , ch - > time_base . den , ch - > time_base . num ) ; <nl> - return SeekTime ( DVD_TIME_TO_MSEC ( dts ) , false , startpts ) ; <nl> + return SeekTime ( DVD_TIME_TO_MSEC ( dts ) , true , startpts ) ; <nl> # else <nl> return false ; <nl> # endif <nl>
changed : do a backward seek when jumping to chapters .
xbmc/xbmc
a456036c5e81f4cec27b83b93445678cf68b576b
2011-12-04T13:58:01Z

This dataset is the CSV version of the original MCMD (Multi-programming-language Commit Message Dataset) introduced by Tao et al. in their paper "On the Evaluation of Commit Message Generation Models: An Experimental Study". The original release of the dataset can be found on Zenodo.
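
For anyone consuming this CSV export, the sketch below loads a split with pandas and reverses the diff tokenization visible in the rows above. Everything in it is an assumption drawn from this page rather than from the original release: the file name `mcmd_cpp_train.csv` is hypothetical, the column names (`diff`, `msg`, `repo`, `sha`, `time`) are inferred from the row layout, and the token conventions (`<nl>` for line breaks, `mmm`/`ppp` standing in for the `---`/`+++` file headers) are read off the samples themselves; verify them against the actual files before relying on them.

```python
# Minimal sketch, not part of the original release: load the CSV export of
# MCMD and turn one tokenized diff back into a readable unified diff.
# File name, column names, and token conventions are assumptions inferred
# from the rows shown on this page.
import re

import pandas as pd

df = pd.read_csv("mcmd_cpp_train.csv")  # hypothetical file name

def detokenize_diff(diff: str) -> str:
    """Undo the diff tokenization seen in the rows above (assumed scheme)."""
    text = re.sub(r"\s*<nl>\s*", "\n", diff)  # '<nl>' appears to mark line breaks
    lines = []
    for line in text.splitlines():
        if line.startswith("mmm "):    # 'mmm a / path' looks like '--- a/path'
            line = "---" + line[3:]
        elif line.startswith("ppp "):  # 'ppp b / path' looks like '+++ b/path'
            line = "+++" + line[3:]
        lines.append(line)
    return "\n".join(lines)

row = df.iloc[0]
print(row["repo"], row["sha"], row["msg"])
print(detokenize_diff(row["diff"])[:500])
```

Note that this only restores line structure and file headers; the remaining intra-line spacing (e.g. spaces around `.` and `/`) is part of the dataset's token-level preprocessing and is left untouched here.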
