Class: CNTK::DistributedLearner
- Inherits:
  - CNTK::Learner (the SWIG proxy class registered for std::shared_ptr< CNTK::Learner >, shown in the generated source as ((swig_class *) SWIGTYPE_p_std__shared_ptrT_CNTK__Learner_t->clientdata)->klass)
    - Object
    - CNTK::Learner (SWIG proxy)
    - CNTK::DistributedLearner
- Defined in:
  - ext/cntk/cntk_wrap.cxx
Instance Method Summary
- #get_communicator(*args) ⇒ Object
- #learning_rate(*args) ⇒ Object
- #parallelization_after(*args) ⇒ Object
- #reset_learning_rate(*args) ⇒ Object
- #reset_smoothed_gradients(*args) ⇒ Object
- #update(*args) ⇒ Object
Instance Method Details
#get_communicator(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 62063
SWIGINTERN VALUE
_wrap_DistributedLearner_get_communicator(int argc, VALUE *argv, VALUE self) {
CNTK::DistributedLearner *arg1 = (CNTK::DistributedLearner *) 0 ;
void *argp1 = 0 ;
int res1 = 0 ;
std::shared_ptr< CNTK::DistributedLearner > tempshared1 ;
std::shared_ptr< CNTK::DistributedLearner > *smartarg1 = 0 ;
CNTK::DistributedCommunicatorPtr result;
VALUE vresult = Qnil;
if ((argc < 0) || (argc > 0)) {
rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail;
}
{
swig_ruby_owntype newmem = {
0, 0
};
res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__DistributedLearner_t, 0 | 0 , &newmem);
if (!SWIG_IsOK(res1)) {
SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::DistributedLearner *","GetCommunicator", 1, self ));
}
if (newmem.own & SWIG_CAST_NEW_MEMORY) {
tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
delete reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
arg1 = const_cast< CNTK::DistributedLearner * >(tempshared1.get());
} else {
smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
arg1 = const_cast< CNTK::DistributedLearner * >((smartarg1 ? smartarg1->get() : 0));
}
}
{
try {
result = (arg1)->GetCommunicator();
}
catch (const std::runtime_error &e) {
SWIG_exception(SWIG_RuntimeError,e.what());
}
catch (const std::invalid_argument &e) {
SWIG_exception(SWIG_ValueError,e.what());
}
catch (const std::logic_error &e) {
SWIG_exception(SWIG_RuntimeError,e.what());
}
catch (...) {
SWIG_exception(SWIG_UnknownError,"Runtime exception");
}
}
{
std::shared_ptr< CNTK::DistributedCommunicator > *smartresult = result ? new std::shared_ptr< CNTK::DistributedCommunicator >(result) : 0;
vresult = SWIG_NewPointerObj(SWIG_as_voidptr(smartresult), SWIGTYPE_p_std__shared_ptrT_CNTK__DistributedCommunicator_t, SWIG_POINTER_OWN);
}
return vresult;
fail:
return Qnil;
}
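
From Ruby, the wrapper above forwards to the underlying C++ DistributedLearner::GetCommunicator() and returns the result wrapped as a CNTK::DistributedCommunicator. A minimal usage sketch follows; how `learner` is obtained is an assumption (the factory that builds a DistributedLearner is not shown on this page):

  # Sketch only: assumes `learner` is an already-constructed
  # CNTK::DistributedLearner supplied by whatever factory the gem provides.
  communicator = learner.get_communicator   # => CNTK::DistributedCommunicator
  # The communicator represents the worker processes taking part in
  # distributed training.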
#learning_rate(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 62336
SWIGINTERN VALUE
_wrap_DistributedLearner_learning_rate(int argc, VALUE *argv, VALUE self) {
CNTK::DistributedLearner *arg1 = (CNTK::DistributedLearner *) 0 ;
void *argp1 = 0 ;
int res1 = 0 ;
std::shared_ptr< CNTK::DistributedLearner > tempshared1 ;
std::shared_ptr< CNTK::DistributedLearner > *smartarg1 = 0 ;
double result;
VALUE vresult = Qnil;
if ((argc < 0) || (argc > 0)) {
rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail;
}
{
swig_ruby_owntype newmem = {
0, 0
};
res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__DistributedLearner_t, 0 | 0 , &newmem);
if (!SWIG_IsOK(res1)) {
SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::DistributedLearner *","LearningRate", 1, self ));
}
if (newmem.own & SWIG_CAST_NEW_MEMORY) {
tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
delete reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
arg1 = const_cast< CNTK::DistributedLearner * >(tempshared1.get());
} else {
smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
arg1 = const_cast< CNTK::DistributedLearner * >((smartarg1 ? smartarg1->get() : 0));
}
}
{
try {
result = (double)(arg1)->LearningRate();
}
catch (const std::runtime_error &e) {
SWIG_exception(SWIG_RuntimeError,e.what());
}
catch (const std::invalid_argument &e) {
SWIG_exception(SWIG_ValueError,e.what());
}
catch (const std::logic_error &e) {
SWIG_exception(SWIG_RuntimeError,e.what());
}
catch (...) {
SWIG_exception(SWIG_UnknownError,"Runtime exception");
}
}
vresult = SWIG_From_double(static_cast< double >(result));
return vresult;
fail:
return Qnil;
}
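
#learning_rate delegates to DistributedLearner::LearningRate() and converts the C++ double straight into a Ruby Float. A hedged sketch, again assuming a pre-built `learner`:

  current_lr = learner.learning_rate   # => Float, e.g. 0.001
  puts "learning rate: #{current_lr}"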
#parallelization_after(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 62441
SWIGINTERN VALUE
_wrap_DistributedLearner_parallelization_after(int argc, VALUE *argv, VALUE self) {
CNTK::DistributedLearner *arg1 = (CNTK::DistributedLearner *) 0 ;
void *argp1 = 0 ;
int res1 = 0 ;
std::shared_ptr< CNTK::DistributedLearner > tempshared1 ;
std::shared_ptr< CNTK::DistributedLearner > *smartarg1 = 0 ;
size_t result;
VALUE vresult = Qnil;
if ((argc < 0) || (argc > 0)) {
rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail;
}
{
swig_ruby_owntype newmem = {
0, 0
};
res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__DistributedLearner_t, 0 | 0 , &newmem);
if (!SWIG_IsOK(res1)) {
SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::DistributedLearner *","ParallelizationAfter", 1, self ));
}
if (newmem.own & SWIG_CAST_NEW_MEMORY) {
tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
delete reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
arg1 = const_cast< CNTK::DistributedLearner * >(tempshared1.get());
} else {
smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
arg1 = const_cast< CNTK::DistributedLearner * >((smartarg1 ? smartarg1->get() : 0));
}
}
{
try {
result = (arg1)->ParallelizationAfter();
}
catch (const std::runtime_error &e) {
SWIG_exception(SWIG_RuntimeError,e.what());
}
catch (const std::invalid_argument &e) {
SWIG_exception(SWIG_ValueError,e.what());
}
catch (const std::logic_error &e) {
SWIG_exception(SWIG_RuntimeError,e.what());
}
catch (...) {
SWIG_exception(SWIG_UnknownError,"Runtime exception");
}
}
vresult = SWIG_From_size_t(static_cast< size_t >(result));
return vresult;
fail:
return Qnil;
}
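
#parallelization_after returns DistributedLearner::ParallelizationAfter() as an Integer: the number of samples the learner processes before distributed parallelization takes effect. Sketch, with `learner` assumed to exist as above:

  warm_start = learner.parallelization_after   # => Integer (sample count)
  # Until this many samples have been seen, the learner runs its
  # warm-start phase before distributed aggregation begins.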
#reset_learning_rate(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 62274
SWIGINTERN VALUE
_wrap_DistributedLearner_reset_learning_rate(int argc, VALUE *argv, VALUE self) {
CNTK::DistributedLearner *arg1 = (CNTK::DistributedLearner *) 0 ;
CNTK::LearningRateSchedule *arg2 = 0 ;
void *argp1 = 0 ;
int res1 = 0 ;
std::shared_ptr< CNTK::DistributedLearner > tempshared1 ;
std::shared_ptr< CNTK::DistributedLearner > *smartarg1 = 0 ;
void *argp2 ;
int res2 = 0 ;
if ((argc < 1) || (argc > 1)) {
rb_raise(rb_eArgError, "wrong # of arguments(%d for 1)",argc); SWIG_fail;
}
{
swig_ruby_owntype newmem = {
0, 0
};
res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__DistributedLearner_t, 0 | 0 , &newmem);
if (!SWIG_IsOK(res1)) {
SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::DistributedLearner *","ResetLearningRate", 1, self ));
}
if (newmem.own & SWIG_CAST_NEW_MEMORY) {
tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
delete reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
arg1 = const_cast< CNTK::DistributedLearner * >(tempshared1.get());
} else {
smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
arg1 = const_cast< CNTK::DistributedLearner * >((smartarg1 ? smartarg1->get() : 0));
}
}
res2 = SWIG_ConvertPtr(argv[0], &argp2, SWIGTYPE_p_CNTK__TrainingParameterScheduleT_double_t, 0 );
if (!SWIG_IsOK(res2)) {
SWIG_exception_fail(SWIG_ArgError(res2), Ruby_Format_TypeError( "", "CNTK::LearningRateSchedule const &","ResetLearningRate", 2, argv[0] ));
}
if (!argp2) {
SWIG_exception_fail(SWIG_ValueError, Ruby_Format_TypeError("invalid null reference ", "CNTK::LearningRateSchedule const &","ResetLearningRate", 2, argv[0]));
}
arg2 = reinterpret_cast< CNTK::LearningRateSchedule * >(argp2);
{
try {
(arg1)->ResetLearningRate((CNTK::LearningRateSchedule const &)*arg2);
}
catch (const std::runtime_error &e) {
SWIG_exception(SWIG_RuntimeError,e.what());
}
catch (const std::invalid_argument &e) {
SWIG_exception(SWIG_ValueError,e.what());
}
catch (const std::logic_error &e) {
SWIG_exception(SWIG_RuntimeError,e.what());
}
catch (...) {
SWIG_exception(SWIG_UnknownError,"Runtime exception");
}
}
return Qnil;
fail:
return Qnil;
}
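
#reset_learning_rate expects a single CNTK::LearningRateSchedule (a TrainingParameterSchedule<double> underneath) and forwards it to ResetLearningRate. How the schedule object is built in Ruby depends on the gem's constructors, so the constructor call below is an assumption, not a documented signature:

  # Hypothetical constructor -- check the gem's LearningRateSchedule docs.
  new_schedule = CNTK::LearningRateSchedule.new(0.0005)
  learner.reset_learning_rate(new_schedule)   # => nil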
#reset_smoothed_gradients(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 62390
SWIGINTERN VALUE
_wrap_DistributedLearner_reset_smoothed_gradients(int argc, VALUE *argv, VALUE self) {
CNTK::DistributedLearner *arg1 = (CNTK::DistributedLearner *) 0 ;
void *argp1 = 0 ;
int res1 = 0 ;
std::shared_ptr< CNTK::DistributedLearner > tempshared1 ;
std::shared_ptr< CNTK::DistributedLearner > *smartarg1 = 0 ;
if ((argc < 0) || (argc > 0)) {
rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail;
}
{
swig_ruby_owntype newmem = {
0, 0
};
res1 = SWIG_ConvertPtrAndOwn(self, &argp1, SWIGTYPE_p_std__shared_ptrT_CNTK__DistributedLearner_t, 0 | 0 , &newmem);
if (!SWIG_IsOK(res1)) {
SWIG_exception_fail(SWIG_ArgError(res1), Ruby_Format_TypeError( "", "CNTK::DistributedLearner *","ResetSmoothedGradients", 1, self ));
}
if (newmem.own & SWIG_CAST_NEW_MEMORY) {
tempshared1 = *reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
delete reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
arg1 = const_cast< CNTK::DistributedLearner * >(tempshared1.get());
} else {
smartarg1 = reinterpret_cast< std::shared_ptr< CNTK::DistributedLearner > * >(argp1);
arg1 = const_cast< CNTK::DistributedLearner * >((smartarg1 ? smartarg1->get() : 0));
}
}
{
try {
(arg1)->ResetSmoothedGradients();
}
catch (const std::runtime_error &e) {
SWIG_exception(SWIG_RuntimeError,e.what());
}
catch (const std::invalid_argument &e) {
SWIG_exception(SWIG_ValueError,e.what());
}
catch (const std::logic_error &e) {
SWIG_exception(SWIG_RuntimeError,e.what());
}
catch (...) {
SWIG_exception(SWIG_UnknownError,"Runtime exception");
}
}
return Qnil;
fail:
return Qnil;
}
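
#reset_smoothed_gradients takes no arguments and calls ResetSmoothedGradients, clearing the learner's accumulated smoothed-gradient state. Sketch, with `learner` assumed as before:

  learner.reset_smoothed_gradients   # => nil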
#update(*args) ⇒ Object
# File 'ext/cntk/cntk_wrap.cxx', line 62571
SWIGINTERN VALUE _wrap_DistributedLearner_update(int nargs, VALUE *args, VALUE self) {
int argc;
VALUE argv[5];
int ii;
argc = nargs + 1;
argv[0] = self;
if (argc > 5) SWIG_fail;
for (ii = 1; (ii < argc); ++ii) {
argv[ii] = args[ii-1];
}
if (argc == 3) {
int _v;
int res = SWIG_ConvertPtr(argv[0], 0, SWIGTYPE_p_std__shared_ptrT_CNTK__DistributedLearner_t, 0);
_v = SWIG_CheckState(res);
if (_v) {
void *vptr = 0;
int res = SWIG_ConvertPtr(argv[1], &vptr, SWIGTYPE_p_std__unordered_mapT_CNTK__Parameter_std__shared_ptrT_CNTK__NDArrayView_t_std__hashT_CNTK__Parameter_t_std__equal_toT_CNTK__Parameter_t_std__allocatorT_std__pairT_CNTK__Parameter_const_std__shared_ptrT_CNTK__NDArrayView_t_t_t_t, 0);
_v = SWIG_CheckState(res);
if (_v) {
void *vptr = 0;
int res = SWIG_ConvertPtr(argv[2], &vptr, SWIGTYPE_p_CNTK__MinibatchInfo, 0);
_v = SWIG_CheckState(res);
if (_v) {
return _wrap_DistributedLearner_update__SWIG_2(nargs, args, self);
}
}
}
}
if (argc == 3) {
int _v;
int res = SWIG_ConvertPtr(argv[0], 0, SWIGTYPE_p_std__shared_ptrT_CNTK__DistributedLearner_t, 0);
_v = SWIG_CheckState(res);
if (_v) {
void *vptr = 0;
int res = SWIG_ConvertPtr(argv[1], &vptr, SWIGTYPE_p_std__unordered_mapT_CNTK__Parameter_std__shared_ptrT_CNTK__NDArrayView_t_std__hashT_CNTK__Parameter_t_std__equal_toT_CNTK__Parameter_t_std__allocatorT_std__pairT_CNTK__Parameter_const_std__shared_ptrT_CNTK__NDArrayView_t_t_t_t, 0);
_v = SWIG_CheckState(res);
if (_v) {
{
int res = SWIG_AsVal_size_t(argv[2], NULL);
_v = SWIG_CheckState(res);
}
if (_v) {
return _wrap_DistributedLearner_update__SWIG_1(nargs, args, self);
}
}
}
}
if (argc == 4) {
int _v;
int res = SWIG_ConvertPtr(argv[0], 0, SWIGTYPE_p_std__shared_ptrT_CNTK__DistributedLearner_t, 0);
_v = SWIG_CheckState(res);
if (_v) {
void *vptr = 0;
int res = SWIG_ConvertPtr(argv[1], &vptr, SWIGTYPE_p_std__unordered_mapT_CNTK__Parameter_std__shared_ptrT_CNTK__NDArrayView_t_std__hashT_CNTK__Parameter_t_std__equal_toT_CNTK__Parameter_t_std__allocatorT_std__pairT_CNTK__Parameter_const_std__shared_ptrT_CNTK__NDArrayView_t_t_t_t, 0);
_v = SWIG_CheckState(res);
if (_v) {
{
int res = SWIG_AsVal_size_t(argv[2], NULL);
_v = SWIG_CheckState(res);
}
if (_v) {
{
int res = SWIG_AsVal_bool(argv[3], NULL);
_v = SWIG_CheckState(res);
}
if (_v) {
return _wrap_DistributedLearner_update__SWIG_0(nargs, args, self);
}
}
}
}
}
fail:
Ruby_Format_OverloadedError( argc, 5, "DistributedLearner.update",
" bool DistributedLearner.update(std::unordered_map< CNTK::Parameter,CNTK::NDArrayViewPtr,std::hash< CNTK::Parameter >,std::equal_to< CNTK::Parameter >,std::allocator< std::pair< CNTK::Parameter const,CNTK::NDArrayViewPtr > > > &gradientValues, size_t minibatchSampleCount, bool sweepEnd)\n"
" bool DistributedLearner.update(std::unordered_map< CNTK::Parameter,CNTK::NDArrayViewPtr,std::hash< CNTK::Parameter >,std::equal_to< CNTK::Parameter >,std::allocator< std::pair< CNTK::Parameter const,CNTK::NDArrayViewPtr > > > &gradientValues, size_t minibatchSampleCount)\n"
" bool DistributedLearner.update(std::unordered_map< CNTK::Parameter,CNTK::NDArrayViewPtr,std::hash< CNTK::Parameter >,std::equal_to< CNTK::Parameter >,std::allocator< std::pair< CNTK::Parameter const,CNTK::NDArrayViewPtr > > > &gradientValues, CNTK::MinibatchInfo &minibatch)\n");
return Qnil;
}
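
The dispatcher above selects one of three Update overloads by inspecting the argument types: (gradients, minibatch_sample_count, sweep_end), (gradients, minibatch_sample_count), or (gradients, minibatch_info). Below is a hedged sketch of the three call shapes; `gradients`, `sample_count`, `sweep_end`, and `minibatch_info` are placeholder variables. `gradients` must be the SWIG-wrapped unordered_map of Parameter to NDArrayView (normally produced by the training loop, not built by hand), and `minibatch_info` a CNTK::MinibatchInfo:

  # All three forms return true or false (whether the update was applied).
  learner.update(gradients, sample_count, sweep_end)   # explicit sweep-end flag
  learner.update(gradients, sample_count)
  learner.update(gradients, minibatch_info)            # CNTK::MinibatchInfo variant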