Load file(../data/ADFA-LD/Training_Data_Master/UTD-0796.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0797.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0798.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0799.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0800.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0801.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0802.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0803.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0804.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0805.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0806.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0807.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0808.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0809.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0810.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0811.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0812.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0813.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0814.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0815.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0816.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0817.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0818.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0819.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0820.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0821.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0822.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0823.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0824.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0825.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0826.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0827.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0828.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0829.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0830.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0831.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0832.txt)
Load file(../data/ADFA-LD/Training_Data_Master/UTD-0833.txt)
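The Load file(...) lines above are progress output from reading the ADFA-LD normal training traces one file at a time. A minimal sketch of such a loader follows, assuming each Training_Data_Master *.txt file holds a single space-separated sequence of system-call numbers; load_adfa_traces is an illustrative name, not necessarily the function used in 8-2.py.

import os

def load_adfa_traces(rootdir):
    # Read every *.txt trace under rootdir into a list of syscall-ID lists.
    traces = []
    for name in sorted(os.listdir(rootdir)):
        if not name.endswith(".txt"):
            continue
        path = os.path.join(rootdir, name)
        print("Load file(%s)" % path)  # mirrors the progress lines above
        with open(path) as f:
            traces.append([int(tok) for tok in f.read().split()])
    return traces

# e.g. traces = load_adfa_traces("../data/ADFA-LD/Training_Data_Master/")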
Traceback (most recent call last):
File "D:\Users\Administrator\eclipsFaySon\secuit\code\8-2.py", line 76, in
score=model_selection.cross_val_score(logreg, x, y, n_jobs=-1, cv=10)
File "D:\ProgramData\Anaconda2\lib\site-packages\sklearn\model_selection_validation.py", line 342, in cross_val_score
pre_dispatch=pre_dispatch)
File "D:\ProgramData\Anaconda2\lib\site-packages\sklearn\model_selection_validation.py", line 206, in cross_validate
for train, test in cv.split(X, y, groups))
File "D:\ProgramData\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py", line 789, in call
self.retrieve()
File "D:\ProgramData\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py", line 740, in retrieve
raise exception
sklearn.externals.joblib.my_exceptions.JoblibValueError: JoblibValueError
Multiprocessing exception:
...........................................................................
D:\Users\Administrator\eclipsFaySon\secuit\code\8-2.py in <module>()
71 solver='sgd', verbose=10, tol=1e-4, random_state=1,
72 learning_rate_init=.1)
73
74 logreg = linear_model.LogisticRegression(C=1e5)
75
---> 76 score=model_selection.cross_val_score(logreg, x, y, n_jobs=-1, cv=10)
77 print np.mean(score)
78
79
80
...........................................................................
D:\ProgramData\Anaconda2\lib\site-packages\sklearn\model_selection\_validation.py in cross_val_score(estimator=LogisticRegression(C=100000.0, class_weight=None...linear', tol=0.0001, verbose=0, warm_start=False), X=array([[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0,...0],
[0, 0, 0, ..., 0, 0, 0]], dtype=int64), y=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], groups=None, scoring=None, cv=10, n_jobs=-1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs')
337 cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups,
338 scoring={'score': scorer}, cv=cv,
339 return_train_score=False,
340 n_jobs=n_jobs, verbose=verbose,
341 fit_params=fit_params,
--> 342 pre_dispatch=pre_dispatch)
pre_dispatch = '2*n_jobs'
343 return cv_results['test_score']
344
345
346 def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
...........................................................................
D:\ProgramData\Anaconda2\lib\site-packages\sklearn\model_selection\_validation.py in cross_validate(estimator=LogisticRegression(C=100000.0, class_weight=None...linear', tol=0.0001, verbose=0, warm_start=False), X=array([[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0,...0],
[0, 0, 0, ..., 0, 0, 0]], dtype=int64), y=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], groups=None, scoring={'score': }, cv=StratifiedKFold(n_splits=10, random_state=None, shuffle=False), n_jobs=-1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', return_train_score=False)
201 scores = parallel(
202 delayed(_fit_and_score)(
203 clone(estimator), X, y, scorers, train, test, verbose, None,
204 fit_params, return_train_score=return_train_score,
205 return_times=True)
--> 206 for train, test in cv.split(X, y, groups))
cv.split = <bound method StratifiedKFold.split of Stratifie...d(n_splits=10, random_state=None, shuffle=False)>
X = array([[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0,...0],
[0, 0, 0, ..., 0, 0, 0]], dtype=int64)
y = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...]
groups = None
207
208 if return_train_score:
209 train_scores, test_scores, fit_times, score_times = zip(*scores)
210 train_scores = _aggregate_score_dicts(train_scores)
...........................................................................
D:\ProgramData\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self=Parallel(n_jobs=-1), iterable=<generator object >)
784 if pre_dispatch == "all" or n_jobs == 1:
785 # The iterable was consumed all at once by the above for loop.
786 # No need to wait for async callbacks to trigger to
787 # consumption.
788 self._iterating = False
--> 789 self.retrieve()
self.retrieve = <bound method Parallel.retrieve of Parallel(n_jobs=-1)>
790 # Make sure that we get a last message telling us we are done
791 elapsed_time = time.time() - self._start_time
792 self._print('Done %3i out of %3i | elapsed: %s finished',
793 (len(self._output), len(self._output),
Sub-process traceback:
ValueError Wed Apr 3 08:53:56 2019
PID: 16256 Python 2.7.15: D:\ProgramData\Anaconda2\python.exe
...........................................................................
D:\ProgramData\Anaconda2\lib\site-packages\sklearn\externals\joblib\parallel.py in __call__(self=<sklearn.externals.joblib.parallel.BatchedCalls object>)
126 def __init__(self, iterator_slice):
127 self.items = list(iterator_slice)
128 self._size = len(self.items)
129
130 def __call__(self):
--> 131 return [func(*args, **kwargs) for func, args, kwargs in self.items]
func =
args = (LogisticRegression(C=100000.0, class_weight=None...linear', tol=0.0001, verbose=0, warm_start=False), array([[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0,...0],
[0, 0, 0, ..., 0, 0, 0]], dtype=int64), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], {'score': }, array([ 84, 85, 86, 87, 88, 89, 90, 91, ... 826, 827, 828, 829, 830, 831, 832], dtype=int64), array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1..., 77, 78, 79, 80, 81, 82, 83],
dtype=int64), 0, None, None)
kwargs = {'return_times': True, 'return_train_score': False}
self.items = [(, (LogisticRegression(C=100000.0, class_weight=None...linear', tol=0.0001, verbose=0, warm_start=False), array([[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0,...0],
[0, 0, 0, ..., 0, 0, 0]], dtype=int64), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], {'score': }, array([ 84, 85, 86, 87, 88, 89, 90, 91, ... 826, 827, 828, 829, 830, 831, 832], dtype=int64), array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1..., 77, 78, 79, 80, 81, 82, 83],
dtype=int64), 0, None, None), {'return_times': True, 'return_train_score': False})]
132
133 def __len__(self):
134 return self._size
135
...........................................................................
D:\ProgramData\Anaconda2\lib\site-packages\sklearn\model_selection\_validation.py in _fit_and_score(estimator=LogisticRegression(C=100000.0, class_weight=None...linear', tol=0.0001, verbose=0, warm_start=False), X=array([[0, 0, 0, ..., 0, 0, 0],
[0, 0, 0,...0],
[0, 0, 0, ..., 0, 0, 0]], dtype=int64), y=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...], scorer={'score': }, train=array([ 84, 85, 86, 87, 88, 89, 90, 91, ... 826, 827, 828, 829, 830, 831, 832], dtype=int64), test=array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 1..., 77, 78, 79, 80, 81, 82, 83],
dtype=int64), verbose=0, parameters=None, fit_params={}, return_train_score=False, return_parameters=False, return_n_test_samples=False, return_times=True, error_score='raise')
453
454 try:
455 if y_train is None:
456 estimator.fit(X_train, **fit_params)
457 else:
--> 458 estimator.fit(X_train, y_train, **fit_params)
estimator.fit = <bound method LogisticRegression.fit of Logistic...inear', tol=0.0001, verbose=0, warm_start=False)>
X_train = array([[ 0, 0, 0, ..., 0, 0, 0],
... 0, 0, 0, ..., 0, 0, 0]], dtype=int64)
y_train = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...]
fit_params = {}
459
460 except Exception as e:
461 # Note fit time as time until error
462 fit_time = time.time() - start_time
...........................................................................
D:\ProgramData\Anaconda2\lib\site-packages\sklearn\linear_model\logistic.py in fit(self=LogisticRegression(C=100000.0, class_weight=None...linear', tol=0.0001, verbose=0, warm_start=False), X=array([[ 0., 0., 0., ..., 0., 0., 0.]... [ 0., 0., 0., ..., 0., 0., 0.]]), y=array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0]), sample_weight=None)
1232 " = {}.".format(self.n_jobs))
1233 self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
1234 X, y, self.C, self.fit_intercept, self.intercept_scaling,
1235 self.class_weight, self.penalty, self.dual, self.verbose,
1236 self.max_iter, self.tol, self.random_state,
-> 1237 sample_weight=sample_weight)
sample_weight = None
1238 self.n_iter_ = np.array([n_iter_])
1239 return self
1240
1241 if self.solver in ['sag', 'saga']:
...........................................................................
D:\ProgramData\Anaconda2\lib\site-packages\sklearn\svm\base.py in _fit_liblinear(X=array([[ 0., 0., 0., ..., 0., 0., 0.]... [ 0., 0., 0., ..., 0., 0., 0.]]), y=array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,...0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0]), C=100000.0, fit_intercept=True, intercept_scaling=1, class_weight=None, penalty='l2', dual=False, verbose=0, max_iter=100, tol=0.0001, random_state=None, multi_class='ovr', loss='logistic_regression', epsilon=0.1, sample_weight=None)
848 y_ind = enc.fit_transform(y)
849 classes_ = enc.classes_
850 if len(classes_) < 2:
851 raise ValueError("This solver needs samples of at least 2 classes"
852 " in the data, but the data contains only one"
--> 853 " class: %r" % classes_[0])
classes_ = array([0])
854
855 class_weight_ = compute_class_weight(class_weight, classes_, y)
856 else:
857 class_weight_ = np.empty(0, dtype=np.float64)
ValueError: This solver needs samples of at least 2 classes in the data, but the data contains only one class: 0
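The final ValueError is the root cause of the whole stack: every label in y is 0, because only the normal Training_Data_Master traces were loaded, and liblinear (the solver behind LogisticRegression here) refuses to fit on a single class. A sketch of a guard that surfaces this before cross-validation is given below, assuming x and y are the feature matrix and label list built earlier in 8-2.py; the function name and error message are illustrative, not the script's own code.

import numpy as np
from sklearn import linear_model, model_selection

def cross_val_two_class(x, y, cv=10):
    # liblinear needs samples from at least two classes, as the traceback shows.
    classes = np.unique(y)
    if len(classes) < 2:
        # Exactly the condition raised above: y is all zeros because only
        # normal traces were loaded and no second (attack) class is present.
        raise ValueError("y contains only class %r; add a second class first"
                         % classes[0])
    logreg = linear_model.LogisticRegression(C=1e5)
    return model_selection.cross_val_score(logreg, x, y, n_jobs=-1, cv=cv)

In practice the fix would be to also load attack traces (presumably the ADFA-LD Attack_Data_Master files) and label them 1, so that y contains both classes before cross_val_score is called.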