Add files via upload · BNTechie/Predictive-modeling@209476e · GitHub
[go: up one dir, main page]

Skip to content

Commit 209476e

Browse files
authored
Add files via upload
1 parent f2dbbab commit 209476e

File tree

2 files changed

+248
-0
lines changed

2 files changed

+248
-0
lines changed
Lines changed: 230 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,230 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "markdown",
5+
"metadata": {},
6+
"source": [
7+
"Project type: Clustering, classification\n",
8+
"Problem statement: This project aims to predict human activity \n",
9+
"1-Walking, \n",
10+
"2-Walking upstairs, \n",
11+
"3-Walking downstairs, \n",
12+
"4-Sitting, \n",
13+
"5-Standing or \n",
14+
"6-Laying \n",
15+
"by using the smartphone’s sensors. Meaning that by using the following methods, the smartphone can detect what \n",
16+
"we are doing at the moment."
17+
]
18+
},
19+
{
20+
"cell_type": "code",
21+
"execution_count": null,
22+
"metadata": {},
23+
"outputs": [],
24+
"source": [
25+
"##dataset https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones\n",
26+
"# lstm model\n",
27+
"from numpy import mean\n",
28+
"from numpy import std\n",
29+
"from numpy import dstack\n",
30+
"from pandas import read_csv\n",
31+
"from keras.models import Sequential\n",
32+
"from keras.layers import Dense\n",
33+
"from keras.layers import Flatten\n",
34+
"from keras.layers import Dropout\n",
35+
"from keras.layers import LSTM\n",
36+
"from keras.utils import to_categorical\n",
37+
"from matplotlib import pyplot"
38+
]
39+
},
40+
{
41+
"cell_type": "code",
42+
"execution_count": 2,
43+
"metadata": {},
44+
"outputs": [],
45+
"source": [
46+
# load a single file as a numpy array
def load_file(filepath):
    """Read one whitespace-delimited, headerless text file into an array.

    Parameters
    ----------
    filepath : str or path-like
        Path to a headerless, whitespace-separated text file (one HAR
        signal file or a label file).

    Returns
    -------
    numpy.ndarray
        2-D array of the file's values (rows x columns).
    """
    # sep=r'\s+' replaces delim_whitespace=True, which is deprecated
    # since pandas 2.2 (identical parsing behavior)
    dataframe = read_csv(filepath, header=None, sep=r'\s+')
    return dataframe.values
50+
]
51+
},
52+
{
53+
"cell_type": "code",
54+
"execution_count": 3,
55+
"metadata": {},
56+
"outputs": [],
57+
"source": [
58+
# load a list of files and return as a 3d numpy array
def load_group(filenames, prefix=''):
    """Load several signal files and stack them depth-wise.

    Each file yields a 2-D (samples x timesteps) array; stacking puts the
    per-file signals along the 3rd dimension, giving
    (samples, timesteps, n_files).
    """
    arrays = [load_file(prefix + name) for name in filenames]
    # stack group so that features are the 3rd dimension
    return dstack(arrays)
67+
]
68+
},
69+
{
70+
"cell_type": "code",
71+
"execution_count": 4,
72+
"metadata": {},
73+
"outputs": [],
74+
"source": [
75+
# load a dataset group, such as train or test
def load_dataset_group(group, prefix=''):
    """Load the inputs (X) and labels (y) for one split ('train' or 'test').

    Reads the nine inertial-signal files — total acceleration, body
    acceleration and body gyroscope, each with x/y/z axes — plus the
    matching label file.
    """
    signals_dir = prefix + group + '/Inertial Signals/'
    # build the nine file names in the fixed order: total acc, body acc,
    # body gyro, with x/y/z for each kind
    filenames = [
        kind + '_' + axis + '_' + group + '.txt'
        for kind in ('total_acc', 'body_acc', 'body_gyro')
        for axis in ('x', 'y', 'z')
    ]
    # load input data
    X = load_group(filenames, signals_dir)
    # load class output
    y = load_file(prefix + group + '/y_' + group + '.txt')
    return X, y
91+
]
92+
},
93+
{
94+
"cell_type": "code",
95+
"execution_count": 5,
96+
"metadata": {},
97+
"outputs": [],
98+
"source": [
99+
# load the dataset, returns train and test X and y elements
def load_dataset(prefix=''):
    """Load the HAR train/test splits and one-hot encode the labels.

    Returns (trainX, trainy, testX, testy). Shapes are printed as a
    sanity check while loading.
    """
    # load all train
    trainX, trainy = load_dataset_group('train', prefix + 'HARDataset/')
    print(trainX.shape, trainy.shape)
    # load all test
    testX, testy = load_dataset_group('test', prefix + 'HARDataset/')
    print(testX.shape, testy.shape)
    # labels arrive as 1..6; shift to 0..5, then one-hot encode
    trainy = to_categorical(trainy - 1)
    testy = to_categorical(testy - 1)
    print(trainX.shape, trainy.shape, testX.shape, testy.shape)
    return trainX, trainy, testX, testy
115+
"\n"
116+
]
117+
},
118+
{
119+
"cell_type": "code",
120+
"execution_count": 6,
121+
"metadata": {},
122+
"outputs": [],
123+
"source": [
124+
# fit and evaluate a model
def evaluate_model(trainX, trainy, testX, testy):
    """Train a single-layer LSTM classifier and return its test accuracy.

    The network shape is derived from the data: window length and signal
    count from trainX, class count from trainy.
    """
    verbose = 0
    epochs = 15
    batch_size = 64
    n_timesteps = trainX.shape[1]
    n_features = trainX.shape[2]
    n_outputs = trainy.shape[1]
    network = Sequential()
    network.add(LSTM(100, input_shape=(n_timesteps, n_features)))
    network.add(Dropout(0.5))
    network.add(Dense(100, activation='relu'))
    network.add(Dense(n_outputs, activation='softmax'))
    network.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # fit network
    network.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose)
    # evaluate model on the held-out test split
    _, accuracy = network.evaluate(testX, testy, batch_size=batch_size, verbose=0)
    return accuracy
139+
]
140+
},
141+
{
142+
"cell_type": "code",
143+
"execution_count": 7,
144+
"metadata": {},
145+
"outputs": [],
146+
"source": [
147+
# summarize scores
def summarize_results(scores):
    """Print the raw scores followed by their mean and standard deviation."""
    print(scores)
    avg = mean(scores)
    spread = std(scores)
    print('Accuracy: %.3f%% (+/-%.3f)' % (avg, spread))
152+
]
153+
},
154+
{
155+
"cell_type": "code",
156+
"execution_count": 8,
157+
"metadata": {},
158+
"outputs": [],
159+
"source": [
160+
# run an experiment
def run_experiment(repeats=10):
    """Train and evaluate the model `repeats` times, then summarize scores.

    The dataset is loaded once; each repeat retrains from scratch on the
    same splits to measure run-to-run variance.
    """
    # load data
    trainX, trainy, testX, testy = load_dataset()
    scores = []
    for run_index in range(repeats):
        accuracy_pct = evaluate_model(trainX, trainy, testX, testy) * 100.0
        print('>#%d: %.3f' % (run_index + 1, accuracy_pct))
        scores.append(accuracy_pct)
    # summarize results
    summarize_results(scores)
173+
]
174+
},
175+
{
176+
"cell_type": "code",
177+
"execution_count": 9,
178+
"metadata": {},
179+
"outputs": [
180+
{
181+
"name": "stdout",
182+
"output_type": "stream",
183+
"text": [
184+
"(7352, 128, 9) (7352, 1)\n",
185+
"(2947, 128, 9) (2947, 1)\n",
186+
"(7352, 128, 9) (7352, 6) (2947, 128, 9) (2947, 6)\n",
187+
">#1: 90.261\n",
188+
">#2: 90.770\n",
189+
">#3: 88.293\n",
190+
">#4: 91.856\n",
191+
">#5: 90.363\n",
192+
">#6: 90.702\n",
193+
">#7: 91.144\n",
194+
">#8: 91.110\n",
195+
">#9: 89.515\n",
196+
">#10: 90.906\n",
197+
"[90.26128053665161, 90.77027440071106, 88.29317688941956, 91.85612201690674, 90.3630793094635, 90.7024085521698, 91.14353656768799, 91.10960364341736, 89.51476216316223, 90.90600609779358]\n",
198+
"Accuracy: 90.492% (+/-0.939)\n"
199+
]
200+
}
201+
],
202+
"source": [
203+
"\n",
204+
"# run the experiment\n",
205+
"run_experiment()"
206+
]
207+
}
208+
],
209+
"metadata": {
210+
"kernelspec": {
211+
"display_name": "Python 3",
212+
"language": "python",
213+
"name": "python3"
214+
},
215+
"language_info": {
216+
"codemirror_mode": {
217+
"name": "ipython",
218+
"version": 3
219+
},
220+
"file_extension": ".py",
221+
"mimetype": "text/x-python",
222+
"name": "python",
223+
"nbconvert_exporter": "python",
224+
"pygments_lexer": "ipython3",
225+
"version": "3.7.6"
226+
}
227+
},
228+
"nbformat": 4,
229+
"nbformat_minor": 4
230+
}
Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
# Human-activity-recognition-using-LSTM-network
2+
3+
Human Activity Recognition, or HAR for short, is the problem of predicting what a person is doing based on a trace of their movement using sensors.
4+
5+
## Objective:
6+
This problem is about classifying sequences of accelerometer data recorded by smartphones into known, well-defined movements.
7+
8+
The raw data is not available, a pre-processed version of the dataset was made available. The pre-processing steps included:
9+
10+
1. Pre-processing accelerometer and gyroscope using noise filters.
11+
2. Splitting data into fixed windows of 2.56 seconds (128 data points) with 50% overlap. Splitting of accelerometer data into gravitational (total) and body motion components.
12+
13+
The dataset can be found at the UCI Machine Learning Repository: https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones
14+
15+
### About the dataset
16+
17+
"The experiments have been carried out with a group of 30 volunteers within an age bracket of 19-48 years. Each person performed six activities (WALKING, WALKING_UPSTAIRS, WALKING_DOWNSTAIRS, SITTING, STANDING, LAYING) wearing a smartphone (Samsung Galaxy S II) on the waist. Using its embedded accelerometer and gyroscope, we captured 3-axial linear acceleration and 3-axial angular velocity at a constant rate of 50Hz. The experiments have been video-recorded to label the data manually. The obtained dataset has been randomly partitioned into two sets, where 70% of the volunteers was selected for generating the training data and 30% the test data." (taken from above mentioned link)
18+

0 commit comments

Comments
 (0)
0