martinnnuez committed on
Commit
5fe24db
·
1 Parent(s): b9ae369

Upload 6 files

Files changed (7)
  1. .gitattributes +1 -0
  2. app.py +95 -0
  3. dv.bin +3 -0
  4. model.None +3 -0
  5. predict.ipynb +160 -0
  6. requirements.txt +5 -0
  7. scaler.bin +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model.None filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,103 @@
+ import pickle
+ import platform
+ import pathlib
+ import pandas as pd
+ import numpy as np
+ import xgboost as xgb
+ import gradio as gr
+
+ # Artifacts pickled on Windows may contain WindowsPath objects; remap them on Linux.
+ if platform.system() == 'Linux':
+     pathlib.WindowsPath = pathlib.PosixPath
+
+ # load the trained XGBoost booster
+ model_path = "model.None"
+ model = xgb.Booster()
+ model.load_model(model_path)
+
+ # load the fitted DictVectorizer for the categorical features
+ dv_path = "dv.bin"
+ with open(dv_path, 'rb') as f_out:
+     dv = pickle.load(f_out)
+
+ # load the fitted scaler for the numerical features
+ scaler_path = "scaler.bin"
+ with open(scaler_path, 'rb') as f_out:
+     scaler = pickle.load(f_out)
+
+ def preprocess(data):
+     """Preprocess a single input record into the model's feature matrix."""
+     # turn the JSON-like input into a one-row dataframe
+     data = pd.DataFrame([data])
+
+     # define numerical and categorical features
+     numerical = ["X1", "X2", "X3", "X4", "X5", "X7"]
+     categorical = ["X6", "X8"]
+
+     # scale the numerical features
+     X_num = scaler.transform(data[numerical])
+     # one-hot encode the categorical features
+     data[categorical] = data[categorical].astype("string")
+     X_dicts = data[categorical].to_dict(orient="records")
+     X_cat = dv.transform(X_dicts)
+     # concatenate both blocks
+     X = np.concatenate((X_num, X_cat), axis=1)
+
+     return X
+
+
+ def predict(X):
+     """Make a prediction from an xgb.DMatrix."""
+     pred = model.predict(X)
+     print('prediction', pred[0])
+     return float(pred[0])
+
+ def main(X1, X2, X3, X4, X5, X6, X7, X8):
+     """Collect the inputs, preprocess them and return the predicted heating load."""
+     input_data = {
+         "X1": X1,
+         "X2": X2,
+         "X3": X3,
+         "X4": X4,
+         "X5": X5,
+         "X6": X6,
+         "X7": X7,
+         "X8": X8
+     }
+     features = preprocess(input_data)
+     features_2 = xgb.DMatrix(features)
+     pred = predict(features_2)
+
+     result = {'heat load': pred}  # unused here; the Textbox displays the raw prediction
+
+     return pred
+
+ # Unused helper: learn and categories are not defined in this app and the
+ # function is never wired into the Gradio interface.
+ def classify_image(img):
+     pred, idx, probs = learn.predict(img)
+     return dict(zip(categories, map(float, probs)))
+
+ # create input and output objects (legacy Gradio component namespace)
+ # inputs
+ input1 = gr.inputs.Number()
+ input2 = gr.inputs.Number()
+ input3 = gr.inputs.Number()
+ input4 = gr.inputs.Number()
+ input5 = gr.inputs.Number()
+ input6 = gr.inputs.Number()
+ input7 = gr.inputs.Number()
+ input8 = gr.inputs.Number()
+
+ # output object
+ output = gr.outputs.Textbox()
+
+ intf = gr.Interface(title="Energy Efficiency",
+                     description="The objective of this project is to predict the Heating Load based on various building features.",
+                     fn=main,
+                     inputs=[input1, input2, input3, input4, input5, input6, input7, input8],
+                     outputs=[output],
+                     live=True,
+                     enable_queue=True
+                     )
+ intf.launch()
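
app.py uses the legacy gr.inputs / gr.outputs namespace and the enable_queue argument; both were deprecated in Gradio 3.x and the namespace was removed in Gradio 4. A minimal sketch of the equivalent interface with the current top-level components (assuming a recent gradio release; main is the prediction function defined above, and the labels are illustrative):

    # Equivalent interface with current Gradio components (not part of this commit).
    import gradio as gr

    inputs = [gr.Number(label=f"X{i}") for i in range(1, 9)]  # X1..X8
    output = gr.Textbox(label="Heating load")

    intf = gr.Interface(
        fn=main,
        inputs=inputs,
        outputs=output,
        title="Energy Efficiency",
        description="Predict the Heating Load from building features.",
        live=True,
    )
    intf.queue()   # replaces the removed enable_queue=True argument
    intf.launch()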
dv.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eef98b808540e1d26b0de3b99d3fec1014b2086de88e3b89687974be202df9a1
+ size 323
model.None ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8291f264931fb723654a8aa531e560b9f8e9a617dc0726beebcc32a93751cce9
+ size 2764170
predict.ipynb ADDED
@@ -0,0 +1,160 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 8,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import pickle\n",
+     "import pandas as pd\n",
+     "import numpy as np"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 9,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import xgboost as xgb\n",
+     "model_path = \"model.None\"\n",
+     "model = xgb.Booster()\n",
+     "model.load_model(model_path)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "dv_path = \"dv.bin\"\n",
+     "with open(dv_path, 'rb') as f_out:\n",
+     "    dv = pickle.load(f_out)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 6,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "scaler_path = \"scaler.bin\"\n",
+     "with open(scaler_path, 'rb') as f_out:\n",
+     "    scaler = pickle.load(f_out)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 10,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "def preprocess(data):\n",
+     "    \"\"\"Preprocessing of the data\"\"\"\n",
+     "    # turn json input to dataframe\n",
+     "    data = pd.DataFrame([data])\n",
+     "\n",
+     "    # define numerical and categorical features\n",
+     "    numerical = [\"X1\", \"X2\", \"X3\", \"X4\", \"X5\", \"X7\"]\n",
+     "    categorical = [\"X6\", \"X8\"]\n",
+     "\n",
+     "    # preprocess numerical features\n",
+     "    X_num = scaler.transform(data[numerical])\n",
+     "    # preprocess categorical features\n",
+     "    data[categorical] = data[categorical].astype(\"string\")\n",
+     "    X_dicts = data[categorical].to_dict(orient=\"records\")\n",
+     "    X_cat = dv.transform(X_dicts)\n",
+     "    # concatenate both\n",
+     "    X = np.concatenate((X_num, X_cat), axis=1)\n",
+     "\n",
+     "    return X\n",
+     "\n",
+     "\n",
+     "def predict(X):\n",
+     "    \"\"\"make predictions\"\"\"\n",
+     "    pred = model.predict(X)\n",
+     "    print('prediction', pred[0])\n",
+     "    return float(pred[0])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 17,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "def main(input_data):\n",
+     "    \"\"\"request input, preprocess it and make prediction\"\"\"\n",
+     "    features = preprocess(input_data)\n",
+     "    features_2 = xgb.DMatrix(features)\n",
+     "    pred = predict(features_2)\n",
+     "\n",
+     "    result = {'heat load': pred}\n",
+     "\n",
+     "    return result\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 18,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "prediction 15.648413\n"
+      ]
+     },
+     {
+      "data": {
+       "text/plain": [
+        "{'heat load': 15.648412704467773}"
+       ]
+      },
+      "execution_count": 18,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "input_example = {\n",
+     "    \"X1\": 0.98,\n",
+     "    \"X2\": 514.50,\n",
+     "    \"X3\": 294.00,\n",
+     "    \"X4\": 110.25,\n",
+     "    \"X5\": 7.00,\n",
+     "    \"X6\": 2,\n",
+     "    \"X7\": 0.00,\n",
+     "    \"X8\": 0,\n",
+     "}\n",
+     "\n",
+     "main(input_example)"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "mlops",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.10.8"
+   },
+   "orig_nbformat": 4
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ mlflow<3,>=2.1
+ pandas==1.5.2
+ scikit-learn==1.2.0
+ xgboost==1.7.2
+ # pickle is part of the Python standard library; it is not a pip-installable package
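
app.py also imports gradio and numpy, and unpickling dv.bin / scaler.bin needs scikit-learn at a compatible version, yet gradio and numpy are not listed above. A sketch of a fuller requirements list covering all of app.py's imports (the gradio and numpy entries are assumptions, not taken from this commit):

    mlflow<3,>=2.1
    pandas==1.5.2
    numpy          # assumed entry; imported by app.py but unpinned here
    scikit-learn==1.2.0
    xgboost==1.7.2
    gradio<4       # assumed pin; app.py uses the gr.inputs/gr.outputs API removed in Gradio 4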
scaler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0acf51e56ef2b71f5fab9f24d0c36e0d246b5832c597bdd86e2094c543a3a87
+ size 710
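
dv.bin and scaler.bin are pickled preprocessing objects that app.py loads at startup. A minimal sketch of how such artifacts are typically produced at training time; the DictVectorizer/StandardScaler pairing is inferred from the loading code (the exact scaler class is not visible in this commit), and train_df is a hypothetical training dataframe, not part of the upload:

    import pickle
    import pandas as pd
    from sklearn.feature_extraction import DictVectorizer
    from sklearn.preprocessing import StandardScaler

    # Hypothetical training frame with the same columns the app expects.
    train_df = pd.DataFrame({
        "X1": [0.98, 0.74], "X2": [514.5, 686.0], "X3": [294.0, 245.0],
        "X4": [110.25, 220.5], "X5": [7.0, 3.5], "X6": [2, 3],
        "X7": [0.0, 0.25], "X8": [0, 3],
    })

    # Fit the scaler on the numerical columns and the DictVectorizer on the
    # categorical ones, mirroring preprocess() in app.py (StandardScaler is an assumption).
    scaler = StandardScaler().fit(train_df[["X1", "X2", "X3", "X4", "X5", "X7"]])
    dv = DictVectorizer(sparse=False).fit(
        train_df[["X6", "X8"]].astype("string").to_dict(orient="records")
    )

    # Serialize the fitted objects the same way app.py expects to load them.
    with open("scaler.bin", "wb") as f_out:
        pickle.dump(scaler, f_out)
    with open("dv.bin", "wb") as f_out:
        pickle.dump(dv, f_out)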