In the name of God

House Price Estimation Regression with Multiple Inputs
Images + Structured Data

In [1]:
# import the necessary packages
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam
import pandas as pd
import numpy as np
import glob
import cv2
import os
import locale
Using TensorFlow backend.
In [2]:
inputPath = "D:/dataset/Houses-dataset/Houses Dataset/HousesInfo.txt"
datasetPath = "D:/dataset/Houses-dataset/Houses Dataset"

cols = ["bedrooms", "bathrooms", "area", "zipcode", "price"]
df = pd.read_csv(inputPath, sep=" ", header=None, names=cols)


zipcodes, counts = np.unique(df["zipcode"], return_counts=True)

# loop over each of the unique zip codes and their corresponding
# count
for (zipcode, count) in zip(zipcodes, counts):
    # the zip code counts for our housing dataset are *extremely*
    # unbalanced (some zip codes have only 1 or 2 houses), so let's
    # sanitize the data by removing any zip code with fewer than
    # 25 houses
    if count < 25:
        idxs = df[df["zipcode"] == zipcode].index
        df.drop(idxs, inplace=True)
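The same sanitization can be written more compactly with pandas alone; a minimal sketch (not from the original notebook) with the same effect as the loop above:

# keep only zip codes that appear in at least 25 rows
df = df.groupby("zipcode").filter(lambda g: len(g) >= 25)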
In [3]:
# initialize our images array (i.e., the house images themselves)
images = []

# loop over the indexes of the houses
for i in df.index.values:
    # find the four images for the house and sort the file paths,
    # ensuring the four are always in the *same order*
    basePath = os.path.sep.join([datasetPath, "{}_*".format(i + 1)])
    housePaths = sorted(list(glob.glob(basePath)))
    # initialize our list of input images along with the output image
    # after *combining* the four input images
    inputImages = []
    outputImage = np.zeros((64, 64, 3), dtype="uint8")

    # loop over the input house paths
    for housePath in housePaths:
        # load the input image, resize it to 32x32, and then
        # update the list of input images
        image = cv2.imread(housePath)
        image = cv2.resize(image, (32, 32))
        inputImages.append(image)

    # tile the four input images in the output image such that the
    # first image goes in the top-left corner, the second image in
    # the top-right corner, the third image in the bottom-right
    # corner, and the final image in the bottom-left corner
    outputImage[0:32, 0:32] = inputImages[0]
    outputImage[0:32, 32:64] = inputImages[1]
    outputImage[32:64, 32:64] = inputImages[2]
    outputImage[32:64, 0:32] = inputImages[3]

    # add the tiled image to our set of images the network will be
    # trained on
    images.append(outputImage)
images = np.array(images)
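As a quick sanity check (illustrative, not part of the original run), the array should now hold one 64x64x3 montage per remaining house:

# 271 training + 91 testing houses survive the zip code filter
print(images.shape)  # expected: (362, 64, 64, 3)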
In [4]:
images = images / 255.0
In [5]:
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
split = train_test_split(df, images, test_size=0.25, random_state=42)
(trainAttrX, testAttrX, trainImagesX, testImagesX) = split
In [6]:
# find the largest house price in the training set and use it to
# scale our house prices to the range [0, 1] (will lead to better
# training and convergence)
maxPrice = trainAttrX["price"].max()
trainY = trainAttrX["price"] / maxPrice
testY = testAttrX["price"] / maxPrice
In [7]:
# initialize the column names of the continuous data
continuous = ["bedrooms", "bathrooms", "area"]

# perform min-max scaling on each continuous feature column,
# mapping it to the range [0, 1]
cs = MinMaxScaler()
trainContinuous = cs.fit_transform(trainAttrX[continuous])
testContinuous = cs.transform(testAttrX[continuous])

# one-hot encode the zip code categorical data (by definition of
# one-hot encoding, all output features are now in the range [0, 1])
zipBinarizer = LabelBinarizer().fit(df["zipcode"])
trainCategorical = zipBinarizer.transform(trainAttrX["zipcode"])
testCategorical = zipBinarizer.transform(testAttrX["zipcode"])

# construct our training and testing data points by concatenating
# the categorical features with the continuous features
trainAttrX = np.hstack([trainCategorical, trainContinuous])
testAttrX = np.hstack([testCategorical, testContinuous])
C:\Users\alire\Miniconda3\envs\tensorflow\lib\site-packages\sklearn\preprocessing\data.py:323: DataConversionWarning: Data with input dtype int64, float64 were all converted to float64 by MinMaxScaler.
  return self.partial_fit(X, y)
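At this point the attribute matrix should have 10 columns: 7 one-hot zip-code columns plus the 3 scaled continuous columns. A quick illustrative check:

print(trainAttrX.shape, testAttrX.shape)  # expected: (271, 10) (91, 10)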
In [12]:
dim = trainAttrX.shape[1]  # 10 = 7 one-hot zip-code columns + 3 continuous features

# define our MLP network
mlp = Sequential()
mlp.add(Dense(8, input_dim=dim, activation="relu"))
mlp.add(Dense(4, activation="relu"))
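The parameter counts are easy to verify by hand: the first Dense layer has 10*8 + 8 = 88 parameters and the second 8*4 + 4 = 36. Note the branch deliberately ends with 4 nodes so it can later be concatenated with the CNN branch:

mlp.summary()  # should report 88 and 36 parameters for the two Dense layers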
In [13]:
from keras.models import Model, Input
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Activation, Dense, Flatten, Dropout
from keras.layers.normalization import BatchNormalization

width, height, depth = 64, 64, 3
filters = (16, 32, 64)
# initialize the input shape and channel dimension, assuming
# TensorFlow/channels-last ordering
inputShape = (height, width, depth)
chanDim = -1

# define the model input
inputs = Input(shape=inputShape)

# loop over the number of filters
for (i, f) in enumerate(filters):
    # if this is the first CONV layer then set the input
    # appropriately
    if i == 0:
        x = inputs

    # CONV => RELU => BN => POOL
    x = Conv2D(f, (3, 3), padding="same")(x)
    x = Activation("relu")(x)
    x = BatchNormalization(axis=chanDim)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    
# flatten the volume, then FC => RELU => BN => DROPOUT
x = Flatten()(x)
x = Dense(16)(x)
x = Activation("relu")(x)
x = BatchNormalization(axis=chanDim)(x)
x = Dropout(0.5)(x)

# apply another FC layer, this one to match the number of nodes
# coming out of the MLP
x = Dense(4)(x)
x = Activation("relu")(x)

# construct the CNN
cnn = Model(inputs, x)
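For reuse, the same branch can be wrapped in a helper; a minimal sketch (the name create_cnn is illustrative, not part of the original notebook):

def create_cnn(width, height, depth, filters=(16, 32, 64)):
    # build the same CONV => RELU => BN => POOL stack as above
    inputs = Input(shape=(height, width, depth))
    x = inputs
    for f in filters:
        x = Conv2D(f, (3, 3), padding="same")(x)
        x = Activation("relu")(x)
        x = BatchNormalization(axis=-1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
    # flatten, FC => RELU => BN => DROPOUT, then the 4-node head
    x = Flatten()(x)
    x = Dense(16)(x)
    x = Activation("relu")(x)
    x = BatchNormalization(axis=-1)(x)
    x = Dropout(0.5)(x)
    x = Dense(4)(x)
    x = Activation("relu")(x)
    return Model(inputs, x)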
In [14]:
from keras.layers import concatenate
# create the input to our final set of layers as the *output* of both
# the MLP and CNN
combinedInput = concatenate([mlp.output, cnn.output])

# our final FC layer head will have two dense layers, the final one
# being our regression head
x = Dense(4, activation="relu")(combinedInput)
x = Dense(1, activation="linear")(x)

model = Model(inputs=[mlp.input, cnn.input], outputs=x)
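The combined two-branch architecture can be inspected with model.summary(); keras.utils.plot_model also works if pydot and graphviz are installed:

model.summary()
# from keras.utils import plot_model
# plot_model(model, to_file="multi_input_model.png", show_shapes=True)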
In [15]:
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)
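For reference, the mean absolute percentage error being minimized is

$$\mathrm{MAPE} = \frac{100}{n} \sum_{i=1}^{n} \left| \frac{y_i - \hat{y}_i}{y_i} \right|$$

so the loss values in the training log below read directly as percentages of the scaled house price.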
 
In [12]:
# train the model
model.fit(
    [trainAttrX, trainImagesX], trainY,
    validation_data=([testAttrX, testImagesX], testY),
    epochs=200, batch_size=8)
Train on 271 samples, validate on 91 samples
Epoch 1/200
271/271 [==============================] - 4s 13ms/step - loss: 648.9047 - val_loss: 1557.0419
Epoch 2/200
271/271 [==============================] - 1s 2ms/step - loss: 295.0318 - val_loss: 435.3303
Epoch 3/200
271/271 [==============================] - 1s 2ms/step - loss: 263.3781 - val_loss: 89.4173
...
Epoch 199/200
271/271 [==============================] - 1s 2ms/step - loss: 19.4204 - val_loss: 25.7963
Epoch 200/200
271/271 [==============================] - 1s 2ms/step - loss: 19.6354 - val_loss: 21.8603
Out[12]:
<keras.callbacks.History at 0x294eac8bda0>
In [13]:
# make predictions on the testing data
preds = model.predict([testAttrX, testImagesX])
In [14]:
# compute the difference between the *predicted* house prices and the
# *actual* house prices, then compute the percentage difference and
# the absolute percentage difference
diff = preds.flatten() - testY
percentDiff = (diff / testY) * 100
absPercentDiff = np.abs(percentDiff)
 
# compute the mean and standard deviation of the absolute percentage
# difference
mean = np.mean(absPercentDiff)
std = np.std(absPercentDiff)
 
# finally, show some statistics on our model
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
print("[INFO] avg. house price: {}, std house price: {}".format(
    locale.currency(df["price"].mean(), grouping=True),
    locale.currency(df["price"].std(), grouping=True)))
print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(mean, std))
[INFO] avg. house price: $533,388.27, std house price: $493,403.08
[INFO] mean: 21.86%, std: 18.29%
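Because the targets were scaled by maxPrice, individual predictions can be mapped back to dollars; an illustrative sketch:

# show the first few predictions next to the ground truth, in dollars
for (pred, actual) in list(zip(preds.flatten(), testY))[:5]:
    print("predicted: {}, actual: {}".format(
        locale.currency(pred * maxPrice, grouping=True),
        locale.currency(actual * maxPrice, grouping=True)))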
Introductory Deep Learning Course
Alireza Akhavanpour
Thursday, Bahman 25, 1397 (February 14, 2019)
Class.Vision - AkhavanPour.ir - GitHub