This repository has been archived by the owner on May 8, 2024. It is now read-only.

Commit

Merge pull request #4 from oneapi-src/main-upstream
Main upstream
aagalleg authored May 8, 2024
2 parents 565efd1 + 0cbd7ed commit da2335e
Showing 5 changed files with 176 additions and 38 deletions.
141 changes: 141 additions & 0 deletions .gitignore
@@ -0,0 +1,141 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

data/
output/
.vscode
14 changes: 7 additions & 7 deletions README.md
@@ -168,13 +168,13 @@ conda config --set solver libmamba

| Packages | Version |
| -------- | ------- |
| intelpython3_core | 2024.0.0 |
| python | 3.9.18 |
| intelpython3_core | 2024.0.0 |
| intel-aikit-tensorflow | 2024.0 |
| tqdm | 4.64.0 |
| pip | 23.3 |
| opencv-python | 4.8.0.76 |
| intelpython3_core | 2024.1.0 |
| python | 3.9 |
| intelpython3_core | 2024.1.0 |
| intel-aikit-tensorflow | 2024.1 |
| tqdm | 4.66.2 |
| pip | 24.0 |
| opencv-python | 4.9.0.80 |

The dependencies required to properly execute this workflow can be found in the yml file [$WORKSPACE/env/intel_env.yml](env/intel_env.yml).

12 changes: 6 additions & 6 deletions env/intel_env.yml
@@ -3,10 +3,10 @@ channels:
- intel
- conda-forge
dependencies:
- intelpython3_core=2024.0.0
- python=3.9.18
- intel-aikit-tensorflow=2024.0
- tqdm=4.64.0
- pip=23.3
- intelpython3_core=2024.1.0
- python=3.9
- intel-aikit-tensorflow=2024.1
- tqdm=4.66.2
- pip=24.0
- pip:
- opencv-python==4.8.0.76
- opencv-python==4.9.0.80
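
The README table and env/intel_env.yml now pin intelpython3_core 2024.1.0, intel-aikit-tensorflow 2024.1, tqdm 4.66.2, pip 24.0 and opencv-python 4.9.0.80. As a quick sanity check of the pip-visible pins, a minimal sketch (not repository code, assuming the environment from env/intel_env.yml is active):

from importlib.metadata import version, PackageNotFoundError

# Conda meta-packages such as intelpython3_core and intel-aikit-tensorflow are
# better verified with `conda list`; they may not expose pip metadata.
expected = {"tqdm": "4.66.2", "pip": "24.0", "opencv-python": "4.9.0.80"}
for name, want in expected.items():
    try:
        got = version(name)
    except PackageNotFoundError:
        got = "not installed"
    print(f"{name}: installed {got}, pinned {want}")
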
23 changes: 11 additions & 12 deletions src/intel_neural_compressor/utils.py
@@ -44,8 +44,8 @@
"vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"

cryptogen = SystemRandom()
class_colors = [(cryptogen.randint(0, 255), cryptogen.randint(
0, 255), cryptogen.randint(0, 255)) for _ in range(5000)]
class_colors = [np.array((cryptogen.randint(0, 255), cryptogen.randint(
0, 255), cryptogen.randint(0, 255))) for _ in range(5000)]


def get_colored_segmentation_image(seg_arr, n_classes, colors=None):
@@ -58,9 +58,8 @@ def get_colored_segmentation_image(seg_arr, n_classes, colors=None):
seg_img = np.zeros((output_height, output_width, 3))

for c in range(n_classes):
seg_img[:, :, 0] += ((seg_arr[:, :] == c) * (colors[c][0])).astype('uint8')
seg_img[:, :, 1] += ((seg_arr[:, :] == c) * (colors[c][1])).astype('uint8')
seg_img[:, :, 2] += ((seg_arr[:, :] == c) * (colors[c][2])).astype('uint8')
mask = (seg_arr == c)
seg_img[mask] += colors[c].astype('uint8')

return seg_img
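
With class_colors built as np.array triples, the loop above can paint all three channels of every pixel of a class in one masked addition instead of three per-channel assignments. A minimal sketch of the same pattern on toy data (the names colors, seg_arr and seg_img mirror the diff; the values are made up):

import numpy as np
from random import SystemRandom

cryptogen = SystemRandom()
colors = [np.array((cryptogen.randint(0, 255),
                    cryptogen.randint(0, 255),
                    cryptogen.randint(0, 255))) for _ in range(3)]

seg_arr = np.array([[0, 1],
                    [2, 1]])                     # toy 2x2 class-index map
seg_img = np.zeros((2, 2, 3))
for c in range(3):
    mask = (seg_arr == c)                        # boolean mask selects pixels of class c
    seg_img[mask] += colors[c].astype('uint8')   # one add covers R, G and B
print(seg_img)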

@@ -142,7 +141,7 @@ def get_segmentation_array(image_input, nclasses, width, height, no_reshape=Fals
raise Exception(f"get_segmentation_array: Can't process input type {str(type(image_input))}")

img = cv2.resize(img, (width, height), interpolation=cv2.INTER_NEAREST)
img = img[:, :, 0]
img = img.mean(axis=-1)

for c in range(nclasses):
seg_labels[:, :, c] = (img == c).astype(int)
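
img.mean(axis=-1) collapses the resized annotation to a single plane; for label images whose three channels carry the same class index this matches the previous img[:, :, 0], while channels that disagree would average to a non-integer and match no class. A minimal sketch of the one-hot step on toy data (shapes and values are made up):

import numpy as np

nclasses, height, width = 3, 2, 2
labels = np.array([[0, 2],
                   [1, 1]])
img = np.stack([labels] * 3, axis=-1).astype(float)   # RGB copies of the class indices

img = img.mean(axis=-1)                               # back to one class-index plane
seg_labels = np.zeros((height, width, nclasses))
for c in range(nclasses):
    seg_labels[:, :, c] = (img == c).astype(int)
print(seg_labels[:, :, 1])                            # one-hot plane for class 1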
@@ -335,7 +334,9 @@ def evaluate(model=None, inp_images=None, annotations=None, inp_images_dir=None,
fn[cl_i] += np.sum((pr != cl_i) * (gt == cl_i))
n_pixels[cl_i] += np.sum(gt == cl_i)

cl_wise_score = tp / (tp + fp + fn + 0.000000000001)
cl_wise_score = tp / (tp + fp + fn)
cl_wise_score = np.nan_to_num(cl_wise_score, nan=0.0)

n_pixels_norm = n_pixels / np.sum(n_pixels)
frequency_weighted_iu = np.sum(cl_wise_score * n_pixels_norm)
mean_iu = np.mean(cl_wise_score)
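
Dropping the 1e-12 term gives the exact IoU ratio; the NaN guard then handles the 0/0 case of a class absent from both prediction and ground truth by scoring it 0.0. A minimal sketch with made-up counts (not repository code):

import numpy as np

tp = np.array([10.0, 0.0])            # class 1 never predicted and never present
fp = np.array([2.0, 0.0])
fn = np.array([3.0, 0.0])

with np.errstate(invalid="ignore"):   # quieten the 0/0 RuntimeWarning
    cl_wise_score = tp / (tp + fp + fn)
cl_wise_score = np.nan_to_num(cl_wise_score, nan=0.0)
print(cl_wise_score)                  # [0.66666667 0.        ]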
@@ -466,11 +467,10 @@ def predict(model=None, inp=None, out_fname=None, checkpoints_path=None, overlay
pr = model.predict(np.array([x]))[0]
pr = pr.reshape((output_height, output_width, n_classes)).argmax(axis=2)

seg_img = visualize_segmentation(pr, inp, n_classes=n_classes, colors=colors,
overlay_img=overlay_img, prediction_width=prediction_width,
prediction_height=prediction_height)

if out_fname is not None:
seg_img = visualize_segmentation(pr, inp, n_classes=n_classes, colors=colors,
overlay_img=overlay_img, prediction_width=prediction_width,
prediction_height=prediction_height)
cv2.imwrite(out_fname, seg_img)

return pr
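
visualize_segmentation now runs only inside the out_fname branch, so callers that only need the raw class map skip the overlay rendering and the cv2.imwrite. A minimal sketch of that control flow with hypothetical stand-ins (render_overlay and predict_sketch are not repository functions):

import numpy as np
from typing import Optional

def render_overlay(pr: np.ndarray) -> np.ndarray:
    print("rendering overlay")                       # stands in for visualize_segmentation
    return np.repeat(pr[..., None], 3, axis=-1)

def predict_sketch(pr: np.ndarray, out_fname: Optional[str] = None) -> np.ndarray:
    if out_fname is not None:
        seg_img = render_overlay(pr)
        print(f"writing {seg_img.shape} overlay to {out_fname}")   # cv2.imwrite in the repo
    return pr

pr = np.zeros((4, 4), dtype=int)
predict_sketch(pr)                                   # no rendering, just the class map
predict_sketch(pr, out_fname="overlay.png")          # rendering plus the (simulated) write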
@@ -540,7 +540,6 @@ def train_hyperparameters_tuning(model, train_images, train_annotations, batch_s

start_time = time.time()
hist=model.fit_generator(train_gen, steps_per_epoch, epochs=epochs, workers=1, use_multiprocessing=False)
#model.fit_generator(train_gen, steps_per_epoch, epochs=epochs, workers=1, use_multiprocessing=False)
total_time += time.time()-start_time
print("Fit number: ", ctr, " ==> Time Taken for Training in seconds --> ", time.time()-start_time)
if best_config["accuracy"] < hist.history["accuracy"][0]:
24 changes: 11 additions & 13 deletions src/utils.py
@@ -42,8 +42,8 @@
"vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"

cryptogen = SystemRandom()
class_colors = [(cryptogen.randint(0, 255), cryptogen.randint(
0, 255), cryptogen.randint(0, 255)) for _ in range(5000)]
class_colors = [np.array((cryptogen.randint(0, 255), cryptogen.randint(
0, 255), cryptogen.randint(0, 255))) for _ in range(5000)]


def get_colored_segmentation_image(seg_arr, n_classes, colors=None):
@@ -56,9 +56,8 @@ def get_colored_segmentation_image(seg_arr, n_classes, colors=None):
seg_img = np.zeros((output_height, output_width, 3))

for c in range(n_classes):
seg_img[:, :, 0] += ((seg_arr[:, :] == c) * (colors[c][0])).astype('uint8')
seg_img[:, :, 1] += ((seg_arr[:, :] == c) * (colors[c][1])).astype('uint8')
seg_img[:, :, 2] += ((seg_arr[:, :] == c) * (colors[c][2])).astype('uint8')
mask = (seg_arr == c)
seg_img[mask] += colors[c].astype('uint8')

return seg_img

@@ -140,7 +139,7 @@ def get_segmentation_array(image_input, nclasses, width, height, no_reshape=Fals
raise Exception(f"get_segmentation_array: Can't process input type {str(type(image_input))}")

img = cv2.resize(img, (width, height), interpolation=cv2.INTER_NEAREST)
img = img[:, :, 0]
img = img.mean(axis=-1)

for c in range(nclasses):
seg_labels[:, :, c] = (img == c).astype(int)
@@ -302,7 +301,6 @@ def evaluate(model=None, inp_images=None, annotations=None, inp_images_dir=None,
"output_width": output_width
}, f)

if checkpoints_path is not None:
latest_checkpoint = checkpoints_path # find_latest_checkpoint(checkpoints_path)
if latest_checkpoint is not None:
print("Loading the weights from latest checkpoint ",
@@ -333,7 +331,9 @@ def evaluate(model=None, inp_images=None, annotations=None, inp_images_dir=None,
fn[cl_i] += np.sum((pr != cl_i) * (gt == cl_i))
n_pixels[cl_i] += np.sum(gt == cl_i)

cl_wise_score = tp / (tp + fp + fn + 0.000000000001)
cl_wise_score = tp / (tp + fp + fn)
cl_wise_score = np.nan_to_num(cl_wise_score, nan=0.0)

n_pixels_norm = n_pixels / np.sum(n_pixels)
frequency_weighted_iu = np.sum(cl_wise_score * n_pixels_norm)
mean_iu = np.mean(cl_wise_score)
@@ -463,11 +463,10 @@ def predict(model=None, inp=None, out_fname=None, checkpoints_path=None, overlay
pr = model.predict(np.array([x]))[0]
pr = pr.reshape((output_height, output_width, n_classes)).argmax(axis=2)

seg_img = visualize_segmentation(pr, inp, n_classes=n_classes, colors=colors,
overlay_img=overlay_img, prediction_width=prediction_width,
prediction_height=prediction_height)

if out_fname is not None:
seg_img = visualize_segmentation(pr, inp, n_classes=n_classes, colors=colors,
overlay_img=overlay_img, prediction_width=prediction_width,
prediction_height=prediction_height)
cv2.imwrite(out_fname, seg_img)

return pr
@@ -540,7 +539,6 @@ def train_hyperparameters_tuning(model, train_images, train_annotations, batch_s

start_time = time.time()
hist=model.fit_generator(train_gen, steps_per_epoch, epochs=epochs, workers=1, use_multiprocessing=False)
#model.fit_generator(train_gen, steps_per_epoch, epochs=epochs, workers=1, use_multiprocessing=False)
total_time += time.time()-start_time
print("Fit number: ", ctr, " ==> Time Taken for Training in seconds --> ", time.time()-start_time)
if best_config["accuracy"] < hist.history["accuracy"][0]:
