changeset 1:74fe58ff55a5 draft default tip

planemo upload for repository https://github.com/HMS-IDAC/UnMicst commit e14f76a8803cab0013c6dbe809bc81d7667f2ab9
author goeckslab
date Wed, 07 Sep 2022 23:10:14 +0000
parents 6bec4fef6b2e
children
files UnMicst.py batchUNet2DTMACycif.py batchUNet2DtCycif.py batchUnMicst.py macros.xml models/CytoplasmIncell/checkpoint models/CytoplasmIncell/datasetMean.data models/CytoplasmIncell/datasetStDev.data models/CytoplasmIncell/hp.data models/CytoplasmIncell/model.ckpt.data-00000-of-00001 models/CytoplasmIncell/model.ckpt.index models/CytoplasmIncell/model.ckpt.meta models/CytoplasmIncell2/datasetMean.data models/CytoplasmIncell2/datasetStDev.data models/CytoplasmIncell2/hp.data models/CytoplasmIncell2/model.ckpt.data-00000-of-00001 models/CytoplasmIncell2/model.ckpt.index models/CytoplasmIncell2/model.ckpt.meta models/CytoplasmZeissNikon/checkpoint models/CytoplasmZeissNikon/datasetMean.data models/CytoplasmZeissNikon/datasetStDev.data models/CytoplasmZeissNikon/hp.data models/CytoplasmZeissNikon/model.ckpt.data-00000-of-00001 models/CytoplasmZeissNikon/model.ckpt.index models/CytoplasmZeissNikon/model.ckpt.meta models/mousenucleiDAPI/checkpoint models/mousenucleiDAPI/datasetMean.data models/mousenucleiDAPI/datasetStDev.data models/mousenucleiDAPI/hp.data models/mousenucleiDAPI/model.ckpt.data-00000-of-00001 models/mousenucleiDAPI/model.ckpt.index models/mousenucleiDAPI/model.ckpt.meta models/mousenucleiDAPI/nuclei20x2bin1chan.data-00000-of-00001 models/mousenucleiDAPI/nuclei20x2bin1chan.index models/mousenucleiDAPI/nuclei20x2bin1chan.meta models/nucleiDAPI/checkpoint models/nucleiDAPI/datasetMean.data models/nucleiDAPI/datasetStDev.data models/nucleiDAPI/hp.data models/nucleiDAPI/model.ckpt.data-00000-of-00001 models/nucleiDAPI/model.ckpt.index models/nucleiDAPI/model.ckpt.meta models/nucleiDAPI1-5/checkpoint models/nucleiDAPI1-5/datasetMean.data models/nucleiDAPI1-5/datasetStDev.data models/nucleiDAPI1-5/hp.data models/nucleiDAPI1-5/model.ckpt.index models/nucleiDAPI1-5/model.ckpt.meta models/nucleiDAPILAMIN/checkpoint models/nucleiDAPILAMIN/datasetMean.data models/nucleiDAPILAMIN/datasetStDev.data models/nucleiDAPILAMIN/hp.data models/nucleiDAPILAMIN/model.ckpt.index models/nucleiDAPILAMIN/model.ckpt.meta test-data/105.tif test-data/105_ContoursPM_1.tif test-data/105_NucleiPM_1.tif toolbox/GPUselect.py toolbox/PartitionOfImage.py toolbox/__pycache__/GPUselect.cpython-37.pyc toolbox/__pycache__/PartitionOfImage.cpython-36.pyc toolbox/__pycache__/PartitionOfImage.cpython-37.pyc toolbox/__pycache__/__init__.cpython-36.pyc toolbox/__pycache__/ftools.cpython-36.pyc toolbox/__pycache__/ftools.cpython-37.pyc toolbox/__pycache__/imtools.cpython-36.pyc toolbox/__pycache__/imtools.cpython-37.pyc toolbox/ftools.py toolbox/imtools.py unmicst.xml
diffstat 70 files changed, 69 insertions(+), 3170 deletions(-)
--- a/UnMicst.py	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,674 +0,0 @@
-import numpy as np
-from scipy import misc
-import tensorflow.compat.v1 as tf
-import shutil
-import scipy.io as sio
-import os, fnmatch, glob
-import skimage.exposure as sk
-import skimage.io
-import argparse
-import czifile
-from nd2reader import ND2Reader
-import tifffile
-import sys
-tf.disable_v2_behavior()
-#sys.path.insert(0, 'C:\\Users\\Public\\Documents\\ImageScience')
-
-from toolbox.imtools import *
-from toolbox.ftools import *
-from toolbox.PartitionOfImage import PI2D
-from toolbox import GPUselect
-
-def concat3(lst):
-	return tf.concat(lst, 3)
-
-
-class UNet2D:
-	hp = None  # hyper-parameters
-	nn = None  # network
-	tfTraining = None  # if training or not (to handle batch norm)
-	tfData = None  # data placeholder
-	Session = None
-	DatasetMean = 0
-	DatasetStDev = 0
-
-	def setupWithHP(hp):
-		UNet2D.setup(hp['imSize'],
-					 hp['nChannels'],
-					 hp['nClasses'],
-					 hp['nOut0'],
-					 hp['featMapsFact'],
-					 hp['downSampFact'],
-					 hp['ks'],
-					 hp['nExtraConvs'],
-					 hp['stdDev0'],
-					 hp['nLayers'],
-					 hp['batchSize'])
-
-	def setup(imSize, nChannels, nClasses, nOut0, featMapsFact, downSampFact, kernelSize, nExtraConvs, stdDev0,
-			  nDownSampLayers, batchSize):
-		UNet2D.hp = {'imSize': imSize,
-					 'nClasses': nClasses,
-					 'nChannels': nChannels,
-					 'nExtraConvs': nExtraConvs,
-					 'nLayers': nDownSampLayers,
-					 'featMapsFact': featMapsFact,
-					 'downSampFact': downSampFact,
-					 'ks': kernelSize,
-					 'nOut0': nOut0,
-					 'stdDev0': stdDev0,
-					 'batchSize': batchSize}
-
-		nOutX = [UNet2D.hp['nChannels'], UNet2D.hp['nOut0']]
-		dsfX = []
-		for i in range(UNet2D.hp['nLayers']):
-			nOutX.append(nOutX[-1] * UNet2D.hp['featMapsFact'])
-			dsfX.append(UNet2D.hp['downSampFact'])
-
-		# --------------------------------------------------
-		# downsampling layer
-		# --------------------------------------------------
-
-		with tf.name_scope('placeholders'):
-			UNet2D.tfTraining = tf.placeholder(tf.bool, name='training')
-			UNet2D.tfData = tf.placeholder("float", shape=[None, UNet2D.hp['imSize'], UNet2D.hp['imSize'],
-														   UNet2D.hp['nChannels']], name='data')
-
-		def down_samp_layer(data, index):
-			with tf.name_scope('ld%d' % index):
-				ldXWeights1 = tf.Variable(
-					tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index], nOutX[index + 1]],
-										stddev=stdDev0), name='kernel1')
-				ldXWeightsExtra = []
-				for i in range(nExtraConvs):
-					ldXWeightsExtra.append(tf.Variable(
-						tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index + 1], nOutX[index + 1]],
-											stddev=stdDev0), name='kernelExtra%d' % i))
-
-				c00 = tf.nn.conv2d(data, ldXWeights1, strides=[1, 1, 1, 1], padding='SAME')
-				for i in range(nExtraConvs):
-					c00 = tf.nn.conv2d(tf.nn.relu(c00), ldXWeightsExtra[i], strides=[1, 1, 1, 1], padding='SAME')
-
-				ldXWeightsShortcut = tf.Variable(
-					tf.truncated_normal([1, 1, nOutX[index], nOutX[index + 1]], stddev=stdDev0), name='shortcutWeights')
-				shortcut = tf.nn.conv2d(data, ldXWeightsShortcut, strides=[1, 1, 1, 1], padding='SAME')
-
-				bn = tf.layers.batch_normalization(tf.nn.relu(c00 + shortcut), training=UNet2D.tfTraining)
-
-				return tf.nn.max_pool(bn, ksize=[1, dsfX[index], dsfX[index], 1],
-									  strides=[1, dsfX[index], dsfX[index], 1], padding='SAME', name='maxpool')
-
-		# --------------------------------------------------
-		# bottom layer
-		# --------------------------------------------------
-
-		with tf.name_scope('lb'):
-			lbWeights1 = tf.Variable(tf.truncated_normal(
-				[UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[UNet2D.hp['nLayers']], nOutX[UNet2D.hp['nLayers'] + 1]],
-				stddev=stdDev0), name='kernel1')
-
-			def lb(hidden):
-				return tf.nn.relu(tf.nn.conv2d(hidden, lbWeights1, strides=[1, 1, 1, 1], padding='SAME'), name='conv')
-
-		# --------------------------------------------------
-		# downsampling
-		# --------------------------------------------------
-
-		with tf.name_scope('downsampling'):
-			dsX = []
-			dsX.append(UNet2D.tfData)
-
-			for i in range(UNet2D.hp['nLayers']):
-				dsX.append(down_samp_layer(dsX[i], i))
-
-			b = lb(dsX[UNet2D.hp['nLayers']])
-
-		# --------------------------------------------------
-		# upsampling layer
-		# --------------------------------------------------
-
-		def up_samp_layer(data, index):
-			with tf.name_scope('lu%d' % index):
-				luXWeights1 = tf.Variable(
-					tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index + 1], nOutX[index + 2]],
-										stddev=stdDev0), name='kernel1')
-				luXWeights2 = tf.Variable(tf.truncated_normal(
-					[UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index] + nOutX[index + 1], nOutX[index + 1]],
-					stddev=stdDev0), name='kernel2')
-				luXWeightsExtra = []
-				for i in range(nExtraConvs):
-					luXWeightsExtra.append(tf.Variable(
-						tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index + 1], nOutX[index + 1]],
-											stddev=stdDev0), name='kernel2Extra%d' % i))
-
-				outSize = UNet2D.hp['imSize']
-				for i in range(index):
-					outSize /= dsfX[i]
-				outSize = int(outSize)
-
-				outputShape = [UNet2D.hp['batchSize'], outSize, outSize, nOutX[index + 1]]
-				us = tf.nn.relu(
-					tf.nn.conv2d_transpose(data, luXWeights1, outputShape, strides=[1, dsfX[index], dsfX[index], 1],
-										   padding='SAME'), name='conv1')
-				cc = concat3([dsX[index], us])
-				cv = tf.nn.relu(tf.nn.conv2d(cc, luXWeights2, strides=[1, 1, 1, 1], padding='SAME'), name='conv2')
-				for i in range(nExtraConvs):
-					cv = tf.nn.relu(tf.nn.conv2d(cv, luXWeightsExtra[i], strides=[1, 1, 1, 1], padding='SAME'),
-									name='conv2Extra%d' % i)
-				return cv
-
-		# --------------------------------------------------
-		# final (top) layer
-		# --------------------------------------------------
-
-		with tf.name_scope('lt'):
-			ltWeights1 = tf.Variable(tf.truncated_normal([1, 1, nOutX[1], nClasses], stddev=stdDev0), name='kernel')
-
-			def lt(hidden):
-				return tf.nn.conv2d(hidden, ltWeights1, strides=[1, 1, 1, 1], padding='SAME', name='conv')
-
-		# --------------------------------------------------
-		# upsampling
-		# --------------------------------------------------
-
-		with tf.name_scope('upsampling'):
-			usX = []
-			usX.append(b)
-
-			for i in range(UNet2D.hp['nLayers']):
-				usX.append(up_samp_layer(usX[i], UNet2D.hp['nLayers'] - 1 - i))
-
-			t = lt(usX[UNet2D.hp['nLayers']])
-
-		sm = tf.nn.softmax(t, -1)
-		UNet2D.nn = sm
-
-	def train(imPath, logPath, modelPath, pmPath, nTrain, nValid, nTest, restoreVariables, nSteps, gpuIndex,
-			  testPMIndex):
-		os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpuIndex
-
-		outLogPath = logPath
-		trainWriterPath = pathjoin(logPath, 'Train')
-		validWriterPath = pathjoin(logPath, 'Valid')
-		outModelPath = pathjoin(modelPath, 'model.ckpt')
-		outPMPath = pmPath
-
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-		nClasses = UNet2D.hp['nClasses']
-
-		# --------------------------------------------------
-		# data
-		# --------------------------------------------------
-
-		Train = np.zeros((nTrain, imSize, imSize, nChannels))
-		Valid = np.zeros((nValid, imSize, imSize, nChannels))
-		Test = np.zeros((nTest, imSize, imSize, nChannels))
-		LTrain = np.zeros((nTrain, imSize, imSize, nClasses))
-		LValid = np.zeros((nValid, imSize, imSize, nClasses))
-		LTest = np.zeros((nTest, imSize, imSize, nClasses))
-
-		print('loading data, computing mean / st dev')
-		if not os.path.exists(modelPath):
-			os.makedirs(modelPath)
-		if restoreVariables:
-			datasetMean = loadData(pathjoin(modelPath, 'datasetMean.data'))
-			datasetStDev = loadData(pathjoin(modelPath, 'datasetStDev.data'))
-		else:
-			datasetMean = 0
-			datasetStDev = 0
-			for iSample in range(nTrain + nValid + nTest):
-				I = im2double(tifread('%s/I%05d_Img.tif' % (imPath, iSample)))
-				datasetMean += np.mean(I)
-				datasetStDev += np.std(I)
-			datasetMean /= (nTrain + nValid + nTest)
-			datasetStDev /= (nTrain + nValid + nTest)
-			saveData(datasetMean, pathjoin(modelPath, 'datasetMean.data'))
-			saveData(datasetStDev, pathjoin(modelPath, 'datasetStDev.data'))
-
-		perm = np.arange(nTrain + nValid + nTest)
-		np.random.shuffle(perm)
-
-		for iSample in range(0, nTrain):
-			path = '%s/I%05d_Img.tif' % (imPath, perm[iSample])
-			im = im2double(tifread(path))
-			Train[iSample, :, :, 0] = (im - datasetMean) / datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath, perm[iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LTrain[iSample, :, :, i] = (im == i + 1)
-
-		for iSample in range(0, nValid):
-			path = '%s/I%05d_Img.tif' % (imPath, perm[nTrain + iSample])
-			im = im2double(tifread(path))
-			Valid[iSample, :, :, 0] = (im - datasetMean) / datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath, perm[nTrain + iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LValid[iSample, :, :, i] = (im == i + 1)
-
-		for iSample in range(0, nTest):
-			path = '%s/I%05d_Img.tif' % (imPath, perm[nTrain + nValid + iSample])
-			im = im2double(tifread(path))
-			Test[iSample, :, :, 0] = (im - datasetMean) / datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath, perm[nTrain + nValid + iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LTest[iSample, :, :, i] = (im == i + 1)
-
-		# --------------------------------------------------
-		# optimization
-		# --------------------------------------------------
-
-		tfLabels = tf.placeholder("float", shape=[None, imSize, imSize, nClasses], name='labels')
-
-		globalStep = tf.Variable(0, trainable=False)
-		learningRate0 = 0.01
-		decaySteps = 1000
-		decayRate = 0.95
-		learningRate = tf.train.exponential_decay(learningRate0, globalStep, decaySteps, decayRate, staircase=True)
-
-		with tf.name_scope('optim'):
-			loss = tf.reduce_mean(-tf.reduce_sum(tf.multiply(tfLabels, tf.log(UNet2D.nn)), 3))
-			updateOps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
-			# optimizer = tf.train.MomentumOptimizer(1e-3,0.9)
-			optimizer = tf.train.MomentumOptimizer(learningRate, 0.9)
-			# optimizer = tf.train.GradientDescentOptimizer(learningRate)
-			with tf.control_dependencies(updateOps):
-				optOp = optimizer.minimize(loss, global_step=globalStep)
-
-		with tf.name_scope('eval'):
-			error = []
-			for iClass in range(nClasses):
-				labels0 = tf.reshape(tf.to_int32(tf.slice(tfLabels, [0, 0, 0, iClass], [-1, -1, -1, 1])),
-									 [batchSize, imSize, imSize])
-				predict0 = tf.reshape(tf.to_int32(tf.equal(tf.argmax(UNet2D.nn, 3), iClass)),
-									  [batchSize, imSize, imSize])
-				correct = tf.multiply(labels0, predict0)
-				nCorrect0 = tf.reduce_sum(correct)
-				nLabels0 = tf.reduce_sum(labels0)
-				error.append(1 - tf.to_float(nCorrect0) / tf.to_float(nLabels0))
-			errors = tf.tuple(error)
-
-		# --------------------------------------------------
-		# inspection
-		# --------------------------------------------------
-
-		with tf.name_scope('scalars'):
-			tf.summary.scalar('avg_cross_entropy', loss)
-			for iClass in range(nClasses):
-				tf.summary.scalar('avg_pixel_error_%d' % iClass, error[iClass])
-			tf.summary.scalar('learning_rate', learningRate)
-		with tf.name_scope('images'):
-			split0 = tf.slice(UNet2D.nn, [0, 0, 0, 0], [-1, -1, -1, 1])
-			split1 = tf.slice(UNet2D.nn, [0, 0, 0, 1], [-1, -1, -1, 1])
-			if nClasses > 2:
-				split2 = tf.slice(UNet2D.nn, [0, 0, 0, 2], [-1, -1, -1, 1])
-			tf.summary.image('pm0', split0)
-			tf.summary.image('pm1', split1)
-			if nClasses > 2:
-				tf.summary.image('pm2', split2)
-		merged = tf.summary.merge_all()
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		sess = tf.Session(config=tf.ConfigProto(
-			allow_soft_placement=True))  # config parameter needed to save variables when using GPU
-
-		if os.path.exists(outLogPath):
-			shutil.rmtree(outLogPath)
-		trainWriter = tf.summary.FileWriter(trainWriterPath, sess.graph)
-		validWriter = tf.summary.FileWriter(validWriterPath, sess.graph)
-
-		if restoreVariables:
-			saver.restore(sess, outModelPath)
-			print("Model restored.")
-		else:
-			sess.run(tf.global_variables_initializer())
-
-		# --------------------------------------------------
-		# train
-		# --------------------------------------------------
-
-		batchData = np.zeros((batchSize, imSize, imSize, nChannels))
-		batchLabels = np.zeros((batchSize, imSize, imSize, nClasses))
-		for i in range(nSteps):
-			# train
-
-			perm = np.arange(nTrain)
-			np.random.shuffle(perm)
-
-			for j in range(batchSize):
-				batchData[j, :, :, :] = Train[perm[j], :, :, :]
-				batchLabels[j, :, :, :] = LTrain[perm[j], :, :, :]
-
-			summary, _ = sess.run([merged, optOp],
-								  feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 1})
-			trainWriter.add_summary(summary, i)
-
-			# validation
-
-			perm = np.arange(nValid)
-			np.random.shuffle(perm)
-
-			for j in range(batchSize):
-				batchData[j, :, :, :] = Valid[perm[j], :, :, :]
-				batchLabels[j, :, :, :] = LValid[perm[j], :, :, :]
-
-			summary, es = sess.run([merged, errors],
-								   feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 0})
-			validWriter.add_summary(summary, i)
-
-			e = np.mean(es)
-			print('step %05d, e: %f' % (i, e))
-
-			if i == 0:
-				if restoreVariables:
-					lowestError = e
-				else:
-					lowestError = np.inf
-
-			if np.mod(i, 100) == 0 and e < lowestError:
-				lowestError = e
-				print("Model saved in file: %s" % saver.save(sess, outModelPath))
-
-		# --------------------------------------------------
-		# test
-		# --------------------------------------------------
-
-		if not os.path.exists(outPMPath):
-			os.makedirs(outPMPath)
-
-		for i in range(nTest):
-			j = np.mod(i, batchSize)
-
-			batchData[j, :, :, :] = Test[i, :, :, :]
-			batchLabels[j, :, :, :] = LTest[i, :, :, :]
-
-			if j == batchSize - 1 or i == nTest - 1:
-
-				output = sess.run(UNet2D.nn,
-								  feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 0})
-
-				for k in range(j + 1):
-					pm = output[k, :, :, testPMIndex]
-					gt = batchLabels[k, :, :, testPMIndex]
-					im = np.sqrt(normalize(batchData[k, :, :, 0]))
-					imwrite(np.uint8(255 * np.concatenate((im, np.concatenate((pm, gt), axis=1)), axis=1)),
-							'%s/I%05d.png' % (outPMPath, i - j + k + 1))
-
-		# --------------------------------------------------
-		# save hyper-parameters, clean-up
-		# --------------------------------------------------
-
-		saveData(UNet2D.hp, pathjoin(modelPath, 'hp.data'))
-
-		trainWriter.close()
-		validWriter.close()
-		sess.close()
-
-	def deploy(imPath, nImages, modelPath, pmPath, gpuIndex, pmIndex):
-		os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpuIndex
-
-		variablesPath = pathjoin(modelPath, 'model.ckpt')
-		outPMPath = pmPath
-
-		hp = loadData(pathjoin(modelPath, 'hp.data'))
-		UNet2D.setupWithHP(hp)
-
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-		nClasses = UNet2D.hp['nClasses']
-
-		# --------------------------------------------------
-		# data
-		# --------------------------------------------------
-
-		Data = np.zeros((nImages, imSize, imSize, nChannels))
-
-		datasetMean = loadData(pathjoin(modelPath, 'datasetMean.data'))
-		datasetStDev = loadData(pathjoin(modelPath, 'datasetStDev.data'))
-
-		for iSample in range(0, nImages):
-			path = '%s/I%05d_Img.tif' % (imPath, iSample)
-			im = im2double(tifread(path))
-			Data[iSample, :, :, 0] = (im - datasetMean) / datasetStDev
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		sess = tf.Session(config=tf.ConfigProto(
-			allow_soft_placement=True))  # config parameter needed to save variables when using GPU
-
-		saver.restore(sess, variablesPath)
-		print("Model restored.")
-
-		# --------------------------------------------------
-		# deploy
-		# --------------------------------------------------
-
-		batchData = np.zeros((batchSize, imSize, imSize, nChannels))
-
-		if not os.path.exists(outPMPath):
-			os.makedirs(outPMPath)
-
-		for i in range(nImages):
-			print(i, nImages)
-
-			j = np.mod(i, batchSize)
-
-			batchData[j, :, :, :] = Data[i, :, :, :]
-
-			if j == batchSize - 1 or i == nImages - 1:
-
-				output = sess.run(UNet2D.nn, feed_dict={UNet2D.tfData: batchData, UNet2D.tfTraining: 0})
-
-				for k in range(j + 1):
-					pm = output[k, :, :, pmIndex]
-					im = np.sqrt(normalize(batchData[k, :, :, 0]))
-					# imwrite(np.uint8(255*np.concatenate((im,pm),axis=1)),'%s/I%05d.png' % (outPMPath,i-j+k+1))
-					imwrite(np.uint8(255 * im), '%s/I%05d_Im.png' % (outPMPath, i - j + k + 1))
-					imwrite(np.uint8(255 * pm), '%s/I%05d_PM.png' % (outPMPath, i - j + k + 1))
-
-		# --------------------------------------------------
-		# clean-up
-		# --------------------------------------------------
-
-		sess.close()
-
-	def singleImageInferenceSetup(modelPath, gpuIndex,mean,std):
-		variablesPath = pathjoin(modelPath, 'model.ckpt')
-
-		hp = loadData(pathjoin(modelPath, 'hp.data'))
-		UNet2D.setupWithHP(hp)
-		if mean ==-1:
-			UNet2D.DatasetMean = loadData(pathjoin(modelPath, 'datasetMean.data'))
-		else:
-			UNet2D.DatasetMean = mean
-
-		if std == -1:
-			UNet2D.DatasetStDev = loadData(pathjoin(modelPath, 'datasetStDev.data'))
-		else:
-			UNet2D.DatasetStDev = std
-		print(UNet2D.DatasetMean)
-		print(UNet2D.DatasetStDev)
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		UNet2D.Session = tf.Session(config=tf.ConfigProto())
-			# allow_soft_placement=True))  # config parameter needed to save variables when using GPU
-
-		saver.restore(UNet2D.Session, variablesPath)
-		print("Model restored.")
-
-	def singleImageInferenceCleanup():
-		UNet2D.Session.close()
-
-	def singleImageInference(image, mode, pmIndex):
-		print('Inference...')
-
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-
-		PI2D.setup(image, imSize, int(imSize / 8), mode)
-		PI2D.createOutput(nChannels)
-
-		batchData = np.zeros((batchSize, imSize, imSize, nChannels))
-		for i in range(PI2D.NumPatches):
-			j = np.mod(i, batchSize)
-			batchData[j, :, :, 0] = (PI2D.getPatch(i) - UNet2D.DatasetMean) / UNet2D.DatasetStDev
-			if j == batchSize - 1 or i == PI2D.NumPatches - 1:
-				output = UNet2D.Session.run(UNet2D.nn, feed_dict={UNet2D.tfData: batchData, UNet2D.tfTraining: 0})
-				for k in range(j + 1):
-					pm = output[k, :, :, pmIndex]
-					PI2D.patchOutput(i - j + k, pm)
-			# PI2D.patchOutput(i-j+k,normalize(imgradmag(PI2D.getPatch(i-j+k),1)))
-
-		return PI2D.getValidOutput()
-
-
-if __name__ == '__main__':
-	parser = argparse.ArgumentParser()
-	parser.add_argument("imagePath", help="path to the .tif file")
-	parser.add_argument("--model",  help="type of model. For example, nuclei vs cytoplasm",default = 'nucleiDAPI')
-	parser.add_argument("--outputPath", help="output path of probability map")
-	parser.add_argument("--channel", help="channel to perform inference on", type=int, default=0)
-	parser.add_argument("--classOrder", help="background, contours, foreground", type = int, nargs = '+', default=-1)
-	parser.add_argument("--mean", help="mean intensity of input image. Use -1 to use model", type=float, default=-1)
-	parser.add_argument("--std", help="standard deviation of input image. Use -1 to use model", type=float, default=-1)
-	parser.add_argument("--scalingFactor", help="factor by which to increase/decrease image size", type=float,
-						default=1)
-	parser.add_argument("--stackOutput", help="save probability maps for all classes as a single stacked file", action='store_true')
-	parser.add_argument("--GPU", help="explicitly select GPU", type=int, default = -1)
-	parser.add_argument("--outlier",
-						help="percentile intensity mapped to the maximum when rescaling intensity values; defaults to the maximum intensity",
-						type=float, default=-1)
-	args = parser.parse_args()
-
-	logPath = ''
-	scriptPath = os.path.dirname(os.path.realpath(__file__))
-	modelPath = os.path.join(scriptPath, 'models', args.model)
-	# modelPath = os.path.join(scriptPath, 'models/cytoplasmINcell')
-	# modelPath = os.path.join(scriptPath, 'cytoplasmZeissNikon')
-	pmPath = ''
-
-	if os.system('nvidia-smi') == 0:
-		if args.GPU == -1:
-			print("automatically choosing GPU")
-			GPU = GPUselect.pick_gpu_lowest_memory()
-		else:
-			GPU = args.GPU
-		print('Using GPU ' + str(GPU))
-
-	else:
-		if sys.platform == 'win32':  # only 1 gpu on windows
-			if args.GPU==-1:
-				GPU = 0
-			else:
-				GPU = args.GPU
-			print('Using GPU ' + str(GPU))
-		else:
-			GPU=0
-			print('Using CPU')
-	os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % GPU
-	UNet2D.singleImageInferenceSetup(modelPath, GPU,args.mean,args.std)
-	nClass = UNet2D.hp['nClasses']
-	imagePath = args.imagePath
-	dapiChannel = args.channel
-	dsFactor = args.scalingFactor
-	parentFolder = os.path.dirname(os.path.dirname(imagePath))
-	fileName = os.path.basename(imagePath)
-	fileNamePrefix = fileName.split(os.extsep, 1)
-	print(fileName)
-	fileType = fileNamePrefix[1]
-
-	if fileType=='ome.tif' or fileType == 'btf' :
-		I = skio.imread(imagePath, img_num=dapiChannel,plugin='tifffile')
-	elif fileType == 'tif' :
-		I = tifffile.imread(imagePath, key=dapiChannel)
-	elif fileType == 'czi':
-		with czifile.CziFile(imagePath) as czi:
-			image = czi.asarray()
-			I = image[0, 0, dapiChannel, 0, 0, :, :, 0]
-	elif fileType == 'nd2':
-		with ND2Reader(imagePath) as fullStack:
-			I = fullStack[dapiChannel]
-
-	if args.classOrder == -1:
-		args.classOrder = range(nClass)
-
-	rawI = I
-	print(type(I))
-	hsize = int((float(I.shape[0]) * float(dsFactor)))
-	vsize = int((float(I.shape[1]) * float(dsFactor)))
-	I = resize(I, (hsize, vsize))
-	if args.outlier == -1:
-		maxLimit = np.max(I)
-	else:
-		maxLimit = np.percentile(I, args.outlier)
-	I = im2double(sk.rescale_intensity(I, in_range=(np.min(I), maxLimit), out_range=(0, 0.983)))
-	rawI = im2double(rawI) / np.max(im2double(rawI))
-	if not args.outputPath:
-		args.outputPath = parentFolder + '//probability_maps'
-
-	if not os.path.exists(args.outputPath):
-		os.makedirs(args.outputPath)
-
-	append_kwargs = {
-		'bigtiff': True,
-		'metadata': None,
-		'append': True,
-	}
-	save_kwargs = {
-		'bigtiff': True,
-		'metadata': None,
-		'append': False,
-	}
-	if args.stackOutput:
-		slice=0
-		for iClass in args.classOrder[::-1]:
-			PM = np.uint8(255*UNet2D.singleImageInference(I, 'accumulate', iClass)) # backwards in order to align with ilastik...
-			PM = resize(PM, (rawI.shape[0], rawI.shape[1]))
-			if slice==0:
-				skimage.io.imsave(args.outputPath + '//' + fileNamePrefix[0] + '_Probabilities_' + str(dapiChannel) + '.tif', np.uint8(255 * PM),**save_kwargs)
-			else:
-				skimage.io.imsave(args.outputPath + '//' + fileNamePrefix[0] + '_Probabilities_' + str(dapiChannel) + '.tif',np.uint8(255 * PM),**append_kwargs)
-			if slice==1:
-				save_kwargs['append'] = False
-				skimage.io.imsave(args.outputPath + '//' + fileNamePrefix[0] + '_Preview_' + str(dapiChannel) + '.tif',	np.uint8(255 * PM), **save_kwargs)
-				skimage.io.imsave(args.outputPath + '//' + fileNamePrefix[0] + '_Preview_' + str(dapiChannel) + '.tif', np.uint8(255 * rawI), **append_kwargs)
-			slice = slice + 1
-
-	else:
-		contours = np.uint8(255*UNet2D.singleImageInference(I, 'accumulate', args.classOrder[1]))
-		hsize = int((float(I.shape[0]) * float(1 / dsFactor)))
-		vsize = int((float(I.shape[1]) * float(1 / dsFactor)))
-		contours = resize(contours, (rawI.shape[0], rawI.shape[1]))
-		skimage.io.imsave(args.outputPath + '//' + fileNamePrefix[0] + '_ContoursPM_' + str(dapiChannel) + '.tif',np.uint8(255 * contours),**save_kwargs)
-		skimage.io.imsave(args.outputPath + '//' + fileNamePrefix[0] + '_ContoursPM_' + str(dapiChannel) + '.tif',np.uint8(255 * rawI), **append_kwargs)
-		del contours
-		nuclei = np.uint8(255*UNet2D.singleImageInference(I, 'accumulate', args.classOrder[2]))
-		nuclei = resize(nuclei, (rawI.shape[0], rawI.shape[1]))
-		skimage.io.imsave(args.outputPath + '//' + fileNamePrefix[0] + '_NucleiPM_' + str(dapiChannel) + '.tif',np.uint8(255 * nuclei), **save_kwargs)
-		del nuclei
-	UNet2D.singleImageInferenceCleanup()
-
-#aligned output files to reflect ilastik
-#outputting all classes as single file
-#handles multiple formats including tif, ome.tif, nd2, czi
-#selectable models (human nuclei, mouse nuclei, cytoplasm)
-
-#added legacy function to save output files
-#append save function to reduce memory footprint
-#added --classOrder parameter to specify which class is background, contours, and nuclei respectively
\ No newline at end of file
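Note: the removed UnMicst.py above was driven entirely by its argparse block. A typical invocation (illustrative paths and file name only, not taken from this changeset) would have looked like

	python UnMicst.py /path/to/image.ome.tif --model nucleiDAPI --channel 0 --scalingFactor 1 --stackOutput --outputPath /path/to/probability_maps

where --classOrder can additionally be supplied to declare which class indices correspond to background, contours, and nuclei.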
--- a/batchUNet2DTMACycif.py	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,594 +0,0 @@
-import numpy as np
-from scipy import misc
-import tensorflow as tf
-import shutil
-import scipy.io as sio
-import os,fnmatch,PIL,glob
-import skimage.exposure as sk
-
-import sys
-sys.path.insert(0, 'C:\\Users\\Public\\Documents\\ImageScience')
-from toolbox.imtools import *
-from toolbox.ftools import *
-from toolbox.PartitionOfImage import PI2D
-
-
-def concat3(lst):
-		return tf.concat(lst,3)
-
-class UNet2D:
-	hp = None # hyper-parameters
-	nn = None # network
-	tfTraining = None # if training or not (to handle batch norm)
-	tfData = None # data placeholder
-	Session = None
-	DatasetMean = 0
-	DatasetStDev = 0
-
-	def setupWithHP(hp):
-		UNet2D.setup(hp['imSize'],
-					 hp['nChannels'],
-					 hp['nClasses'],
-					 hp['nOut0'],
-					 hp['featMapsFact'],
-					 hp['downSampFact'],
-					 hp['ks'],
-					 hp['nExtraConvs'],
-					 hp['stdDev0'],
-					 hp['nLayers'],
-					 hp['batchSize'])
-
-	def setup(imSize,nChannels,nClasses,nOut0,featMapsFact,downSampFact,kernelSize,nExtraConvs,stdDev0,nDownSampLayers,batchSize):
-		UNet2D.hp = {'imSize':imSize,
-					 'nClasses':nClasses,
-					 'nChannels':nChannels,
-					 'nExtraConvs':nExtraConvs,
-					 'nLayers':nDownSampLayers,
-					 'featMapsFact':featMapsFact,
-					 'downSampFact':downSampFact,
-					 'ks':kernelSize,
-					 'nOut0':nOut0,
-					 'stdDev0':stdDev0,
-					 'batchSize':batchSize}
-
-		nOutX = [UNet2D.hp['nChannels'],UNet2D.hp['nOut0']]
-		dsfX = []
-		for i in range(UNet2D.hp['nLayers']):
-			nOutX.append(nOutX[-1]*UNet2D.hp['featMapsFact'])
-			dsfX.append(UNet2D.hp['downSampFact'])
-
-
-		# --------------------------------------------------
-		# downsampling layer
-		# --------------------------------------------------
-
-		with tf.name_scope('placeholders'):
-			UNet2D.tfTraining = tf.placeholder(tf.bool, name='training')
-			UNet2D.tfData = tf.placeholder("float", shape=[None,UNet2D.hp['imSize'],UNet2D.hp['imSize'],UNet2D.hp['nChannels']],name='data')
-
-		def down_samp_layer(data,index):
-			with tf.name_scope('ld%d' % index):
-				ldXWeights1 = tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index], nOutX[index+1]], stddev=stdDev0),name='kernel1')
-				ldXWeightsExtra = []
-				for i in range(nExtraConvs):
-					ldXWeightsExtra.append(tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index+1], nOutX[index+1]], stddev=stdDev0),name='kernelExtra%d' % i))
-				
-				c00 = tf.nn.conv2d(data, ldXWeights1, strides=[1, 1, 1, 1], padding='SAME')
-				for i in range(nExtraConvs):
-					c00 = tf.nn.conv2d(tf.nn.relu(c00), ldXWeightsExtra[i], strides=[1, 1, 1, 1], padding='SAME')
-
-				ldXWeightsShortcut = tf.Variable(tf.truncated_normal([1, 1, nOutX[index], nOutX[index+1]], stddev=stdDev0),name='shortcutWeights')
-				shortcut = tf.nn.conv2d(data, ldXWeightsShortcut, strides=[1, 1, 1, 1], padding='SAME')
-
-				bn = tf.layers.batch_normalization(tf.nn.relu(c00+shortcut), training=UNet2D.tfTraining)
-
-				return tf.nn.max_pool(bn, ksize=[1, dsfX[index], dsfX[index], 1], strides=[1, dsfX[index], dsfX[index], 1], padding='SAME',name='maxpool')
-
-		# --------------------------------------------------
-		# bottom layer
-		# --------------------------------------------------
-
-		with tf.name_scope('lb'):
-			lbWeights1 = tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[UNet2D.hp['nLayers']], nOutX[UNet2D.hp['nLayers']+1]], stddev=stdDev0),name='kernel1')
-			def lb(hidden):
-				return tf.nn.relu(tf.nn.conv2d(hidden, lbWeights1, strides=[1, 1, 1, 1], padding='SAME'),name='conv')
-
-		# --------------------------------------------------
-		# downsampling
-		# --------------------------------------------------
-
-		with tf.name_scope('downsampling'):    
-			dsX = []
-			dsX.append(UNet2D.tfData)
-
-			for i in range(UNet2D.hp['nLayers']):
-				dsX.append(down_samp_layer(dsX[i],i))
-
-			b = lb(dsX[UNet2D.hp['nLayers']])
-
-		# --------------------------------------------------
-		# upsampling layer
-		# --------------------------------------------------
-
-		def up_samp_layer(data,index):
-			with tf.name_scope('lu%d' % index):
-				luXWeights1    = tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index+1], nOutX[index+2]], stddev=stdDev0),name='kernel1')
-				luXWeights2    = tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index]+nOutX[index+1], nOutX[index+1]], stddev=stdDev0),name='kernel2')
-				luXWeightsExtra = []
-				for i in range(nExtraConvs):
-					luXWeightsExtra.append(tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index+1], nOutX[index+1]], stddev=stdDev0),name='kernel2Extra%d' % i))
-				
-				outSize = UNet2D.hp['imSize']
-				for i in range(index):
-					outSize /= dsfX[i]
-				outSize = int(outSize)
-
-				outputShape = [UNet2D.hp['batchSize'],outSize,outSize,nOutX[index+1]]
-				us = tf.nn.relu(tf.nn.conv2d_transpose(data, luXWeights1, outputShape, strides=[1, dsfX[index], dsfX[index], 1], padding='SAME'),name='conv1')
-				cc = concat3([dsX[index],us]) 
-				cv = tf.nn.relu(tf.nn.conv2d(cc, luXWeights2, strides=[1, 1, 1, 1], padding='SAME'),name='conv2')
-				for i in range(nExtraConvs):
-					cv = tf.nn.relu(tf.nn.conv2d(cv, luXWeightsExtra[i], strides=[1, 1, 1, 1], padding='SAME'),name='conv2Extra%d' % i)
-				return cv
-
-		# --------------------------------------------------
-		# final (top) layer
-		# --------------------------------------------------
-
-		with tf.name_scope('lt'):
-			ltWeights1    = tf.Variable(tf.truncated_normal([1, 1, nOutX[1], nClasses], stddev=stdDev0),name='kernel')
-			def lt(hidden):
-				return tf.nn.conv2d(hidden, ltWeights1, strides=[1, 1, 1, 1], padding='SAME',name='conv')
-
-
-		# --------------------------------------------------
-		# upsampling
-		# --------------------------------------------------
-
-		with tf.name_scope('upsampling'):
-			usX = []
-			usX.append(b)
-
-			for i in range(UNet2D.hp['nLayers']):
-				usX.append(up_samp_layer(usX[i],UNet2D.hp['nLayers']-1-i))
-
-			t = lt(usX[UNet2D.hp['nLayers']])
-
-
-		sm = tf.nn.softmax(t,-1)
-		UNet2D.nn = sm
-
-
-	def train(imPath,logPath,modelPath,pmPath,nTrain,nValid,nTest,restoreVariables,nSteps,gpuIndex,testPMIndex):
-		os.environ['CUDA_VISIBLE_DEVICES']= '%d' % gpuIndex
-
-		outLogPath = logPath
-		trainWriterPath = pathjoin(logPath,'Train')
-		validWriterPath = pathjoin(logPath,'Valid')
-		outModelPath = pathjoin(modelPath,'model.ckpt')
-		outPMPath = pmPath
-		
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-		nClasses = UNet2D.hp['nClasses']
-
-		# --------------------------------------------------
-		# data
-		# --------------------------------------------------
-
-		Train = np.zeros((nTrain,imSize,imSize,nChannels))
-		Valid = np.zeros((nValid,imSize,imSize,nChannels))
-		Test = np.zeros((nTest,imSize,imSize,nChannels))
-		LTrain = np.zeros((nTrain,imSize,imSize,nClasses))
-		LValid = np.zeros((nValid,imSize,imSize,nClasses))
-		LTest = np.zeros((nTest,imSize,imSize,nClasses))
-
-		print('loading data, computing mean / st dev')
-		if not os.path.exists(modelPath):
-			os.makedirs(modelPath)
-		if restoreVariables:
-			datasetMean = loadData(pathjoin(modelPath,'datasetMean.data'))
-			datasetStDev = loadData(pathjoin(modelPath,'datasetStDev.data'))
-		else:
-			datasetMean = 0
-			datasetStDev = 0
-			for iSample in range(nTrain+nValid+nTest):
-				I = im2double(tifread('%s/I%05d_Img.tif' % (imPath,iSample)))
-				datasetMean += np.mean(I)
-				datasetStDev += np.std(I)
-			datasetMean /= (nTrain+nValid+nTest)
-			datasetStDev /= (nTrain+nValid+nTest)
-			saveData(datasetMean, pathjoin(modelPath,'datasetMean.data'))
-			saveData(datasetStDev, pathjoin(modelPath,'datasetStDev.data'))
-
-		perm = np.arange(nTrain+nValid+nTest)
-		np.random.shuffle(perm)
-
-		for iSample in range(0, nTrain):
-			path = '%s/I%05d_Img.tif' % (imPath,perm[iSample])
-			im = im2double(tifread(path))
-			Train[iSample,:,:,0] = (im-datasetMean)/datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath,perm[iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LTrain[iSample,:,:,i] = (im == i+1)
-
-		for iSample in range(0, nValid):
-			path = '%s/I%05d_Img.tif' % (imPath,perm[nTrain+iSample])
-			im = im2double(tifread(path))
-			Valid[iSample,:,:,0] = (im-datasetMean)/datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath,perm[nTrain+iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LValid[iSample,:,:,i] = (im == i+1)
-
-		for iSample in range(0, nTest):
-			path = '%s/I%05d_Img.tif' % (imPath,perm[nTrain+nValid+iSample])
-			im = im2double(tifread(path))
-			Test[iSample,:,:,0] = (im-datasetMean)/datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath,perm[nTrain+nValid+iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LTest[iSample,:,:,i] = (im == i+1)
-
-		# --------------------------------------------------
-		# optimization
-		# --------------------------------------------------
-
-		tfLabels = tf.placeholder("float", shape=[None,imSize,imSize,nClasses],name='labels')
-
-		globalStep = tf.Variable(0,trainable=False)
-		learningRate0 = 0.01
-		decaySteps = 1000
-		decayRate = 0.95
-		learningRate = tf.train.exponential_decay(learningRate0,globalStep,decaySteps,decayRate,staircase=True)
-
-		with tf.name_scope('optim'):
-			loss = tf.reduce_mean(-tf.reduce_sum(tf.multiply(tfLabels,tf.log(UNet2D.nn)),3))
-			updateOps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
-			# optimizer = tf.train.MomentumOptimizer(1e-3,0.9)
-			optimizer = tf.train.MomentumOptimizer(learningRate,0.9)
-			# optimizer = tf.train.GradientDescentOptimizer(learningRate)
-			with tf.control_dependencies(updateOps):
-				optOp = optimizer.minimize(loss,global_step=globalStep)
-
-		with tf.name_scope('eval'):
-			error = []
-			for iClass in range(nClasses):
-				labels0 = tf.reshape(tf.to_int32(tf.slice(tfLabels,[0,0,0,iClass],[-1,-1,-1,1])),[batchSize,imSize,imSize])
-				predict0 = tf.reshape(tf.to_int32(tf.equal(tf.argmax(UNet2D.nn,3),iClass)),[batchSize,imSize,imSize])
-				correct = tf.multiply(labels0,predict0)
-				nCorrect0 = tf.reduce_sum(correct)
-				nLabels0 = tf.reduce_sum(labels0)
-				error.append(1-tf.to_float(nCorrect0)/tf.to_float(nLabels0))
-			errors = tf.tuple(error)
-
-		# --------------------------------------------------
-		# inspection
-		# --------------------------------------------------
-
-		with tf.name_scope('scalars'):
-			tf.summary.scalar('avg_cross_entropy', loss)
-			for iClass in range(nClasses):
-				tf.summary.scalar('avg_pixel_error_%d' % iClass, error[iClass])
-			tf.summary.scalar('learning_rate', learningRate)
-		with tf.name_scope('images'):
-			split0 = tf.slice(UNet2D.nn,[0,0,0,0],[-1,-1,-1,1])
-			split1 = tf.slice(UNet2D.nn,[0,0,0,1],[-1,-1,-1,1])
-			if nClasses > 2:
-				split2 = tf.slice(UNet2D.nn,[0,0,0,2],[-1,-1,-1,1])
-			tf.summary.image('pm0',split0)
-			tf.summary.image('pm1',split1)
-			if nClasses > 2:
-				tf.summary.image('pm2',split2)
-		merged = tf.summary.merge_all()
-
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # config parameter needed to save variables when using GPU
-
-		if os.path.exists(outLogPath):
-			shutil.rmtree(outLogPath)
-		trainWriter = tf.summary.FileWriter(trainWriterPath, sess.graph)
-		validWriter = tf.summary.FileWriter(validWriterPath, sess.graph)
-
-		if restoreVariables:
-			saver.restore(sess, outModelPath)
-			print("Model restored.")
-		else:
-			sess.run(tf.global_variables_initializer())
-
-		# --------------------------------------------------
-		# train
-		# --------------------------------------------------
-
-		batchData = np.zeros((batchSize,imSize,imSize,nChannels))
-		batchLabels = np.zeros((batchSize,imSize,imSize,nClasses))
-		for i in range(nSteps):
-			# train
-
-			perm = np.arange(nTrain)
-			np.random.shuffle(perm)
-
-			for j in range(batchSize):
-				batchData[j,:,:,:] = Train[perm[j],:,:,:]
-				batchLabels[j,:,:,:] = LTrain[perm[j],:,:,:]
-
-			summary,_ = sess.run([merged,optOp],feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 1})
-			trainWriter.add_summary(summary, i)
-
-			# validation
-
-			perm = np.arange(nValid)
-			np.random.shuffle(perm)
-
-			for j in range(batchSize):
-				batchData[j,:,:,:] = Valid[perm[j],:,:,:]
-				batchLabels[j,:,:,:] = LValid[perm[j],:,:,:]
-
-			summary, es = sess.run([merged, errors],feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 0})
-			validWriter.add_summary(summary, i)
-
-			e = np.mean(es)
-			print('step %05d, e: %f' % (i,e))
-
-			if i == 0:
-				if restoreVariables:
-					lowestError = e
-				else:
-					lowestError = np.inf
-
-			if np.mod(i,100) == 0 and e < lowestError:
-				lowestError = e
-				print("Model saved in file: %s" % saver.save(sess, outModelPath))
-
-
-		# --------------------------------------------------
-		# test
-		# --------------------------------------------------
-
-		if not os.path.exists(outPMPath):
-			os.makedirs(outPMPath)
-
-		for i in range(nTest):
-			j = np.mod(i,batchSize)
-
-			batchData[j,:,:,:] = Test[i,:,:,:]
-			batchLabels[j,:,:,:] = LTest[i,:,:,:]
-		 
-			if j == batchSize-1 or i == nTest-1:
-
-				output = sess.run(UNet2D.nn,feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 0})
-
-				for k in range(j+1):
-					pm = output[k,:,:,testPMIndex]
-					gt = batchLabels[k,:,:,testPMIndex]
-					im = np.sqrt(normalize(batchData[k,:,:,0]))
-					imwrite(np.uint8(255*np.concatenate((im,np.concatenate((pm,gt),axis=1)),axis=1)),'%s/I%05d.png' % (outPMPath,i-j+k+1))
-
-
-		# --------------------------------------------------
-		# save hyper-parameters, clean-up
-		# --------------------------------------------------
-
-		saveData(UNet2D.hp,pathjoin(modelPath,'hp.data'))
-
-		trainWriter.close()
-		validWriter.close()
-		sess.close()
-
-	def deploy(imPath,nImages,modelPath,pmPath,gpuIndex,pmIndex):
-		os.environ['CUDA_VISIBLE_DEVICES']= '%d' % gpuIndex
-
-		variablesPath = pathjoin(modelPath,'model.ckpt')
-		outPMPath = pmPath
-
-		hp = loadData(pathjoin(modelPath,'hp.data'))
-		UNet2D.setupWithHP(hp)
-		
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-		nClasses = UNet2D.hp['nClasses']
-
-		# --------------------------------------------------
-		# data
-		# --------------------------------------------------
-
-		Data = np.zeros((nImages,imSize,imSize,nChannels))
-
-		datasetMean = loadData(pathjoin(modelPath,'datasetMean.data'))
-		datasetStDev = loadData(pathjoin(modelPath,'datasetStDev.data'))
-
-		for iSample in range(0, nImages):
-			path = '%s/I%05d_Img.tif' % (imPath,iSample)
-			im = im2double(tifread(path))
-			Data[iSample,:,:,0] = (im-datasetMean)/datasetStDev
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # config parameter needed to save variables when using GPU
-
-		saver.restore(sess, variablesPath)
-		print("Model restored.")
-
-		# --------------------------------------------------
-		# deploy
-		# --------------------------------------------------
-
-		batchData = np.zeros((batchSize,imSize,imSize,nChannels))
-
-		if not os.path.exists(outPMPath):
-			os.makedirs(outPMPath)
-
-		for i in range(nImages):
-			print(i,nImages)
-
-			j = np.mod(i,batchSize)
-
-			batchData[j,:,:,:] = Data[i,:,:,:]
-		 
-			if j == batchSize-1 or i == nImages-1:
-
-				output = sess.run(UNet2D.nn,feed_dict={UNet2D.tfData: batchData, UNet2D.tfTraining: 0})
-
-				for k in range(j+1):
-					pm = output[k,:,:,pmIndex]
-					im = np.sqrt(normalize(batchData[k,:,:,0]))
-					# imwrite(np.uint8(255*np.concatenate((im,pm),axis=1)),'%s/I%05d.png' % (outPMPath,i-j+k+1))
-					imwrite(np.uint8(255*im),'%s/I%05d_Im.png' % (outPMPath,i-j+k+1))
-					imwrite(np.uint8(255*pm),'%s/I%05d_PM.png' % (outPMPath,i-j+k+1))
-
-
-		# --------------------------------------------------
-		# clean-up
-		# --------------------------------------------------
-
-		sess.close()
-
-	def singleImageInferenceSetup(modelPath,gpuIndex):
-		os.environ['CUDA_VISIBLE_DEVICES']= '%d' % gpuIndex
-
-		variablesPath = pathjoin(modelPath,'model.ckpt')
-
-		hp = loadData(pathjoin(modelPath,'hp.data'))
-		UNet2D.setupWithHP(hp)
-
-		UNet2D.DatasetMean = loadData(pathjoin(modelPath,'datasetMean.data'))
-		UNet2D.DatasetStDev = loadData(pathjoin(modelPath,'datasetStDev.data'))
-		print(UNet2D.DatasetMean)
-		print(UNet2D.DatasetStDev)
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		UNet2D.Session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # config parameter needed to save variables when using GPU
-
-		saver.restore(UNet2D.Session, variablesPath)
-		print("Model restored.")
-
-	def singleImageInferenceCleanup():
-		UNet2D.Session.close()
-
-	def singleImageInference(image,mode,pmIndex):
-		print('Inference...')
-
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-
-		PI2D.setup(image,imSize,int(imSize/8),mode)
-		PI2D.createOutput(nChannels)
-
-		batchData = np.zeros((batchSize,imSize,imSize,nChannels))
-		for i in range(PI2D.NumPatches):
-			j = np.mod(i,batchSize)
-			batchData[j,:,:,0] = (PI2D.getPatch(i)-UNet2D.DatasetMean)/UNet2D.DatasetStDev
-			if j == batchSize-1 or i == PI2D.NumPatches-1:
-				output = UNet2D.Session.run(UNet2D.nn,feed_dict={UNet2D.tfData: batchData, UNet2D.tfTraining: 0})
-				for k in range(j+1):
-					pm = output[k,:,:,pmIndex]
-					PI2D.patchOutput(i-j+k,pm)
-					# PI2D.patchOutput(i-j+k,normalize(imgradmag(PI2D.getPatch(i-j+k),1)))
-
-		return PI2D.getValidOutput()
-
-
-if __name__ == '__main__':
-	logPath = 'D:\\LSP\\Sinem\\fromOlympus\\TFLogsssssssss'
-	modelPath = 'D:\\LSP\\UNet\\tonsil20x1bin1chan\\TFModel - 3class 16 kernels 5ks 2 layers'
-	pmPath = 'D:\\LSP\\Sinem\\fromOlympus\\TFProbMaps'
-
-	
-	# ----- test 1 -----
-
-	# imPath = 'D:\\LSP\\Sinem\\trainingSetContours\\trainingSetSmallLarge'
-	# # UNet2D.setup(128,1,2,8,2,2,3,1,0.1,2,8)
-	# # UNet2D.train(imPath,logPath,modelPath,pmPath,500,100,40,True,20000,1,0)
-	# UNet2D.setup(128, 1, 2, 12, 2, 2, 3, 4, 0.1, 4, 8)
-	# UNet2D.train(imPath, logPath, modelPath, pmPath, 1600, 400, 500, False, 150000, 1, 0)
-	# UNet2D.deploy(imPath,100,modelPath,pmPath,1,0)
-
-	# I = im2double(tifread('/home/mc457/files/CellBiology/IDAC/Marcelo/Etc/UNetTestSets/SinemSaka_NucleiSegmentation_SingleImageInferenceTest3.tif'))
-	# UNet2D.singleImageInferenceSetup(modelPath,0)
-	# J = UNet2D.singleImageInference(I,'accumulate',0)
-	# UNet2D.singleImageInferenceCleanup()
-	# # imshowlist([I,J])
-	# # sys.exit(0)
-	# # tifwrite(np.uint8(255*I),'/home/mc457/Workspace/I1.tif')
-	# # tifwrite(np.uint8(255*J),'/home/mc457/Workspace/I2.tif')
-	# K = np.zeros((2,I.shape[0],I.shape[1]))
-	# K[0,:,:] = I
-	# K[1,:,:] = J
-	# tifwrite(np.uint8(255*K),'/home/mc457/Workspace/Sinem_NucSeg.tif')
-
-	UNet2D.singleImageInferenceSetup(modelPath, 1)
-	imagePath ='Y:/sorger/data/RareCyte/Clarence/NKI_TMA'
-	sampleList = glob.glob(imagePath + '/ZTMA_18_810*')
-	dapiChannel = 0
-	for iSample in sampleList:
-		# fileList = glob.glob(iSample + '//dearray//*.tif')
-		fileList = [x for x in glob.glob(iSample + '/dearray/*.tif') if x != (iSample+'/dearray\\TMA_MAP.tif')]
-		print(fileList)
-		for iFile in fileList:
-			fileName = os.path.basename(iFile)
-			fileNamePrefix = fileName.split(os.extsep, 1)
-			I = tifffile.imread(iFile, key=dapiChannel)
-			I = im2double(sk.rescale_intensity(I, in_range=(np.min(I), np.max(I)), out_range=(0, 64424)))
-			# I=np.moveaxis(I,0,-1)
-			# I=I[:,:,0]
-			hsize = int((float(I.shape[0])*float(1)))
-			vsize = int((float(I.shape[1])*float(1)))
-			I = resize(I,(hsize,vsize))
-				#I = im2double(tifread('D:\\LSP\\cycif\\Unet\\Caitlin\\E - 04(fld 8 wv UV - DAPI)downsampled.tif'))
-			outputPath = iSample + '//prob_maps'
-			if not os.path.exists(outputPath):
-				os.makedirs(outputPath)
-			K = np.zeros((2,I.shape[0],I.shape[1]))
-			contours = UNet2D.singleImageInference(I,'accumulate',1)
-			K[1,:,:] = I
-			K[0,:,:] = contours
-			tifwrite(np.uint8(255 * K),
-					 outputPath + '//' + fileNamePrefix[0] + '_ContoursPM_' + str(dapiChannel + 1) + '.tif')
-			del K
-			K = np.zeros((1, I.shape[0], I.shape[1]))
-			nuclei = UNet2D.singleImageInference(I,'accumulate',2)
-			K[0, :, :] = nuclei
-			tifwrite(np.uint8(255 * K),
-					 outputPath + '//' + fileNamePrefix[0] + '_NucleiPM_' + str(dapiChannel + 1) + '.tif')
-			del K
-	UNet2D.singleImageInferenceCleanup()
-
-
-	# ----- test 2 -----
-
-	# imPath = '/home/mc457/files/CellBiology/IDAC/Marcelo/Etc/UNetTestSets/ClarenceYapp_NucleiSegmentation'
-	# UNet2D.setup(128,1,2,8,2,2,3,1,0.1,3,4)
-	# UNet2D.train(imPath,logPath,modelPath,pmPath,800,100,100,False,10,1)
-	# UNet2D.deploy(imPath,100,modelPath,pmPath,1)
-
-
-	# ----- test 3 -----
-
-	# imPath = '/home/mc457/files/CellBiology/IDAC/Marcelo/Etc/UNetTestSets/CarmanLi_CellTypeSegmentation'
-	# # UNet2D.setup(256,1,2,8,2,2,3,1,0.1,3,4)
-	# # UNet2D.train(imPath,logPath,modelPath,pmPath,1400,100,164,False,10000,1)
-	# UNet2D.deploy(imPath,164,modelPath,pmPath,1)
-
-
-	# ----- test 4 -----
-
-	# imPath = '/home/cicconet/Downloads/TrainSet1'
-	# UNet2D.setup(64,1,2,8,2,2,3,1,0.1,3,4)
-	# UNet2D.train(imPath,logPath,modelPath,pmPath,200,8,8,False,2000,1,0)
-	# # UNet2D.deploy(imPath,164,modelPath,pmPath,1)
\ No newline at end of file
--- a/batchUNet2DtCycif.py	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,553 +0,0 @@
-import numpy as np
-from scipy import misc
-import tensorflow as tf
-import shutil
-import scipy.io as sio
-import os,fnmatch,glob
-import skimage.exposure as sk
-
-import sys
-sys.path.insert(0, 'C:\\Users\\Clarence\\Documents\\UNet code\\ImageScience')
-from toolbox.imtools import *
-from toolbox.ftools import *
-from toolbox.PartitionOfImage import PI2D
-
-
-def concat3(lst):
-		return tf.concat(lst,3)
-
-class UNet2D:
-	hp = None # hyper-parameters
-	nn = None # network
-	tfTraining = None # if training or not (to handle batch norm)
-	tfData = None # data placeholder
-	Session = None
-	DatasetMean = 0
-	DatasetStDev = 0
-
-	def setupWithHP(hp):
-		UNet2D.setup(hp['imSize'],
-					 hp['nChannels'],
-					 hp['nClasses'],
-					 hp['nOut0'],
-					 hp['featMapsFact'],
-					 hp['downSampFact'],
-					 hp['ks'],
-					 hp['nExtraConvs'],
-					 hp['stdDev0'],
-					 hp['nLayers'],
-					 hp['batchSize'])
-
-	def setup(imSize,nChannels,nClasses,nOut0,featMapsFact,downSampFact,kernelSize,nExtraConvs,stdDev0,nDownSampLayers,batchSize):
-		UNet2D.hp = {'imSize':imSize,
-					 'nClasses':nClasses,
-					 'nChannels':nChannels,
-					 'nExtraConvs':nExtraConvs,
-					 'nLayers':nDownSampLayers,
-					 'featMapsFact':featMapsFact,
-					 'downSampFact':downSampFact,
-					 'ks':kernelSize,
-					 'nOut0':nOut0,
-					 'stdDev0':stdDev0,
-					 'batchSize':batchSize}
-
-		nOutX = [UNet2D.hp['nChannels'],UNet2D.hp['nOut0']]
-		dsfX = []
-		for i in range(UNet2D.hp['nLayers']):
-			nOutX.append(nOutX[-1]*UNet2D.hp['featMapsFact'])
-			dsfX.append(UNet2D.hp['downSampFact'])
-
-
-		# --------------------------------------------------
-		# downsampling layer
-		# --------------------------------------------------
-
-		with tf.name_scope('placeholders'):
-			UNet2D.tfTraining = tf.placeholder(tf.bool, name='training')
-			UNet2D.tfData = tf.placeholder("float", shape=[None,UNet2D.hp['imSize'],UNet2D.hp['imSize'],UNet2D.hp['nChannels']],name='data')
-
-		def down_samp_layer(data,index):
-			with tf.name_scope('ld%d' % index):
-				ldXWeights1 = tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index], nOutX[index+1]], stddev=stdDev0),name='kernel1')
-				ldXWeightsExtra = []
-				for i in range(nExtraConvs):
-					ldXWeightsExtra.append(tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index+1], nOutX[index+1]], stddev=stdDev0),name='kernelExtra%d' % i))
-				
-				c00 = tf.nn.conv2d(data, ldXWeights1, strides=[1, 1, 1, 1], padding='SAME')
-				for i in range(nExtraConvs):
-					c00 = tf.nn.conv2d(tf.nn.relu(c00), ldXWeightsExtra[i], strides=[1, 1, 1, 1], padding='SAME')
-
-				ldXWeightsShortcut = tf.Variable(tf.truncated_normal([1, 1, nOutX[index], nOutX[index+1]], stddev=stdDev0),name='shortcutWeights')
-				shortcut = tf.nn.conv2d(data, ldXWeightsShortcut, strides=[1, 1, 1, 1], padding='SAME')
-
-				bn = tf.layers.batch_normalization(tf.nn.relu(c00+shortcut), training=UNet2D.tfTraining)
-
-				return tf.nn.max_pool(bn, ksize=[1, dsfX[index], dsfX[index], 1], strides=[1, dsfX[index], dsfX[index], 1], padding='SAME',name='maxpool')
-
-		# --------------------------------------------------
-		# bottom layer
-		# --------------------------------------------------
-
-		with tf.name_scope('lb'):
-			lbWeights1 = tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[UNet2D.hp['nLayers']], nOutX[UNet2D.hp['nLayers']+1]], stddev=stdDev0),name='kernel1')
-			def lb(hidden):
-				return tf.nn.relu(tf.nn.conv2d(hidden, lbWeights1, strides=[1, 1, 1, 1], padding='SAME'),name='conv')
-
-		# --------------------------------------------------
-		# downsampling
-		# --------------------------------------------------
-
-		with tf.name_scope('downsampling'):    
-			dsX = []
-			dsX.append(UNet2D.tfData)
-
-			for i in range(UNet2D.hp['nLayers']):
-				dsX.append(down_samp_layer(dsX[i],i))
-
-			b = lb(dsX[UNet2D.hp['nLayers']])
-
-		# --------------------------------------------------
-		# upsampling layer
-		# --------------------------------------------------
-
-		def up_samp_layer(data,index):
-			with tf.name_scope('lu%d' % index):
-				luXWeights1    = tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index+1], nOutX[index+2]], stddev=stdDev0),name='kernel1')
-				luXWeights2    = tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index]+nOutX[index+1], nOutX[index+1]], stddev=stdDev0),name='kernel2')
-				luXWeightsExtra = []
-				for i in range(nExtraConvs):
-					luXWeightsExtra.append(tf.Variable(tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index+1], nOutX[index+1]], stddev=stdDev0),name='kernel2Extra%d' % i))
-				
-				outSize = UNet2D.hp['imSize']
-				for i in range(index):
-					outSize /= dsfX[i]
-				outSize = int(outSize)
-
-				outputShape = [UNet2D.hp['batchSize'],outSize,outSize,nOutX[index+1]]
-				us = tf.nn.relu(tf.nn.conv2d_transpose(data, luXWeights1, outputShape, strides=[1, dsfX[index], dsfX[index], 1], padding='SAME'),name='conv1')
-				cc = concat3([dsX[index],us]) 
-				cv = tf.nn.relu(tf.nn.conv2d(cc, luXWeights2, strides=[1, 1, 1, 1], padding='SAME'),name='conv2')
-				for i in range(nExtraConvs):
-					cv = tf.nn.relu(tf.nn.conv2d(cv, luXWeightsExtra[i], strides=[1, 1, 1, 1], padding='SAME'),name='conv2Extra%d' % i)
-				return cv
-
-		# --------------------------------------------------
-		# final (top) layer
-		# --------------------------------------------------
-
-		with tf.name_scope('lt'):
-			ltWeights1    = tf.Variable(tf.truncated_normal([1, 1, nOutX[1], nClasses], stddev=stdDev0),name='kernel')
-			def lt(hidden):
-				return tf.nn.conv2d(hidden, ltWeights1, strides=[1, 1, 1, 1], padding='SAME',name='conv')
-
-
-		# --------------------------------------------------
-		# upsampling
-		# --------------------------------------------------
-
-		with tf.name_scope('upsampling'):
-			usX = []
-			usX.append(b)
-
-			for i in range(UNet2D.hp['nLayers']):
-				usX.append(up_samp_layer(usX[i],UNet2D.hp['nLayers']-1-i))
-
-			t = lt(usX[UNet2D.hp['nLayers']])
-
-
-		sm = tf.nn.softmax(t,-1)
-		UNet2D.nn = sm
-
-
-	def train(imPath,logPath,modelPath,pmPath,nTrain,nValid,nTest,restoreVariables,nSteps,gpuIndex,testPMIndex):
-		os.environ['CUDA_VISIBLE_DEVICES']= '%d' % gpuIndex
-
-		outLogPath = logPath
-		trainWriterPath = pathjoin(logPath,'Train')
-		validWriterPath = pathjoin(logPath,'Valid')
-		outModelPath = pathjoin(modelPath,'model.ckpt')
-		outPMPath = pmPath
-		
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-		nClasses = UNet2D.hp['nClasses']
-
-		# --------------------------------------------------
-		# data
-		# --------------------------------------------------
-
-		Train = np.zeros((nTrain,imSize,imSize,nChannels))
-		Valid = np.zeros((nValid,imSize,imSize,nChannels))
-		Test = np.zeros((nTest,imSize,imSize,nChannels))
-		LTrain = np.zeros((nTrain,imSize,imSize,nClasses))
-		LValid = np.zeros((nValid,imSize,imSize,nClasses))
-		LTest = np.zeros((nTest,imSize,imSize,nClasses))
-
-		print('loading data, computing mean / st dev')
-		if not os.path.exists(modelPath):
-			os.makedirs(modelPath)
-		if restoreVariables:
-			datasetMean = loadData(pathjoin(modelPath,'datasetMean.data'))
-			datasetStDev = loadData(pathjoin(modelPath,'datasetStDev.data'))
-		else:
-			datasetMean = 0
-			datasetStDev = 0
-			for iSample in range(nTrain+nValid+nTest):
-				I = im2double(tifread('%s/I%05d_Img.tif' % (imPath,iSample)))
-				datasetMean += np.mean(I)
-				datasetStDev += np.std(I)
-			datasetMean /= (nTrain+nValid+nTest)
-			datasetStDev /= (nTrain+nValid+nTest)
-			saveData(datasetMean, pathjoin(modelPath,'datasetMean.data'))
-			saveData(datasetStDev, pathjoin(modelPath,'datasetStDev.data'))
-
-		perm = np.arange(nTrain+nValid+nTest)
-		np.random.shuffle(perm)
-
-		for iSample in range(0, nTrain):
-			path = '%s/I%05d_Img.tif' % (imPath,perm[iSample])
-			im = im2double(tifread(path))
-			Train[iSample,:,:,0] = (im-datasetMean)/datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath,perm[iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LTrain[iSample,:,:,i] = (im == i+1)
-
-		for iSample in range(0, nValid):
-			path = '%s/I%05d_Img.tif' % (imPath,perm[nTrain+iSample])
-			im = im2double(tifread(path))
-			Valid[iSample,:,:,0] = (im-datasetMean)/datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath,perm[nTrain+iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LValid[iSample,:,:,i] = (im == i+1)
-
-		for iSample in range(0, nTest):
-			path = '%s/I%05d_Img.tif' % (imPath,perm[nTrain+nValid+iSample])
-			im = im2double(tifread(path))
-			Test[iSample,:,:,0] = (im-datasetMean)/datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath,perm[nTrain+nValid+iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LTest[iSample,:,:,i] = (im == i+1)
-
-		# --------------------------------------------------
-		# optimization
-		# --------------------------------------------------
-
-		tfLabels = tf.placeholder("float", shape=[None,imSize,imSize,nClasses],name='labels')
-
-		globalStep = tf.Variable(0,trainable=False)
-		learningRate0 = 0.01
-		decaySteps = 1000
-		decayRate = 0.95
-		learningRate = tf.train.exponential_decay(learningRate0,globalStep,decaySteps,decayRate,staircase=True)
-
-		with tf.name_scope('optim'):
-			loss = tf.reduce_mean(-tf.reduce_sum(tf.multiply(tfLabels,tf.log(UNet2D.nn)),3))
-			updateOps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
-			# optimizer = tf.train.MomentumOptimizer(1e-3,0.9)
-			optimizer = tf.train.MomentumOptimizer(learningRate,0.9)
-			# optimizer = tf.train.GradientDescentOptimizer(learningRate)
-			with tf.control_dependencies(updateOps):
-				optOp = optimizer.minimize(loss,global_step=globalStep)
-
-		with tf.name_scope('eval'):
-			error = []
-			for iClass in range(nClasses):
-				labels0 = tf.reshape(tf.to_int32(tf.slice(tfLabels,[0,0,0,iClass],[-1,-1,-1,1])),[batchSize,imSize,imSize])
-				predict0 = tf.reshape(tf.to_int32(tf.equal(tf.argmax(UNet2D.nn,3),iClass)),[batchSize,imSize,imSize])
-				correct = tf.multiply(labels0,predict0)
-				nCorrect0 = tf.reduce_sum(correct)
-				nLabels0 = tf.reduce_sum(labels0)
-				error.append(1-tf.to_float(nCorrect0)/tf.to_float(nLabels0))
-			errors = tf.tuple(error)
-
-		# --------------------------------------------------
-		# inspection
-		# --------------------------------------------------
-
-		with tf.name_scope('scalars'):
-			tf.summary.scalar('avg_cross_entropy', loss)
-			for iClass in range(nClasses):
-				tf.summary.scalar('avg_pixel_error_%d' % iClass, error[iClass])
-			tf.summary.scalar('learning_rate', learningRate)
-		with tf.name_scope('images'):
-			split0 = tf.slice(UNet2D.nn,[0,0,0,0],[-1,-1,-1,1])
-			split1 = tf.slice(UNet2D.nn,[0,0,0,1],[-1,-1,-1,1])
-			if nClasses > 2:
-				split2 = tf.slice(UNet2D.nn,[0,0,0,2],[-1,-1,-1,1])
-			tf.summary.image('pm0',split0)
-			tf.summary.image('pm1',split1)
-			if nClasses > 2:
-				tf.summary.image('pm2',split2)
-		merged = tf.summary.merge_all()
-
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # config parameter needed to save variables when using GPU
-
-		if os.path.exists(outLogPath):
-			shutil.rmtree(outLogPath)
-		trainWriter = tf.summary.FileWriter(trainWriterPath, sess.graph)
-		validWriter = tf.summary.FileWriter(validWriterPath, sess.graph)
-
-		if restoreVariables:
-			saver.restore(sess, outModelPath)
-			print("Model restored.")
-		else:
-			sess.run(tf.global_variables_initializer())
-
-		# --------------------------------------------------
-		# train
-		# --------------------------------------------------
-
-		batchData = np.zeros((batchSize,imSize,imSize,nChannels))
-		batchLabels = np.zeros((batchSize,imSize,imSize,nClasses))
-		for i in range(nSteps):
-			# train
-
-			perm = np.arange(nTrain)
-			np.random.shuffle(perm)
-
-			for j in range(batchSize):
-				batchData[j,:,:,:] = Train[perm[j],:,:,:]
-				batchLabels[j,:,:,:] = LTrain[perm[j],:,:,:]
-
-			summary,_ = sess.run([merged,optOp],feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 1})
-			trainWriter.add_summary(summary, i)
-
-			# validation
-
-			perm = np.arange(nValid)
-			np.random.shuffle(perm)
-
-			for j in range(batchSize):
-				batchData[j,:,:,:] = Valid[perm[j],:,:,:]
-				batchLabels[j,:,:,:] = LValid[perm[j],:,:,:]
-
-			summary, es = sess.run([merged, errors],feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 0})
-			validWriter.add_summary(summary, i)
-
-			e = np.mean(es)
-			print('step %05d, e: %f' % (i,e))
-
-			if i == 0:
-				if restoreVariables:
-					lowestError = e
-				else:
-					lowestError = np.inf
-
-			if np.mod(i,100) == 0 and e < lowestError:
-				lowestError = e
-				print("Model saved in file: %s" % saver.save(sess, outModelPath))
-
-
-		# --------------------------------------------------
-		# test
-		# --------------------------------------------------
-
-		if not os.path.exists(outPMPath):
-			os.makedirs(outPMPath)
-
-		for i in range(nTest):
-			j = np.mod(i,batchSize)
-
-			batchData[j,:,:,:] = Test[i,:,:,:]
-			batchLabels[j,:,:,:] = LTest[i,:,:,:]
-		 
-			if j == batchSize-1 or i == nTest-1:
-
-				output = sess.run(UNet2D.nn,feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 0})
-
-				for k in range(j+1):
-					pm = output[k,:,:,testPMIndex]
-					gt = batchLabels[k,:,:,testPMIndex]
-					im = np.sqrt(normalize(batchData[k,:,:,0]))
-					imwrite(np.uint8(255*np.concatenate((im,np.concatenate((pm,gt),axis=1)),axis=1)),'%s/I%05d.png' % (outPMPath,i-j+k+1))
-
-
-		# --------------------------------------------------
-		# save hyper-parameters, clean-up
-		# --------------------------------------------------
-
-		saveData(UNet2D.hp,pathjoin(modelPath,'hp.data'))
-
-		trainWriter.close()
-		validWriter.close()
-		sess.close()
-
-	def deploy(imPath,nImages,modelPath,pmPath,gpuIndex,pmIndex):
-		os.environ['CUDA_VISIBLE_DEVICES']= '%d' % gpuIndex
-
-		variablesPath = pathjoin(modelPath,'model.ckpt')
-		outPMPath = pmPath
-
-		hp = loadData(pathjoin(modelPath,'hp.data'))
-		UNet2D.setupWithHP(hp)
-		
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-		nClasses = UNet2D.hp['nClasses']
-
-		# --------------------------------------------------
-		# data
-		# --------------------------------------------------
-
-		Data = np.zeros((nImages,imSize,imSize,nChannels))
-
-		datasetMean = loadData(pathjoin(modelPath,'datasetMean.data'))
-		datasetStDev = loadData(pathjoin(modelPath,'datasetStDev.data'))
-
-		for iSample in range(0, nImages):
-			path = '%s/I%05d_Img.tif' % (imPath,iSample)
-			im = im2double(tifread(path))
-			Data[iSample,:,:,0] = (im-datasetMean)/datasetStDev
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # config parameter needed to save variables when using GPU
-
-		saver.restore(sess, variablesPath)
-		print("Model restored.")
-
-		# --------------------------------------------------
-		# deploy
-		# --------------------------------------------------
-
-		batchData = np.zeros((batchSize,imSize,imSize,nChannels))
-
-		if not os.path.exists(outPMPath):
-			os.makedirs(outPMPath)
-
-		for i in range(nImages):
-			print(i,nImages)
-
-			j = np.mod(i,batchSize)
-
-			batchData[j,:,:,:] = Data[i,:,:,:]
-		 
-			if j == batchSize-1 or i == nImages-1:
-
-				output = sess.run(UNet2D.nn,feed_dict={UNet2D.tfData: batchData, UNet2D.tfTraining: 0})
-
-				for k in range(j+1):
-					pm = output[k,:,:,pmIndex]
-					im = np.sqrt(normalize(batchData[k,:,:,0]))
-					# imwrite(np.uint8(255*np.concatenate((im,pm),axis=1)),'%s/I%05d.png' % (outPMPath,i-j+k+1))
-					imwrite(np.uint8(255*im),'%s/I%05d_Im.png' % (outPMPath,i-j+k+1))
-					imwrite(np.uint8(255*pm),'%s/I%05d_PM.png' % (outPMPath,i-j+k+1))
-
-
-		# --------------------------------------------------
-		# clean-up
-		# --------------------------------------------------
-
-		sess.close()
-
-	def singleImageInferenceSetup(modelPath,gpuIndex):
-		#os.environ['CUDA_VISIBLE_DEVICES']= '%d' % gpuIndex
-
-		variablesPath = pathjoin(modelPath,'model.ckpt')
-
-		hp = loadData(pathjoin(modelPath,'hp.data'))
-		UNet2D.setupWithHP(hp)
-
-		UNet2D.DatasetMean = loadData(pathjoin(modelPath,'datasetMean.data'))
-		UNet2D.DatasetStDev = loadData(pathjoin(modelPath,'datasetStDev.data'))
-		print(UNet2D.DatasetMean)
-		print(UNet2D.DatasetStDev)
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		UNet2D.Session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # config parameter needed to save variables when using GPU
-
-		saver.restore(UNet2D.Session, variablesPath)
-		print("Model restored.")
-
-	def singleImageInferenceCleanup():
-		UNet2D.Session.close()
-
-	def singleImageInference(image,mode,pmIndex):
-		print('Inference...')
-
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-
-		PI2D.setup(image,imSize,int(imSize/8),mode)
-		PI2D.createOutput(nChannels)
-
-		batchData = np.zeros((batchSize,imSize,imSize,nChannels))
-		for i in range(PI2D.NumPatches):
-			j = np.mod(i,batchSize)
-			batchData[j,:,:,0] = (PI2D.getPatch(i)-UNet2D.DatasetMean)/UNet2D.DatasetStDev
-			if j == batchSize-1 or i == PI2D.NumPatches-1:
-				output = UNet2D.Session.run(UNet2D.nn,feed_dict={UNet2D.tfData: batchData, UNet2D.tfTraining: 0})
-				for k in range(j+1):
-					pm = output[k,:,:,pmIndex]
-					PI2D.patchOutput(i-j+k,pm)
-					# PI2D.patchOutput(i-j+k,normalize(imgradmag(PI2D.getPatch(i-j+k),1)))
-
-		return PI2D.getValidOutput()
-
-
-if __name__ == '__main__':
-	logPath = 'C://Users//Clarence//Documents//UNet code//TFLogs'
-	modelPath = 'D:\\LSP\\UNet\\tonsil20x1bin1chan\\TFModel - 3class 16 kernels 5ks 2 layers'
-	pmPath = 'C://Users//Clarence//Documents//UNet code//TFProbMaps'
-
-
-
-	UNet2D.singleImageInferenceSetup(modelPath, 0)
-	imagePath = 'D:\\LSP\\cycif\\testsets'
-	sampleList = glob.glob(imagePath + '//exemplar-001*')
-	dapiChannel = 0
-	dsFactor = 1
-	for iSample in sampleList:
-		fileList = glob.glob(iSample + '//registration//*.tif')
-		print(fileList)
-		for iFile in fileList:
-			fileName = os.path.basename(iFile)
-			fileNamePrefix = fileName.split(os.extsep, 1)
-			I = tifffile.imread(iFile, key=dapiChannel)
-			rawI = I
-			hsize = int((float(I.shape[0])*float(dsFactor)))
-			vsize = int((float(I.shape[1])*float(dsFactor)))
-			I = resize(I,(hsize,vsize))
-			I = im2double(sk.rescale_intensity(I, in_range=(np.min(I), np.max(I)), out_range=(0, 0.983)))
-			rawI = im2double(rawI)/np.max(im2double(rawI))
-			outputPath = iSample + '//prob_maps'
-			if not os.path.exists(outputPath):
-				os.makedirs(outputPath)
-			K = np.zeros((2,rawI.shape[0],rawI.shape[1]))
-			contours = UNet2D.singleImageInference(I,'accumulate',1)
-			hsize = int((float(I.shape[0]) * float(1/dsFactor)))
-			vsize = int((float(I.shape[1]) * float(1/dsFactor)))
-			contours = resize(contours, (rawI.shape[0], rawI.shape[1]))
-			K[1,:,:] = rawI
-			K[0,:,:] = contours
-			tifwrite(np.uint8(255 * K),
-					 outputPath + '//' + fileNamePrefix[0] + '_ContoursPM_' + str(dapiChannel + 1) + '.tif')
-			del K
-			K = np.zeros((1, rawI.shape[0], rawI.shape[1]))
-			nuclei = UNet2D.singleImageInference(I,'accumulate',2)
-			nuclei = resize(nuclei, (rawI.shape[0], rawI.shape[1]))
-			K[0, :, :] = nuclei
-			tifwrite(np.uint8(255 * K),
-					 outputPath + '//' + fileNamePrefix[0] + '_NucleiPM_' + str(dapiChannel + 1) + '.tif')
-			del K
-	UNet2D.singleImageInferenceCleanup()
-
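Note: the `__main__` block deleted above drove tiled inference end to end — read one channel of each registered TIFF, rescale it, run UNet2D.singleImageInference once for the contour class and once for the nuclei class, resize each probability map back to the raw image size, and write the maps as uint8 TIFFs under prob_maps. A minimal self-contained sketch of that flow, with a stub infer() standing in for the TensorFlow model and plain tifffile/scikit-image calls in place of the deleted toolbox helpers (all names below are illustrative, not part of the original module):

    import numpy as np
    import tifffile
    from skimage.transform import resize

    def infer(img, class_index):
        # stand-in for UNet2D.singleImageInference: returns a probability map in [0, 1]
        return np.clip(img + 0.01 * class_index, 0.0, 1.0)

    def run_one(in_tif, out_prefix, channel=0, ds_factor=1.0):
        raw = tifffile.imread(in_tif, key=channel).astype(np.float64)
        raw /= max(raw.max(), 1e-12)                          # normalize raw image to [0, 1]
        small = resize(raw, (int(raw.shape[0] * ds_factor),
                             int(raw.shape[1] * ds_factor)))  # optional down/upsampling
        for name, class_index in (('ContoursPM', 1), ('NucleiPM', 2)):
            pm = infer(small, class_index)                    # class probability map
            pm = resize(pm, raw.shape)                        # back to raw resolution
            tifffile.imwrite('%s_%s_%d.tif' % (out_prefix, name, channel + 1),
                             np.uint8(255 * pm))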
--- a/batchUnMicst.py	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,588 +0,0 @@
-import numpy as np
-from scipy import misc
-import tensorflow as tf
-import shutil
-import scipy.io as sio
-import os, fnmatch, PIL, glob
-import skimage.exposure as sk
-import argparse
-
-import sys
-
-sys.path.insert(0, 'C:\\Users\\Public\\Documents\\ImageScience')
-from toolbox.imtools import *
-from toolbox.ftools import *
-from toolbox.PartitionOfImage import PI2D
-
-
-def concat3(lst):
-	return tf.concat(lst, 3)
-
-
-class UNet2D:
-	hp = None  # hyper-parameters
-	nn = None  # network
-	tfTraining = None  # if training or not (to handle batch norm)
-	tfData = None  # data placeholder
-	Session = None
-	DatasetMean = 0
-	DatasetStDev = 0
-
-	def setupWithHP(hp):
-		UNet2D.setup(hp['imSize'],
-					 hp['nChannels'],
-					 hp['nClasses'],
-					 hp['nOut0'],
-					 hp['featMapsFact'],
-					 hp['downSampFact'],
-					 hp['ks'],
-					 hp['nExtraConvs'],
-					 hp['stdDev0'],
-					 hp['nLayers'],
-					 hp['batchSize'])
-
-	def setup(imSize, nChannels, nClasses, nOut0, featMapsFact, downSampFact, kernelSize, nExtraConvs, stdDev0,
-			  nDownSampLayers, batchSize):
-		UNet2D.hp = {'imSize': imSize,
-					 'nClasses': nClasses,
-					 'nChannels': nChannels,
-					 'nExtraConvs': nExtraConvs,
-					 'nLayers': nDownSampLayers,
-					 'featMapsFact': featMapsFact,
-					 'downSampFact': downSampFact,
-					 'ks': kernelSize,
-					 'nOut0': nOut0,
-					 'stdDev0': stdDev0,
-					 'batchSize': batchSize}
-
-		nOutX = [UNet2D.hp['nChannels'], UNet2D.hp['nOut0']]
-		dsfX = []
-		for i in range(UNet2D.hp['nLayers']):
-			nOutX.append(nOutX[-1] * UNet2D.hp['featMapsFact'])
-			dsfX.append(UNet2D.hp['downSampFact'])
-
-		# --------------------------------------------------
-		# downsampling layer
-		# --------------------------------------------------
-
-		with tf.name_scope('placeholders'):
-			UNet2D.tfTraining = tf.placeholder(tf.bool, name='training')
-			UNet2D.tfData = tf.placeholder("float", shape=[None, UNet2D.hp['imSize'], UNet2D.hp['imSize'],
-														   UNet2D.hp['nChannels']], name='data')
-
-		def down_samp_layer(data, index):
-			with tf.name_scope('ld%d' % index):
-				ldXWeights1 = tf.Variable(
-					tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index], nOutX[index + 1]],
-										stddev=stdDev0), name='kernel1')
-				ldXWeightsExtra = []
-				for i in range(nExtraConvs):
-					ldXWeightsExtra.append(tf.Variable(
-						tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index + 1], nOutX[index + 1]],
-											stddev=stdDev0), name='kernelExtra%d' % i))
-
-				c00 = tf.nn.conv2d(data, ldXWeights1, strides=[1, 1, 1, 1], padding='SAME')
-				for i in range(nExtraConvs):
-					c00 = tf.nn.conv2d(tf.nn.relu(c00), ldXWeightsExtra[i], strides=[1, 1, 1, 1], padding='SAME')
-
-				ldXWeightsShortcut = tf.Variable(
-					tf.truncated_normal([1, 1, nOutX[index], nOutX[index + 1]], stddev=stdDev0), name='shortcutWeights')
-				shortcut = tf.nn.conv2d(data, ldXWeightsShortcut, strides=[1, 1, 1, 1], padding='SAME')
-
-				bn = tf.layers.batch_normalization(tf.nn.relu(c00 + shortcut), training=UNet2D.tfTraining)
-
-				return tf.nn.max_pool(bn, ksize=[1, dsfX[index], dsfX[index], 1],
-									  strides=[1, dsfX[index], dsfX[index], 1], padding='SAME', name='maxpool')
-
-		# --------------------------------------------------
-		# bottom layer
-		# --------------------------------------------------
-
-		with tf.name_scope('lb'):
-			lbWeights1 = tf.Variable(tf.truncated_normal(
-				[UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[UNet2D.hp['nLayers']], nOutX[UNet2D.hp['nLayers'] + 1]],
-				stddev=stdDev0), name='kernel1')
-
-			def lb(hidden):
-				return tf.nn.relu(tf.nn.conv2d(hidden, lbWeights1, strides=[1, 1, 1, 1], padding='SAME'), name='conv')
-
-		# --------------------------------------------------
-		# downsampling
-		# --------------------------------------------------
-
-		with tf.name_scope('downsampling'):
-			dsX = []
-			dsX.append(UNet2D.tfData)
-
-			for i in range(UNet2D.hp['nLayers']):
-				dsX.append(down_samp_layer(dsX[i], i))
-
-			b = lb(dsX[UNet2D.hp['nLayers']])
-
-		# --------------------------------------------------
-		# upsampling layer
-		# --------------------------------------------------
-
-		def up_samp_layer(data, index):
-			with tf.name_scope('lu%d' % index):
-				luXWeights1 = tf.Variable(
-					tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index + 1], nOutX[index + 2]],
-										stddev=stdDev0), name='kernel1')
-				luXWeights2 = tf.Variable(tf.truncated_normal(
-					[UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index] + nOutX[index + 1], nOutX[index + 1]],
-					stddev=stdDev0), name='kernel2')
-				luXWeightsExtra = []
-				for i in range(nExtraConvs):
-					luXWeightsExtra.append(tf.Variable(
-						tf.truncated_normal([UNet2D.hp['ks'], UNet2D.hp['ks'], nOutX[index + 1], nOutX[index + 1]],
-											stddev=stdDev0), name='kernel2Extra%d' % i))
-
-				outSize = UNet2D.hp['imSize']
-				for i in range(index):
-					outSize /= dsfX[i]
-				outSize = int(outSize)
-
-				outputShape = [UNet2D.hp['batchSize'], outSize, outSize, nOutX[index + 1]]
-				us = tf.nn.relu(
-					tf.nn.conv2d_transpose(data, luXWeights1, outputShape, strides=[1, dsfX[index], dsfX[index], 1],
-										   padding='SAME'), name='conv1')
-				cc = concat3([dsX[index], us])
-				cv = tf.nn.relu(tf.nn.conv2d(cc, luXWeights2, strides=[1, 1, 1, 1], padding='SAME'), name='conv2')
-				for i in range(nExtraConvs):
-					cv = tf.nn.relu(tf.nn.conv2d(cv, luXWeightsExtra[i], strides=[1, 1, 1, 1], padding='SAME'),
-									name='conv2Extra%d' % i)
-				return cv
-
-		# --------------------------------------------------
-		# final (top) layer
-		# --------------------------------------------------
-
-		with tf.name_scope('lt'):
-			ltWeights1 = tf.Variable(tf.truncated_normal([1, 1, nOutX[1], nClasses], stddev=stdDev0), name='kernel')
-
-			def lt(hidden):
-				return tf.nn.conv2d(hidden, ltWeights1, strides=[1, 1, 1, 1], padding='SAME', name='conv')
-
-		# --------------------------------------------------
-		# upsampling
-		# --------------------------------------------------
-
-		with tf.name_scope('upsampling'):
-			usX = []
-			usX.append(b)
-
-			for i in range(UNet2D.hp['nLayers']):
-				usX.append(up_samp_layer(usX[i], UNet2D.hp['nLayers'] - 1 - i))
-
-			t = lt(usX[UNet2D.hp['nLayers']])
-
-		sm = tf.nn.softmax(t, -1)
-		UNet2D.nn = sm
-
-	def train(imPath, logPath, modelPath, pmPath, nTrain, nValid, nTest, restoreVariables, nSteps, gpuIndex,
-			  testPMIndex):
-		os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpuIndex
-
-		outLogPath = logPath
-		trainWriterPath = pathjoin(logPath, 'Train')
-		validWriterPath = pathjoin(logPath, 'Valid')
-		outModelPath = pathjoin(modelPath, 'model.ckpt')
-		outPMPath = pmPath
-
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-		nClasses = UNet2D.hp['nClasses']
-
-		# --------------------------------------------------
-		# data
-		# --------------------------------------------------
-
-		Train = np.zeros((nTrain, imSize, imSize, nChannels))
-		Valid = np.zeros((nValid, imSize, imSize, nChannels))
-		Test = np.zeros((nTest, imSize, imSize, nChannels))
-		LTrain = np.zeros((nTrain, imSize, imSize, nClasses))
-		LValid = np.zeros((nValid, imSize, imSize, nClasses))
-		LTest = np.zeros((nTest, imSize, imSize, nClasses))
-
-		print('loading data, computing mean / st dev')
-		if not os.path.exists(modelPath):
-			os.makedirs(modelPath)
-		if restoreVariables:
-			datasetMean = loadData(pathjoin(modelPath, 'datasetMean.data'))
-			datasetStDev = loadData(pathjoin(modelPath, 'datasetStDev.data'))
-		else:
-			datasetMean = 0
-			datasetStDev = 0
-			for iSample in range(nTrain + nValid + nTest):
-				I = im2double(tifread('%s/I%05d_Img.tif' % (imPath, iSample)))
-				datasetMean += np.mean(I)
-				datasetStDev += np.std(I)
-			datasetMean /= (nTrain + nValid + nTest)
-			datasetStDev /= (nTrain + nValid + nTest)
-			saveData(datasetMean, pathjoin(modelPath, 'datasetMean.data'))
-			saveData(datasetStDev, pathjoin(modelPath, 'datasetStDev.data'))
-
-		perm = np.arange(nTrain + nValid + nTest)
-		np.random.shuffle(perm)
-
-		for iSample in range(0, nTrain):
-			path = '%s/I%05d_Img.tif' % (imPath, perm[iSample])
-			im = im2double(tifread(path))
-			Train[iSample, :, :, 0] = (im - datasetMean) / datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath, perm[iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LTrain[iSample, :, :, i] = (im == i + 1)
-
-		for iSample in range(0, nValid):
-			path = '%s/I%05d_Img.tif' % (imPath, perm[nTrain + iSample])
-			im = im2double(tifread(path))
-			Valid[iSample, :, :, 0] = (im - datasetMean) / datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath, perm[nTrain + iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LValid[iSample, :, :, i] = (im == i + 1)
-
-		for iSample in range(0, nTest):
-			path = '%s/I%05d_Img.tif' % (imPath, perm[nTrain + nValid + iSample])
-			im = im2double(tifread(path))
-			Test[iSample, :, :, 0] = (im - datasetMean) / datasetStDev
-			path = '%s/I%05d_Ant.tif' % (imPath, perm[nTrain + nValid + iSample])
-			im = tifread(path)
-			for i in range(nClasses):
-				LTest[iSample, :, :, i] = (im == i + 1)
-
-		# --------------------------------------------------
-		# optimization
-		# --------------------------------------------------
-
-		tfLabels = tf.placeholder("float", shape=[None, imSize, imSize, nClasses], name='labels')
-
-		globalStep = tf.Variable(0, trainable=False)
-		learningRate0 = 0.01
-		decaySteps = 1000
-		decayRate = 0.95
-		learningRate = tf.train.exponential_decay(learningRate0, globalStep, decaySteps, decayRate, staircase=True)
-
-		with tf.name_scope('optim'):
-			loss = tf.reduce_mean(-tf.reduce_sum(tf.multiply(tfLabels, tf.log(UNet2D.nn)), 3))
-			updateOps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
-			# optimizer = tf.train.MomentumOptimizer(1e-3,0.9)
-			optimizer = tf.train.MomentumOptimizer(learningRate, 0.9)
-			# optimizer = tf.train.GradientDescentOptimizer(learningRate)
-			with tf.control_dependencies(updateOps):
-				optOp = optimizer.minimize(loss, global_step=globalStep)
-
-		with tf.name_scope('eval'):
-			error = []
-			for iClass in range(nClasses):
-				labels0 = tf.reshape(tf.to_int32(tf.slice(tfLabels, [0, 0, 0, iClass], [-1, -1, -1, 1])),
-									 [batchSize, imSize, imSize])
-				predict0 = tf.reshape(tf.to_int32(tf.equal(tf.argmax(UNet2D.nn, 3), iClass)),
-									  [batchSize, imSize, imSize])
-				correct = tf.multiply(labels0, predict0)
-				nCorrect0 = tf.reduce_sum(correct)
-				nLabels0 = tf.reduce_sum(labels0)
-				error.append(1 - tf.to_float(nCorrect0) / tf.to_float(nLabels0))
-			errors = tf.tuple(error)
-
-		# --------------------------------------------------
-		# inspection
-		# --------------------------------------------------
-
-		with tf.name_scope('scalars'):
-			tf.summary.scalar('avg_cross_entropy', loss)
-			for iClass in range(nClasses):
-				tf.summary.scalar('avg_pixel_error_%d' % iClass, error[iClass])
-			tf.summary.scalar('learning_rate', learningRate)
-		with tf.name_scope('images'):
-			split0 = tf.slice(UNet2D.nn, [0, 0, 0, 0], [-1, -1, -1, 1])
-			split1 = tf.slice(UNet2D.nn, [0, 0, 0, 1], [-1, -1, -1, 1])
-			if nClasses > 2:
-				split2 = tf.slice(UNet2D.nn, [0, 0, 0, 2], [-1, -1, -1, 1])
-			tf.summary.image('pm0', split0)
-			tf.summary.image('pm1', split1)
-			if nClasses > 2:
-				tf.summary.image('pm2', split2)
-		merged = tf.summary.merge_all()
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		sess = tf.Session(config=tf.ConfigProto(
-			allow_soft_placement=True))  # config parameter needed to save variables when using GPU
-
-		if os.path.exists(outLogPath):
-			shutil.rmtree(outLogPath)
-		trainWriter = tf.summary.FileWriter(trainWriterPath, sess.graph)
-		validWriter = tf.summary.FileWriter(validWriterPath, sess.graph)
-
-		if restoreVariables:
-			saver.restore(sess, outModelPath)
-			print("Model restored.")
-		else:
-			sess.run(tf.global_variables_initializer())
-
-		# --------------------------------------------------
-		# train
-		# --------------------------------------------------
-
-		batchData = np.zeros((batchSize, imSize, imSize, nChannels))
-		batchLabels = np.zeros((batchSize, imSize, imSize, nClasses))
-		for i in range(nSteps):
-			# train
-
-			perm = np.arange(nTrain)
-			np.random.shuffle(perm)
-
-			for j in range(batchSize):
-				batchData[j, :, :, :] = Train[perm[j], :, :, :]
-				batchLabels[j, :, :, :] = LTrain[perm[j], :, :, :]
-
-			summary, _ = sess.run([merged, optOp],
-								  feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 1})
-			trainWriter.add_summary(summary, i)
-
-			# validation
-
-			perm = np.arange(nValid)
-			np.random.shuffle(perm)
-
-			for j in range(batchSize):
-				batchData[j, :, :, :] = Valid[perm[j], :, :, :]
-				batchLabels[j, :, :, :] = LValid[perm[j], :, :, :]
-
-			summary, es = sess.run([merged, errors],
-								   feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 0})
-			validWriter.add_summary(summary, i)
-
-			e = np.mean(es)
-			print('step %05d, e: %f' % (i, e))
-
-			if i == 0:
-				if restoreVariables:
-					lowestError = e
-				else:
-					lowestError = np.inf
-
-			if np.mod(i, 100) == 0 and e < lowestError:
-				lowestError = e
-				print("Model saved in file: %s" % saver.save(sess, outModelPath))
-
-		# --------------------------------------------------
-		# test
-		# --------------------------------------------------
-
-		if not os.path.exists(outPMPath):
-			os.makedirs(outPMPath)
-
-		for i in range(nTest):
-			j = np.mod(i, batchSize)
-
-			batchData[j, :, :, :] = Test[i, :, :, :]
-			batchLabels[j, :, :, :] = LTest[i, :, :, :]
-
-			if j == batchSize - 1 or i == nTest - 1:
-
-				output = sess.run(UNet2D.nn,
-								  feed_dict={UNet2D.tfData: batchData, tfLabels: batchLabels, UNet2D.tfTraining: 0})
-
-				for k in range(j + 1):
-					pm = output[k, :, :, testPMIndex]
-					gt = batchLabels[k, :, :, testPMIndex]
-					im = np.sqrt(normalize(batchData[k, :, :, 0]))
-					imwrite(np.uint8(255 * np.concatenate((im, np.concatenate((pm, gt), axis=1)), axis=1)),
-							'%s/I%05d.png' % (outPMPath, i - j + k + 1))
-
-		# --------------------------------------------------
-		# save hyper-parameters, clean-up
-		# --------------------------------------------------
-
-		saveData(UNet2D.hp, pathjoin(modelPath, 'hp.data'))
-
-		trainWriter.close()
-		validWriter.close()
-		sess.close()
-
-	def deploy(imPath, nImages, modelPath, pmPath, gpuIndex, pmIndex):
-		os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpuIndex
-
-		variablesPath = pathjoin(modelPath, 'model.ckpt')
-		outPMPath = pmPath
-
-		hp = loadData(pathjoin(modelPath, 'hp.data'))
-		UNet2D.setupWithHP(hp)
-
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-		nClasses = UNet2D.hp['nClasses']
-
-		# --------------------------------------------------
-		# data
-		# --------------------------------------------------
-
-		Data = np.zeros((nImages, imSize, imSize, nChannels))
-
-		datasetMean = loadData(pathjoin(modelPath, 'datasetMean.data'))
-		datasetStDev = loadData(pathjoin(modelPath, 'datasetStDev.data'))
-
-		for iSample in range(0, nImages):
-			path = '%s/I%05d_Img.tif' % (imPath, iSample)
-			im = im2double(tifread(path))
-			Data[iSample, :, :, 0] = (im - datasetMean) / datasetStDev
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		sess = tf.Session(config=tf.ConfigProto(
-			allow_soft_placement=True))  # config parameter needed to save variables when using GPU
-
-		saver.restore(sess, variablesPath)
-		print("Model restored.")
-
-		# --------------------------------------------------
-		# deploy
-		# --------------------------------------------------
-
-		batchData = np.zeros((batchSize, imSize, imSize, nChannels))
-
-		if not os.path.exists(outPMPath):
-			os.makedirs(outPMPath)
-
-		for i in range(nImages):
-			print(i, nImages)
-
-			j = np.mod(i, batchSize)
-
-			batchData[j, :, :, :] = Data[i, :, :, :]
-
-			if j == batchSize - 1 or i == nImages - 1:
-
-				output = sess.run(UNet2D.nn, feed_dict={UNet2D.tfData: batchData, UNet2D.tfTraining: 0})
-
-				for k in range(j + 1):
-					pm = output[k, :, :, pmIndex]
-					im = np.sqrt(normalize(batchData[k, :, :, 0]))
-					# imwrite(np.uint8(255*np.concatenate((im,pm),axis=1)),'%s/I%05d.png' % (outPMPath,i-j+k+1))
-					imwrite(np.uint8(255 * im), '%s/I%05d_Im.png' % (outPMPath, i - j + k + 1))
-					imwrite(np.uint8(255 * pm), '%s/I%05d_PM.png' % (outPMPath, i - j + k + 1))
-
-		# --------------------------------------------------
-		# clean-up
-		# --------------------------------------------------
-
-		sess.close()
-
-	def singleImageInferenceSetup(modelPath, gpuIndex):
-		os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpuIndex
-
-		variablesPath = pathjoin(modelPath, 'model.ckpt')
-
-		hp = loadData(pathjoin(modelPath, 'hp.data'))
-		UNet2D.setupWithHP(hp)
-
-		UNet2D.DatasetMean = loadData(pathjoin(modelPath, 'datasetMean.data'))
-		UNet2D.DatasetStDev = loadData(pathjoin(modelPath, 'datasetStDev.data'))
-		print(UNet2D.DatasetMean)
-		print(UNet2D.DatasetStDev)
-
-		# --------------------------------------------------
-		# session
-		# --------------------------------------------------
-
-		saver = tf.train.Saver()
-		UNet2D.Session = tf.Session(config=tf.ConfigProto(
-			allow_soft_placement=True))  # config parameter needed to save variables when using GPU
-
-		saver.restore(UNet2D.Session, variablesPath)
-		print("Model restored.")
-
-	def singleImageInferenceCleanup():
-		UNet2D.Session.close()
-
-	def singleImageInference(image, mode, pmIndex):
-		print('Inference...')
-
-		batchSize = UNet2D.hp['batchSize']
-		imSize = UNet2D.hp['imSize']
-		nChannels = UNet2D.hp['nChannels']
-
-		PI2D.setup(image, imSize, int(imSize / 8), mode)
-		PI2D.createOutput(nChannels)
-
-		batchData = np.zeros((batchSize, imSize, imSize, nChannels))
-		for i in range(PI2D.NumPatches):
-			j = np.mod(i, batchSize)
-			batchData[j, :, :, 0] = (PI2D.getPatch(i) - UNet2D.DatasetMean) / UNet2D.DatasetStDev
-			if j == batchSize - 1 or i == PI2D.NumPatches - 1:
-				output = UNet2D.Session.run(UNet2D.nn, feed_dict={UNet2D.tfData: batchData, UNet2D.tfTraining: 0})
-				for k in range(j + 1):
-					pm = output[k, :, :, pmIndex]
-					PI2D.patchOutput(i - j + k, pm)
-			# PI2D.patchOutput(i-j+k,normalize(imgradmag(PI2D.getPatch(i-j+k),1)))
-
-		return PI2D.getValidOutput()
-
-
-if __name__ == '__main__':
-	parser = argparse.ArgumentParser()
-	parser.add_argument("imagePath", help="path to the .tif file")
-	parser.add_argument("--channel", help="channel to perform inference on", type=int, default=0)
-	parser.add_argument("--TMA", help="specify if TMA", action="store_true")
-	parser.add_argument("--scalingFactor", help="factor by which to increase/decrease image size by", type=float,
-						default=1)
-	args = parser.parse_args()
-
-	logPath = ''
-	modelPath = 'D:\\LSP\\UNet\\tonsil20x1bin1chan\\TFModel - 3class 16 kernels 5ks 2 layers'
-	pmPath = ''
-
-	UNet2D.singleImageInferenceSetup(modelPath, 1)
-	imagePath = args.imagePath
-	sampleList = glob.glob(imagePath + '/exemplar*')
-	dapiChannel = args.channel
-	dsFactor = args.scalingFactor
-	for iSample in sampleList:
-		if args.TMA:
-			fileList = [x for x in glob.glob(iSample + '\\dearray\\*.tif') if x != (iSample + '\\dearray\\TMA_MAP.tif')]
-			print(iSample)
-		else:
-			fileList = glob.glob(iSample + '//registration//*ome.tif')
-		print(fileList)
-		for iFile in fileList:
-			fileName = os.path.basename(iFile)
-			fileNamePrefix = fileName.split(os.extsep, 1)
-			I = tifffile.imread(iFile, key=dapiChannel)
-			rawI = I
-			hsize = int((float(I.shape[0]) * float(dsFactor)))
-			vsize = int((float(I.shape[1]) * float(dsFactor)))
-			I = resize(I, (hsize, vsize))
-			I = im2double(sk.rescale_intensity(I, in_range=(np.min(I), np.max(I)), out_range=(0, 0.983)))
-			rawI = im2double(rawI) / np.max(im2double(rawI))
-			outputPath = iSample + '//prob_maps'
-			if not os.path.exists(outputPath):
-				os.makedirs(outputPath)
-			K = np.zeros((2, rawI.shape[0], rawI.shape[1]))
-			contours = UNet2D.singleImageInference(I, 'accumulate', 1)
-			hsize = int((float(I.shape[0]) * float(1 / dsFactor)))
-			vsize = int((float(I.shape[1]) * float(1 / dsFactor)))
-			contours = resize(contours, (rawI.shape[0], rawI.shape[1]))
-			K[1, :, :] = rawI
-			K[0, :, :] = contours
-			tifwrite(np.uint8(255 * K),
-					 outputPath + '//' + fileNamePrefix[0] + '_ContoursPM_' + str(dapiChannel + 1) + '.tif')
-			del K
-			K = np.zeros((1, rawI.shape[0], rawI.shape[1]))
-			nuclei = UNet2D.singleImageInference(I, 'accumulate', 2)
-			nuclei = resize(nuclei, (rawI.shape[0], rawI.shape[1]))
-			K[0, :, :] = nuclei
-			tifwrite(np.uint8(255 * K),
-					 outputPath + '//' + fileNamePrefix[0] + '_NucleiPM_' + str(dapiChannel + 1) + '.tif')
-			del K
-	UNet2D.singleImageInferenceCleanup()
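Note: unlike the script above, this batch variant reads its inputs from the command line. Going by the argparse definitions in the deleted file, a typical invocation would be along the lines of `python batchUnMicst.py /path/to/samples --channel 0 --scalingFactor 0.5 --TMA` (the path and values are illustrative only); the --TMA flag switches the per-sample file search from registration/*ome.tif to the dearrayed TMA cores, excluding TMA_MAP.tif.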
--- a/macros.xml	Fri Mar 12 00:17:29 2021 +0000
+++ b/macros.xml	Wed Sep 07 23:10:14 2022 +0000
@@ -2,6 +2,8 @@
 <macros>
     <xml name="requirements">
         <requirements>
+            <container type="docker">labsyspharm/unmicst:@TOOL_VERSION@</container>
+            <!--
             <requirement type="package" version="3.7">python</requirement>
             <requirement type="package" version="1.15.0">tensorflow</requirement>
             <requirement type="package" version="1.15.1">tensorflow-estimator</requirement>
@@ -12,17 +14,28 @@
             <requirement type="package" version="2020.7.24">tifffile</requirement>
             <requirement type="package" version="2019.7.2">czifile</requirement>
             <requirement type="package" version="3.2.3">nd2reader</requirement>
+            -->
         </requirements>
     </xml>
 
     <xml name="version_cmd">
-        <version_command>echo @VERSION@</version_command>
+        <version_command>@CMD_BEGIN@ --help</version_command> 
     </xml>
     <xml name="citations">
         <citations>
+            <citation type="doi">10.1101/2021.04.02.438285</citation>
         </citations>
     </xml>
 
-    <token name="@VERSION@">3.1.1</token>
-    <token name="@CMD_BEGIN@">python ${__tool_directory__}/UnMicst.py</token>
+    <token name="@TOOL_VERSION@">2.7.1</token>
+    <token name="@VERSION_SUFFIX@">0</token>
+    <token name="@CMD_BEGIN@"><![CDATA[
+    UNMICST_PATH='' &&
+    if [ -f '/app/unmicstWrapper.py' ]; then
+        export UNMICST_PATH='python /app/unmicstWrapper.py';
+    else
+        export UNMICST_PATH='unmicstWrapper.py';
+    fi &&
+    \$UNMICST_PATH
+    ]]></token>
 </macros>
--- a/models/CytoplasmIncell/checkpoint	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-model_checkpoint_path: "D:\\Dan\\CytoplasmIncell\\model.ckpt"
-all_model_checkpoint_paths: "D:\\Dan\\CytoplasmIncell\\model.ckpt"
Binary file models/CytoplasmIncell/datasetMean.data has changed
Binary file models/CytoplasmIncell/datasetStDev.data has changed
Binary file models/CytoplasmIncell/hp.data has changed
Binary file models/CytoplasmIncell/model.ckpt.data-00000-of-00001 has changed
Binary file models/CytoplasmIncell/model.ckpt.index has changed
Binary file models/CytoplasmIncell/model.ckpt.meta has changed
--- a/models/CytoplasmIncell2/datasetMean.data	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-€G?±ë…¸Qì.
\ No newline at end of file
--- a/models/CytoplasmIncell2/datasetStDev.data	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-€G?±ë…¸Qì.
\ No newline at end of file
Binary file models/CytoplasmIncell2/hp.data has changed
Binary file models/CytoplasmIncell2/model.ckpt.data-00000-of-00001 has changed
Binary file models/CytoplasmIncell2/model.ckpt.index has changed
Binary file models/CytoplasmIncell2/model.ckpt.meta has changed
--- a/models/CytoplasmZeissNikon/checkpoint	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-model_checkpoint_path: "D:\\Dan\\CytoplasmZeissNikon\\model.ckpt"
-all_model_checkpoint_paths: "D:\\Dan\\CytoplasmZeissNikon\\model.ckpt"
Binary file models/CytoplasmZeissNikon/datasetMean.data has changed
Binary file models/CytoplasmZeissNikon/datasetStDev.data has changed
Binary file models/CytoplasmZeissNikon/hp.data has changed
Binary file models/CytoplasmZeissNikon/model.ckpt.data-00000-of-00001 has changed
Binary file models/CytoplasmZeissNikon/model.ckpt.index has changed
Binary file models/CytoplasmZeissNikon/model.ckpt.meta has changed
--- a/models/mousenucleiDAPI/checkpoint	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-model_checkpoint_path: "D:\\Olesja\\UNet\\nuclei20x2bin1chan 3layers ks3 bs16 20input\\model.ckpt"
-all_model_checkpoint_paths: "D:\\Olesja\\UNet\\nuclei20x2bin1chan 3layers ks3 bs16 20input\\model.ckpt"
Binary file models/mousenucleiDAPI/datasetMean.data has changed
Binary file models/mousenucleiDAPI/datasetStDev.data has changed
Binary file models/mousenucleiDAPI/hp.data has changed
Binary file models/mousenucleiDAPI/model.ckpt.data-00000-of-00001 has changed
Binary file models/mousenucleiDAPI/model.ckpt.index has changed
Binary file models/mousenucleiDAPI/model.ckpt.meta has changed
Binary file models/mousenucleiDAPI/nuclei20x2bin1chan.data-00000-of-00001 has changed
Binary file models/mousenucleiDAPI/nuclei20x2bin1chan.index has changed
Binary file models/mousenucleiDAPI/nuclei20x2bin1chan.meta has changed
--- a/models/nucleiDAPI/checkpoint	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-model_checkpoint_path: "D:\\LSP\\UNet\\tonsil20x1bin1chan\\TFModel\\model.ckpt"
-all_model_checkpoint_paths: "D:\\LSP\\UNet\\tonsil20x1bin1chan\\TFModel\\model.ckpt"
Binary file models/nucleiDAPI/datasetMean.data has changed
Binary file models/nucleiDAPI/datasetStDev.data has changed
Binary file models/nucleiDAPI/hp.data has changed
Binary file models/nucleiDAPI/model.ckpt.data-00000-of-00001 has changed
Binary file models/nucleiDAPI/model.ckpt.index has changed
Binary file models/nucleiDAPI/model.ckpt.meta has changed
--- a/models/nucleiDAPI1-5/checkpoint	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-model_checkpoint_path: "D:\\LSP\\UNet\\TuuliaLPTBdapiTFv2\\model.ckpt"
-all_model_checkpoint_paths: "D:\\LSP\\UNet\\TuuliaLPTBdapiTFv2\\model.ckpt"
--- a/models/nucleiDAPI1-5/datasetMean.data	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-€G?ÕÂ\(õÃ.
\ No newline at end of file
Binary file models/nucleiDAPI1-5/datasetStDev.data has changed
Binary file models/nucleiDAPI1-5/hp.data has changed
Binary file models/nucleiDAPI1-5/model.ckpt.index has changed
Binary file models/nucleiDAPI1-5/model.ckpt.meta has changed
--- a/models/nucleiDAPILAMIN/checkpoint	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-model_checkpoint_path: "/home/cy101/files/CellBiology/IDAC/Clarence/LSP/UNet models/LPTCdapilamin5-36/model.ckpt"
-all_model_checkpoint_paths: "/home/cy101/files/CellBiology/IDAC/Clarence/LSP/UNet models/LPTCdapilamin5-36/model.ckpt"
--- a/models/nucleiDAPILAMIN/datasetMean.data	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-€G?Ç
-=p£×
-.
\ No newline at end of file
--- a/models/nucleiDAPILAMIN/datasetStDev.data	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-€G?ÅÂ\(õÃ.
\ No newline at end of file
Binary file models/nucleiDAPILAMIN/hp.data has changed
Binary file models/nucleiDAPILAMIN/model.ckpt.index has changed
Binary file models/nucleiDAPILAMIN/model.ckpt.meta has changed
Binary file test-data/105.tif has changed
Binary file test-data/105_ContoursPM_1.tif has changed
Binary file test-data/105_NucleiPM_1.tif has changed
--- a/toolbox/GPUselect.py	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,19 +0,0 @@
-import subprocess, re
-import numpy as np
-
-def pick_gpu_lowest_memory():
-    output = subprocess.Popen("nvidia-smi", stdout=subprocess.PIPE, shell=True).communicate()[0]
-    output=output.decode("ascii")
-    gpu_output = output[output.find("Memory-Usage"):]
-        # lines of the form
-        # |    0      8734    C   python                                       11705MiB |
-    memory_regex = re.compile(r"[|]\s+?\D+?.+[ ](?P<gpu_memory>\d+)MiB /")
-    rows = gpu_output.split("\n")
-    result=[]
-    for row in gpu_output.split("\n"):
-        m = memory_regex.search(row)
-        if not m:
-            continue
-        gpu_memory = int(m.group("gpu_memory"))
-        result.append(gpu_memory)
-    return np.argsort(result)[0]
\ No newline at end of file
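Note: the removed helper parses `nvidia-smi` output and returns the index of the GPU reporting the lowest memory use. A minimal usage sketch (the environment-variable step mirrors how the training/deploy code above selects a device):

    import os
    from toolbox import GPUselect

    gpu_index = GPUselect.pick_gpu_lowest_memory()       # index of the least-loaded GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % gpu_index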
--- a/toolbox/PartitionOfImage.py	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,305 +0,0 @@
-import numpy as np
-from toolbox.imtools import *
-# from toolbox.ftools import *
-# import sys
-
-class PI2D:
-    Image = None
-    PaddedImage = None
-    PatchSize = 128
-    Margin = 14
-    SubPatchSize = 100
-    PC = None # patch coordinates
-    NumPatches = 0
-    Output = None
-    Count = None
-    NR = None
-    NC = None
-    NRPI = None
-    NCPI = None
-    Mode = None
-    W = None
-
-    def setup(image,patchSize,margin,mode):
-        PI2D.Image = image
-        PI2D.PatchSize = patchSize
-        PI2D.Margin = margin
-        subPatchSize = patchSize-2*margin
-        PI2D.SubPatchSize = subPatchSize
-
-        W = np.ones((patchSize,patchSize))
-        W[[0,-1],:] = 0
-        W[:,[0,-1]] = 0
-        for i in range(1,2*margin):
-            v = i/(2*margin)
-            W[i,i:-i] = v
-            W[-i-1,i:-i] = v
-            W[i:-i,i] = v
-            W[i:-i,-i-1] = v
-        PI2D.W = W
-
-        if len(image.shape) == 2:
-            nr,nc = image.shape
-        elif len(image.shape) == 3: # multi-channel image
-            nz,nr,nc = image.shape
-
-        PI2D.NR = nr
-        PI2D.NC = nc
-
-        npr = int(np.ceil(nr/subPatchSize)) # number of patch rows
-        npc = int(np.ceil(nc/subPatchSize)) # number of patch cols
-
-        nrpi = npr*subPatchSize+2*margin # number of rows in padded image 
-        ncpi = npc*subPatchSize+2*margin # number of cols in padded image 
-
-        PI2D.NRPI = nrpi
-        PI2D.NCPI = ncpi
-
-        if len(image.shape) == 2:
-            PI2D.PaddedImage = np.zeros((nrpi,ncpi))
-            PI2D.PaddedImage[margin:margin+nr,margin:margin+nc] = image
-        elif len(image.shape) == 3:
-            PI2D.PaddedImage = np.zeros((nz,nrpi,ncpi))
-            PI2D.PaddedImage[:,margin:margin+nr,margin:margin+nc] = image
-
-        PI2D.PC = [] # patch coordinates [r0,r1,c0,c1]
-        for i in range(npr):
-            r0 = i*subPatchSize
-            r1 = r0+patchSize
-            for j in range(npc):
-                c0 = j*subPatchSize
-                c1 = c0+patchSize
-                PI2D.PC.append([r0,r1,c0,c1])
-
-        PI2D.NumPatches = len(PI2D.PC)
-        PI2D.Mode = mode # 'replace' or 'accumulate'
-
-    def getPatch(i):
-        r0,r1,c0,c1 = PI2D.PC[i]
-        if len(PI2D.PaddedImage.shape) == 2:
-            return PI2D.PaddedImage[r0:r1,c0:c1]
-        if len(PI2D.PaddedImage.shape) == 3:
-            return PI2D.PaddedImage[:,r0:r1,c0:c1]
-
-    def createOutput(nChannels):
-        if nChannels == 1:
-            PI2D.Output = np.zeros((PI2D.NRPI,PI2D.NCPI),np.float16)
-        else:
-            PI2D.Output = np.zeros((nChannels,PI2D.NRPI,PI2D.NCPI),np.float16)
-        if PI2D.Mode == 'accumulate':
-            PI2D.Count = np.zeros((PI2D.NRPI,PI2D.NCPI),np.float16)
-
-    def patchOutput(i,P):
-        r0,r1,c0,c1 = PI2D.PC[i]
-        if PI2D.Mode == 'accumulate':
-            PI2D.Count[r0:r1,c0:c1] += PI2D.W
-        if len(P.shape) == 2:
-            if PI2D.Mode == 'accumulate':
-                PI2D.Output[r0:r1,c0:c1] += np.multiply(P,PI2D.W)
-            elif PI2D.Mode == 'replace':
-                PI2D.Output[r0:r1,c0:c1] = P
-        elif len(P.shape) == 3:
-            if PI2D.Mode == 'accumulate':
-                for i in range(P.shape[0]):
-                    PI2D.Output[i,r0:r1,c0:c1] += np.multiply(P[i,:,:],PI2D.W)
-            elif PI2D.Mode == 'replace':
-                PI2D.Output[:,r0:r1,c0:c1] = P
-
-    def getValidOutput():
-        margin = PI2D.Margin
-        nr, nc = PI2D.NR, PI2D.NC
-        if PI2D.Mode == 'accumulate':
-            C = PI2D.Count[margin:margin+nr,margin:margin+nc]
-        if len(PI2D.Output.shape) == 2:
-            if PI2D.Mode == 'accumulate':
-                return np.divide(PI2D.Output[margin:margin+nr,margin:margin+nc],C)
-            if PI2D.Mode == 'replace':
-                return PI2D.Output[margin:margin+nr,margin:margin+nc]
-        if len(PI2D.Output.shape) == 3:
-            if PI2D.Mode == 'accumulate':
-                for i in range(PI2D.Output.shape[0]):
-                    PI2D.Output[i,margin:margin+nr,margin:margin+nc] = np.divide(PI2D.Output[i,margin:margin+nr,margin:margin+nc],C)
-            return PI2D.Output[:,margin:margin+nr,margin:margin+nc]
-
-
-    def demo():
-        I = np.random.rand(128,128)
-        # PI2D.setup(I,128,14)
-        PI2D.setup(I,64,4,'replace')
-
-        nChannels = 2
-        PI2D.createOutput(nChannels)
-
-        for i in range(PI2D.NumPatches):
-            P = PI2D.getPatch(i)
-            Q = np.zeros((nChannels,P.shape[0],P.shape[1]))
-            for j in range(nChannels):
-                Q[j,:,:] = P
-            PI2D.patchOutput(i,Q)
-
-        J = PI2D.getValidOutput()
-        J = J[0,:,:]
-
-        D = np.abs(I-J)
-        print(np.max(D))
-
-        K = cat(1,cat(1,I,J),D)
-        imshow(K)
-
-
-class PI3D:
-    Image = None
-    PaddedImage = None
-    PatchSize = 128
-    Margin = 14
-    SubPatchSize = 100
-    PC = None # patch coordinates
-    NumPatches = 0
-    Output = None
-    Count = None
-    NR = None # rows
-    NC = None # cols
-    NZ = None # planes
-    NRPI = None
-    NCPI = None
-    NZPI = None
-    Mode = None
-    W = None
-
-    def setup(image,patchSize,margin,mode):
-        PI3D.Image = image
-        PI3D.PatchSize = patchSize
-        PI3D.Margin = margin
-        subPatchSize = patchSize-2*margin
-        PI3D.SubPatchSize = subPatchSize
-
-        W = np.ones((patchSize,patchSize,patchSize))
-        W[[0,-1],:,:] = 0
-        W[:,[0,-1],:] = 0
-        W[:,:,[0,-1]] = 0
-        for i in range(1,2*margin):
-            v = i/(2*margin)
-            W[[i,-i-1],i:-i,i:-i] = v
-            W[i:-i,[i,-i-1],i:-i] = v
-            W[i:-i,i:-i,[i,-i-1]] = v
-
-        PI3D.W = W
-
-        if len(image.shape) == 3:
-            nz,nr,nc = image.shape
-        elif len(image.shape) == 4: # multi-channel image
-            nz,nw,nr,nc = image.shape
-
-        PI3D.NR = nr
-        PI3D.NC = nc
-        PI3D.NZ = nz
-
-        npr = int(np.ceil(nr/subPatchSize)) # number of patch rows
-        npc = int(np.ceil(nc/subPatchSize)) # number of patch cols
-        npz = int(np.ceil(nz/subPatchSize)) # number of patch planes
-
-        nrpi = npr*subPatchSize+2*margin # number of rows in padded image 
-        ncpi = npc*subPatchSize+2*margin # number of cols in padded image 
-        nzpi = npz*subPatchSize+2*margin # number of plns in padded image 
-
-        PI3D.NRPI = nrpi
-        PI3D.NCPI = ncpi
-        PI3D.NZPI = nzpi
-
-        if len(image.shape) == 3:
-            PI3D.PaddedImage = np.zeros((nzpi,nrpi,ncpi))
-            PI3D.PaddedImage[margin:margin+nz,margin:margin+nr,margin:margin+nc] = image
-        elif len(image.shape) == 4:
-            PI3D.PaddedImage = np.zeros((nzpi,nw,nrpi,ncpi))
-            PI3D.PaddedImage[margin:margin+nz,:,margin:margin+nr,margin:margin+nc] = image
-
-        PI3D.PC = [] # patch coordinates [z0,z1,r0,r1,c0,c1]
-        for iZ in range(npz):
-            z0 = iZ*subPatchSize
-            z1 = z0+patchSize
-            for i in range(npr):
-                r0 = i*subPatchSize
-                r1 = r0+patchSize
-                for j in range(npc):
-                    c0 = j*subPatchSize
-                    c1 = c0+patchSize
-                    PI3D.PC.append([z0,z1,r0,r1,c0,c1])
-
-        PI3D.NumPatches = len(PI3D.PC)
-        PI3D.Mode = mode # 'replace' or 'accumulate'
-
-    def getPatch(i):
-        z0,z1,r0,r1,c0,c1 = PI3D.PC[i]
-        if len(PI3D.PaddedImage.shape) == 3:
-            return PI3D.PaddedImage[z0:z1,r0:r1,c0:c1]
-        if len(PI3D.PaddedImage.shape) == 4:
-            return PI3D.PaddedImage[z0:z1,:,r0:r1,c0:c1]
-
-    def createOutput(nChannels):
-        if nChannels == 1:
-            PI3D.Output = np.zeros((PI3D.NZPI,PI3D.NRPI,PI3D.NCPI))
-        else:
-            PI3D.Output = np.zeros((PI3D.NZPI,nChannels,PI3D.NRPI,PI3D.NCPI))
-        if PI3D.Mode == 'accumulate':
-            PI3D.Count = np.zeros((PI3D.NZPI,PI3D.NRPI,PI3D.NCPI))
-
-    def patchOutput(i,P):
-        z0,z1,r0,r1,c0,c1 = PI3D.PC[i]
-        if PI3D.Mode == 'accumulate':
-            PI3D.Count[z0:z1,r0:r1,c0:c1] += PI3D.W
-        if len(P.shape) == 3:
-            if PI3D.Mode == 'accumulate':
-                PI3D.Output[z0:z1,r0:r1,c0:c1] += np.multiply(P,PI3D.W)
-            elif PI3D.Mode == 'replace':
-                PI3D.Output[z0:z1,r0:r1,c0:c1] = P
-        elif len(P.shape) == 4:
-            if PI3D.Mode == 'accumulate':
-                for i in range(P.shape[1]):
-                    PI3D.Output[z0:z1,i,r0:r1,c0:c1] += np.multiply(P[:,i,:,:],PI3D.W)
-            elif PI3D.Mode == 'replace':
-                PI3D.Output[z0:z1,:,r0:r1,c0:c1] = P
-
-    def getValidOutput():
-        margin = PI3D.Margin
-        nz, nr, nc = PI3D.NZ, PI3D.NR, PI3D.NC
-        if PI3D.Mode == 'accumulate':
-            C = PI3D.Count[margin:margin+nz,margin:margin+nr,margin:margin+nc]
-        if len(PI3D.Output.shape) == 3:
-            if PI3D.Mode == 'accumulate':
-                return np.divide(PI3D.Output[margin:margin+nz,margin:margin+nr,margin:margin+nc],C)
-            if PI3D.Mode == 'replace':
-                return PI3D.Output[margin:margin+nz,margin:margin+nr,margin:margin+nc]
-        if len(PI3D.Output.shape) == 4:
-            if PI3D.Mode == 'accumulate':
-                for i in range(PI3D.Output.shape[1]):
-                    PI3D.Output[margin:margin+nz,i,margin:margin+nr,margin:margin+nc] = np.divide(PI3D.Output[margin:margin+nz,i,margin:margin+nr,margin:margin+nc],C)
-            return PI3D.Output[margin:margin+nz,:,margin:margin+nr,margin:margin+nc]
-
-
-    def demo():
-        I = np.random.rand(128,128,128)
-        PI3D.setup(I,64,4,'accumulate')
-
-        nChannels = 2
-        PI3D.createOutput(nChannels)
-
-        for i in range(PI3D.NumPatches):
-            P = PI3D.getPatch(i)
-            Q = np.zeros((P.shape[0],nChannels,P.shape[1],P.shape[2]))
-            for j in range(nChannels):
-                Q[:,j,:,:] = P
-            PI3D.patchOutput(i,Q)
-
-        J = PI3D.getValidOutput()
-        J = J[:,0,:,:]
-
-        D = np.abs(I-J)
-        print(np.max(D))
-
-        pI = I[64,:,:]
-        pJ = J[64,:,:]
-        pD = D[64,:,:]
-
-        K = cat(1,cat(1,pI,pJ),pD)
-        imshow(K)
-
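Note: in 'accumulate' mode the class blends overlapping patches with the window W and keeps a running per-pixel weight sum in Count, so dividing the accumulated output by Count yields a seamless map wherever patches overlap. A one-dimensional numpy illustration of that blend (independent of the class itself, written only to show the principle):

    import numpy as np

    signal = np.random.rand(10)
    out = np.zeros(10)
    count = np.zeros(10)
    w = np.array([0.5, 1.0, 1.0, 0.5])           # simple overlap window
    for start in (0, 3, 6):                      # overlapping 4-sample patches
        patch = signal[start:start + 4]
        out[start:start + 4] += patch * w        # weighted accumulation
        count[start:start + 4] += w              # running weight sum
    print(np.allclose(out / count, signal))      # True: overlaps blend back to the input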
Binary file toolbox/__pycache__/GPUselect.cpython-37.pyc has changed
Binary file toolbox/__pycache__/PartitionOfImage.cpython-36.pyc has changed
Binary file toolbox/__pycache__/PartitionOfImage.cpython-37.pyc has changed
Binary file toolbox/__pycache__/__init__.cpython-36.pyc has changed
Binary file toolbox/__pycache__/ftools.cpython-36.pyc has changed
Binary file toolbox/__pycache__/ftools.cpython-37.pyc has changed
Binary file toolbox/__pycache__/imtools.cpython-36.pyc has changed
Binary file toolbox/__pycache__/imtools.cpython-37.pyc has changed
--- a/toolbox/ftools.py	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-from os.path import *
-from os import listdir, makedirs, remove
-import pickle
-import shutil
-
-def fileparts(path): # path = file path
-    [p,f] = split(path)
-    [n,e] = splitext(f)
-    return [p,n,e]
-
-def listfiles(path,token): # path = folder path
-    l = []
-    for f in listdir(path):
-        fullPath = join(path,f)
-        if isfile(fullPath) and token in f:
-            l.append(fullPath)
-    l.sort()
-    return l
-
-def listsubdirs(path): # path = folder path
-    l = []
-    for f in listdir(path):
-        fullPath = join(path,f)
-        if isdir(fullPath):
-            l.append(fullPath)
-    l.sort()
-    return l
-
-def pathjoin(p,ne): # '/path/to/folder', 'name.extension' (or a subfolder)
-    return join(p,ne)
-
-def saveData(data,path):
-    print('saving data')
-    dataFile = open(path, 'wb')
-    pickle.dump(data, dataFile)
-
-def loadData(path):
-    print('loading data')
-    dataFile = open(path, 'rb')
-    return pickle.load(dataFile)
-
-def createFolderIfNonExistent(path):
-    if not exists(path): # from os.path
-        makedirs(path)
-
-def moveFile(fullPathSource,folderPathDestination):
-    [p,n,e] = fileparts(fullPathSource)
-    shutil.move(fullPathSource,pathjoin(folderPathDestination,n+e))
-
-def copyFile(fullPathSource,folderPathDestination):
-    [p,n,e] = fileparts(fullPathSource)
-    shutil.copy(fullPathSource,pathjoin(folderPathDestination,n+e))
-
-def removeFile(path):
-    remove(path)
\ No newline at end of file
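Note: the removed saveData/loadData helpers are thin pickle wrappers (they never close their file handles). A minimal equivalent using context managers, shown only as a sketch rather than a drop-in for the deleted module:

    import pickle

    def save_data(data, path):
        # pickle an arbitrary Python object to disk, closing the file on exit
        with open(path, 'wb') as f:
            pickle.dump(data, f)

    def load_data(path):
        # read back an object pickled by save_data
        with open(path, 'rb') as f:
            return pickle.load(f)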
--- a/toolbox/imtools.py	Fri Mar 12 00:17:29 2021 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,312 +0,0 @@
-import matplotlib.pyplot as plt
-import tifffile
-import os
-import numpy as np
-from skimage import io as skio
-from scipy.ndimage import *
-from skimage.morphology import *
-from skimage.transform import resize
-
-def tifread(path):
-    return tifffile.imread(path)
-
-def tifwrite(I,path):
-    tifffile.imsave(path, I)
-
-def imshow(I,**kwargs):
-    if not kwargs:
-        plt.imshow(I,cmap='gray')
-    else:
-        plt.imshow(I,**kwargs)
-        
-    plt.axis('off')
-    plt.show()
-
-def imshowlist(L,**kwargs):
-    n = len(L)
-    for i in range(n):
-        plt.subplot(1, n, i+1)
-        if not kwargs:
-            plt.imshow(L[i],cmap='gray')
-        else:
-            plt.imshow(L[i],**kwargs)
-        plt.axis('off')
-    plt.show()
-
-def imread(path):
-    return skio.imread(path)
-
-def imwrite(I,path):
-    skio.imsave(path,I)
-
-def im2double(I):
-    if I.dtype == 'uint16':
-        return I.astype('float64')/65535
-    elif I.dtype == 'uint8':
-        return I.astype('float64')/255
-    elif I.dtype == 'float32':
-        return I.astype('float64')
-    elif I.dtype == 'float64':
-        return I
-    else:
-        print('returned original image type: ', I.dtype)
-        return I
-
-def size(I):
-    return list(I.shape)
-
-def imresizeDouble(I,sizeOut): # input and output are double
-    return resize(I,(sizeOut[0],sizeOut[1]),mode='reflect')
-
-def imresize3Double(I,sizeOut): # input and output are double
-    return resize(I,(sizeOut[0],sizeOut[1],sizeOut[2]),mode='reflect')
-
-def imresizeUInt8(I,sizeOut): # input and output are UInt8
-    return np.uint8(resize(I.astype(float),(sizeOut[0],sizeOut[1]),mode='reflect',order=0))
-
-def imresize3UInt8(I,sizeOut): # input and output are UInt8
-    return np.uint8(resize(I.astype(float),(sizeOut[0],sizeOut[1],sizeOut[2]),mode='reflect',order=0))
-
-def normalize(I):
-    m = np.min(I)
-    M = np.max(I)
-    if M > m:
-        return (I-m)/(M-m)
-    else:
-        return I
-
-def snormalize(I):
-    m = np.mean(I)
-    s = np.std(I)
-    if s > 0:
-        return (I-m)/s
-    else:
-        return I
-
-def cat(a,I,J):
-    return np.concatenate((I,J),axis=a)
-
-def imerode(I,r):
-    return binary_erosion(I, disk(r))
-
-def imdilate(I,r):
-    return binary_dilation(I, disk(r))
-
-def imerode3(I,r):
-    return morphology.binary_erosion(I, ball(r))
-
-def imdilate3(I,r):
-    return morphology.binary_dilation(I, ball(r))
-
-def sphericalStructuralElement(imShape,fRadius):
-    if len(imShape) == 2:
-        return disk(fRadius,dtype=float)
-    if len(imShape) == 3:
-        return ball(fRadius,dtype=float)
-
-def medfilt(I,filterRadius):
-    return median_filter(I,footprint=sphericalStructuralElement(I.shape,filterRadius))
-
-def maxfilt(I,filterRadius):
-    return maximum_filter(I,footprint=sphericalStructuralElement(I.shape,filterRadius))
-
-def minfilt(I,filterRadius):
-    return minimum_filter(I,footprint=sphericalStructuralElement(I.shape,filterRadius))
-
-def ptlfilt(I,percentile,filterRadius):
-    return percentile_filter(I,percentile,footprint=sphericalStructuralElement(I.shape,filterRadius))
-
-def imgaussfilt(I,sigma,**kwargs):
-    return gaussian_filter(I,sigma,**kwargs)
-
-def imlogfilt(I,sigma,**kwargs):
-    return -gaussian_laplace(I,sigma,**kwargs)
-
-def imgradmag(I,sigma):
-    if len(I.shape) == 2:
-        dx = imgaussfilt(I,sigma,order=[0,1])
-        dy = imgaussfilt(I,sigma,order=[1,0])
-        return np.sqrt(dx**2+dy**2)
-    if len(I.shape) == 3:
-        dx = imgaussfilt(I,sigma,order=[0,0,1])
-        dy = imgaussfilt(I,sigma,order=[0,1,0])
-        dz = imgaussfilt(I,sigma,order=[1,0,0])
-        return np.sqrt(dx**2+dy**2+dz**2)
-
-def localstats(I,radius,justfeatnames=False):
-    ptls = [10,30,50,70,90]
-    featNames = []
-    for i in range(len(ptls)):
-        featNames.append('locPtl%d' % ptls[i])
-    if justfeatnames == True:
-        return featNames
-    sI = size(I)
-    nFeats = len(ptls)
-    F = np.zeros((sI[0],sI[1],nFeats))
-    for i in range(nFeats):
-        F[:,:,i] = ptlfilt(I,ptls[i],radius)
-    return F
-
-def localstats3(I,radius,justfeatnames=False):
-    ptls = [10,30,50,70,90]
-    featNames = []
-    for i in range(len(ptls)):
-        featNames.append('locPtl%d' % ptls[i])
-    if justfeatnames == True:
-        return featNames
-    sI = size(I)
-    nFeats = len(ptls)
-    F = np.zeros((sI[0],sI[1],sI[2],nFeats))
-    for i in range(nFeats):
-        F[:,:,:,i] = ptlfilt(I,ptls[i],radius)
-    return F
-
-def imderivatives(I,sigmas,justfeatnames=False):
-    if type(sigmas) is not list:
-        sigmas = [sigmas]
-    derivPerSigmaFeatNames = ['d0','dx','dy','dxx','dxy','dyy','normGrad','normHessDiag']
-    if justfeatnames == True:
-        featNames = [];
-        for i in range(len(sigmas)):
-            for j in range(len(derivPerSigmaFeatNames)):
-                featNames.append('derivSigma%d%s' % (sigmas[i],derivPerSigmaFeatNames[j]))
-        return featNames
-    nDerivativesPerSigma = len(derivPerSigmaFeatNames)
-    nDerivatives = len(sigmas)*nDerivativesPerSigma
-    sI = size(I)
-    D = np.zeros((sI[0],sI[1],nDerivatives))
-    for i in range(len(sigmas)):
-        sigma = sigmas[i]
-        dx = imgaussfilt(I,sigma,order=[0,1])
-        dy = imgaussfilt(I,sigma,order=[1,0])
-        dxx = imgaussfilt(I,sigma,order=[0,2])
-        dyy = imgaussfilt(I,sigma,order=[2,0])
-        D[:,:,nDerivativesPerSigma*i  ] = imgaussfilt(I,sigma)
-        D[:,:,nDerivativesPerSigma*i+1] = dx
-        D[:,:,nDerivativesPerSigma*i+2] = dy
-        D[:,:,nDerivativesPerSigma*i+3] = dxx
-        D[:,:,nDerivativesPerSigma*i+4] = imgaussfilt(I,sigma,order=[1,1])
-        D[:,:,nDerivativesPerSigma*i+5] = dyy
-        D[:,:,nDerivativesPerSigma*i+6] = np.sqrt(dx**2+dy**2)
-        D[:,:,nDerivativesPerSigma*i+7] = np.sqrt(dxx**2+dyy**2)
-    return D
-    # derivatives are indexed by the last dimension, which is good for ML features but not for visualization,
-    # in which case the expected dimensions are [plane,channel,y(row),x(col)]; to obtain that ordering, do
-    # D = np.moveaxis(D,[0,3,1,2],[0,1,2,3])
-
-def imderivatives3(I,sigmas,justfeatnames=False):
-    if type(sigmas) is not list:
-        sigmas = [sigmas]
-
-    derivPerSigmaFeatNames = ['d0','dx','dy','dz','dxx','dxy','dxz','dyy','dyz','dzz','normGrad','normHessDiag']
-
-    # derivPerSigmaFeatNames = ['d0','normGrad','normHessDiag']
-
-    if justfeatnames == True:
-        featNames = [];
-        for i in range(len(sigmas)):
-            for j in range(len(derivPerSigmaFeatNames)):
-                featNames.append('derivSigma%d%s' % (sigmas[i],derivPerSigmaFeatNames[j]))
-        return featNames
-    nDerivativesPerSigma = len(derivPerSigmaFeatNames)
-    nDerivatives = len(sigmas)*nDerivativesPerSigma
-    sI = size(I)
-    D = np.zeros((sI[0],sI[1],sI[2],nDerivatives)) # plane, channel, y, x
-    for i in range(len(sigmas)):
-        sigma = sigmas[i]
-        dx  = imgaussfilt(I,sigma,order=[0,0,1]) # z, y, x
-        dy  = imgaussfilt(I,sigma,order=[0,1,0])
-        dz  = imgaussfilt(I,sigma,order=[1,0,0])
-        dxx = imgaussfilt(I,sigma,order=[0,0,2])
-        dyy = imgaussfilt(I,sigma,order=[0,2,0])
-        dzz = imgaussfilt(I,sigma,order=[2,0,0])
-
-        D[:,:,:,nDerivativesPerSigma*i   ] = imgaussfilt(I,sigma)
-        D[:,:,:,nDerivativesPerSigma*i+1 ] = dx
-        D[:,:,:,nDerivativesPerSigma*i+2 ] = dy
-        D[:,:,:,nDerivativesPerSigma*i+3 ] = dz
-        D[:,:,:,nDerivativesPerSigma*i+4 ] = dxx
-        D[:,:,:,nDerivativesPerSigma*i+5 ] = imgaussfilt(I,sigma,order=[0,1,1])
-        D[:,:,:,nDerivativesPerSigma*i+6 ] = imgaussfilt(I,sigma,order=[1,0,1])
-        D[:,:,:,nDerivativesPerSigma*i+7 ] = dyy
-        D[:,:,:,nDerivativesPerSigma*i+8 ] = imgaussfilt(I,sigma,order=[1,1,0])
-        D[:,:,:,nDerivativesPerSigma*i+9 ] = dzz
-        D[:,:,:,nDerivativesPerSigma*i+10] = np.sqrt(dx**2+dy**2+dz**2)
-        D[:,:,:,nDerivativesPerSigma*i+11] = np.sqrt(dxx**2+dyy**2+dzz**2)
-
-        # D[:,:,:,nDerivativesPerSigma*i   ] = imgaussfilt(I,sigma)
-        # D[:,:,:,nDerivativesPerSigma*i+1 ] = np.sqrt(dx**2+dy**2+dz**2)
-        # D[:,:,:,nDerivativesPerSigma*i+2 ] = np.sqrt(dxx**2+dyy**2+dzz**2)
-    return D
-    # derivatives are indexed by the last dimension, which is good for ML features but not for visualization,
-    # in which case the expected dimensions are [plane,y(row),x(col)]; to obtain that ordering, do
-    # D = np.moveaxis(D,[2,0,1],[0,1,2])
-
-def imfeatures(I=[],sigmaDeriv=1,sigmaLoG=1,locStatsRad=0,justfeatnames=False):
-    if type(sigmaDeriv) is not list:
-        sigmaDeriv = [sigmaDeriv]
-    if type(sigmaLoG) is not list:
-        sigmaLoG = [sigmaLoG]
-    derivFeatNames = imderivatives([],sigmaDeriv,justfeatnames=True)
-    nLoGFeats = len(sigmaLoG)
-    locStatsFeatNames = []
-    if locStatsRad > 1:
-        locStatsFeatNames = localstats([],locStatsRad,justfeatnames=True)
-    nLocStatsFeats = len(locStatsFeatNames)
-    if justfeatnames == True:
-        featNames = derivFeatNames
-        for i in range(nLoGFeats):
-            featNames.append('logSigma%d' % sigmaLoG[i])
-        for i in range(nLocStatsFeats):
-            featNames.append(locStatsFeatNames[i])
-        return featNames
-    nDerivFeats = len(derivFeatNames)
-    nFeatures = nDerivFeats+nLoGFeats+nLocStatsFeats
-    sI = size(I)
-    F = np.zeros((sI[0],sI[1],nFeatures))
-    F[:,:,:nDerivFeats] = imderivatives(I,sigmaDeriv)
-    for i in range(nLoGFeats):
-        F[:,:,nDerivFeats+i] = imlogfilt(I,sigmaLoG[i])
-    if locStatsRad > 1:
-        F[:,:,nDerivFeats+nLoGFeats:] = localstats(I,locStatsRad)
-    return F
-
-def imfeatures3(I=[],sigmaDeriv=2,sigmaLoG=2,locStatsRad=0,justfeatnames=False):
-    if type(sigmaDeriv) is not list:
-        sigmaDeriv = [sigmaDeriv]
-    if type(sigmaLoG) is not list:
-        sigmaLoG = [sigmaLoG]
-    derivFeatNames = imderivatives3([],sigmaDeriv,justfeatnames=True)
-    nLoGFeats = len(sigmaLoG)
-    locStatsFeatNames = []
-    if locStatsRad > 1:
-        locStatsFeatNames = localstats3([],locStatsRad,justfeatnames=True)
-    nLocStatsFeats = len(locStatsFeatNames)
-    if justfeatnames == True:
-        featNames = derivFeatNames
-        for i in range(nLoGFeats):
-            featNames.append('logSigma%d' % sigmaLoG[i])
-        for i in range(nLocStatsFeats):
-            featNames.append(locStatsFeatNames[i])
-        return featNames
-    nDerivFeats = len(derivFeatNames)
-    nFeatures = nDerivFeats+nLoGFeats+nLocStatsFeats
-    sI = size(I)
-    F = np.zeros((sI[0],sI[1],sI[2],nFeatures))
-    F[:,:,:,:nDerivFeats] = imderivatives3(I,sigmaDeriv)
-    for i in range(nLoGFeats):
-        F[:,:,:,nDerivFeats+i] = imlogfilt(I,sigmaLoG[i])
-    if locStatsRad > 1:
-        F[:,:,:,nDerivFeats+nLoGFeats:] = localstats3(I,locStatsRad)
-    return F
-
-def stack2list(S):
-    L = []
-    for i in range(size(S)[2]):
-        L.append(S[:,:,i])
-    return L
-
-def thrsegment(I,wsBlr,wsThr): # basic threshold segmentation
-    G = imgaussfilt(I,sigma=(1-wsBlr)+wsBlr*5) # min 1, max 5
-    M = G > wsThr
-    return M
\ No newline at end of file
--- a/unmicst.xml	Fri Mar 12 00:17:29 2021 +0000
+++ b/unmicst.xml	Wed Sep 07 23:10:14 2022 +0000
@@ -1,53 +1,71 @@
-<tool id="unmicst" name="UnMicst" version="@VERSION@.1" profile="17.09">
-    <description>UNet Model for Identifying Cells and Segmenting Tissue</description>
+<tool id="unmicst" name="UnMicst" version="@TOOL_VERSION@+galaxy@VERSION_SUFFIX@" profile="19.01">
+    <description>Image segmentation - probability map generation</description>
+
     <macros>
         <import>macros.xml</import>
     </macros>
  
     <expand macro="requirements"/>
-    @VERSION_CMD@
+    <expand macro="version_cmd"/>
 
     <command detect_errors="exit_code"><![CDATA[
-    #set $typeCorrected = str($image.name).replace('.ome.tiff','').replace('.ome.tif','').replace('.tiff','').replace('.tif','')+'.ome.tif'
+    #set $ext = str($image.file_ext)
+    #if $ext == 'tiff'
+        #set $ext = 'tif'
+    #end if
+    #set $input = 'image.' + str($ext) 
 
-    ln -s $image '$typeCorrected';
+    ln -s '$image' '$input' &&
+    
+    @CMD_BEGIN@ '$input'
 
-    @CMD_BEGIN@ '$typeCorrected'
+    --tool $tool
     
     #if $stackoutput
-    --stackOutput
+        --stackOutput
     #end if
 
-    --outputPath `pwd`
+    --outputPath '.'
     --channel $channel
     --model $model
     --mean $mean
     --std $stdev
-    --scalingFactor $scalingfactor;
+    --scalingFactor $scalingfactor
+    --outlier $outlier &&
 
     ## Move files to different files for from_work_dir differentiation
     #if $stackoutput
-    mv *Probabilities*.tif Probabilities.tif;
-    mv *Preview*.tif Preview.tif
+        mv *Probabilities*.tif Probabilities.tif &&
+        mv qc/*Preview*.tif Preview.tif
     #else
-    mv *ContoursPM*.tif ContoursPM.tif;
-    mv *NucleiPM*.tif NucleiPM.tif
+        mv *ContoursPM*.tif ContoursPM.tif &&
+        mv *NucleiPM*.tif NucleiPM.tif 
     #end if
     ]]></command>
 
     <inputs>
+        <param name="tool" type="select" label="UnMicst Tool">
+            <option selected="true" value="unmicst-solo">unmicst-solo</option>
+            <option value="unmicst-duo">unmicst-duo</option>
+            <option value="unmicst-legacy">unmicst-legacy</option>
+            <option value="UnMicstCyto2">UnMicstCyto2</option>
+        </param>
         <param name="image" type="data" format="tiff" label="Registered TIFF"/>
         <param name="model" type="select" label="Model">
             <option value="nucleiDAPI">nucleiDAPI</option>
+            <option value="CytoplasmIncell">CytoplasmIncell</option>
+            <option value="CytoplasmIncell2">CytoplasmIncell2</option>
+            <option value="CytoplasmZeissNikon">CytoplasmZeissNikon</option>
             <option value="mousenucleiDAPI">mousenucleiDAPI</option>
-            <option value="CytoplasmIncell">CytoplasmIncell</option>
-            <option value="CytoplasmZeissNikon">CytoplasmZeissNikon</option>
+            <option value="nucleiDAPI1-5">nucleiDAPI1-5</option>
+            <option value="nucleiDAPILAMIN">nucleiDAPILAMIN</option>
         </param>
         <param name="mean" type="float" value="-1" label="Mean (-1 for model default)"/>
         <param name="stdev" type="float" value="-1" label="Standard Deviation (-1 for model default)"/>
         <param name="channel" type="integer" value="0" label="Channel to perform inference on"/>
         <param name="stackoutput" type="boolean"  label="Stack probability map outputs"/>
         <param name="scalingfactor" type="float" value="1.0" label="Factor to scale by"/>
+        <param name="outlier" type="float" value="-1.0" label="Map percentile intensity to max when rescaling intensity values"/>
     </inputs>
 
     <outputs>
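As a hedged illustration of the rewritten command section above (not captured from an actual Galaxy run): with stack output enabled, the Cheetah template resolves to roughly the following, where the dataset path, parameter values, and the expansion of the @CMD_BEGIN@ macro (defined in macros.xml) are placeholders:

    ln -s '/path/to/galaxy/dataset.tiff' 'image.tif' &&
    <expansion of @CMD_BEGIN@> 'image.tif' --tool unmicst-solo --stackOutput --outputPath '.' --channel 0 --model nucleiDAPI --mean -1 --std -1 --scalingFactor 1.0 --outlier -1.0 &&
    mv *Probabilities*.tif Probabilities.tif &&
    mv qc/*Preview*.tif Preview.tif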
@@ -64,41 +82,28 @@
             <filter>not stackoutput</filter>
         </data>
     </outputs>
+    <tests>
+        <test expect_num_outputs="2">
+            <param name="image" value="105.tif" ftype="tiff" />
+            <param name="model" value="nucleiDAPI" />
+            <param name="tool" value="unmicst-legacy"/>
+            <param name="channel" value="1"/>
+            <output name="nuclei" file="105_NucleiPM_1.tif" compare="sim_size" delta="10" />
+            <output name="contours" file="105_ContoursPM_1.tif" compare="sim_size" delta="10" />
+        </test>
+    </tests>
     <help><![CDATA[
-UnMicst - UNet Model for Identifying Cells and Segmenting Tissue
-Image Preprocessing
-Images can be preprocessed by inferring nuclei contours via a pretrained UNet model. The model is trained on 3 classes : background, nuclei contours and nuclei centers. The resulting probability maps can then be loaded into any modular segmentation pipeline that may use (but not limited to) a marker controlled watershed algorithm.
-
-The only input file is: an .ome.tif or .tif (preferably flat field corrected, minimal saturated pixels, and in focus. The model is trained on images acquired at 20x with binning 2x2 or a pixel size of 0.65 microns/px. If your settings differ, you can upsample/downsample to some extent.
-
-Running as a Docker container
-
-The docker image is distributed through Dockerhub and includes UnMicst with all of its dependencies. Parallel images with and without gpu support are available.
-
-docker pull labsyspharm/unmicst:latest
-docker pull labsyspharm/unmicst:latest-gpu
-Instatiate a container and mount the input directory containing your image.
-
-docker run -it --runtime=nvidia -v /path/to/data:/data labsyspharm/unmicst:latest-gpu bash
-When using the CPU-only image, --runtime=nvidia can be omitted:
+-------
+UNMICST
+-------
 
-docker run -it -v /path/to/data:/data labsyspharm/unmicst:latest bash
-UnMicst resides in the /app directory inside the container:
-
-root@0ea0cdc46c8f:/# python app/UnMicst.py /data/input/my.tif --outputPath /data/results
-Running in a Conda environment
-
-If Docker is not available on your system, you can run the tool locally by creating a Conda environment. Ensure conda is installed on your system, then clone the repo and use conda.yml to create the environment.
-
-git clone https://github.com/HMS-IDAC/UnMicst.git
-cd UnMicst
-conda env create -f conda.yml
-conda activate unmicst
-python UnMicst.py /path/to/input.tif --outputPath /path/to/results/directory
-References:
-S Saka, Y Wang, J Kishi, A Zhu, Y Zeng, W Xie, K Kirli, C Yapp, M Cicconet, BJ Beliveau, SW Lapan, S Yin, M Lin, E Boyde, PS Kaeser, G Pihan, GM Church, P Yin, Highly multiplexed in situ protein imaging with signal amplification by Immuno-SABER, Nat Biotechnology (accepted)
-
-OHSU Wrapper Repo: https://github.com/ohsu-comp-bio/UnMicst
+**UnMICST** uses a convolutional neural network to annotate each pixel with the probability that it belongs to a given subcellular component (nucleus, cytoplasm, cell boundary). Check the UnMICST website for the most up-to-date documentation.
+**Input**
+An .ome.tif, preferably flat-field corrected. The model is trained on images acquired at a pixel size of 0.65 microns/px; if your settings differ, you can upsample/downsample to some extent.
+**Output**
+1. A .tif stack in which the probability maps for each class are concatenated along the Z-axis in the order: nuclei foreground, nuclei contours, and background.
+2. A QC image, with suffix "Preview", consisting of the DNA image concatenated with the nuclei contour probability map.
+More information is available at: https://labsyspharm.github.io/UnMICST-info/.
     ]]></help>
     <expand macro="citations" />
 </tool>
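To make the stacked output described in the new help text concrete, here is a minimal sketch (not part of the wrapper) for inspecting Probabilities.tif. It assumes tifffile is installed, that the class planes are returned as the leading axis when the stack is read, and that they follow the order stated in the help text:

    # Minimal sketch: split the stacked probability map into its class planes.
    # Plane order assumed from the help text: nuclei foreground, nuclei
    # contours, background.
    import tifffile

    stack = tifffile.imread('Probabilities.tif')
    print(stack.shape)  # e.g. (3, height, width)
    nuclei_fg, nuclei_contours, background = stack[0], stack[1], stack[2]
    tifffile.imwrite('NucleiPM.tif', nuclei_fg)  # write one class on its own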