Underwater Image Quality Metrics: UCIQE, UIQM, PCQI (Python Code)

UCIQE

Paper: An Underwater Colour Image Quality Evaluation Metric
UCIQE uses a linear combination of chroma, saturation, and luminance contrast to quantify the non-uniform color cast, blurring, and low contrast that are typical of underwater engineering and monitoring images.
Link: https://github.com/JOU-UIP/UCIQE

"""
UCIQE
======================================
Trained coefficients are c1=0.4680, c2=0.2745, c3=0.2576.
UCIQE= c1*var_chr+c2*con_lum+c3*aver_sat
var_chr   is σc : the standard deviation of chroma
con_lum is conl: the contrast of luminance
aver_sat  is μs : the average of saturation
coe_metric=[c1, c2, c3]are weighted coefficients.
---------------------------------------------------------
When you want to use the uciqe function, you must give the values of two parameters,
one is the nargin value you calculated and the location and name format of the image
you want to calculate the uciqe value.The format of the function is UCIQE.uciqe(nargin,loc)
---------------------------------------------------------
The input image must be RGB image
======================================
"""

import cv2
import numpy as np


def uciqe(nargin, loc):
    img_bgr = cv2.imread(loc)                            # Read the image file (BGR)
    img_lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)   # Convert to the Lab color space

    if nargin == 1:                                      # Coefficients trained in the paper:
        coe_metric = [0.4680, 0.2745, 0.2576]            # c1=0.4680, c2=0.2745, c3=0.2576
    else:
        raise ValueError("nargin must be 1: only the trained coefficients from the paper are provided")
    img_lum = img_lab[..., 0] / 255
    img_a = img_lab[..., 1] / 255
    img_b = img_lab[..., 2] / 255

    img_chr = np.sqrt(np.square(img_a) + np.square(img_b))                # Chroma

    img_sat = img_chr / np.sqrt(np.square(img_chr) + np.square(img_lum))  # Saturation
    aver_sat = np.mean(img_sat)                                           # Average saturation

    aver_chr = np.mean(img_chr)                                           # Average chroma

    var_chr = np.sqrt(np.mean(abs(1 - np.square(aver_chr / img_chr))))    # Standard deviation of chroma

    dtype = img_lum.dtype                                 # img_lum is float here, so 65536 bins are used
    if dtype == 'uint8':
        nbins = 256
    else:
        nbins = 65536

    hist, bins = np.histogram(img_lum, nbins)             # Contrast of luminance:
    cdf = np.cumsum(hist) / np.sum(hist)                  # spread between the 1% and 99% quantiles

    ilow = np.where(cdf > 0.0100)
    ihigh = np.where(cdf >= 0.9900)
    tol = [(ilow[0][0] - 1) / (nbins - 1), (ihigh[0][0] - 1) / (nbins - 1)]
    con_lum = tol[1] - tol[0]

    quality_val = coe_metric[0] * var_chr + coe_metric[1] * con_lum + coe_metric[2] * aver_sat  # final quality value
    return quality_val

if __name__ == "__main__":
    d_1 = uciqe(1,"data/demo/2.jpg")
    d_2 = uciqe(1,"data/demo/res_2.jpg")
    print(d_1)
    print(d_2)
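When comparing enhancement methods it is common to average UCIQE over a whole folder of results. Below is a minimal sketch of such a batch evaluation; the folder path and file patterns are illustrative placeholders, and it simply reuses the uciqe function defined above.

import glob
import os

def average_uciqe(folder):
    """Average UCIQE over all JPEG/PNG images in a folder (illustrative helper)."""
    paths = sorted(glob.glob(os.path.join(folder, "*.jpg")) +
                   glob.glob(os.path.join(folder, "*.png")))
    scores = [uciqe(1, p) for p in paths]        # reuse the uciqe() defined above
    return sum(scores) / len(scores) if scores else float("nan")

# Example (hypothetical folder of enhanced results):
# print(average_uciqe("data/demo"))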

UIQM

Paper: Human-Visual-System-Inspired Underwater Image Quality Measures
UIQM combines three underwater image attribute measures: the underwater image colorfulness measure (UICM), the underwater image sharpness measure (UISM), and the underwater image contrast measure (UIConM).
Link: https://github.com/xueleichen/PSNR-SSIM-UCIQE-UIQM-Python
The UCIQE computation from that repository has been omitted here.

import math

import numpy as np
from skimage import color, filters

def nmetrics(a):
    # a is expected to be an RGB image with values in [0, 255]
    rgb = a
    gray = color.rgb2gray(a)

    # UIQM weights trained in the paper
    p1 = 0.0282
    p2 = 0.2953
    p3 = 3.5753

    # 1st term: UICM (colourfulness), based on alpha-trimmed RG / YB statistics
    rg = rgb[:,:,0].astype(np.float64) - rgb[:,:,1]               # cast so uint8 inputs do not wrap around
    yb = (rgb[:,:,0].astype(np.float64) + rgb[:,:,1]) / 2 - rgb[:,:,2]
    rgl = np.sort(rg, axis=None)
    ybl = np.sort(yb, axis=None)
    al1 = 0.1
    al2 = 0.1
    T1 = np.int_(al1 * len(rgl))    # number of samples trimmed from each tail
    T2 = np.int_(al2 * len(rgl))
    rgl_tr = rgl[T1:-T2]
    ybl_tr = ybl[T1:-T2]

    urg = np.mean(rgl_tr)
    s2rg = np.mean((rgl_tr - urg) ** 2)
    uyb = np.mean(ybl_tr)
    s2yb = np.mean((ybl_tr - uyb) ** 2)

    uicm = -0.0268 * np.sqrt(urg**2 + uyb**2) + 0.1586 * np.sqrt(s2rg + s2yb)

    # 2nd term: UISM (sharpness), EME of the Sobel-edge-weighted R, G, B channels (8x8 blocks)
    Rsobel = rgb[:,:,0] * filters.sobel(rgb[:,:,0])
    Gsobel = rgb[:,:,1] * filters.sobel(rgb[:,:,1])
    Bsobel = rgb[:,:,2] * filters.sobel(rgb[:,:,2])

    Rsobel = np.round(Rsobel).astype(np.uint8)
    Gsobel = np.round(Gsobel).astype(np.uint8)
    Bsobel = np.round(Bsobel).astype(np.uint8)

    Reme = eme(Rsobel)
    Geme = eme(Gsobel)
    Beme = eme(Bsobel)

    uism = 0.299 * Reme + 0.587 * Geme + 0.114 * Beme

    #3rd term UIConM
    uiconm = logamee(gray)

    uiqm = p1 * uicm + p2 * uism + p3 * uiconm

    return uiqm


def eme(ch, blocksize=8):
    num_x = math.ceil(ch.shape[0] / blocksize)
    num_y = math.ceil(ch.shape[1] / blocksize)

    eme = 0
    w = 2. / (num_x * num_y)
    for i in range(num_x):

        xlb = i * blocksize
        if i < num_x - 1:
            xrb = (i + 1) * blocksize
        else:
            xrb = ch.shape[0]

        for j in range(num_y):

            ylb = j * blocksize
            if j < num_y - 1:
                yrb = (j + 1) * blocksize
            else:
                yrb = ch.shape[1]

            block = ch[xlb:xrb, ylb:yrb]

            blockmin = np.float_(np.min(block))
            blockmax = np.float_(np.max(block))

            # # old version
            # if blockmin == 0.0: eme += 0
            # elif blockmax == 0.0: eme += 0
            # else: eme += w * math.log(blockmax / blockmin)

            # new version
            if blockmin == 0: blockmin += 1
            if blockmax == 0: blockmax += 1
            eme += w * math.log(blockmax / blockmin)
    return eme

# PLIP (parameterized logarithmic image processing) operators used by logamee / UIConM
def plipsum(i, j, gamma=1026):
    return i + j - i * j / gamma

def plipsub(i,j,k=1026):
    return k * (i - j) / (k - j)

def plipmult(c,j,gamma=1026):
    return gamma - gamma * (1 - j / gamma)**c

def logamee(ch, blocksize=8):
    num_x = math.ceil(ch.shape[0] / blocksize)
    num_y = math.ceil(ch.shape[1] / blocksize)

    s = 0
    w = 1. / (num_x * num_y)
    for i in range(num_x):

        xlb = i * blocksize
        if i < num_x - 1:
            xrb = (i + 1) * blocksize
        else:
            xrb = ch.shape[0]

        for j in range(num_y):

            ylb = j * blocksize
            if j < num_y - 1:
                yrb = (j + 1) * blocksize
            else:
                yrb = ch.shape[1]

            block = ch[xlb:xrb, ylb:yrb]
            blockmin = np.float_(np.min(block))
            blockmax = np.float_(np.max(block))

            top = plipsub(blockmax, blockmin)
            bottom = plipsum(blockmax, blockmin)

            m = top / bottom
            if m == 0.:
                s += 0
            else:
                s += (m) * np.log(m)

    return plipmult(w, s)
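The snippet above defines nmetrics but no entry point. Here is a minimal usage sketch, assuming the same demo image paths as in the UCIQE example exist; it loads each image as RGB with skimage.io.imread and prints its UIQM score.

from skimage import io

if __name__ == "__main__":
    for path in ["data/demo/2.jpg", "data/demo/res_2.jpg"]:   # demo paths reused from the UCIQE example
        img = io.imread(path)                                  # RGB uint8 image
        print(path, nmetrics(img))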

PCQI

Paper: A Patch-Structure Representation Method for Quality Assessment of Contrast Changed Images
Within each patch, the mean intensity, signal strength, and signal structure are computed, and the distortion of the image is evaluated from these three perspectives; a minimal sketch of this decomposition is given right after the link below.
Link: https://github.com/deepxzy/PCQI-python
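To make the three quantities concrete, here is a small sketch (not part of the reference implementation) of how a single patch can be decomposed: the mean intensity is the patch mean, the signal strength is the norm of the mean-removed patch, and the signal structure is that residual normalized to unit length. The helper name and the example patch are illustrative only.

import numpy as np

def patch_decomposition(x, eps=1e-12):
    """Decompose a patch into mean intensity, signal strength, and signal structure."""
    x = np.asarray(x, dtype=np.float64).ravel()
    mean_intensity = x.mean()                               # average intensity of the patch
    residual = x - mean_intensity                           # mean-removed patch
    signal_strength = np.linalg.norm(residual)              # energy of the intensity variation
    signal_structure = residual / (signal_strength + eps)   # unit-norm structure vector
    return mean_intensity, signal_strength, signal_structure

# Example on a random 11x11 patch:
# m, s, u = patch_decomposition(np.random.rand(11, 11))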

import cv2
import numpy as np
import scipy.signal

def PCQI(img1, img2):
    # 11x11 Gaussian window (sigma = 1.5), normalized to sum to 1
    window = np.multiply(cv2.getGaussianKernel(11, 1.5), (cv2.getGaussianKernel(11, 1.5)).T)
    window = window / np.sum(window)

    L = 256     # dynamic range used to scale the mean-intensity comparison

    mu1 = scipy.signal.correlate2d(img1, window, 'valid')
    mu2 = scipy.signal.correlate2d(img2, window, 'valid')
    mu1_sq = mu1 * mu1
    mu2_sq = mu2 * mu2
    mu1_mu2 = mu1 * mu2

    sigma1_sq = scipy.signal.correlate2d(img1 * img1, window, 'valid') - mu1_sq
    sigma2_sq = scipy.signal.correlate2d(img2 * img2, window, 'valid') - mu2_sq
    sigma12 = scipy.signal.correlate2d(img1 * img2, window, 'valid') - mu1_mu2

    sigma1_sq[sigma1_sq < 0] = 0
    sigma2_sq[sigma2_sq < 0] = 0


    C = 3   # small constant for numerical stability

    pcqi_map = (4 / np.pi) * np.arctan((sigma12 + C) / (sigma1_sq + C))                       # compares signal strength
    pcqi_map = pcqi_map * ((sigma12 + C) / (np.sqrt(sigma1_sq) * np.sqrt(sigma2_sq) + C))     # compares signal structure
    pcqi_map = pcqi_map * np.exp(-abs(mu1 - mu2) / L)                                         # compares mean intensity

    mpcqi = np.mean(pcqi_map)

    return mpcqi, pcqi_map

if __name__ == '__main__':
    # Read as 8-bit grayscale and cast to float so the squared terms above do not overflow
    ref = cv2.imread("data/demo/2.jpg", 0).astype(np.float64)
    raw = cv2.imread("data/demo/res_2.jpg", 0).astype(np.float64)

    mpcqi, pcqi_map = PCQI(ref, raw)
    print(mpcqi)
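For convenience, the three metrics can be driven from a single script. Below is a minimal sketch under the assumption that the snippets above are saved as the hypothetical modules uciqe_metric.py, uiqm_metric.py, and pcqi_metric.py, and that the demo images used earlier exist.

# Hypothetical module names for the three snippets above.
import cv2
from skimage import io

from uciqe_metric import uciqe
from uiqm_metric import nmetrics
from pcqi_metric import PCQI

raw_path, enhanced_path = "data/demo/2.jpg", "data/demo/res_2.jpg"

print("UCIQE:", uciqe(1, raw_path), uciqe(1, enhanced_path))
print("UIQM :", nmetrics(io.imread(raw_path)), nmetrics(io.imread(enhanced_path)))

ref = cv2.imread(raw_path, 0).astype(float)      # PCQI compares the enhanced image against the reference
res = cv2.imread(enhanced_path, 0).astype(float)
print("PCQI :", PCQI(ref, res)[0])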

Original article by robonano (CSDN): https://blog.csdn.net/robot123654/article/details/132122509
