Implementing Blind Watermarks with OpenCV

Space here is limited. For the full content and source code, follow the WeChat official account ReverseCode and send

Blind watermarks are mainly used for image copyright tracing, user fingerprinting, and similar scenarios: they protect the owner's copyright and serve as key legal evidence against image theft and scraping.

OpenCV

Windows

Extract opencv-3.4.2.tar.gz and, when running the project, add the native library path to the VM options: -Djava.library.path=D:\opencv3.4.2\opencv\build\java\x64;D:\opencv3.4.2\opencv\build\x64\vc14\bin
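
With that VM option in place, a quick way to confirm the native library resolves is a minimal smoke test like the sketch below (the class name and printed text are illustrative, not part of the project):

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

// Minimal check that -Djava.library.path points at the OpenCV native build.
public class OpenCvSmokeTest {
    public static void main(String[] args) {
        // Resolves to opencv_java342 for OpenCV 3.4.2.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        Mat m = Mat.eye(3, 3, CvType.CV_8UC1);
        System.out.println("Loaded OpenCV " + Core.VERSION + ", 3x3 identity:\n" + m.dump());
    }
}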

Linux

Configure the Docker container

vim Dockerfile

FROM docker.io/centos:7
MAINTAINER OneJane
WORKDIR /usr
RUN mkdir /usr/local/java
ADD jdk-8u60-linux-x64.tar.gz /usr/local/java/
ENV JAVA_HOME /usr/local/java/jdk1.8.0_60
ENV JRE_HOME $JAVA_HOME/jre
ENV CLASSPATH $JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib:$CLASSPATH
ENV PATH $JAVA_HOME/bin:$PATH
RUN mkdir -p /usr/local/opencv_make/build
ADD opencv-3.4.2 /usr/local/opencv_make
RUN yum -y install gtk2 gimp-libs zlib libtiff libjpeg libpng gstreamer libavc1394 libraw1394 jasper-utils swig python libtool nasm
RUN cd /usr/local/opencv_make/build && cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local/opencv_make/build -DBUILD_TESTS=OFF ..
RUN cd /usr/local/opencv_make/build && make
RUN cp /usr/local/opencv_make/build/lib/libopencv_java342.so /usr/lib
ADD apache-tomcat-8.5.46 /usr/local/tomcat
RUN echo "tail -f /usr/local/tomcat/logs/catalina.out " >> /usr/local/tomcat/bin/catalina.sh
EXPOSE 8080
ENTRYPOINT ["/usr/local/tomcat/bin/catalina.sh"]
CMD ["start"]

Build the image and push it to Docker Hub

docker build -t='motor-tomcat8-opencv3' .
docker login
docker tag 9d98e987fc07 onejane/motor-tomcat8-opencv3:latest
docker push onejane/motor-tomcat8-opencv3:latest
docker pull docker.io/onejane/motor-tomcat8-opencv3
docker run -di --name opencv -p 8080:8080 9d98e987fc07

Blind watermark

Add the dependency

<dependencies>
    <dependency>
        <groupId>org.openpnp</groupId>
        <artifactId>opencv</artifactId>
        <version>3.4.2-1</version>
    </dependency>
</dependencies>
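
The org.openpnp artifact also bundles the native libraries inside the jar, so on a plain JDK 8 setup the library can usually be loaded without configuring java.library.path at all. A minimal sketch, assuming the artifact's nu.pattern.OpenCV helper class (an alternative to the System.loadLibrary call used in the utility below):

import nu.pattern.OpenCV;
import org.opencv.core.Core;

// Loads the native library extracted from the org.openpnp jar instead of relying on java.library.path.
public class LoadViaOpenPnp {
    public static void main(String[] args) {
        OpenCV.loadShared();
        System.out.println("OpenCV " + Core.VERSION + " loaded");
    }
}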

Embedding and extracting the blind watermark

import org.opencv.core.Point;
import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;

import javax.imageio.ImageIO;
import java.awt.*;
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

import static org.opencv.core.Core.BORDER_CONSTANT;
import static org.opencv.core.Core.copyMakeBorder;
import static org.opencv.imgproc.Imgproc.putText;


public class DarkWatermarkUtil {
    private static List<Mat> planes = new ArrayList<Mat>();
    private static List<Mat> allPlanes = new ArrayList<Mat>();

    public static Mat addImageWatermarkWithText(Mat image, String watermarkText) {
        // Optionally pad the image to an optimal DFT size
        // Mat padded = optimizeImageDim(image);
        Mat complexImage = new Mat();
        Mat padded = splitSrc(image);
        padded.convertTo(padded, CvType.CV_32F);
        planes.add(padded);
        planes.add(Mat.zeros(padded.size(), CvType.CV_32F));
        Core.merge(planes, complexImage);
        // Forward DFT into the frequency domain
        Core.dft(complexImage, complexImage);
        // Draw the text watermark into the spectrum, once normally and once flipped,
        // so the mark is symmetric before transforming back
        Scalar scalar = new Scalar(0, 0, 0);
        Point point = new Point(60, 60);

        putText(complexImage, watermarkText, point, Core.FONT_HERSHEY_DUPLEX, 0.8D, scalar, 2);
        Core.flip(complexImage, complexImage, -1);

        putText(complexImage, watermarkText, point, Core.FONT_HERSHEY_DUPLEX, 0.8D, scalar, 2);
        Core.flip(complexImage, complexImage, -1);

        return antitransformImage(complexImage, allPlanes, padded);
    }

    public static Mat getImageWatermarkWithText(Mat image) {
        List<Mat> planes = new ArrayList<Mat>();
        Mat complexImage = new Mat();
        Mat padded = splitSrc(image);
        padded.convertTo(padded, CvType.CV_32F);
        planes.add(padded);
        planes.add(Mat.zeros(padded.size(), CvType.CV_32F));
        Core.merge(planes, complexImage);
        // Forward DFT; the embedded text shows up in the magnitude spectrum
        Core.dft(complexImage, complexImage);
        Mat magnitude = createOptimizedMagnitude(complexImage);
        planes.clear();
        return magnitude;
    }

    private static Mat splitSrc(Mat mat) {
        // mat = optimizeImageDim(mat);
        Mat padded = new Mat();
        Core.split(mat, allPlanes);
        if (allPlanes.size() > 1) {
            // Work on the first channel only
            padded = allPlanes.get(0);
        } else {
            padded = mat;
        }
        return padded;
    }

    private static Mat antitransformImage(Mat complexImage, List<Mat> allPlanes, Mat padded) {
        Mat invDFT = new Mat();
        // Inverse DFT back to the spatial domain, keeping only the real part
        Core.idft(complexImage, invDFT, Core.DFT_SCALE | Core.DFT_REAL_OUTPUT, 0);
        Mat restoredImage = new Mat();
        invDFT.convertTo(restoredImage, CvType.CV_8U);
        if (allPlanes.size() == 0) {
            allPlanes.add(restoredImage);
        } else {
            allPlanes.set(0, restoredImage);
        }
        Mat lastImage = new Mat();
        Core.merge(allPlanes, lastImage);
        planes.clear();
        allPlanes.clear();

        complexImage.release();
        invDFT.release();
        restoredImage.release();
        padded.release();
        return lastImage;
    }

    private static Mat optimizeImageDim(Mat image) {
        // Pad the image to the optimal DFT size with a zero border
        Mat padded = new Mat();
        int addPixelRows = Core.getOptimalDFTSize(image.rows());
        int addPixelCols = Core.getOptimalDFTSize(image.cols());
        copyMakeBorder(image, padded, 0, addPixelRows - image.rows(), 0, addPixelCols - image.cols(),
                BORDER_CONSTANT, Scalar.all(0));

        return padded;
    }

    private static Mat createOptimizedMagnitude(Mat complexImage) {
        List<Mat> newPlanes = new ArrayList<Mat>();
        Mat mag = new Mat();
        Core.split(complexImage, newPlanes);
        Core.magnitude(newPlanes.get(0), newPlanes.get(1), mag);
        // Log scale: log(1 + magnitude), then shift the quadrants for display
        Core.add(Mat.ones(mag.size(), CvType.CV_32F), mag, mag);
        Core.log(mag, mag);
        shiftDFT(mag);
        mag.convertTo(mag, CvType.CV_8UC1);
        Core.normalize(mag, mag, 0, 255, Core.NORM_MINMAX, CvType.CV_8UC1);
        return mag;
    }

    private static void shiftDFT(Mat image) {
        // Swap the quadrants so the zero-frequency component sits at the center
        image = image.submat(new Rect(0, 0, image.cols() & -2, image.rows() & -2));
        int cx = image.cols() / 2;
        int cy = image.rows() / 2;

        Mat q0 = new Mat(image, new Rect(0, 0, cx, cy));
        Mat q1 = new Mat(image, new Rect(cx, 0, cx, cy));
        Mat q2 = new Mat(image, new Rect(0, cy, cx, cy));
        Mat q3 = new Mat(image, new Rect(cx, cy, cx, cy));
        Mat tmp = new Mat();
        q0.copyTo(tmp);
        q3.copyTo(q0);
        tmp.copyTo(q3);
        q1.copyTo(tmp);
        q2.copyTo(q1);
        tmp.copyTo(q2);
    }

    public static BufferedImage Mat2BufImg(Mat matrix, String fileExtension) {
        // Encode the matrix into bytes appropriate for this file extension
        MatOfByte mob = new MatOfByte();
        Imgcodecs.imencode(fileExtension, matrix, mob);
        // Decode the byte array into a BufferedImage
        byte[] byteArray = mob.toArray();
        BufferedImage bufImage = null;
        try {
            InputStream in = new ByteArrayInputStream(byteArray);
            bufImage = ImageIO.read(in);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return bufImage;
    }

    public static Mat BufImg2Mat(BufferedImage original, int imgType, int matType) {
        if (original == null) {
            throw new IllegalArgumentException("original == null");
        }
        // System.loadLibrary("opencv_java342");
        // System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        // System.load("D:\\opencv3.4.2\\opencv\\build\\java\\x64\\opencv_java342.dll");
        // System.out.println(Core.NATIVE_LIBRARY_NAME);

        // Convert only if the image is not already of the expected type
        if (original.getType() != imgType) {
            BufferedImage image = new BufferedImage(original.getWidth(), original.getHeight(), imgType);

            // Draw the original onto the new buffer
            Graphics2D g = image.createGraphics();
            try {
                g.setComposite(AlphaComposite.Src);
                g.drawImage(original, 0, 0, null);
            } finally {
                g.dispose();
            }
            // Use the converted image from here on
            original = image;
        }

        byte[] pixels = ((DataBufferByte) original.getRaster().getDataBuffer()).getData();
        Mat mat = Mat.eye(original.getHeight(), original.getWidth(), matType);
        mat.put(0, 0, pixels);
        return mat;
    }

    static {
        // Load the OpenCV native library
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    }

    public static void main(String[] args) {
        // Embed the blind watermark
        // Mat img = Imgcodecs.imread("/tmp/s.jpg");
        // Mat watermarkImg = addImageWatermarkWithText(img, "onejane");
        // Imgcodecs.imwrite("/tmp/s_encode.jpg", watermarkImg);

        // Extract the blind watermark
        Mat img = Imgcodecs.imread("/tmp/s_encode.jpg");
        Mat watermarkImg = getImageWatermarkWithText(img);
        Imgcodecs.imwrite("s_decode.jpg", watermarkImg);
    }
}

Configure the Maven plugins so that the dependencies are packaged into the jar.

<plugin>
    <groupId>org.apache.maven.plugins</groupId>
    <artifactId>maven-compiler-plugin</artifactId>
    <version>3.1</version>
    <configuration>
        <source>1.8</source>
        <target>1.8</target>
        <encoding>utf8</encoding>
    </configuration>
</plugin>
<plugin>
    <artifactId>maven-assembly-plugin</artifactId>
    <configuration>
        <archive>
            <manifest>
                <mainClass>com.allen.capturewebdata.Main</mainClass>
            </manifest>
        </archive>
        <descriptorRefs>
            <descriptorRef>jar-with-dependencies</descriptorRef>
        </descriptorRefs>
    </configuration>
</plugin>

Run java -cp pirate_tools_aliyun_image-1.2.5-jar-with-dependencies.jar com.onejane.image.DarkWatermarkUtil to invoke the implementation class directly and extract the embedded blind watermark.
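
If you want that command to work on arbitrary files, a thin wrapper that reads the input and output paths from the command line is enough. A minimal sketch; the DarkWatermarkCli class name and the argument handling are illustrative additions, not part of the original tool:

import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.imgcodecs.Imgcodecs;

// Hypothetical CLI wrapper around DarkWatermarkUtil.
public class DarkWatermarkCli {
    static {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    }

    public static void main(String[] args) {
        // args[0] = watermarked image, args[1] = where to write the decoded spectrum
        String input = args.length > 0 ? args[0] : "/tmp/s_encode.jpg";
        String output = args.length > 1 ? args[1] : "s_decode.jpg";
        Mat img = Imgcodecs.imread(input);
        Mat decoded = DarkWatermarkUtil.getImageWatermarkWithText(img);
        Imgcodecs.imwrite(output, decoded);
    }
}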

Blind watermark with Alibaba Cloud

  1. Upload the image to be watermarked to an OSS bucket.

  2. Prepare the watermark material according to the type of blind watermark being added:

  • Image watermark: prepare the watermark image and upload it to an OSS bucket.
  • Text watermark: prepare the watermark text.
  3. Call the PutProject API to create a project with Type set to PhotoStarter, or create the project in the Intelligent Media Management (IMM) console.

Create the project

  1. Add the pom dependency
<dependency>
    <groupId>com.aliyun</groupId>
    <artifactId>imm20170906</artifactId>
    <version>1.0.0</version>
</dependency>
  2. Implement embedding and extracting the blind watermark (the class wrapper, imports, and credential handling in the snippet below are added so it compiles on its own; they are illustrative)
import com.alibaba.fastjson.JSON;
import com.aliyun.imm20170906.models.*;
import com.aliyun.teaopenapi.models.Config;

// Wrapper class added for a self-contained example; supply your own AccessKey pair.
public class AliyunBlindWatermarkDemo {
    static String accessKeyId = System.getenv("ALIYUN_AK_ID");
    static String accessKeySecret = System.getenv("ALIYUN_AK_SECRET");

    public static com.aliyun.imm20170906.Client createClient(String accessKeyId, String accessKeySecret) throws Exception {
        Config config = new Config()
                .setAccessKeyId(accessKeyId)
                .setAccessKeySecret(accessKeySecret);
        config.endpoint = "imm.cn-beijing.aliyuncs.com";
        return new com.aliyun.imm20170906.Client(config);
    }

    public static void main(String ss[]) {
        try {
            com.aliyun.imm20170906.Client Imgclient = createClient(accessKeyId, accessKeySecret);

            // Embed the blind watermark (text variant; the commented lines show the image-watermark variant)
            EncodeBlindWatermarkRequest encodeBlindWatermarkRequest = new EncodeBlindWatermarkRequest();
            encodeBlindWatermarkRequest.setModel("DWT_IBG");
            encodeBlindWatermarkRequest.setImageUri("oss://onejane-opencv/DSC02804.JPG");
            // encodeBlindWatermarkRequest.setWatermarkUri("oss://onejane-opencv/water.png");
            // encodeBlindWatermarkRequest.setTargetImageType("png");
            encodeBlindWatermarkRequest.setContent("哈罗摩托");
            encodeBlindWatermarkRequest.setProject("onejane");
            encodeBlindWatermarkRequest.setTargetUri("oss://onejane-opencv/DSC02804_encode.JPG");
            // encodeBlindWatermarkRequest.setTargetUri("oss://onejane-opencv/DSC02804_img_encode.JPG");
            EncodeBlindWatermarkResponse encodeBlindWatermarkResponse = Imgclient.encodeBlindWatermark(encodeBlindWatermarkRequest);
            System.out.println(JSON.toJSONString(encodeBlindWatermarkResponse));

            // Extract the blind watermark
            DecodeBlindWatermarkRequest decodeBlindWatermarkRequest = new DecodeBlindWatermarkRequest();
            decodeBlindWatermarkRequest.setImageQuality(90);
            decodeBlindWatermarkRequest.setModel("DWT_IBG");
            // decodeBlindWatermarkRequest.setImageUri("oss://onejane-opencv/DSC02804_img_encode.JPG");
            decodeBlindWatermarkRequest.setImageUri("oss://onejane-opencv/DSC02804_encode.JPG");
            decodeBlindWatermarkRequest.setOriginalImageUri("oss://onejane-opencv/DSC02804.JPG");
            decodeBlindWatermarkRequest.setProject("onejane");
            // decodeBlindWatermarkRequest.setTargetUri("oss://onejane-opencv/DSC02804_img_decode.JPG");
            decodeBlindWatermarkRequest.setTargetUri("oss://onejane-opencv/DSC02804_decode.JPG");
            DecodeBlindWatermarkResponse decodeBlindWatermarkResponse = Imgclient.decodeBlindWatermark(decodeBlindWatermarkRequest);
            System.out.println(JSON.toJSONString(decodeBlindWatermarkResponse));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
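
The decode call writes its result back to OSS, so to inspect the extracted watermark locally you still need to fetch the object. A minimal sketch, assuming the com.aliyun.oss:aliyun-sdk-oss dependency; the endpoint, credentials, and local path are placeholders, while the bucket and object key are the example values used above:

import com.aliyun.oss.OSS;
import com.aliyun.oss.OSSClientBuilder;
import com.aliyun.oss.model.GetObjectRequest;

import java.io.File;

public class FetchDecodedWatermark {
    public static void main(String[] args) {
        // Placeholder endpoint and credentials; use the region your bucket lives in.
        OSS ossClient = new OSSClientBuilder().build("https://oss-cn-beijing.aliyuncs.com",
                System.getenv("ALIYUN_AK_ID"), System.getenv("ALIYUN_AK_SECRET"));
        try {
            // Download the decoded image produced by decodeBlindWatermark.
            ossClient.getObject(new GetObjectRequest("onejane-opencv", "DSC02804_decode.JPG"),
                    new File("/tmp/DSC02804_decode.JPG"));
        } finally {
            ossClient.shutdown();
        }
    }
}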
Author: J
Link: http://onejane.github.io/2021/03/11/opencv实现盲水印/
Copyright: Unless otherwise stated, all posts on this blog are licensed under CC BY-NC-SA 4.0. Please credit 万物皆可逆向 when reposting.