Skip to content

Commit f3e5754

Browse files
bshashikant and guangxuCheng
authored and committed
THADOOP-42 HDFS-15488. Add a command to list all snapshots for a snapshottable root with snapshot Ids. (apache#2166)
1 parent 680adfa commit f3e5754

File tree

28 files changed

+791
-6
lines changed

28 files changed

+791
-6
lines changed

hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
<Class name="org.apache.hadoop.hdfs.util.StripedBlockUtil$ChunkByteArray"/>
2222
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing$DiffReportListingEntry"/>
2323
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing"/>
24+
<Class name="org.apache.hadoop.hdfs.protocol.SnapshotStatus"/>
2425
</Or>
2526
<Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
2627
</Match>

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -146,6 +146,7 @@
146146
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
147147
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
148148
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
149+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
149150
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
150151
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
151152
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2212,6 +2213,24 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
22122213
}
22132214
}
22142215

2216+
/**
2217+
* Get listing of all the snapshots for a snapshottable directory.
2218+
*
2219+
* @return Information about all the snapshots for a snapshottable directory
2220+
* @throws IOException If an I/O error occurred
2221+
* @see ClientProtocol#getSnapshotListing(String)
2222+
*/
2223+
public SnapshotStatus[] getSnapshotListing(String snapshotRoot)
2224+
throws IOException {
2225+
checkOpen();
2226+
try (TraceScope ignored = tracer.newScope("getSnapshotListing")) {
2227+
return namenode.getSnapshotListing(snapshotRoot);
2228+
} catch (RemoteException re) {
2229+
throw re.unwrapRemoteException();
2230+
}
2231+
}
2232+
2233+
22152234
/**
22162235
* Allow snapshot on a directory.
22172236
*

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,7 @@ public enum OpType {
111111
SET_XATTR("op_set_xattr"),
112112
GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
113113
GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"),
114+
GET_SNAPSHOT_LIST("op_get_snapshot_list"),
114115
TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
115116
UNSET_EC_POLICY("op_unset_ec_policy"),
116117
UNSET_STORAGE_POLICY("op_unset_storage_policy");

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@
112112
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing.DiffReportListingEntry;
113113
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
114114
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
115+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
115116
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
116117
import org.apache.hadoop.io.Text;
117118
import org.apache.hadoop.net.NetUtils;
@@ -2017,6 +2018,19 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
20172018
return dfs.getSnapshottableDirListing();
20182019
}
20192020

2021+
/**
2022+
* @return all the snapshots for a snapshottable directory
2023+
* @throws IOException
2024+
*/
2025+
public SnapshotStatus[] getSnapshotListing(Path snapshotRoot)
2026+
throws IOException {
2027+
Path absF = fixRelativePart(snapshotRoot);
2028+
statistics.incrementReadOps(1);
2029+
storageStatistics
2030+
.incrementOpCounter(OpType.GET_SNAPSHOT_LIST);
2031+
return dfs.getSnapshotListing(getPathName(absF));
2032+
}
2033+
20202034
@Override
20212035
public void deleteSnapshot(final Path snapshotDir, final String snapshotName)
20222036
throws IOException {

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -709,6 +709,18 @@ DirectoryListing getListing(String src, byte[] startAfter,
709709
SnapshottableDirectoryStatus[] getSnapshottableDirListing()
710710
throws IOException;
711711

712+
/**
713+
* Get listing of all the snapshots for a snapshottable directory.
714+
*
715+
* @return Information about all the snapshots for a snapshottable directory
716+
* @throws IOException If an I/O error occurred
717+
*/
718+
@Idempotent
719+
@ReadOnly(isCoordinated = true)
720+
SnapshotStatus[] getSnapshotListing(String snapshotRoot)
721+
throws IOException;
722+
723+
712724
///////////////////////////////////////
713725
// System issues and management
714726
///////////////////////////////////////
Lines changed: 226 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,226 @@
1+
/**
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
* <p>
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
* <p>
12+
* Unless required by applicable law or agreed to in writing, software
13+
* distributed under the License is distributed on an "AS IS" BASIS,
14+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15+
* See the License for the specific language governing permissions and
16+
* limitations under the License.
17+
*/
18+
package org.apache.hadoop.hdfs.protocol;
19+
20+
import java.io.PrintStream;
21+
import java.text.SimpleDateFormat;
22+
import java.util.Date;
23+
import java.util.EnumSet;
24+
25+
import org.apache.hadoop.fs.Path;
26+
import org.apache.hadoop.fs.permission.FsPermission;
27+
import org.apache.hadoop.hdfs.DFSUtilClient;
28+
29+
/**
30+
* Metadata about a snapshottable directory.
31+
*/
32+
public class SnapshotStatus {
33+
/**
34+
* Basic information of the snapshot directory.
35+
*/
36+
private final HdfsFileStatus dirStatus;
37+
38+
/**
39+
* Snapshot ID for the snapshot.
40+
*/
41+
private final int snapshotID;
42+
43+
/**
44+
* Full path of the parent.
45+
*/
46+
private byte[] parentFullPath;
47+
48+
public SnapshotStatus(long modificationTime, long accessTime,
49+
FsPermission permission,
50+
EnumSet<HdfsFileStatus.Flags> flags,
51+
String owner, String group, byte[] localName,
52+
long inodeId, int childrenNum, int snapshotID,
53+
byte[] parentFullPath) {
54+
this.dirStatus = new HdfsFileStatus.Builder()
55+
.isdir(true)
56+
.mtime(modificationTime)
57+
.atime(accessTime)
58+
.perm(permission)
59+
.flags(flags)
60+
.owner(owner)
61+
.group(group)
62+
.path(localName)
63+
.fileId(inodeId)
64+
.children(childrenNum)
65+
.build();
66+
this.snapshotID = snapshotID;
67+
this.parentFullPath = parentFullPath;
68+
}
69+
70+
public SnapshotStatus(HdfsFileStatus dirStatus,
71+
int snapshotNumber, byte[] parentFullPath) {
72+
this.dirStatus = dirStatus;
73+
this.snapshotID = snapshotNumber;
74+
this.parentFullPath = parentFullPath;
75+
}
76+
77+
/**
78+
* sets the prent path name.
79+
* @param path parent path
80+
*/
81+
public void setParentFullPath(byte[] path) {
82+
parentFullPath = path;
83+
}
84+
85+
/**
86+
* @return snapshot id for the snapshot
87+
*/
88+
public int getSnapshotID() {
89+
return snapshotID;
90+
}
91+
92+
/**
93+
* @return The basic information of the directory
94+
*/
95+
public HdfsFileStatus getDirStatus() {
96+
return dirStatus;
97+
}
98+
99+
/**
100+
* @return Full path of the file
101+
*/
102+
public byte[] getParentFullPath() {
103+
return parentFullPath;
104+
}
105+
106+
/**
107+
* @return Full path of the snapshot
108+
*/
109+
public Path getFullPath() {
110+
String parentFullPathStr =
111+
(parentFullPath == null || parentFullPath.length == 0) ?
112+
"/" : DFSUtilClient.bytes2String(parentFullPath);
113+
return new Path(getSnapshotPath(parentFullPathStr,
114+
dirStatus.getLocalName()));
115+
}
116+
117+
/**
118+
* Print a list of {@link SnapshotStatus} out to a given stream.
119+
*
120+
* @param stats The list of {@link SnapshotStatus}
121+
* @param out The given stream for printing.
122+
*/
123+
public static void print(SnapshotStatus[] stats,
124+
PrintStream out) {
125+
if (stats == null || stats.length == 0) {
126+
out.println();
127+
return;
128+
}
129+
int maxRepl = 0, maxLen = 0, maxOwner = 0, maxGroup = 0;
130+
int maxSnapshotID = 0;
131+
for (SnapshotStatus status : stats) {
132+
maxRepl = maxLength(maxRepl, status.dirStatus.getReplication());
133+
maxLen = maxLength(maxLen, status.dirStatus.getLen());
134+
maxOwner = maxLength(maxOwner, status.dirStatus.getOwner());
135+
maxGroup = maxLength(maxGroup, status.dirStatus.getGroup());
136+
maxSnapshotID = maxLength(maxSnapshotID, status.snapshotID);
137+
}
138+
139+
String lineFormat = "%s%s " // permission string
140+
+ "%" + maxRepl + "s "
141+
+ (maxOwner > 0 ? "%-" + maxOwner + "s " : "%s")
142+
+ (maxGroup > 0 ? "%-" + maxGroup + "s " : "%s")
143+
+ "%" + maxLen + "s "
144+
+ "%s " // mod time
145+
+ "%" + maxSnapshotID + "s "
146+
+ "%s"; // path
147+
SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm");
148+
149+
for (SnapshotStatus status : stats) {
150+
String line = String.format(lineFormat, "d",
151+
status.dirStatus.getPermission(),
152+
status.dirStatus.getReplication(),
153+
status.dirStatus.getOwner(),
154+
status.dirStatus.getGroup(),
155+
String.valueOf(status.dirStatus.getLen()),
156+
dateFormat.format(new Date(status.dirStatus.getModificationTime())),
157+
status.snapshotID,
158+
getSnapshotPath(DFSUtilClient.bytes2String(status.parentFullPath),
159+
status.dirStatus.getLocalName())
160+
);
161+
out.println(line);
162+
}
163+
}
164+
165+
private static int maxLength(int n, Object value) {
166+
return Math.max(n, String.valueOf(value).length());
167+
}
168+
169+
public static class Bean {
170+
private final String path;
171+
private final int snapshotID;
172+
private final long modificationTime;
173+
private final short permission;
174+
private final String owner;
175+
private final String group;
176+
177+
public Bean(String path, int snapshotID, long
178+
modificationTime, short permission, String owner, String group) {
179+
this.path = path;
180+
this.snapshotID = snapshotID;
181+
this.modificationTime = modificationTime;
182+
this.permission = permission;
183+
this.owner = owner;
184+
this.group = group;
185+
}
186+
187+
public String getPath() {
188+
return path;
189+
}
190+
191+
public int getSnapshotID() {
192+
return snapshotID;
193+
}
194+
195+
public long getModificationTime() {
196+
return modificationTime;
197+
}
198+
199+
public short getPermission() {
200+
return permission;
201+
}
202+
203+
public String getOwner() {
204+
return owner;
205+
}
206+
207+
public String getGroup() {
208+
return group;
209+
}
210+
}
211+
212+
static String getSnapshotPath(String snapshottableDir,
213+
String snapshotRelativePath) {
214+
String parentFullPathStr =
215+
snapshottableDir == null || snapshottableDir.isEmpty() ?
216+
"/" : snapshottableDir;
217+
final StringBuilder b = new StringBuilder(parentFullPathStr);
218+
if (b.charAt(b.length() - 1) != Path.SEPARATOR_CHAR) {
219+
b.append(Path.SEPARATOR);
220+
}
221+
return b.append(HdfsConstants.DOT_SNAPSHOT_DIR)
222+
.append(Path.SEPARATOR)
223+
.append(snapshotRelativePath)
224+
.toString();
225+
}
226+
}

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,7 @@
8383
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
8484
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
8585
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
86+
import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
8687
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusRequestProto;
8788
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
8889
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.ModifyAclEntriesRequestProto;
@@ -1152,6 +1153,25 @@ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
11521153
}
11531154
}
11541155

1156+
@Override
1157+
public SnapshotStatus[] getSnapshotListing(String path)
1158+
throws IOException {
1159+
GetSnapshotListingRequestProto req =
1160+
GetSnapshotListingRequestProto.newBuilder()
1161+
.setSnapshotRoot(path).build();
1162+
try {
1163+
GetSnapshotListingResponseProto result = rpcProxy
1164+
.getSnapshotListing(null, req);
1165+
1166+
if (result.hasSnapshotList()) {
1167+
return PBHelperClient.convert(result.getSnapshotList());
1168+
}
1169+
return null;
1170+
} catch (ServiceException e) {
1171+
throw ProtobufHelper.getRemoteException(e);
1172+
}
1173+
}
1174+
11551175
@Override
11561176
public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
11571177
String fromSnapshot, String toSnapshot) throws IOException {

0 commit comments

Comments
 (0)