/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.io.IOUtils;

import com.google.common.annotations.VisibleForTesting;

/**
 * This class is used by datanodes to maintain metadata about their replicas.
 * It provides a general interface for a replica's meta information.
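 * <p>
 * On disk, a replica corresponds to a block file and a meta file under a
 * volume's base directory, possibly nested in {@code subdir} directories,
 * for example (illustrative names only):
 * <pre>
 *   baseDir/subdir0/subdir12/blk_1073741825
 *   baseDir/subdir0/subdir12/blk_1073741825_1001.meta
 * </pre>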
 */
@InterfaceAudience.Private
abstract public class ReplicaInfo extends Block implements Replica {

  /** volume where the replica belongs */
  private FsVolumeSpi volume;

  /**
   * Base directory containing numerically-identified subdirectories and
   * possibly blocks.
   */
  private File baseDir;

  /**
   * Integers representing the subdirectory path from the base directory to
   * the directory containing this replica.
   */
  private int[] subDirs;

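  /**
   * Interned {@link File} instances keyed by base directory path. Many
   * replicas share the same base directory, so a single File object is
   * reused to reduce memory usage on the datanode.
   */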
  private static final Map<String, File> internedBaseDirs =
      new HashMap<String, File>();

  /**
   * Constructor for a zero-length replica.
   * @param blockId block id
   * @param genStamp replica generation stamp
   * @param vol volume where replica is located
   * @param dir directory path where block and meta files are located
   */
  ReplicaInfo(long blockId, long genStamp, FsVolumeSpi vol, File dir) {
    this(blockId, 0L, genStamp, vol, dir);
  }

  /**
   * Constructor
   * @param block a block
   * @param vol volume where replica is located
   * @param dir directory path where block and meta files are located
   */
  ReplicaInfo(Block block, FsVolumeSpi vol, File dir) {
    this(block.getBlockId(), block.getNumBytes(),
        block.getGenerationStamp(), vol, dir);
  }

  /**
   * Constructor
   * @param blockId block id
   * @param len replica length
   * @param genStamp replica generation stamp
   * @param vol volume where replica is located
   * @param dir directory path where block and meta files are located
   */
  ReplicaInfo(long blockId, long len, long genStamp,
      FsVolumeSpi vol, File dir) {
    super(blockId, len, genStamp);
    this.volume = vol;
    setDirInternal(dir);
  }

  /**
   * Copy constructor.
   * @param from the replica to copy from
   */
  ReplicaInfo(ReplicaInfo from) {
    this(from, from.getVolume(), from.getDir());
  }

  /**
   * Get the full path of this replica's data file.
   * @return the full path of this replica's data file
   */
  public File getBlockFile() {
    return new File(getDir(), getBlockName());
  }

  /**
   * Get the full path of this replica's meta file.
   * @return the full path of this replica's meta file
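   *         (typically named {@code blk_<blockId>_<genStamp>.meta})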
   */
  public File getMetaFile() {
    return new File(getDir(),
        DatanodeUtil.getMetaName(getBlockName(), getGenerationStamp()));
  }

  /**
   * Get the volume where this replica is located on disk.
   * @return the volume where this replica is located on disk
   */
  public FsVolumeSpi getVolume() {
    return volume;
  }

  /**
   * Set the volume where this replica is located on disk.
   */
  void setVolume(FsVolumeSpi vol) {
    this.volume = vol;
  }

  /**
   * Return the parent directory path where this replica is located.
   * @return the parent directory path where this replica is located
   */
  File getDir() {
    if (subDirs == null) {
      return null;
    }

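    // Rebuild the path as baseDir/subdir<i>/... from the stored subdirectory
    // indices instead of keeping a full File object per replica.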
    StringBuilder sb = new StringBuilder();
    for (int i : subDirs) {
      sb.append(DataStorage.BLOCK_SUBDIR_PREFIX);
      sb.append(i);
      sb.append("/");
    }
    return new File(baseDir, sb.toString());
  }

  /**
   * Set the parent directory where this replica is located.
   * @param dir the parent directory where the replica is located
   */
  public void setDir(File dir) {
    setDirInternal(dir);
  }

  private void setDirInternal(File dir) {
    if (dir == null) {
      subDirs = null;
      baseDir = null;
      return;
    }

    ReplicaDirInfo replicaDirInfo = parseSubDirs(dir);
    this.subDirs = replicaDirInfo.subDirs;

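    // Intern the base directory File so that every replica under the same
    // base directory shares a single File instance.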
    synchronized (internedBaseDirs) {
      if (!internedBaseDirs.containsKey(replicaDirInfo.baseDirPath)) {
        // Create a new String path of this file and make a brand new File
        // object to guarantee we drop the reference to the underlying char[]
        // storage.
        File baseDir = new File(new String(replicaDirInfo.baseDirPath));
        internedBaseDirs.put(replicaDirInfo.baseDirPath, baseDir);
      }
      this.baseDir = internedBaseDirs.get(replicaDirInfo.baseDirPath);
    }
  }

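  /**
   * Holds the result of {@link #parseSubDirs(File)}: the base directory path
   * and the numeric indices of the subdirectories leading to a replica.
   */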
  @VisibleForTesting
  public static class ReplicaDirInfo {
    @VisibleForTesting
    public String baseDirPath;

    @VisibleForTesting
    public int[] subDirs;
  }

  @VisibleForTesting
  public static ReplicaDirInfo parseSubDirs(File dir) {
    ReplicaDirInfo ret = new ReplicaDirInfo();

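    // Walk upward from the given directory, e.g. .../subdir3/subdir7,
    // collecting the numeric suffixes until the first ancestor whose name
    // does not start with the subdir prefix; that ancestor is the base dir.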
    File currentDir = dir;
    List<Integer> subDirList = new ArrayList<Integer>();
    while (currentDir.getName().startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)) {
      // Prepend the integer into the list.
      subDirList.add(0, Integer.parseInt(currentDir.getName().replaceFirst(
          DataStorage.BLOCK_SUBDIR_PREFIX, "")));
      currentDir = currentDir.getParentFile();
    }
    ret.subDirs = new int[subDirList.size()];
    for (int i = 0; i < subDirList.size(); i++) {
      ret.subDirs[i] = subDirList.get(i);
    }

    ret.baseDirPath = currentDir.getAbsolutePath();

    return ret;
  }

  /**
   * Check if this replica has already been unlinked.
   * @return true if the replica has already been unlinked
   *         or does not need to be unlinked; false otherwise
   */
  public boolean isUnlinked() {
    return true;   // no need to be unlinked
  }

  /**
   * Set this replica as unlinked.
   */
  public void setUnlinked() {
    // no need to be unlinked
  }

  /**
   * Copy the specified file into a temporary file. Then rename the
   * temporary file to the original name. This will cause any
   * hard links to the original file to be removed. The temporary
   * files are created in the same directory. The temporary files will
   * be recovered (especially on Windows) on datanode restart.
   */
  private void unlinkFile(File file, Block b) throws IOException {
    File tmpFile = DatanodeUtil.createTmpFile(b,
        DatanodeUtil.getUnlinkTmpFile(file));
    try {
      FileInputStream in = new FileInputStream(file);
      try {
        FileOutputStream out = new FileOutputStream(tmpFile);
        try {
          IOUtils.copyBytes(in, out, 16 * 1024);
        } finally {
          out.close();
        }
      } finally {
        in.close();
      }
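      // Make sure the copy is complete before replacing the original file.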
      if (file.length() != tmpFile.length()) {
        throw new IOException("Copy of file " + file + " size " + file.length()
            + " into file " + tmpFile
            + " resulted in a size of " + tmpFile.length());
      }
      FileUtil.replaceFile(tmpFile, file);
    } catch (IOException e) {
      boolean done = tmpFile.delete();
      if (!done) {
        DataNode.LOG.info("detachFile failed to delete temporary file " +
            tmpFile);
      }
      throw e;
    }
  }

  /**
   * Remove a hard link by copying the block to a temporary place and
   * then moving it back.
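   * <p>
   * For example (illustrative only), a caller that is about to modify a
   * finalized replica might first break copy-on-write sharing:
   * <pre>{@code
   *   if (replicaInfo.unlinkBlock(1)) {
   *     // the block and meta files are now private copies
   *   }
   * }</pre>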
   * @param numLinks number of hard links
   * @return true if the copy is successful;
   *         false if the replica is already unlinked or does not need to be
   *         unlinked
   * @throws IOException if there is any copy error
   */
  public boolean unlinkBlock(int numLinks) throws IOException {
    if (isUnlinked()) {
      return false;
    }
    File file = getBlockFile();
    if (file == null || getVolume() == null) {
      throw new IOException("detachBlock:Block not found. " + this);
    }
    File meta = getMetaFile();

    if (HardLink.getLinkCount(file) > numLinks) {
      DataNode.LOG.info("CopyOnWrite for block " + this);
      unlinkFile(file, this);
    }
    if (HardLink.getLinkCount(meta) > numLinks) {
      unlinkFile(meta, this);
    }
    setUnlinked();
    return true;
  }

  /**
   * Set this replica's generation stamp to be a newer one.
   * @param newGS new generation stamp
   * @throws IOException if the new generation stamp is not greater than
   *         the current one
   */
  void setNewerGenerationStamp(long newGS) throws IOException {
    long curGS = getGenerationStamp();
    if (newGS <= curGS) {
      throw new IOException("New generation stamp (" + newGS
          + ") must be greater than current one (" + curGS + ")");
    }
    setGenerationStamp(newGS);
  }

  @Override  //Object
  public String toString() {
    return getClass().getSimpleName()
        + ", " + super.toString()
        + ", " + getState()
        + "\n  getNumBytes()     = " + getNumBytes()
        + "\n  getBytesOnDisk()  = " + getBytesOnDisk()
        + "\n  getVisibleLength()= " + getVisibleLength()
        + "\n  getVolume()       = " + getVolume()
        + "\n  getBlockFile()    = " + getBlockFile();
  }
}