/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;

import com.google.common.annotations.VisibleForTesting;

/**
 * This class provides an interface for implementors of a Hadoop file system
 * (analogous to the VFS of Unix). Applications do not access this class;
 * instead they access files across all file systems using {@link FileContext}.
 *
 * Pathnames passed to AbstractFileSystem can be fully qualified URIs that
 * match the "this" file system (i.e. same scheme and authority), or
 * slash-relative names that are assumed to be relative to the root of the
 * "this" file system.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */
public abstract class AbstractFileSystem {
  static final Log LOG = LogFactory.getLog(AbstractFileSystem.class);

  /** Recording statistics per file system class. */
  private static final Map<URI, Statistics> 
      STATISTICS_TABLE = new HashMap<URI, Statistics>();
  
  /** Cache of constructors for each file system class. */
  private static final Map<Class<?>, Constructor<?>> CONSTRUCTOR_CACHE = 
    new ConcurrentHashMap<Class<?>, Constructor<?>>();
  
  private static final Class<?>[] URI_CONFIG_ARGS = 
    new Class[]{URI.class, Configuration.class};
  
  /** The statistics for this file system. */
  protected Statistics statistics;

  @VisibleForTesting
  static final String NO_ABSTRACT_FS_ERROR = "No AbstractFileSystem configured for scheme";
  
  private final URI myUri;
  
  public Statistics getStatistics() {
    return statistics;
  }
  
  /**
   * Returns true if the specified string is considered valid in the path part
   * of a URI by this file system. The default implementation enforces the
   * rules of HDFS, but subclasses may override this method to implement
   * specific validation rules for specific file systems.
   * 
   * @param src String source filename to check, path part of the URI
   * @return boolean true if the specified string is considered valid
   */
  public boolean isValidName(String src) {
    // Prohibit "..", "." and anything containing ":"
    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
    while(tokens.hasMoreTokens()) {
      String element = tokens.nextToken();
      if (element.equals("..") ||
          element.equals(".")  ||
          (element.indexOf(":") >= 0)) {
        return false;
      }
    }
    return true;
  }
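
  /*
   * Illustrative note (not part of the API): the default validity check above
   * rejects "." and ".." path elements and any element containing ':'. A
   * hedged sketch of what a caller might observe, assuming an
   * AbstractFileSystem instance "fs" that uses the default implementation:
   *
   *   fs.isValidName("/user/alice/data.txt");   // true
   *   fs.isValidName("/user/../etc/passwd");    // false, ".." element
   *   fs.isValidName("/logs/app:2024");         // false, contains ':'
   */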

  /** 
   * Create an object for the given class and initialize it from conf.
   *
   * @param theClass class of which an object is created
   * @param uri URI of the file system
   * @param conf Configuration
   * @return a new object
   */
  @SuppressWarnings("unchecked")
  static <T> T newInstance(Class<T> theClass,
    URI uri, Configuration conf) {
    T result;
    try {
      Constructor<T> meth = (Constructor<T>) CONSTRUCTOR_CACHE.get(theClass);
      if (meth == null) {
        meth = theClass.getDeclaredConstructor(URI_CONFIG_ARGS);
        meth.setAccessible(true);
        CONSTRUCTOR_CACHE.put(theClass, meth);
      }
      result = meth.newInstance(uri, conf);
    } catch (InvocationTargetException e) {
      Throwable cause = e.getCause();
      if (cause instanceof RuntimeException) {
        throw (RuntimeException) cause;
      } else {
        throw new RuntimeException(cause);
      }
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
    return result;
  }
  
  /**
   * Create a file system instance for the specified uri using the conf. The
   * conf is used to find the class name that implements the file system. The
   * conf is also passed to the file system for its configuration.
   *
   * @param uri URI of the file system
   * @param conf Configuration for the file system
   * 
   * @return the file system for the given URI
   *
   * @throws UnsupportedFileSystemException if the file system for
   *           <code>uri</code> is not found
   */
  public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
      throws UnsupportedFileSystemException {
    final String fsImplConf = String.format("fs.AbstractFileSystem.%s.impl",
        uri.getScheme());

    Class<?> clazz = conf.getClass(fsImplConf, null);
    if (clazz == null) {
      throw new UnsupportedFileSystemException(String.format(
          "%s=null: %s: %s",
          fsImplConf, NO_ABSTRACT_FS_ERROR, uri.getScheme()));
    }
    return (AbstractFileSystem) newInstance(clazz, uri, conf);
  }

  /**
   * Get the statistics for a particular file system.
   * 
   * @param uri
   *          used as a key to look up STATISTICS_TABLE. Only the scheme and
   *          authority parts of the URI are used.
   * @return a statistics object
   */
  protected static synchronized Statistics getStatistics(URI uri) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new IllegalArgumentException("Scheme not defined in the uri: "
          + uri);
    }
    URI baseUri = getBaseUri(uri);
    Statistics result = STATISTICS_TABLE.get(baseUri);
    if (result == null) {
      result = new Statistics(scheme);
      STATISTICS_TABLE.put(baseUri, result);
    }
    return result;
  }
  
  private static URI getBaseUri(URI uri) {
    String scheme = uri.getScheme();
    String authority = uri.getAuthority();
    String baseUriString = scheme + "://";
    if (authority != null) {
      baseUriString = baseUriString + authority;
    } else {
      baseUriString = baseUriString + "/";
    }
    return URI.create(baseUriString);
  }
  
  public static synchronized void clearStatistics() {
    for(Statistics stat: STATISTICS_TABLE.values()) {
      stat.reset();
    }
  }

  /**
   * Prints statistics for all file systems.
   */
  public static synchronized void printStatistics() {
    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
      System.out.println("  FileSystem " + pair.getKey().getScheme() + "://"
          + pair.getKey().getAuthority() + ": " + pair.getValue());
    }
  }
  
  protected static synchronized Map<URI, Statistics> getAllStatistics() {
    Map<URI, Statistics> statsMap = new HashMap<URI, Statistics>(
        STATISTICS_TABLE.size());
    for (Map.Entry<URI, Statistics> pair : STATISTICS_TABLE.entrySet()) {
      URI key = pair.getKey();
      Statistics value = pair.getValue();
      Statistics newStatsObj = new Statistics(value);
      statsMap.put(URI.create(key.toString()), newStatsObj);
    }
    return statsMap;
  }

  /**
   * The main factory method for creating a file system. Get a file system for
   * the URI's scheme and authority. The scheme of the <code>uri</code>
   * determines a configuration property name,
   * <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names the
   * AbstractFileSystem class.
   * 
   * The entire URI and conf are passed to the AbstractFileSystem factory
   * method.
   * 
   * @param uri for the file system to be created.
   * @param conf which is passed to the file system impl.
   * 
   * @return file system for the given URI.
   * 
   * @throws UnsupportedFileSystemException if the file system for
   *           <code>uri</code> is not supported.
   */
  public static AbstractFileSystem get(final URI uri, final Configuration conf)
      throws UnsupportedFileSystemException {
    return createFileSystem(uri, conf);
  }
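
  /*
   * Illustrative sketch (not part of the API): how the factory resolves an
   * implementation class from the configuration. The scheme "myfs" and the
   * class MyAbstractFs below are hypothetical placeholders, not real Hadoop
   * classes.
   *
   *   Configuration conf = new Configuration();
   *   // maps the "myfs" scheme to an AbstractFileSystem implementation
   *   conf.setClass("fs.AbstractFileSystem.myfs.impl",
   *       MyAbstractFs.class, AbstractFileSystem.class);
   *   AbstractFileSystem fs =
   *       AbstractFileSystem.get(URI.create("myfs://host:9999/"), conf);
   */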

  /**
   * Constructor to be called by subclasses.
   * 
   * @param uri for this file system.
   * @param supportedScheme the scheme supported by the implementor
   * @param authorityNeeded if true then the URI must have an authority, if
   *          false then the URI may have a null authority.
   * @param defaultPort the default port of this file system's URI scheme, or
   *          -1 if the scheme has no default port
   *
   * @throws URISyntaxException <code>uri</code> has syntax error
   */
  public AbstractFileSystem(final URI uri, final String supportedScheme,
      final boolean authorityNeeded, final int defaultPort)
      throws URISyntaxException {
    myUri = getUri(uri, supportedScheme, authorityNeeded, defaultPort);
    statistics = getStatistics(uri);
  }
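
  /*
   * Illustrative sketch (not part of the API): the shape of a subclass
   * constructor. createFileSystem()/newInstance() reflectively look up a
   * (URI, Configuration) constructor, which typically delegates to this
   * constructor. "MyAbstractFs" and the scheme "myfs" are hypothetical
   * placeholders.
   *
   *   public class MyAbstractFs extends AbstractFileSystem {
   *     MyAbstractFs(URI uri, Configuration conf) throws URISyntaxException {
   *       // scheme "myfs", authority required, default port 9999
   *       super(uri, "myfs", true, 9999);
   *     }
   *     // ... the abstract methods (createInternal, mkdir, delete, open,
   *     // listStatus, getFileStatus, etc.) must also be implemented ...
   *   }
   */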

  /**
   * Check that the URI's scheme matches the scheme supported by this file
   * system.
   *
   * @param uri the URI to check
   * @param supportedScheme the scheme supported by this file system
   */
  public void checkScheme(URI uri, String supportedScheme) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
    }
    if (!scheme.equals(supportedScheme)) {
      throw new HadoopIllegalArgumentException("Uri scheme " + uri
          + " does not match the scheme " + supportedScheme);
    }
  }

  /**
   * Get the URI for the file system based on the given URI. The path and query
   * parts of the given URI are stripped out, and the default file system port
   * is used to form the URI.
   * 
   * @param uri FileSystem URI.
   * @param supportedScheme the scheme supported by this file system.
   * @param authorityNeeded if true the authority cannot be null in the URI. If
   *          false the authority may be null.
   * @param defaultPort default port to use if port is not specified in the URI.
   * 
   * @return URI of the file system
   * 
   * @throws URISyntaxException <code>uri</code> has syntax error
   */
  private URI getUri(URI uri, String supportedScheme,
      boolean authorityNeeded, int defaultPort) throws URISyntaxException {
    checkScheme(uri, supportedScheme);
    // A file system implementation that requires authority must always
    // specify default port
    if (defaultPort < 0 && authorityNeeded) {
      throw new HadoopIllegalArgumentException(
          "FileSystem implementation error - default port " + defaultPort
              + " is not valid");
    }
    String authority = uri.getAuthority();
    if (authority == null) {
      if (authorityNeeded) {
        throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
      } else {
        return new URI(supportedScheme + ":///");
      }
    }
    // authority is non-null - authorityNeeded may be true or false.
    int port = uri.getPort();
    port = (port == -1 ? defaultPort : port);
    if (port == -1) { // no port supplied and default port is not specified
      return new URI(supportedScheme, authority, "/", null);
    }
    return new URI(supportedScheme + "://" + uri.getHost() + ":" + port);
  }
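
  /*
   * Illustrative note (not part of the API), with URIs written as plain
   * strings for brevity and a hypothetical scheme "myfs" whose default port
   * is 9999:
   *
   *   getUri("myfs://host/a/b?x=1", "myfs", true, 9999)  -> myfs://host:9999
   *   getUri("myfs://host:1234/a",  "myfs", true, 9999)  -> myfs://host:1234
   *   getUri("myfs:///tmp",         "myfs", false, -1)   -> myfs:///
   */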

  /**
   * The default port of this file system.
   * 
   * @return the default port of this file system's URI scheme; a URI with a
   *         port of -1 maps to the default port.
   */
  public abstract int getUriDefaultPort();

  /**
   * Returns a URI whose scheme and authority identify this FileSystem.
   * 
   * @return the uri of this file system.
   */
  public URI getUri() {
    return myUri;
  }
  
  /**
   * Check that a Path belongs to this FileSystem.
   * 
   * If the path is a fully qualified URI, then its scheme and authority must
   * match those of this file system. Otherwise the path must be a
   * slash-relative name.
   * 
   * @throws InvalidPathException if the path is invalid
   */
  public void checkPath(Path path) {
    URI uri = path.toUri();
    String thatScheme = uri.getScheme();
    String thatAuthority = uri.getAuthority();
    if (thatScheme == null) {
      if (thatAuthority == null) {
        if (path.isUriPathAbsolute()) {
          return;
        }
        throw new InvalidPathException("relative paths not allowed:" + 
            path);
      } else {
        throw new InvalidPathException(
            "Path without scheme with non-null authority:" + path);
      }
    }
    String thisScheme = this.getUri().getScheme();
    String thisHost = this.getUri().getHost();
    String thatHost = uri.getHost();
    
    // Schemes and hosts must match.
    // Allow for null Authority for file:///
    if (!thisScheme.equalsIgnoreCase(thatScheme) ||
       (thisHost != null && 
            !thisHost.equalsIgnoreCase(thatHost)) ||
       (thisHost == null && thatHost != null)) {
      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
          + this.getUri());
    }
    
    // Ports must match, unless this FS instance is using the default port, in
    // which case the port may be omitted from the given URI
    int thisPort = this.getUri().getPort();
    int thatPort = uri.getPort();
    if (thatPort == -1) { // -1 => defaultPort of Uri scheme
      thatPort = this.getUriDefaultPort();
    }
    if (thisPort != thatPort) {
      throw new InvalidPathException("Wrong FS: " + path + ", expected: "
          + this.getUri());
    }
  }
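
  /*
   * Illustrative note (not part of the API), assuming a file system instance
   * whose getUri() is the hypothetical "myfs://host:9999":
   *
   *   checkPath(new Path("/user/alice"));                  // ok, slash-relative
   *   checkPath(new Path("myfs://host:9999/user/alice"));  // ok, same scheme/authority
   *   checkPath(new Path("myfs://other:9999/user/alice")); // InvalidPathException
   *   checkPath(new Path("user/alice"));                   // InvalidPathException, relative
   */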

  /**
   * Get the path-part of a pathname. Checks that URI matches this file system
   * and that the path-part is a valid name.
   * 
   * @param p path
   * 
   * @return path-part of the Path p
   */
  public String getUriPath(final Path p) {
    checkPath(p);
    String s = p.toUri().getPath();
    if (!isValidName(s)) {
      throw new InvalidPathException("Path part " + s + " from URI " + p
          + " is not a valid filename.");
    }
    return s;
  }
  
  /**
   * Make the path fully qualified to this file system.
   *
   * @param path the path to qualify
   * @return the qualified path
   */
  public Path makeQualified(Path path) {
    checkPath(path);
    return path.makeQualified(this.getUri(), null);
  }
  
  /**
   * Some file systems like LocalFileSystem have an initial workingDir
   * that is used as the starting workingDir. For other file systems
   * like HDFS there is no built-in notion of an initial workingDir.
   * 
   * @return the initial workingDir if the file system has such a notion,
   *         otherwise return null.
   */
  public Path getInitialWorkingDirectory() {
    return null;
  }
  
  /** 
   * Return the current user's home directory in this file system.
   * The default implementation returns "/user/$USER/".
   * 
   * @return current user's home directory.
   */
  public Path getHomeDirectory() {
    return new Path("/user/"+System.getProperty("user.name")).makeQualified(
                                                                getUri(), null);
  }
  
  /**
   * Return a set of server default configuration values.
   * 
   * @return server default configuration values
   * 
   * @throws IOException an I/O error occurred
   */
  public abstract FsServerDefaults getServerDefaults() throws IOException; 

  /**
   * Return the fully-qualified path of path f, resolving the path through any
   * internal symlinks or mount points.
   *
   * @param p path to be resolved
   * @return fully qualified path
   * @throws FileNotFoundException if the path does not exist
   * @throws AccessControlException if access is denied
   * @throws UnresolvedLinkException if a symbolic link on the path cannot be
   *           resolved internally
   * @throws IOException if an I/O error occurred
   */
  public Path resolvePath(final Path p) throws FileNotFoundException,
          UnresolvedLinkException, AccessControlException, IOException {
    checkPath(p);
    return getFileStatus(p).getPath(); // default impl is to return the path
  }
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
   * that the Path f must be fully qualified and the permission is absolute
   * (i.e. umask has been applied).
   */
  public final FSDataOutputStream create(final Path f,
      final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, UnresolvedLinkException, IOException {
    checkPath(f);
    int bufferSize = -1;
    short replication = -1;
    long blockSize = -1;
    int bytesPerChecksum = -1;
    ChecksumOpt checksumOpt = null;
    FsPermission permission = null;
    Progressable progress = null;
    Boolean createParent = null;

    for (CreateOpts iOpt : opts) {
      if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
        if (blockSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BlockSize option is set multiple times");
        }
        blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
      } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
        if (bufferSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BufferSize option is set multiple times");
        }
        bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
      } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
        if (replication != -1) {
          throw new HadoopIllegalArgumentException(
              "ReplicationFactor option is set multiple times");
        }
        replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
      } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
        if (bytesPerChecksum != -1) {
          throw new HadoopIllegalArgumentException(
              "BytesPerChecksum option is set multiple times");
        }
        bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
      } else if (CreateOpts.ChecksumParam.class.isInstance(iOpt)) {
        if (checksumOpt != null) {
          throw new HadoopIllegalArgumentException(
              "CreateChecksumType option is set multiple times");
        }
        checksumOpt = ((CreateOpts.ChecksumParam) iOpt).getValue();
      } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
        if (permission != null) {
          throw new HadoopIllegalArgumentException(
              "Perms option is set multiple times");
        }
        permission = ((CreateOpts.Perms) iOpt).getValue();
      } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
        if (progress != null) {
          throw new HadoopIllegalArgumentException(
              "Progress option is set multiple times");
        }
        progress = ((CreateOpts.Progress) iOpt).getValue();
      } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
        if (createParent != null) {
          throw new HadoopIllegalArgumentException(
              "CreateParent option is set multiple times");
        }
        createParent = ((CreateOpts.CreateParent) iOpt).getValue();
      } else {
        throw new HadoopIllegalArgumentException("Unknown CreateOpts of type " +
            iOpt.getClass().getName());
      }
    }
    if (permission == null) {
      throw new HadoopIllegalArgumentException("no permission supplied");
    }

    FsServerDefaults ssDef = getServerDefaults();
    if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
      throw new IOException("Internal error: default blockSize is" + 
          " not a multiple of default bytesPerChecksum ");
    }
    
    if (blockSize == -1) {
      blockSize = ssDef.getBlockSize();
    }

    // Create a checksum option honoring user input as much as possible.
    // If bytesPerChecksum is specified, it will override the one set in
    // checksumOpt. Any missing value will be filled in using the default.
    ChecksumOpt defaultOpt = new ChecksumOpt(
        ssDef.getChecksumType(),
        ssDef.getBytesPerChecksum());
    checksumOpt = ChecksumOpt.processChecksumOpt(defaultOpt,
        checksumOpt, bytesPerChecksum);

    if (bufferSize == -1) {
      bufferSize = ssDef.getFileBufferSize();
    }
    if (replication == -1) {
      replication = ssDef.getReplication();
    }
    if (createParent == null) {
      createParent = false;
    }

    if (blockSize % bytesPerChecksum != 0) {
      throw new HadoopIllegalArgumentException(
             "blockSize should be a multiple of checksum size");
    }

    return this.createInternal(f, createFlag, permission, bufferSize,
      replication, blockSize, progress, checksumOpt, createParent);
  }
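
  /*
   * Illustrative sketch (not part of the API): creating a file through this
   * class. Real applications would normally go through FileContext instead.
   * The instance "fs" and the path below are hypothetical, and the supplied
   * permission must already be absolute (umask applied).
   *
   *   Path file = fs.makeQualified(new Path("/tmp/example.txt"));
   *   FSDataOutputStream out = fs.create(file,
   *       EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
   *       Options.CreateOpts.perms(FsPermission.getFileDefault()),
   *       Options.CreateOpts.createParent());
   *   try {
   *     out.writeBytes("hello");
   *   } finally {
   *     out.close();
   *   }
   */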

  /**
   * The specification of this method matches that of
   * {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
   * have been declared explicitly.
   */
  public abstract FSDataOutputStream createInternal(Path f,
      EnumSet<CreateFlag> flag, FsPermission absolutePermission,
      int bufferSize, short replication, long blockSize, Progressable progress,
      ChecksumOpt checksumOpt, boolean createParent)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnsupportedFileSystemException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
   * f must be fully qualified and the permission is absolute (i.e. 
   * umask has been applied).
   */
  public abstract void mkdir(final Path dir, final FsPermission permission,
      final boolean createParent) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#delete(Path, boolean)} except that Path f must be for
   * this file system.
   */
  public abstract boolean delete(final Path f, final boolean recursive)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#open(Path)} except that Path f must be for this
   * file system.
   */
  public FSDataInputStream open(final Path f) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    return open(f, getServerDefaults().getFileBufferSize());
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#open(Path, int)} except that Path f must be for this
   * file system.
   */
  public abstract FSDataInputStream open(final Path f, int bufferSize)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setReplication(Path, short)} except that Path f must be
   * for this file system.
   */
  public abstract boolean setReplication(final Path f,
      final short replication) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system.
   */
  public final void rename(final Path src, final Path dst,
      final Options.Rename... options) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, UnresolvedLinkException, IOException {
    boolean overwrite = false;
    if (null != options) {
      for (Rename option : options) {
        if (option == Rename.OVERWRITE) {
          overwrite = true;
        }
      }
    }
    renameInternal(src, dst, overwrite);
  }
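
  /*
   * Illustrative note (not part of the API): rename with and without
   * overwrite, for a hypothetical instance "fs" and fully qualified paths
   * "src" and "dst":
   *
   *   fs.rename(src, dst);                            // fails if dst exists
   *   fs.rename(src, dst, Options.Rename.OVERWRITE);  // replaces an existing
   *                                                   // file or empty directory
   */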

  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system and NO OVERWRITE is performed.
   * 
   * File systems that do not have a built-in overwrite need to implement only
   * this method and can take advantage of the default implementation of the
   * other {@link #renameInternal(Path, Path, boolean)}.
   */
  public abstract void renameInternal(final Path src, final Path dst)
      throws AccessControlException, FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      UnresolvedLinkException, IOException;
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
   * f must be for this file system.
   */
  public void renameInternal(final Path src, final Path dst,
      boolean overwrite) throws AccessControlException,
      FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, UnresolvedLinkException, IOException {
    // Default implementation deals with overwrite in a non-atomic way
    final FileStatus srcStatus = getFileLinkStatus(src);

    FileStatus dstStatus;
    try {
      dstStatus = getFileLinkStatus(dst);
    } catch (IOException e) {
      dstStatus = null;
    }
    if (dstStatus != null) {
      if (dst.equals(src)) {
        throw new FileAlreadyExistsException(
            "The source "+src+" and destination "+dst+" are the same");
      }
      if (srcStatus.isSymlink() && dst.equals(srcStatus.getSymlink())) {
        throw new FileAlreadyExistsException(
            "Cannot rename symlink "+src+" to its target "+dst);
      }
      // It's OK to rename a file to a symlink and vice versa
      if (srcStatus.isDirectory() != dstStatus.isDirectory()) {
        throw new IOException("Source " + src + " and destination " + dst
            + " must both be directories");
      }
      if (!overwrite) {
        throw new FileAlreadyExistsException("Rename destination " + dst
            + " already exists.");
      }
      // Delete the destination that is a file or an empty directory
      if (dstStatus.isDirectory()) {
        RemoteIterator<FileStatus> list = listStatusIterator(dst);
        if (list != null && list.hasNext()) {
          throw new IOException(
              "Rename cannot overwrite non empty destination directory " + dst);
        }
      }
      delete(dst, false);
    } else {
      final Path parent = dst.getParent();
      final FileStatus parentStatus = getFileStatus(parent);
      if (parentStatus.isFile()) {
        throw new ParentNotDirectoryException("Rename destination parent "
            + parent + " is a file.");
      }
    }
    renameInternal(src, dst);
  }
  
  /**
   * Returns true if the file system supports symlinks, false otherwise.
   * 
   * @return true if the file system supports symlinks
   */
  public boolean supportsSymlinks() {
    return false;
  }
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#createSymlink(Path, Path, boolean)}.
   */
  public void createSymlink(final Path target, final Path link,
      final boolean createParent) throws IOException, UnresolvedLinkException {
    throw new IOException("File system does not support symlinks");    
  }

  /**
   * Partially resolves the path. This is used during symlink resolution in
   * {@link FSLinkResolver}, and differs from the similarly named method
   * {@link FileContext#getLinkTarget(Path)}.
   * 
   * @throws IOException subclass implementations may throw IOException 
   */
  public Path getLinkTarget(final Path f) throws IOException {
    throw new AssertionError("Implementation Error: " + getClass()
        + " that threw an UnresolvedLinkException, causing this method to be"
        + " called, needs to override this method.");
  }
    
  /**
   * The specification of this method matches that of
   * {@link FileContext#setPermission(Path, FsPermission)} except that Path f
   * must be for this file system.
   */
  public abstract void setPermission(final Path f,
      final FsPermission permission) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setOwner(Path, String, String)} except that Path f must
   * be for this file system.
   */
  public abstract void setOwner(final Path f, final String username,
      final String groupname) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#setTimes(Path, long, long)} except that Path f must be
   * for this file system.
   */
  public abstract void setTimes(final Path f, final long mtime,
    final long atime) throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileChecksum(Path)} except that Path f must be for
   * this file system.
   */
  public abstract FileChecksum getFileChecksum(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileStatus(Path)} 
   * except that an UnresolvedLinkException may be thrown if a symlink is 
   * encountered in the path.
   */
  public abstract FileStatus getFileStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#access(Path, FsAction)}
   * except that an UnresolvedLinkException may be thrown if a symlink is
   * encountered in the path.
   */
  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
  public void access(Path path, FsAction mode) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileLinkStatus(Path)}
   * except that an UnresolvedLinkException may be thrown if a symlink is  
   * encountered in the path leading up to the final path component.
   * If the file system does not support symlinks then the behavior is
   * equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.
   */
  public FileStatus getFileLinkStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnsupportedFileSystemException, IOException {
    return getFileStatus(f);
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFileBlockLocations(Path, long, long)} except that
   * Path f must be for this file system.
   */
  public abstract BlockLocation[] getFileBlockLocations(final Path f,
      final long start, final long len) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#getFsStatus(Path)} except that Path f must be for this
   * file system.
   */
  public FsStatus getFsStatus(final Path f) throws AccessControlException,
      FileNotFoundException, UnresolvedLinkException, IOException {
    // default impl gets FsStatus of root
    return getFsStatus();
  }
  
  /**
   * The specification of this method matches that of
   * {@link FileContext#getFsStatus(Path)}.
   */
  public abstract FsStatus getFsStatus() throws AccessControlException,
      FileNotFoundException, IOException;

  /**
   * The specification of this method matches that of
   * {@link FileContext#listStatus(Path)} except that Path f must be for this
   * file system.
   */
  public RemoteIterator<FileStatus> listStatusIterator(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    return new RemoteIterator<FileStatus>() {
      private int i = 0;
      private FileStatus[] statusList = listStatus(f);
      
      @Override
      public boolean hasNext() {
        return i < statusList.length;
      }
      
      @Override
      public FileStatus next() {
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        return statusList[i++];
      }
    };
  }
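
  /*
   * Illustrative sketch (not part of the API): consuming the RemoteIterator
   * returned by listStatusIterator(), for a hypothetical instance "fs" and a
   * fully qualified directory path "dir":
   *
   *   RemoteIterator<FileStatus> it = fs.listStatusIterator(dir);
   *   while (it.hasNext()) {
   *     FileStatus status = it.next();
   *     System.out.println(status.getPath() + " " + status.getLen());
   *   }
   */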

  /**
   * The specification of this method matches that of
   * {@link FileContext#listLocatedStatus(Path)} except that Path f 
   * must be for this file system.
   */
  public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    return new RemoteIterator<LocatedFileStatus>() {
      private RemoteIterator<FileStatus> itor = listStatusIterator(f);
      
      @Override
      public boolean hasNext() throws IOException {
        return itor.hasNext();
      }
      
      @Override
      public LocatedFileStatus next() throws IOException {
        if (!hasNext()) {
          throw new NoSuchElementException("No more entries in " + f);
        }
        FileStatus result = itor.next();
        BlockLocation[] locs = null;
        if (result.isFile()) {
          locs = getFileBlockLocations(
              result.getPath(), 0, result.getLen());
        }
        return new LocatedFileStatus(result, locs);
      }
    };
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext.Util#listStatus(Path)} except that Path f must be 
   * for this file system.
   */
  public abstract FileStatus[] listStatus(final Path f)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException;

  /**
   * List the corrupt file blocks under the given path.
   *
   * @return an iterator over the corrupt files under the given path
   *         (may contain duplicates if a file has more than one corrupt block)
   * @throws IOException if an I/O error occurs
   */
  public RemoteIterator<Path> listCorruptFileBlocks(Path path)
    throws IOException {
    throw new UnsupportedOperationException(getClass().getCanonicalName() +
                                            " does not support" +
                                            " listCorruptFileBlocks");
  }

  /**
   * The specification of this method matches that of
   * {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
   * must be for this file system.
   */
  public abstract void setVerifyChecksum(final boolean verifyChecksum)
      throws AccessControlException, IOException;
  
  /**
   * Get a canonical name for this file system.
   * 
   * @return a URI string that uniquely identifies this file system
   */
  public String getCanonicalServiceName() {
    return SecurityUtil.buildDTServiceName(getUri(), getUriDefaultPort());
  }
  
  /**
   * Get one or more delegation tokens associated with the file system.
   * Normally a file system returns a single delegation token. A file system
   * that manages multiple file systems underneath could return a set of
   * delegation tokens for all the file systems it manages.
   * 
   * @param renewer the account name that is allowed to renew the token.
   * @return List of delegation tokens. If delegation tokens are not supported,
   *         a list of size zero is returned.
   * @throws IOException if an I/O error occurs
   */
  @InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
  public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
    return new ArrayList<Token<?>>(0);
  }

  /**
   * Modifies ACL entries of files and directories.  This method can add new ACL
   * entries or modify the permissions on existing ACL entries.  All existing
   * ACL entries that are not specified in this call are retained without
   * changes.  (Modifications are merged into the current ACL.)
   *
   * @param path Path to modify
   * @param aclSpec List<AclEntry> describing modifications
   * @throws IOException if an ACL could not be modified
   */
  public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support modifyAclEntries");
  }

  /**
   * Removes ACL entries from files and directories.  Other ACL entries are
   * retained.
   *
   * @param path Path to modify
   * @param aclSpec List<AclEntry> describing entries to remove
   * @throws IOException if an ACL could not be modified
   */
  public void removeAclEntries(Path path, List<AclEntry> aclSpec)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeAclEntries");
  }

  /**
   * Removes all default ACL entries from files and directories.
   *
   * @param path Path to modify
   * @throws IOException if an ACL could not be modified
   */
  public void removeDefaultAcl(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeDefaultAcl");
  }

  /**
   * Removes all but the base ACL entries of files and directories.  The entries
   * for user, group, and others are retained for compatibility with permission
   * bits.
   *
   * @param path Path to modify
   * @throws IOException if an ACL could not be removed
   */
  public void removeAcl(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeAcl");
  }

  /**
   * Fully replaces ACL of files and directories, discarding all existing
   * entries.
   *
   * @param path Path to modify
   * @param aclSpec List<AclEntry> describing modifications, must include entries
   *   for user, group, and others for compatibility with permission bits.
   * @throws IOException if an ACL could not be modified
   */
  public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setAcl");
  }

  /**
   * Gets the ACLs of files and directories.
   *
   * @param path Path to get
   * @return AclStatus describing the ACL of the file or directory
   * @throws IOException if an ACL could not be read
   */
  public AclStatus getAclStatus(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getAclStatus");
  }

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @throws IOException if an I/O error occurs
   */
  public void setXAttr(Path path, String name, byte[] value)
      throws IOException {
    setXAttr(path, name, value, EnumSet.of(XAttrSetFlag.CREATE,
        XAttrSetFlag.REPLACE));
  }
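
  /*
   * Illustrative sketch (not part of the API): xattr names carry a namespace
   * prefix such as "user.". The instance "fs", the path, and the attribute
   * name below are hypothetical:
   *
   *   fs.setXAttr(path, "user.owner-team", "data-eng".getBytes("UTF-8"));
   *   byte[] value = fs.getXAttr(path, "user.owner-team");
   *   fs.removeXAttr(path, "user.owner-team");
   */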

  /**
   * Set an xattr of a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to modify
   * @param name xattr name.
   * @param value xattr value.
   * @param flag xattr set flag
   * @throws IOException if an I/O error occurs
   */
  public void setXAttr(Path path, String name, byte[] value,
      EnumSet<XAttrSetFlag> flag) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support setXAttr");
  }

  /**
   * Get an xattr for a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attribute
   * @param name xattr name.
   * @return byte[] xattr value.
   * @throws IOException if an I/O error occurs
   */
  public byte[] getXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttr");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException if an I/O error occurs
   */
  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Get all of the xattrs for a file or directory.
   * Only those xattrs for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @param names XAttr names.
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
   * @throws IOException if an I/O error occurs
   */
  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support getXAttrs");
  }

  /**
   * Get all of the xattr names for a file or directory.
   * Only the xattr names for which the logged-in user has permissions to view
   * are returned.
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @return List<String> of the XAttr names of the file or directory
   * @throws IOException if an I/O error occurs
   */
  public List<String> listXAttrs(Path path)
      throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support listXAttrs");
  }

  /**
   * Remove an xattr of a file or directory.
   * The name must be prefixed with the namespace followed by ".". For example,
   * "user.attr".
   * <p/>
   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to remove extended attribute
   * @param name xattr name
   * @throws IOException if an I/O error occurs
   */
  public void removeXAttr(Path path, String name) throws IOException {
    throw new UnsupportedOperationException(getClass().getSimpleName()
        + " doesn't support removeXAttr");
  }

  @Override //Object
  public int hashCode() {
    return myUri.hashCode();
  }
  
  @Override //Object
  public boolean equals(Object other) {
    if (other == null || !(other instanceof AbstractFileSystem)) {
      return false;
    }
    return myUri.equals(((AbstractFileSystem) other).myUri);
  }
}