Apache Mesos
hdfs.hpp
Go to the documentation of this file.
1 // Licensed to the Apache Software Foundation (ASF) under one
2 // or more contributor license agreements. See the NOTICE file
3 // distributed with this work for additional information
4 // regarding copyright ownership. The ASF licenses this file
5 // to you under the Apache License, Version 2.0 (the
6 // "License"); you may not use this file except in compliance
7 // with the License. You may obtain a copy of the License at
8 //
9 // http://www.apache.org/licenses/LICENSE-2.0
10 //
11 // Unless required by applicable law or agreed to in writing, software
12 // distributed under the License is distributed on an "AS IS" BASIS,
13 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 // See the License for the specific language governing permissions and
15 // limitations under the License.
16 
17 #ifndef __HDFS_HPP__
18 #define __HDFS_HPP__
19 
20 #include <string>
21 
22 #include <process/future.hpp>
23 #include <process/owned.hpp>
24 
25 #include <stout/bytes.hpp>
26 #include <stout/none.hpp>
27 #include <stout/nothing.hpp>
28 #include <stout/option.hpp>
29 #include <stout/try.hpp>
30 
31 #include <mesos/uri/uri.hpp>
32 
33 
34 // TODO(benh): We should get the hostname:port (or ip:port) of the
35 // server via:
36 //
37 // hadoop dfsadmin -report | grep Name: | awk '{ print $2 }'
38 //
39 // The advantage of doing this is then we can explicitly use the
40 // 'hdfs://hostname' prefix when we're trying to do copies to avoid
41 // silent failures when HDFS is down and the tools just copies
42 // locally.
43 //
44 // Note that if HDFS is not on port 9000 then we'll also need to do an
45 // HTTP GET on hostname:port and grab the information in the
46 // <title>...</title> (this is the best hack I can think of to get
47 // 'fs.default.name' given the tools available).
48 class HDFS
49 {
50 public:
52  const Option<std::string>& hadoop = None());
53 
54  // TODO(gilbert): Remove this helper function once we have URI Parser
55  // support (see MESOS-5254 for details). Ideally, we should support
56  // other schemes (e.g., hftp, s3, s3n etc) with hadoop plugin. It is
57  // hard coded for HDFS for now.
58  static Try<mesos::URI> parse(const std::string& uri);
59 
60  process::Future<bool> exists(const std::string& path);
61  process::Future<Bytes> du(const std::string& path);
62  process::Future<Nothing> rm(const std::string& path);
63 
65  const std::string& from,
66  const std::string& to);
67 
69  const std::string& from,
70  const std::string& to);
71 
72 private:
73  explicit HDFS(const std::string& _hadoop)
74  : hadoop(_hadoop) {}
75 
76  const std::string hadoop;
77 };
78 
79 #endif // __HDFS_HPP__
Definition: path.hpp:29
Definition: check.hpp:33
Definition: hdfs.hpp:48
process::Future< Nothing > copyFromLocal(const std::string &from, const std::string &to)
process::Future< Nothing > copyToLocal(const std::string &from, const std::string &to)
static Try< mesos::URI > parse(const std::string &uri)
process::Future< Nothing > rm(const std::string &path)
process::Future< Bytes > du(const std::string &path)
process::Future< bool > exists(const std::string &path)
Definition: none.hpp:27
static Try< process::Owned< HDFS > > create(const Option< std::string > &hadoop=None())
Definition: uri.hpp:21