1. Running WordCount

Code

package org.apache.hadoop.examples;

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

// Classic WordCount written against the old org.apache.hadoop.mapred API.
public class WordCount {

    public static class Map extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);   // emit (word, 1) for every token
            }
        }
    }

    public static class Reduce extends MapReduceBase implements
            Reducer<Text, IntWritable, Text, IntWritable> {
        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();  // add up the counts for this word
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordcount");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));   // input directory
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));  // output directory

        JobClient.runJob(conf);
    }
}

First, compile the code:

javac -classpath ./share/hadoop/common/hadoop-common-2.7..jar:./share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7..jar -d WordCount ./WordCount/WordCount.java

Then package the compiled classes into a jar (run this from inside the WordCount output directory so the org/ package tree sits at the root of the jar):

jar -cvf wordcount.jar org/*

Then copy the jar into the Hadoop working directory.
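
To double-check the packaging before copying, list the jar's contents; WordCount.class and the nested Map and Reduce classes should appear under org/apache/hadoop/examples/:

jar -tf wordcount.jar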

Next, create an input directory under the Hadoop working directory (mkdir input) and create a file in it (vi file1) with the following content:

hello world
hello hadoop
hello mapreduce

Then upload the file to the Hadoop distributed file system:

./bin/hadoop fs -put input/file* input

Now run the job:

./bin/hadoop jar wordcount.jar org.apache.hadoop.examples.WordCount input wordcount_output

Finally, check the result:

./bin/hadoop fs -cat wordcount_output/part*
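
For the three sample input lines above, the job should emit each word with its count (TextOutputFormat writes the key and value separated by a tab):

hadoop	1
hello	3
mapreduce	1
world	1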

References:

http://cardyn.iteye.com/blog/1356361

https://blog.csdn.net/qichangleixin/article/details/43376587

2. Writing data to HDFS

Java code

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.*;
import java.net.URI;

/**
 * blog: http://www.iteblog.com/
 * Date: 14-1-2
 * Time: 6:09 PM
 */
public class AppendContent {
    public static void main(String[] args) {
        String hdfs_path = "input/file1";          // HDFS file to append to
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.support.append", true);

        String inpath = "./append.txt";            // local file whose content will be appended
        FileSystem fs = null;
        try {
            fs = FileSystem.get(URI.create(hdfs_path), conf);
            // Stream over the local file to append; inpath is the local file
            InputStream in = new BufferedInputStream(new FileInputStream(inpath));
            OutputStream out = fs.append(new Path(hdfs_path));
            IOUtils.copyBytes(in, out, 4096, true); // 4096-byte buffer; close both streams when done
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

Note the HDFS path used here: the hdfs://localhost:9000/input/ form never worked for me, and I could not figure out why.
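
One guess (an assumption on my part, not something verified in the original post) is that the Configuration never picks up the NameNode address, so only paths relative to the HDFS home directory resolve correctly. Setting fs.defaultFS explicitly, assuming the NameNode listens on localhost:9000, is one thing to try; the class name AppendWithDefaultFs below is made up for illustration:

import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch, not from the original post.
public class AppendWithDefaultFs {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000"); // assumed NameNode address
        conf.setBoolean("dfs.support.append", true);
        FileSystem fs = FileSystem.get(conf);              // resolves paths against fs.defaultFS
        OutputStream out = fs.append(new Path("input/file1"));
        out.write("appended line\n".getBytes());
        out.close();
    }
}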

Compile:

javac -classpath ./share/hadoop/common/hadoop-common-2.7..jar:./share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7..jar -d ./classes ./my_append/AppendContent.java

Package it into a jar:

jar -cvf ./my_jar/append.jar ./classes/*

Run it:

./bin/hadoop jar ./my_jar/append.jar AppendContent

AppendContent is the name of the class to run.

Check the result:

./bin/hdfs dfs -cat input/*

Alternatively, the code can take the HDFS path as a command-line argument, which makes it easier to run several appending processes at once:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;

import java.io.*;
import java.net.URI;

/**
 * blog: http://www.iteblog.com/
 * Date: 14-1-2
 * Time: 6:09 PM
 */
public class AppendContent {
    // Loop bounds; the original values were lost from the source, so these are placeholders.
    private static final int ROUNDS = 10;
    private static final int CHARS_PER_ROUND = 1024;

    public static void main(String[] args) {
        //String hdfs_path = "input/file1";   // file path
        String hdfs_path = args[0];           // HDFS file to append to, passed on the command line
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.support.append", true);
        //String inpath = "./append.txt";
        FileSystem fs = null;
        try {
            fs = FileSystem.get(URI.create(hdfs_path), conf);
            FSDataOutputStream out = fs.append(new Path(hdfs_path));
            String s = "";
            for (int i = 0; i < ROUNDS; i++) {
                for (int j = 0; j < CHARS_PER_ROUND; j++) {
                    s += 'a';                 // note: s keeps growing across rounds
                }
                int readLen = s.getBytes().length;
                out.write(s.getBytes(), 0, readLen);
            }
            out.close();
            //int readLen = "0123456789".getBytes().length;
            //while (-1 != readLen)
            //    out.write("0123456789".getBytes(), 0, readLen);
            // Earlier variant: append the content of a local file instead
            //InputStream in = new BufferedInputStream(new FileInputStream(inpath));
            //OutputStream out = fs.append(new Path(hdfs_path));
            //IOUtils.copyBytes(in, out, 4096, true);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

The compile and packaging commands are the same as above; run it with:

./bin/hadoop jar ./my_jar/append.jar AppendContent input/file1

References:

https://blog.csdn.net/jameshadoop/article/details/24179413

https://blog.csdn.net/wypblog/article/details/17914021

Script

#!/bin/bash

# Start time (nanoseconds)
begin=$(date +%s%N)

# Number of files to append to; the original loop bounds were lost, so this is a placeholder.
NUM_FILES=10

for ((i=0; i<NUM_FILES; i++))
do
    {
        ./bin/hadoop jar ./my_jar/append.jar AppendContent input/file${i}
    } &   # run the appends in parallel; wait below blocks until they all finish
done
wait

# End time (nanoseconds)
end=$(date +%s%N)
#spend=$(expr $end - $begin)
use_tm=`echo $end $begin | awk '{ print ($1 - $2) / 1000000000}'`
echo "Elapsed time: ${use_tm}s"

3. Testing Java writes on ext3

Program

import java.io.FileWriter;
import java.io.IOException;

public class Toext3 {
    // Loop bounds; the original values were lost from the source, so these are placeholders.
    private static final int ROUNDS = 10;
    private static final int CHARS_PER_ROUND = 1024;

    public static void main(String[] args) {
        //String hdfs_path = "input/file1";   // left over from the HDFS version
        String ext3_path = args[0];           // local (ext3) file to append to
        FileWriter writer = null;
        //String inpath = "./append.txt";
        try {
            String s = "";
            for (int i = 0; i < ROUNDS; i++) {
                s = "";
                for (int j = 0; j < CHARS_PER_ROUND; j++) {
                    s += 'b';
                }
                // Open in append mode, write one round of data, then close,
                // so that each round is a complete append to the file.
                writer = new FileWriter(ext3_path, true);
                writer.write(s);
                writer.close();
                System.out.println(ext3_path);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                if (writer != null) {
                    writer.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

Compile:

javac -classpath ./share/hadoop/common/hadoop-common-2.7..jar:./share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7..jar -d ./data/classes data/Toext3.java

Run:

java -cp ./data/classes/ Toext3 ./data/ext3/file0 

Here -cp specifies the directory containing the class files, and Toext3 is the class to run.
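
To get a number comparable to the HDFS timing script above, the ext3 test can also be timed from inside Java. This is only a sketch; the wrapper class TimedToext3 is my own addition, not part of the original post:

// Hypothetical wrapper that times Toext3; compile it next to Toext3.class.
public class TimedToext3 {
    public static void main(String[] args) throws Exception {
        long begin = System.nanoTime();
        Toext3.main(args);                                 // run the ext3 append test
        long end = System.nanoTime();
        System.out.println("Elapsed time: " + (end - begin) / 1_000_000_000.0 + "s");
    }
}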
