Have a look below.

Hadoop MapReduce is a software framework for easily writing applications that process vast amounts of data in parallel on large clusters of commodity hardware in a reliable, fault-tolerant manner. The Apache Hadoop software library is a framework that allows for the distributed processing of large data sets across clusters of computers using simple programming models.
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WordCount {

    public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        // Emit (word, 1) for every token in the input line
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            StringTokenizer itr = new StringTokenizer(value.toString());
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }

    public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        private IntWritable result = new IntWritable();

        // Sum all counts for a word and emit (word, total)
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "word count");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        job.setCombinerClass(IntSumReducer.class);
        job.setReducerClass(IntSumReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
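To try the job, you would typically compile the class, package it into a JAR, and submit it with the hadoop launcher. The JAR name and HDFS paths here are placeholders for illustration, not part of the original example:

hadoop jar wordcount.jar WordCount /user/hadoop/input /user/hadoop/output

The first argument is the input directory read by FileInputFormat, and the second is the output directory written by FileOutputFormat, which must not already exist when the job starts.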
import { useEffect, useState } from 'react';

// Custom hook: fetches JSON from the given URL and tracks
// data, loading, and error state for the caller.
function useFetchData(url) {
  const [data, setData] = useState(null);
  const [loading, setLoading] = useState(false);
  const [error, setError] = useState(null);

  useEffect(() => {
    const fetchData = async () => {
      setError(null);
      setLoading(true);
      try {
        const response = await fetch(url);
        const jsonData = await response.json();
        setData(jsonData);
      } catch (error) {
        setError(error);
      } finally {
        setLoading(false);
      }
    };
    fetchData();
  }, [url]);

  return { data, loading, error };
}

export default useFetchData;
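For illustration, a component might consume the hook like this; the UserList name, the /api/users endpoint, and the response shape (objects with id and name) are assumptions for this sketch, not part of the original snippet.

import React from 'react';
import useFetchData from './useFetchData';

function UserList() {
  // Placeholder endpoint; swap in a real API URL.
  const { data, loading, error } = useFetchData('/api/users');

  if (loading) return <p>Loading...</p>;
  if (error) return <p>Something went wrong: {error.message}</p>;

  return (
    <ul>
      {data && data.map((user) => (
        <li key={user.id}>{user.name}</li>
      ))}
    </ul>
  );
}

export default UserList;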
Question 20
What is wrong with using async/await in a useEffect hook, in reference to the code snippet below?

import { useEffect, useState } from 'react';

function TestComponent() {
  const [data, setData] = useState([]);

  useEffect(() => {
    const fetchData = async () => {
      const response = await fetch('/api/data');
      const json = await response.json();
      setData(json);
    };
    fetchData();
  }, []);

  return (
    <ul>
      {data.map((d) => (
        <li key={d.id}>{d.text}</li>
      ))}
    </ul>
  );
}
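For context on what this question usually probes, here is a minimal sketch, assuming the point is where async is placed. useEffect expects its callback to return either nothing or a cleanup function, so passing an async callback (which returns a Promise) triggers a React warning; the snippet above avoids that by defining fetchData inside the effect and calling it.

import { useEffect, useState } from 'react';

// Anti-pattern sketch: the effect callback itself is async, so it returns
// a Promise rather than a cleanup function, and React will warn about it.
function BrokenComponent() {
  const [data, setData] = useState([]);

  useEffect(async () => { // problematic: async effect callback
    const response = await fetch('/api/data');
    setData(await response.json());
  }, []);

  return <pre>{JSON.stringify(data)}</pre>;
}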
Reach out to my Quora profile, Harikesh Patel, for more such content, and follow.