抓取网页新闻(Android客户端如何实现从各大网抓取新闻并解析网页的方法 )

优采云 发布时间: 2021-11-29 00:01

  抓取网页新闻(Android客户端如何实现从各大网抓取新闻并解析网页的方法)

  如何从主要网站捕获新闻并将其格式化到我们的新闻客户端?

  Android 客户端抓取和解析网页的方法有两种:

  一、使用jsoup

  没仔细研究,网上也有类似的,可以参考这两兄弟:

  二、使用htmlparser

  我的项目中使用了htmlparser,抓取并解析腾讯新闻,代码如下:

Java代码 收藏代码

// Static scraping utility: fetches Tencent news channel pages with htmlparser
// and extracts headlines, links, summaries and article details.
public class NetUtil {

// Shared cache of parsed news items.
// NOTE(review): raw List — presumably holds NewsBrief instances; confirm against callers.
public static List DATALIST = new ArrayList();

// Channel table. Each row: {channel index page URL, site base URL used to
// absolutize the relative article links found on that page}.
// NOTE(review): rows 3..10 all repeat china_index.shtml — looks like placeholder
// entries for channels not wired up yet; confirm the intended per-channel URLs.
public static String[][] CHANNEL_URL = new String[][] {

new String[]{"http://news.qq.com/world_index.shtml","http://news.qq.com"},

new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},

new String[]{"http://news.qq.com/society_index.shtml","http://news.qq.com"},

new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},

new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},

new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},

new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},

new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},

new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},

new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},

new String[]{"http://news.qq.com/china_index.shtml","http://news.qq.com"},

};

/**
 * Fetches the channel index page selected by {@code cId} and appends one
 * NewsBrief (title, absolute link, summary) per list item to {@code techData}.
 *
 * @param techData output list; parsed items are appended (raw List kept for
 *                 interface compatibility with existing callers)
 * @param cId      row index into CHANNEL_URL selecting the channel
 * @return 0 on success, 1 if the page could not be fetched or parsed
 */
public static int getTechNews(List techData, int cId) {
    int result = 0;
    try {
        // Match the <div id="listZone"> container holding the news list.
        NodeFilter filter = new AndFilter(new TagNameFilter("div"),
                new HasAttributeFilter("id", "listZone"));
        Parser parser = new Parser();
        parser.setURL(CHANNEL_URL[cId][0]);
        // Re-applying the detected encoding makes htmlparser re-read the page
        // with the charset it discovered, instead of the default.
        parser.setEncoding(parser.getEncoding());
        NodeList list = parser.extractAllNodesThatMatch(filter);
        for (int i = 0; i < list.size(); i++) {
            Tag node = (Tag) list.elementAt(i);
            for (int j = 0; j < node.getChildren().size(); j++) {
                try {
                    String textstr = node.getChildren().elementAt(j).toHtml();
                    if (textstr.trim().length() > 0) {
                        // Summary: concatenated text of the item's <p> tags.
                        NodeFilter subFilter = new TagNameFilter("p");
                        Parser subParser = new Parser();
                        subParser.setResource(textstr);
                        NodeList subList = subParser.extractAllNodesThatMatch(subFilter);
                        // Title and link: the <a class="linkto"> anchor.
                        NodeFilter titleStrFilter = new AndFilter(new TagNameFilter("a"),
                                new HasAttributeFilter("class", "linkto"));
                        Parser titleStrParser = new Parser();
                        titleStrParser.setResource(textstr);
                        NodeList titleStrList = titleStrParser.extractAllNodesThatMatch(titleStrFilter);
                        // FIX: hoist toHtml() — the original rebuilt the HTML
                        // string for every indexOf/substring call.
                        String anchorHtml = titleStrList.toHtml();
                        int linkstart = anchorHtml.indexOf("href=\"");
                        int linkend = anchorHtml.indexOf("\">", linkstart);
                        int titleend = anchorHtml.indexOf("</a>", linkend);
                        // FIX: skip items without a matching anchor instead of
                        // letting substring() throw on a -1 index.
                        if (linkstart < 0 || linkend < 0 || titleend < 0) {
                            continue;
                        }
                        // +6 skips past the literal: href="
                        String link = CHANNEL_URL[cId][1] + anchorHtml.substring(linkstart + 6, linkend);
                        String title = anchorHtml.substring(linkend + 2, titleend);
                        NewsBrief newsBrief = new NewsBrief();
                        newsBrief.setTitle(title);
                        newsBrief.setUrl(link);
                        newsBrief.setSummary(subList.asString());
                        techData.add(newsBrief);
                    }
                } catch (Exception e) {
                    // Best-effort per item: one malformed entry must not abort
                    // the rest of the page.
                    e.printStackTrace();
                }
            }
        }
    } catch (Exception e) {
        result = 1;
        e.printStackTrace();
    }
    return result;
}

public static int getTechNews2(List techData, int cId) {

int result = 0;

try {

// 查询http://tech.qq.com/tech_yejie.htm 页面 滚动新闻的 标签 以及ID

NodeFilter filter = new AndFilter(new TagNameFilter("div"),

new HasAttributeFilter("id", "listZone"));

Parser parser = new Parser();

parser.setURL(CHANNEL_URL[cId][0]);

parser.setEncoding(parser.getEncoding());

// 获取匹配的fileter的节点

NodeList list = parser.extractAllNodesThatMatch(filter);

StringBuilder NewsStr = new StringBuilder("");// 新闻表格字符串

for (int i = 0; i < list.size(); i++) {

Tag node = (Tag) list.elementAt(i);

for (int j = 0; j < node.getChildren().size(); j++) {

String textstr = node.getChildren().elementAt(j).toHtml()

.trim();

if (textstr.length() > 0) {

int linkbegin = 0, linkend = 0, titlebegin = 0, titleend = 0;

while (true) {

linkbegin = textstr.indexOf("href=", titleend);// 截取链接字符串起始位置

// 如果不存在 href了 也就结束了

if (linkbegin < 0)

break;

linkend = textstr.indexOf("\">", linkbegin);// 截取链接字符串结束位置

String sublink = textstr.substring(linkbegin + 6,linkend);

String link = CHANNEL_URL[cId][1] + sublink;

titlebegin = textstr.indexOf("\">", linkend);

titleend = textstr.indexOf("</a>", titlebegin);

String title = textstr.substring(titlebegin + 2,titleend);

NewsStr.append("\r\n\r\n\t<a target=\"_blank\" href=\""/span

+ link + span class="hljs-string""\">");

NewsStr.append(title);

NewsStr.append("</a>");

NewsBrief newsBrief = new NewsBrief();

newsBrief.setTitle(title);

newsBrief.setUrl(link);

techData.add(newsBrief);

}

}

}

}

} catch (Exception e) {

result = 1;

e.printStackTrace();

}

return result;

}

/**
 * Downloads one article page and fills the given NewsBrief with its body
 * text, publish date, source/author and image URLs.
 *
 * Note the parser.reset() between parses — a single Parser instance is
 * reused for all four extractions and must be rewound each time.
 *
 * @param url       absolute article URL
 * @param newsBrief item to populate; mutated in place
 * @return 0 on success, 1 on any fetch/parse failure
 */
public static int parserURL(String url,NewsBrief newsBrief) {

int result = 0;

try {

Parser parser = new Parser(url);

// Article body container: <div id="Cnt-Main-Article-QQ">.
NodeFilter contentFilter = new AndFilter(

new TagNameFilter("div"),

new HasAttributeFilter("id","Cnt-Main-Article-QQ"));

// Publish time: <span class="article-time">.
NodeFilter newsdateFilter = new AndFilter(

new TagNameFilter("span"),

new HasAttributeFilter("class",

"article-time"));

// Source/author: <span class="color-a-1">.
NodeFilter newsauthorFilter = new AndFilter(

new TagNameFilter("span"),

new HasAttributeFilter("class",

"color-a-1"));

// All images, for extracting the article's picture URLs.
NodeFilter imgUrlFilter = new TagNameFilter("IMG");

newsBrief.setContent(parserContent(contentFilter,parser));

parser.reset(); // Reset the parser after every use, otherwise the next parse() yields nothing.

newsBrief.setPubDate(parserDate(newsdateFilter,parser));

parser.reset();

newsBrief.setSource(parserAuthor(newsauthorFilter, parser));

parser.reset();

newsBrief.setImgUrl(parserImgUrl(contentFilter,imgUrlFilter, parser));

} catch (Exception e) {

result=1;

e.printStackTrace();

}

return result;

}

// Extracts the article body as plain text: parses the page with the given
// filter and strips all markup, keeping only the visible text. Returns ""
// on any parse failure.
private static String parserContent(NodeFilter filter, Parser parser) {
    String text = "";
    try {
        NodeList matches = (NodeList) parser.parse(filter);
        // asString() drops the tags inside the matched DIV, leaving the text.
        text = matches.asString();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return text;
}

// Extracts the publish date as plain text using the supplied filter.
// Returns "" on any parse failure.
private static String parserDate(NodeFilter filter, Parser parser) {
    String text = "";
    try {
        NodeList matches = (NodeList) parser.parse(filter);
        // Strip markup from the matched node, keeping only the text.
        text = matches.asString();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return text;
}

// Extracts the source/author as plain text using the supplied filter.
// Returns "" on any parse failure.
private static String parserAuthor(NodeFilter filter, Parser parser) {
    String text = "";
    try {
        NodeList matches = (NodeList) parser.parse(filter);
        // Strip markup from the matched node, keeping only the text.
        text = matches.asString();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return text;
}

private static List parserImgUrl(NodeFilter bodyfilter,NodeFilter filter, Parser parser) {

List reslut = new ArrayList();

try {

NodeList bodyList = (NodeList) parser.parse(bodyfilter);

Parser imgParser = new Parser();

imgParser.setResource(bodyList.toHtml());

NodeList imgList = imgParser.extractAllNodesThatMatch(filter);

String bodyString = imgList.toHtml();

//正文包含图片

if (bodyString.contains("

0 个评论

要回复文章请先登录注册


官方客服QQ群

微信人工客服

QQ人工客服


线